1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. |
4 | * Copyright (c) 2014- QLogic Corporation. |
5 | * All rights reserved |
6 | * www.qlogic.com |
7 | * |
8 | * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter. |
9 | */ |
10 | |
11 | #include "bfad_drv.h" |
12 | #include "bfa_modules.h" |
13 | |
14 | BFA_TRC_FILE(HAL, FCPIM); |
15 | |
16 | /* |
17 | * BFA ITNIM Related definitions |
18 | */ |
19 | static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim); |
20 | |
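/*
 * BFA_ITNIM_FROM_TAG maps an itnim tag to its slot in itnim_arr; the mask
 * below presumes num_itnims is a power of two.
 */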
21 | #define BFA_ITNIM_FROM_TAG(_fcpim, _tag) \ |
22 | (((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1)))) |
23 | |
24 | #define bfa_fcpim_additn(__itnim) \ |
25 | list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q) |
26 | #define bfa_fcpim_delitn(__itnim) do { \ |
27 | WARN_ON(!bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim)); \ |
28 | bfa_itnim_update_del_itn_stats(__itnim); \ |
29 | list_del(&(__itnim)->qe); \ |
30 | WARN_ON(!list_empty(&(__itnim)->io_q)); \ |
31 | WARN_ON(!list_empty(&(__itnim)->io_cleanup_q)); \ |
32 | WARN_ON(!list_empty(&(__itnim)->pending_q)); \ |
33 | } while (0) |
34 | |
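/*
 * The callback macros below invoke the FCS callback directly when running
 * in FCS context (bfa->fcs is set); otherwise the callback is deferred
 * through the hcb completion queue.
 */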
35 | #define bfa_itnim_online_cb(__itnim) do { \ |
36 | if ((__itnim)->bfa->fcs) \ |
37 | bfa_cb_itnim_online((__itnim)->ditn); \ |
38 | else { \ |
39 | bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \ |
40 | __bfa_cb_itnim_online, (__itnim)); \ |
41 | } \ |
42 | } while (0) |
43 | |
44 | #define bfa_itnim_offline_cb(__itnim) do { \ |
45 | if ((__itnim)->bfa->fcs) \ |
46 | bfa_cb_itnim_offline((__itnim)->ditn); \ |
47 | else { \ |
48 | bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \ |
49 | __bfa_cb_itnim_offline, (__itnim)); \ |
50 | } \ |
51 | } while (0) |
52 | |
53 | #define bfa_itnim_sler_cb(__itnim) do { \ |
54 | if ((__itnim)->bfa->fcs) \ |
55 | bfa_cb_itnim_sler((__itnim)->ditn); \ |
56 | else { \ |
57 | bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \ |
58 | __bfa_cb_itnim_sler, (__itnim)); \ |
59 | } \ |
60 | } while (0) |
61 | |
62 | enum bfa_ioim_lm_ua_status { |
63 | BFA_IOIM_LM_UA_RESET = 0, |
64 | BFA_IOIM_LM_UA_SET = 1, |
65 | }; |
66 | |
67 | /* |
68 | * BFA IOIM related definitions |
69 | */ |
70 | #define bfa_ioim_move_to_comp_q(__ioim) do { \ |
71 | list_del(&(__ioim)->qe); \ |
72 | list_add_tail(&(__ioim)->qe, &(__ioim)->fcpim->ioim_comp_q); \ |
73 | } while (0) |
74 | |
75 | |
76 | #define bfa_ioim_cb_profile_comp(__fcpim, __ioim) do { \ |
77 | if ((__fcpim)->profile_comp) \ |
78 | (__fcpim)->profile_comp(__ioim); \ |
79 | } while (0) |
80 | |
81 | #define bfa_ioim_cb_profile_start(__fcpim, __ioim) do { \ |
82 | if ((__fcpim)->profile_start) \ |
83 | (__fcpim)->profile_start(__ioim); \ |
84 | } while (0) |
85 | |
86 | |
87 | /* |
88 | * BFA TSKIM related definitions |
89 | */ |
90 | |
91 | /* |
92 | * task management completion handling |
93 | */ |
94 | #define bfa_tskim_qcomp(__tskim, __cbfn) do { \ |
95 | bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim));\ |
96 | bfa_tskim_notify_comp(__tskim); \ |
97 | } while (0) |
98 | |
99 | #define bfa_tskim_notify_comp(__tskim) do { \ |
100 | if ((__tskim)->notify) \ |
101 | bfa_itnim_tskdone((__tskim)->itnim); \ |
102 | } while (0) |
103 | |
104 | |
105 | /* |
106 | * forward declaration for BFA ITNIM functions |
107 | */ |
108 | static void bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim); |
109 | static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim); |
110 | static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim); |
111 | static void bfa_itnim_cleanp_comp(void *itnim_cbarg); |
112 | static void bfa_itnim_cleanup(struct bfa_itnim_s *itnim); |
113 | static void __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete); |
114 | static void __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete); |
115 | static void __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete); |
116 | static void bfa_itnim_iotov_online(struct bfa_itnim_s *itnim); |
117 | static void bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim); |
118 | static void bfa_itnim_iotov(void *itnim_arg); |
119 | static void bfa_itnim_iotov_start(struct bfa_itnim_s *itnim); |
120 | static void bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim); |
121 | static void bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim); |
122 | |
123 | /* |
124 | * forward declaration of ITNIM state machine |
125 | */ |
126 | static void bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, |
127 | enum bfa_itnim_event event); |
128 | static void bfa_itnim_sm_created(struct bfa_itnim_s *itnim, |
129 | enum bfa_itnim_event event); |
130 | static void bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, |
131 | enum bfa_itnim_event event); |
132 | static void bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim, |
133 | enum bfa_itnim_event event); |
134 | static void bfa_itnim_sm_online(struct bfa_itnim_s *itnim, |
135 | enum bfa_itnim_event event); |
136 | static void bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, |
137 | enum bfa_itnim_event event); |
138 | static void bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim, |
139 | enum bfa_itnim_event event); |
140 | static void bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim, |
141 | enum bfa_itnim_event event); |
142 | static void bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, |
143 | enum bfa_itnim_event event); |
144 | static void bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, |
145 | enum bfa_itnim_event event); |
146 | static void bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim, |
147 | enum bfa_itnim_event event); |
148 | static void bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, |
149 | enum bfa_itnim_event event); |
150 | static void bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim, |
151 | enum bfa_itnim_event event); |
152 | static void bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim, |
153 | enum bfa_itnim_event event); |
154 | static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim, |
155 | enum bfa_itnim_event event); |
156 | |
157 | /* |
158 | * forward declaration for BFA IOIM functions |
159 | */ |
160 | static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim); |
161 | static bfa_boolean_t bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim); |
162 | static bfa_boolean_t bfa_ioim_send_abort(struct bfa_ioim_s *ioim); |
163 | static void bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim); |
164 | static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete); |
165 | static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete); |
166 | static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete); |
167 | static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete); |
168 | static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete); |
169 | static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim); |
170 | |
171 | /* |
172 | * forward declaration of BFA IO state machine |
173 | */ |
174 | static void bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, |
175 | enum bfa_ioim_event event); |
176 | static void bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, |
177 | enum bfa_ioim_event event); |
178 | static void bfa_ioim_sm_active(struct bfa_ioim_s *ioim, |
179 | enum bfa_ioim_event event); |
180 | static void bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, |
181 | enum bfa_ioim_event event); |
182 | static void bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, |
183 | enum bfa_ioim_event event); |
184 | static void bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, |
185 | enum bfa_ioim_event event); |
186 | static void bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, |
187 | enum bfa_ioim_event event); |
188 | static void bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, |
189 | enum bfa_ioim_event event); |
190 | static void bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, |
191 | enum bfa_ioim_event event); |
192 | static void bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, |
193 | enum bfa_ioim_event event); |
194 | static void bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, |
195 | enum bfa_ioim_event event); |
196 | static void bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, |
197 | enum bfa_ioim_event event); |
198 | /* |
199 | * forward declaration for BFA TSKIM functions |
200 | */ |
201 | static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete); |
202 | static void __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete); |
203 | static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim, |
204 | struct scsi_lun lun); |
205 | static void bfa_tskim_gather_ios(struct bfa_tskim_s *tskim); |
206 | static void bfa_tskim_cleanp_comp(void *tskim_cbarg); |
207 | static void bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim); |
208 | static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim); |
209 | static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim); |
210 | static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim); |
211 | |
212 | /* |
213 | * forward declaration of BFA TSKIM state machine |
214 | */ |
215 | static void bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, |
216 | enum bfa_tskim_event event); |
217 | static void bfa_tskim_sm_active(struct bfa_tskim_s *tskim, |
218 | enum bfa_tskim_event event); |
219 | static void bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, |
220 | enum bfa_tskim_event event); |
221 | static void bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, |
222 | enum bfa_tskim_event event); |
223 | static void bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, |
224 | enum bfa_tskim_event event); |
225 | static void bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim, |
226 | enum bfa_tskim_event event); |
227 | static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, |
228 | enum bfa_tskim_event event); |
229 | /* |
230 | * BFA FCP Initiator Mode module |
231 | */ |
232 | |
233 | /* |
234 | * Compute and return memory needed by FCP(im) module. |
235 | */ |
236 | static void |
237 | bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len) |
238 | { |
239 | bfa_itnim_meminfo(cfg, km_len); |
240 | |
241 | /* |
242 | * IO memory |
243 | */ |
244 | *km_len += cfg->fwcfg.num_ioim_reqs * |
245 | (sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s)); |
246 | |
247 | /* |
248 | * task management command memory |
249 | */ |
250 | if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN) |
251 | cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN; |
252 | *km_len += cfg->fwcfg.num_tskim_reqs * sizeof(struct bfa_tskim_s); |
253 | } |
254 | |
255 | |
256 | static void |
257 | bfa_fcpim_attach(struct bfa_fcp_mod_s *fcp, void *bfad, |
258 | struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev) |
259 | { |
260 | struct bfa_fcpim_s *fcpim = &fcp->fcpim; |
261 | struct bfa_s *bfa = fcp->bfa; |
262 | |
263 | bfa_trc(bfa, cfg->drvcfg.path_tov); |
264 | bfa_trc(bfa, cfg->fwcfg.num_rports); |
265 | bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs); |
266 | bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs); |
267 | |
268 | fcpim->fcp = fcp; |
269 | fcpim->bfa = bfa; |
270 | fcpim->num_itnims = cfg->fwcfg.num_rports; |
271 | fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs; |
272 | fcpim->path_tov = cfg->drvcfg.path_tov; |
273 | fcpim->delay_comp = cfg->drvcfg.delay_comp; |
274 | fcpim->profile_comp = NULL; |
275 | fcpim->profile_start = NULL; |
276 | |
277 | bfa_itnim_attach(fcpim); |
278 | bfa_tskim_attach(fcpim); |
279 | bfa_ioim_attach(fcpim); |
280 | } |
281 | |
282 | void |
283 | bfa_fcpim_iocdisable(struct bfa_fcp_mod_s *fcp) |
284 | { |
285 | struct bfa_fcpim_s *fcpim = &fcp->fcpim; |
286 | struct bfa_itnim_s *itnim; |
287 | struct list_head *qe, *qen; |
288 | |
	/* Enqueue unused tskim resources to free_q */
	list_splice_tail_init(&fcpim->tskim_unused_q, &fcpim->tskim_free_q);
291 | |
292 | list_for_each_safe(qe, qen, &fcpim->itnim_q) { |
293 | itnim = (struct bfa_itnim_s *) qe; |
294 | bfa_itnim_iocdisable(itnim); |
295 | } |
296 | } |
297 | |
298 | void |
299 | bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov) |
300 | { |
301 | struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); |
302 | |
303 | fcpim->path_tov = path_tov * 1000; |
304 | if (fcpim->path_tov > BFA_FCPIM_PATHTOV_MAX) |
305 | fcpim->path_tov = BFA_FCPIM_PATHTOV_MAX; |
306 | } |
307 | |
308 | u16 |
309 | bfa_fcpim_path_tov_get(struct bfa_s *bfa) |
310 | { |
311 | struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); |
312 | |
313 | return fcpim->path_tov / 1000; |
314 | } |
315 | |
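/* member-wise accumulation: expands to __l->__stats += __r->__stats */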
316 | #define bfa_fcpim_add_iostats(__l, __r, __stats) \ |
317 | (__l->__stats += __r->__stats) |
318 | |
319 | void |
320 | bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats, |
321 | struct bfa_itnim_iostats_s *rstats) |
322 | { |
323 | bfa_fcpim_add_iostats(lstats, rstats, total_ios); |
324 | bfa_fcpim_add_iostats(lstats, rstats, qresumes); |
325 | bfa_fcpim_add_iostats(lstats, rstats, no_iotags); |
326 | bfa_fcpim_add_iostats(lstats, rstats, io_aborts); |
327 | bfa_fcpim_add_iostats(lstats, rstats, no_tskims); |
328 | bfa_fcpim_add_iostats(lstats, rstats, iocomp_ok); |
329 | bfa_fcpim_add_iostats(lstats, rstats, iocomp_underrun); |
330 | bfa_fcpim_add_iostats(lstats, rstats, iocomp_overrun); |
331 | bfa_fcpim_add_iostats(lstats, rstats, iocomp_aborted); |
332 | bfa_fcpim_add_iostats(lstats, rstats, iocomp_timedout); |
333 | bfa_fcpim_add_iostats(lstats, rstats, iocom_nexus_abort); |
334 | bfa_fcpim_add_iostats(lstats, rstats, iocom_proto_err); |
335 | bfa_fcpim_add_iostats(lstats, rstats, iocom_dif_err); |
336 | bfa_fcpim_add_iostats(lstats, rstats, iocom_sqer_needed); |
337 | bfa_fcpim_add_iostats(lstats, rstats, iocom_res_free); |
338 | bfa_fcpim_add_iostats(lstats, rstats, iocom_hostabrts); |
339 | bfa_fcpim_add_iostats(lstats, rstats, iocom_utags); |
340 | bfa_fcpim_add_iostats(lstats, rstats, io_cleanups); |
341 | bfa_fcpim_add_iostats(lstats, rstats, io_tmaborts); |
342 | bfa_fcpim_add_iostats(lstats, rstats, onlines); |
343 | bfa_fcpim_add_iostats(lstats, rstats, offlines); |
344 | bfa_fcpim_add_iostats(lstats, rstats, creates); |
345 | bfa_fcpim_add_iostats(lstats, rstats, deletes); |
346 | bfa_fcpim_add_iostats(lstats, rstats, create_comps); |
347 | bfa_fcpim_add_iostats(lstats, rstats, delete_comps); |
348 | bfa_fcpim_add_iostats(lstats, rstats, sler_events); |
349 | bfa_fcpim_add_iostats(lstats, rstats, fw_create); |
350 | bfa_fcpim_add_iostats(lstats, rstats, fw_delete); |
351 | bfa_fcpim_add_iostats(lstats, rstats, ioc_disabled); |
352 | bfa_fcpim_add_iostats(lstats, rstats, cleanup_comps); |
353 | bfa_fcpim_add_iostats(lstats, rstats, tm_cmnds); |
354 | bfa_fcpim_add_iostats(lstats, rstats, tm_fw_rsps); |
355 | bfa_fcpim_add_iostats(lstats, rstats, tm_success); |
356 | bfa_fcpim_add_iostats(lstats, rstats, tm_failures); |
357 | bfa_fcpim_add_iostats(lstats, rstats, tm_io_comps); |
358 | bfa_fcpim_add_iostats(lstats, rstats, tm_qresumes); |
359 | bfa_fcpim_add_iostats(lstats, rstats, tm_iocdowns); |
360 | bfa_fcpim_add_iostats(lstats, rstats, tm_cleanups); |
361 | bfa_fcpim_add_iostats(lstats, rstats, tm_cleanup_comps); |
362 | bfa_fcpim_add_iostats(lstats, rstats, io_comps); |
363 | bfa_fcpim_add_iostats(lstats, rstats, input_reqs); |
364 | bfa_fcpim_add_iostats(lstats, rstats, output_reqs); |
365 | bfa_fcpim_add_iostats(lstats, rstats, rd_throughput); |
366 | bfa_fcpim_add_iostats(lstats, rstats, wr_throughput); |
367 | } |
368 | |
369 | bfa_status_t |
370 | bfa_fcpim_port_iostats(struct bfa_s *bfa, |
371 | struct bfa_itnim_iostats_s *stats, u8 lp_tag) |
372 | { |
373 | struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); |
374 | struct list_head *qe, *qen; |
375 | struct bfa_itnim_s *itnim; |
376 | |
377 | /* accumulate IO stats from itnim */ |
378 | memset(stats, 0, sizeof(struct bfa_itnim_iostats_s)); |
379 | list_for_each_safe(qe, qen, &fcpim->itnim_q) { |
380 | itnim = (struct bfa_itnim_s *) qe; |
381 | if (itnim->rport->rport_info.lp_tag != lp_tag) |
382 | continue; |
		bfa_fcpim_add_stats(stats, &(itnim->stats));
384 | } |
385 | return BFA_STATUS_OK; |
386 | } |
387 | |
388 | static void |
389 | bfa_ioim_profile_comp(struct bfa_ioim_s *ioim) |
390 | { |
391 | struct bfa_itnim_latency_s *io_lat = |
392 | &(ioim->itnim->ioprofile.io_latency); |
393 | u32 val, idx; |
394 | |
395 | val = (u32)(jiffies - ioim->start_time); |
	idx = bfa_ioim_get_index(scsi_bufflen((struct scsi_cmnd *)ioim->dio));
397 | bfa_itnim_ioprofile_update(ioim->itnim, idx); |
398 | |
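	/*
	 * io_lat->avg[] accumulates total latency in jiffies; consumers are
	 * expected to divide by count[] and apply clock_res_mul/clock_res_div
	 * (see bfa_itnim_get_ioprofile()) to derive an average.
	 */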
399 | io_lat->count[idx]++; |
400 | io_lat->min[idx] = (io_lat->min[idx] < val) ? io_lat->min[idx] : val; |
401 | io_lat->max[idx] = (io_lat->max[idx] > val) ? io_lat->max[idx] : val; |
402 | io_lat->avg[idx] += val; |
403 | } |
404 | |
405 | static void |
406 | bfa_ioim_profile_start(struct bfa_ioim_s *ioim) |
407 | { |
408 | ioim->start_time = jiffies; |
409 | } |
410 | |
411 | bfa_status_t |
412 | bfa_fcpim_profile_on(struct bfa_s *bfa, time64_t time) |
413 | { |
414 | struct bfa_itnim_s *itnim; |
415 | struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); |
416 | struct list_head *qe, *qen; |
417 | |
418 | /* accumulate IO stats from itnim */ |
419 | list_for_each_safe(qe, qen, &fcpim->itnim_q) { |
420 | itnim = (struct bfa_itnim_s *) qe; |
421 | bfa_itnim_clear_stats(itnim); |
422 | } |
423 | fcpim->io_profile = BFA_TRUE; |
424 | fcpim->io_profile_start_time = time; |
425 | fcpim->profile_comp = bfa_ioim_profile_comp; |
426 | fcpim->profile_start = bfa_ioim_profile_start; |
427 | return BFA_STATUS_OK; |
428 | } |
429 | |
430 | bfa_status_t |
431 | bfa_fcpim_profile_off(struct bfa_s *bfa) |
432 | { |
433 | struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); |
434 | fcpim->io_profile = BFA_FALSE; |
435 | fcpim->io_profile_start_time = 0; |
436 | fcpim->profile_comp = NULL; |
437 | fcpim->profile_start = NULL; |
438 | return BFA_STATUS_OK; |
439 | } |
440 | |
441 | u16 |
442 | bfa_fcpim_qdepth_get(struct bfa_s *bfa) |
443 | { |
444 | struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); |
445 | |
446 | return fcpim->q_depth; |
447 | } |
448 | |
449 | /* |
450 | * BFA ITNIM module state machine functions |
451 | */ |
452 | |
453 | /* |
454 | * Beginning/unallocated state - no events expected. |
455 | */ |
456 | static void |
457 | bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) |
458 | { |
459 | bfa_trc(itnim->bfa, itnim->rport->rport_tag); |
460 | bfa_trc(itnim->bfa, event); |
461 | |
462 | switch (event) { |
463 | case BFA_ITNIM_SM_CREATE: |
464 | bfa_sm_set_state(itnim, bfa_itnim_sm_created); |
465 | itnim->is_online = BFA_FALSE; |
466 | bfa_fcpim_additn(itnim); |
467 | break; |
468 | |
469 | default: |
470 | bfa_sm_fault(itnim->bfa, event); |
471 | } |
472 | } |
473 | |
474 | /* |
475 | * Beginning state, only online event expected. |
476 | */ |
477 | static void |
478 | bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) |
479 | { |
480 | bfa_trc(itnim->bfa, itnim->rport->rport_tag); |
481 | bfa_trc(itnim->bfa, event); |
482 | |
483 | switch (event) { |
484 | case BFA_ITNIM_SM_ONLINE: |
485 | if (bfa_itnim_send_fwcreate(itnim)) |
486 | bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate); |
487 | else |
488 | bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull); |
489 | break; |
490 | |
491 | case BFA_ITNIM_SM_DELETE: |
492 | bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); |
493 | bfa_fcpim_delitn(itnim); |
494 | break; |
495 | |
496 | case BFA_ITNIM_SM_HWFAIL: |
497 | bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); |
498 | break; |
499 | |
500 | default: |
501 | bfa_sm_fault(itnim->bfa, event); |
502 | } |
503 | } |
504 | |
505 | /* |
506 | * Waiting for itnim create response from firmware. |
507 | */ |
508 | static void |
509 | bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) |
510 | { |
511 | bfa_trc(itnim->bfa, itnim->rport->rport_tag); |
512 | bfa_trc(itnim->bfa, event); |
513 | |
514 | switch (event) { |
515 | case BFA_ITNIM_SM_FWRSP: |
516 | bfa_sm_set_state(itnim, bfa_itnim_sm_online); |
517 | itnim->is_online = BFA_TRUE; |
518 | bfa_itnim_iotov_online(itnim); |
519 | bfa_itnim_online_cb(itnim); |
520 | break; |
521 | |
522 | case BFA_ITNIM_SM_DELETE: |
523 | bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending); |
524 | break; |
525 | |
526 | case BFA_ITNIM_SM_OFFLINE: |
527 | if (bfa_itnim_send_fwdelete(itnim)) |
528 | bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete); |
529 | else |
530 | bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull); |
531 | break; |
532 | |
533 | case BFA_ITNIM_SM_HWFAIL: |
534 | bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); |
535 | break; |
536 | |
537 | default: |
538 | bfa_sm_fault(itnim->bfa, event); |
539 | } |
540 | } |
541 | |
542 | static void |
543 | bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim, |
544 | enum bfa_itnim_event event) |
545 | { |
546 | bfa_trc(itnim->bfa, itnim->rport->rport_tag); |
547 | bfa_trc(itnim->bfa, event); |
548 | |
549 | switch (event) { |
550 | case BFA_ITNIM_SM_QRESUME: |
551 | bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate); |
552 | bfa_itnim_send_fwcreate(itnim); |
553 | break; |
554 | |
555 | case BFA_ITNIM_SM_DELETE: |
556 | bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); |
557 | bfa_reqq_wcancel(&itnim->reqq_wait); |
558 | bfa_fcpim_delitn(itnim); |
559 | break; |
560 | |
561 | case BFA_ITNIM_SM_OFFLINE: |
562 | bfa_sm_set_state(itnim, bfa_itnim_sm_offline); |
563 | bfa_reqq_wcancel(&itnim->reqq_wait); |
564 | bfa_itnim_offline_cb(itnim); |
565 | break; |
566 | |
567 | case BFA_ITNIM_SM_HWFAIL: |
568 | bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); |
569 | bfa_reqq_wcancel(&itnim->reqq_wait); |
570 | break; |
571 | |
572 | default: |
573 | bfa_sm_fault(itnim->bfa, event); |
574 | } |
575 | } |
576 | |
577 | /* |
578 | * Waiting for itnim create response from firmware, a delete is pending. |
579 | */ |
580 | static void |
581 | bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim, |
582 | enum bfa_itnim_event event) |
583 | { |
584 | bfa_trc(itnim->bfa, itnim->rport->rport_tag); |
585 | bfa_trc(itnim->bfa, event); |
586 | |
587 | switch (event) { |
588 | case BFA_ITNIM_SM_FWRSP: |
589 | if (bfa_itnim_send_fwdelete(itnim)) |
590 | bfa_sm_set_state(itnim, bfa_itnim_sm_deleting); |
591 | else |
592 | bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull); |
593 | break; |
594 | |
595 | case BFA_ITNIM_SM_HWFAIL: |
596 | bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); |
597 | bfa_fcpim_delitn(itnim); |
598 | break; |
599 | |
600 | default: |
601 | bfa_sm_fault(itnim->bfa, event); |
602 | } |
603 | } |
604 | |
605 | /* |
606 | * Online state - normal parking state. |
607 | */ |
608 | static void |
609 | bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) |
610 | { |
611 | bfa_trc(itnim->bfa, itnim->rport->rport_tag); |
612 | bfa_trc(itnim->bfa, event); |
613 | |
614 | switch (event) { |
615 | case BFA_ITNIM_SM_OFFLINE: |
616 | bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline); |
617 | itnim->is_online = BFA_FALSE; |
618 | bfa_itnim_iotov_start(itnim); |
619 | bfa_itnim_cleanup(itnim); |
620 | break; |
621 | |
622 | case BFA_ITNIM_SM_DELETE: |
623 | bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete); |
624 | itnim->is_online = BFA_FALSE; |
625 | bfa_itnim_cleanup(itnim); |
626 | break; |
627 | |
628 | case BFA_ITNIM_SM_SLER: |
629 | bfa_sm_set_state(itnim, bfa_itnim_sm_sler); |
630 | itnim->is_online = BFA_FALSE; |
631 | bfa_itnim_iotov_start(itnim); |
632 | bfa_itnim_sler_cb(itnim); |
633 | break; |
634 | |
635 | case BFA_ITNIM_SM_HWFAIL: |
636 | bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); |
637 | itnim->is_online = BFA_FALSE; |
638 | bfa_itnim_iotov_start(itnim); |
639 | bfa_itnim_iocdisable_cleanup(itnim); |
640 | break; |
641 | |
642 | default: |
643 | bfa_sm_fault(itnim->bfa, event); |
644 | } |
645 | } |
646 | |
647 | /* |
 * Second level error recovery needed.
649 | */ |
650 | static void |
651 | bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) |
652 | { |
653 | bfa_trc(itnim->bfa, itnim->rport->rport_tag); |
654 | bfa_trc(itnim->bfa, event); |
655 | |
656 | switch (event) { |
657 | case BFA_ITNIM_SM_OFFLINE: |
658 | bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline); |
659 | bfa_itnim_cleanup(itnim); |
660 | break; |
661 | |
662 | case BFA_ITNIM_SM_DELETE: |
663 | bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete); |
664 | bfa_itnim_cleanup(itnim); |
665 | bfa_itnim_iotov_delete(itnim); |
666 | break; |
667 | |
668 | case BFA_ITNIM_SM_HWFAIL: |
669 | bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); |
670 | bfa_itnim_iocdisable_cleanup(itnim); |
671 | break; |
672 | |
673 | default: |
674 | bfa_sm_fault(itnim->bfa, event); |
675 | } |
676 | } |
677 | |
678 | /* |
679 | * Going offline. Waiting for active IO cleanup. |
680 | */ |
681 | static void |
682 | bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim, |
683 | enum bfa_itnim_event event) |
684 | { |
685 | bfa_trc(itnim->bfa, itnim->rport->rport_tag); |
686 | bfa_trc(itnim->bfa, event); |
687 | |
688 | switch (event) { |
689 | case BFA_ITNIM_SM_CLEANUP: |
690 | if (bfa_itnim_send_fwdelete(itnim)) |
691 | bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete); |
692 | else |
693 | bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull); |
694 | break; |
695 | |
696 | case BFA_ITNIM_SM_DELETE: |
697 | bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete); |
698 | bfa_itnim_iotov_delete(itnim); |
699 | break; |
700 | |
701 | case BFA_ITNIM_SM_HWFAIL: |
702 | bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); |
703 | bfa_itnim_iocdisable_cleanup(itnim); |
704 | bfa_itnim_offline_cb(itnim); |
705 | break; |
706 | |
707 | case BFA_ITNIM_SM_SLER: |
708 | break; |
709 | |
710 | default: |
711 | bfa_sm_fault(itnim->bfa, event); |
712 | } |
713 | } |
714 | |
715 | /* |
716 | * Deleting itnim. Waiting for active IO cleanup. |
717 | */ |
718 | static void |
719 | bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim, |
720 | enum bfa_itnim_event event) |
721 | { |
722 | bfa_trc(itnim->bfa, itnim->rport->rport_tag); |
723 | bfa_trc(itnim->bfa, event); |
724 | |
725 | switch (event) { |
726 | case BFA_ITNIM_SM_CLEANUP: |
727 | if (bfa_itnim_send_fwdelete(itnim)) |
728 | bfa_sm_set_state(itnim, bfa_itnim_sm_deleting); |
729 | else |
730 | bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull); |
731 | break; |
732 | |
733 | case BFA_ITNIM_SM_HWFAIL: |
734 | bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); |
735 | bfa_itnim_iocdisable_cleanup(itnim); |
736 | break; |
737 | |
738 | default: |
739 | bfa_sm_fault(itnim->bfa, event); |
740 | } |
741 | } |
742 | |
743 | /* |
 * Rport offline. Firmware itnim is being deleted - awaiting f/w response.
745 | */ |
746 | static void |
747 | bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) |
748 | { |
749 | bfa_trc(itnim->bfa, itnim->rport->rport_tag); |
750 | bfa_trc(itnim->bfa, event); |
751 | |
752 | switch (event) { |
753 | case BFA_ITNIM_SM_FWRSP: |
754 | bfa_sm_set_state(itnim, bfa_itnim_sm_offline); |
755 | bfa_itnim_offline_cb(itnim); |
756 | break; |
757 | |
758 | case BFA_ITNIM_SM_DELETE: |
759 | bfa_sm_set_state(itnim, bfa_itnim_sm_deleting); |
760 | break; |
761 | |
762 | case BFA_ITNIM_SM_HWFAIL: |
763 | bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); |
764 | bfa_itnim_offline_cb(itnim); |
765 | break; |
766 | |
767 | default: |
768 | bfa_sm_fault(itnim->bfa, event); |
769 | } |
770 | } |
771 | |
772 | static void |
773 | bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim, |
774 | enum bfa_itnim_event event) |
775 | { |
776 | bfa_trc(itnim->bfa, itnim->rport->rport_tag); |
777 | bfa_trc(itnim->bfa, event); |
778 | |
779 | switch (event) { |
780 | case BFA_ITNIM_SM_QRESUME: |
781 | bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete); |
782 | bfa_itnim_send_fwdelete(itnim); |
783 | break; |
784 | |
785 | case BFA_ITNIM_SM_DELETE: |
786 | bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull); |
787 | break; |
788 | |
789 | case BFA_ITNIM_SM_HWFAIL: |
790 | bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); |
791 | bfa_reqq_wcancel(&itnim->reqq_wait); |
792 | bfa_itnim_offline_cb(itnim); |
793 | break; |
794 | |
795 | default: |
796 | bfa_sm_fault(itnim->bfa, event); |
797 | } |
798 | } |
799 | |
800 | /* |
801 | * Offline state. |
802 | */ |
803 | static void |
804 | bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) |
805 | { |
806 | bfa_trc(itnim->bfa, itnim->rport->rport_tag); |
807 | bfa_trc(itnim->bfa, event); |
808 | |
809 | switch (event) { |
810 | case BFA_ITNIM_SM_DELETE: |
811 | bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); |
812 | bfa_itnim_iotov_delete(itnim); |
813 | bfa_fcpim_delitn(itnim); |
814 | break; |
815 | |
816 | case BFA_ITNIM_SM_ONLINE: |
817 | if (bfa_itnim_send_fwcreate(itnim)) |
818 | bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate); |
819 | else |
820 | bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull); |
821 | break; |
822 | |
823 | case BFA_ITNIM_SM_HWFAIL: |
824 | bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); |
825 | break; |
826 | |
827 | default: |
828 | bfa_sm_fault(itnim->bfa, event); |
829 | } |
830 | } |
831 | |
832 | static void |
833 | bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim, |
834 | enum bfa_itnim_event event) |
835 | { |
836 | bfa_trc(itnim->bfa, itnim->rport->rport_tag); |
837 | bfa_trc(itnim->bfa, event); |
838 | |
839 | switch (event) { |
840 | case BFA_ITNIM_SM_DELETE: |
841 | bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); |
842 | bfa_itnim_iotov_delete(itnim); |
843 | bfa_fcpim_delitn(itnim); |
844 | break; |
845 | |
846 | case BFA_ITNIM_SM_OFFLINE: |
847 | bfa_itnim_offline_cb(itnim); |
848 | break; |
849 | |
850 | case BFA_ITNIM_SM_ONLINE: |
851 | if (bfa_itnim_send_fwcreate(itnim)) |
852 | bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate); |
853 | else |
854 | bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull); |
855 | break; |
856 | |
857 | case BFA_ITNIM_SM_HWFAIL: |
858 | break; |
859 | |
860 | default: |
861 | bfa_sm_fault(itnim->bfa, event); |
862 | } |
863 | } |
864 | |
865 | /* |
 * Itnim is being deleted, waiting for the firmware response to the delete.
867 | */ |
868 | static void |
869 | bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) |
870 | { |
871 | bfa_trc(itnim->bfa, itnim->rport->rport_tag); |
872 | bfa_trc(itnim->bfa, event); |
873 | |
874 | switch (event) { |
875 | case BFA_ITNIM_SM_FWRSP: |
876 | case BFA_ITNIM_SM_HWFAIL: |
877 | bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); |
878 | bfa_fcpim_delitn(itnim); |
879 | break; |
880 | |
881 | default: |
882 | bfa_sm_fault(itnim->bfa, event); |
883 | } |
884 | } |
885 | |
886 | static void |
887 | bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim, |
888 | enum bfa_itnim_event event) |
889 | { |
890 | bfa_trc(itnim->bfa, itnim->rport->rport_tag); |
891 | bfa_trc(itnim->bfa, event); |
892 | |
893 | switch (event) { |
894 | case BFA_ITNIM_SM_QRESUME: |
895 | bfa_sm_set_state(itnim, bfa_itnim_sm_deleting); |
896 | bfa_itnim_send_fwdelete(itnim); |
897 | break; |
898 | |
899 | case BFA_ITNIM_SM_HWFAIL: |
900 | bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); |
901 | bfa_reqq_wcancel(&itnim->reqq_wait); |
902 | bfa_fcpim_delitn(itnim); |
903 | break; |
904 | |
905 | default: |
906 | bfa_sm_fault(itnim->bfa, event); |
907 | } |
908 | } |
909 | |
910 | /* |
911 | * Initiate cleanup of all IOs on an IOC failure. |
912 | */ |
913 | static void |
914 | bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim) |
915 | { |
916 | struct bfa_tskim_s *tskim; |
917 | struct bfa_ioim_s *ioim; |
918 | struct list_head *qe, *qen; |
919 | |
920 | list_for_each_safe(qe, qen, &itnim->tsk_q) { |
921 | tskim = (struct bfa_tskim_s *) qe; |
922 | bfa_tskim_iocdisable(tskim); |
923 | } |
924 | |
925 | list_for_each_safe(qe, qen, &itnim->io_q) { |
926 | ioim = (struct bfa_ioim_s *) qe; |
927 | bfa_ioim_iocdisable(ioim); |
928 | } |
929 | |
930 | /* |
	 * For IO requests in the pending queue, we pretend an early timeout.
932 | */ |
933 | list_for_each_safe(qe, qen, &itnim->pending_q) { |
934 | ioim = (struct bfa_ioim_s *) qe; |
935 | bfa_ioim_tov(ioim); |
936 | } |
937 | |
938 | list_for_each_safe(qe, qen, &itnim->io_cleanup_q) { |
939 | ioim = (struct bfa_ioim_s *) qe; |
940 | bfa_ioim_iocdisable(ioim); |
941 | } |
942 | } |
943 | |
944 | /* |
945 | * IO cleanup completion |
946 | */ |
947 | static void |
948 | bfa_itnim_cleanp_comp(void *itnim_cbarg) |
949 | { |
950 | struct bfa_itnim_s *itnim = itnim_cbarg; |
951 | |
952 | bfa_stats(itnim, cleanup_comps); |
953 | bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP); |
954 | } |
955 | |
956 | /* |
957 | * Initiate cleanup of all IOs. |
958 | */ |
959 | static void |
960 | bfa_itnim_cleanup(struct bfa_itnim_s *itnim) |
961 | { |
962 | struct bfa_ioim_s *ioim; |
963 | struct bfa_tskim_s *tskim; |
964 | struct list_head *qe, *qen; |
965 | |
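	/*
	 * The wait counter tracks every IO/TM cleanup started below;
	 * bfa_itnim_cleanp_comp() runs once bfa_itnim_iodone()/
	 * bfa_itnim_tskdone() have brought the count back to zero.
	 */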
	bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim);
967 | |
968 | list_for_each_safe(qe, qen, &itnim->io_q) { |
969 | ioim = (struct bfa_ioim_s *) qe; |
970 | |
971 | /* |
972 | * Move IO to a cleanup queue from active queue so that a later |
		 * TM will not pick up this IO.
974 | */ |
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &itnim->io_cleanup_q);
977 | |
		bfa_wc_up(&itnim->wc);
979 | bfa_ioim_cleanup(ioim); |
980 | } |
981 | |
982 | list_for_each_safe(qe, qen, &itnim->tsk_q) { |
983 | tskim = (struct bfa_tskim_s *) qe; |
		bfa_wc_up(&itnim->wc);
985 | bfa_tskim_cleanup(tskim); |
986 | } |
987 | |
	bfa_wc_wait(&itnim->wc);
989 | } |
990 | |
991 | static void |
992 | __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete) |
993 | { |
994 | struct bfa_itnim_s *itnim = cbarg; |
995 | |
996 | if (complete) |
		bfa_cb_itnim_online(itnim->ditn);
998 | } |
999 | |
1000 | static void |
1001 | __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete) |
1002 | { |
1003 | struct bfa_itnim_s *itnim = cbarg; |
1004 | |
1005 | if (complete) |
		bfa_cb_itnim_offline(itnim->ditn);
1007 | } |
1008 | |
1009 | static void |
1010 | __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete) |
1011 | { |
1012 | struct bfa_itnim_s *itnim = cbarg; |
1013 | |
1014 | if (complete) |
		bfa_cb_itnim_sler(itnim->ditn);
1016 | } |
1017 | |
1018 | /* |
1019 | * Call to resume any I/O requests waiting for room in request queue. |
1020 | */ |
1021 | static void |
1022 | bfa_itnim_qresume(void *cbarg) |
1023 | { |
1024 | struct bfa_itnim_s *itnim = cbarg; |
1025 | |
1026 | bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME); |
1027 | } |
1028 | |
1029 | /* |
1030 | * bfa_itnim_public |
1031 | */ |
1032 | |
1033 | void |
1034 | bfa_itnim_iodone(struct bfa_itnim_s *itnim) |
1035 | { |
	bfa_wc_down(&itnim->wc);
1037 | } |
1038 | |
1039 | void |
1040 | bfa_itnim_tskdone(struct bfa_itnim_s *itnim) |
1041 | { |
	bfa_wc_down(&itnim->wc);
1043 | } |
1044 | |
1045 | void |
1046 | bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len) |
1047 | { |
1048 | /* |
1049 | * ITN memory |
1050 | */ |
1051 | *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s); |
1052 | } |
1053 | |
1054 | void |
1055 | bfa_itnim_attach(struct bfa_fcpim_s *fcpim) |
1056 | { |
1057 | struct bfa_s *bfa = fcpim->bfa; |
1058 | struct bfa_fcp_mod_s *fcp = fcpim->fcp; |
1059 | struct bfa_itnim_s *itnim; |
1060 | int i, j; |
1061 | |
	INIT_LIST_HEAD(&fcpim->itnim_q);
1063 | |
1064 | itnim = (struct bfa_itnim_s *) bfa_mem_kva_curp(fcp); |
1065 | fcpim->itnim_arr = itnim; |
1066 | |
1067 | for (i = 0; i < fcpim->num_itnims; i++, itnim++) { |
1068 | memset(itnim, 0, sizeof(struct bfa_itnim_s)); |
1069 | itnim->bfa = bfa; |
1070 | itnim->fcpim = fcpim; |
1071 | itnim->reqq = BFA_REQQ_QOS_LO; |
1072 | itnim->rport = BFA_RPORT_FROM_TAG(bfa, i); |
1073 | itnim->iotov_active = BFA_FALSE; |
		bfa_reqq_winit(&itnim->reqq_wait, bfa_itnim_qresume, itnim);
1075 | |
		INIT_LIST_HEAD(&itnim->io_q);
		INIT_LIST_HEAD(&itnim->io_cleanup_q);
		INIT_LIST_HEAD(&itnim->pending_q);
		INIT_LIST_HEAD(&itnim->tsk_q);
		INIT_LIST_HEAD(&itnim->delay_comp_q);
1081 | for (j = 0; j < BFA_IOBUCKET_MAX; j++) |
1082 | itnim->ioprofile.io_latency.min[j] = ~0; |
1083 | bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); |
1084 | } |
1085 | |
1086 | bfa_mem_kva_curp(fcp) = (u8 *) itnim; |
1087 | } |
1088 | |
1089 | void |
1090 | bfa_itnim_iocdisable(struct bfa_itnim_s *itnim) |
1091 | { |
1092 | bfa_stats(itnim, ioc_disabled); |
1093 | bfa_sm_send_event(itnim, BFA_ITNIM_SM_HWFAIL); |
1094 | } |
1095 | |
1096 | static bfa_boolean_t |
1097 | bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim) |
1098 | { |
1099 | struct bfi_itn_create_req_s *m; |
1100 | |
1101 | itnim->msg_no++; |
1102 | |
1103 | /* |
1104 | * check for room in queue to send request now |
1105 | */ |
1106 | m = bfa_reqq_next(itnim->bfa, itnim->reqq); |
1107 | if (!m) { |
1108 | bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait); |
1109 | return BFA_FALSE; |
1110 | } |
1111 | |
1112 | bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_CREATE_REQ, |
1113 | bfa_fn_lpu(itnim->bfa)); |
1114 | m->fw_handle = itnim->rport->fw_handle; |
1115 | m->class = FC_CLASS_3; |
1116 | m->seq_rec = itnim->seq_rec; |
1117 | m->msg_no = itnim->msg_no; |
1118 | bfa_stats(itnim, fw_create); |
1119 | |
1120 | /* |
1121 | * queue I/O message to firmware |
1122 | */ |
1123 | bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh); |
1124 | return BFA_TRUE; |
1125 | } |
1126 | |
1127 | static bfa_boolean_t |
1128 | bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim) |
1129 | { |
1130 | struct bfi_itn_delete_req_s *m; |
1131 | |
1132 | /* |
1133 | * check for room in queue to send request now |
1134 | */ |
1135 | m = bfa_reqq_next(itnim->bfa, itnim->reqq); |
1136 | if (!m) { |
1137 | bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait); |
1138 | return BFA_FALSE; |
1139 | } |
1140 | |
1141 | bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_DELETE_REQ, |
1142 | bfa_fn_lpu(itnim->bfa)); |
1143 | m->fw_handle = itnim->rport->fw_handle; |
1144 | bfa_stats(itnim, fw_delete); |
1145 | |
1146 | /* |
1147 | * queue I/O message to firmware |
1148 | */ |
1149 | bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh); |
1150 | return BFA_TRUE; |
1151 | } |
1152 | |
1153 | /* |
1154 | * Cleanup all pending failed inflight requests. |
1155 | */ |
1156 | static void |
1157 | bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov) |
1158 | { |
1159 | struct bfa_ioim_s *ioim; |
1160 | struct list_head *qe, *qen; |
1161 | |
1162 | list_for_each_safe(qe, qen, &itnim->delay_comp_q) { |
1163 | ioim = (struct bfa_ioim_s *)qe; |
1164 | bfa_ioim_delayed_comp(ioim, iotov); |
1165 | } |
1166 | } |
1167 | |
1168 | /* |
1169 | * Start all pending IO requests. |
1170 | */ |
1171 | static void |
1172 | bfa_itnim_iotov_online(struct bfa_itnim_s *itnim) |
1173 | { |
1174 | struct bfa_ioim_s *ioim; |
1175 | |
1176 | bfa_itnim_iotov_stop(itnim); |
1177 | |
1178 | /* |
1179 | * Abort all inflight IO requests in the queue |
1180 | */ |
	bfa_itnim_delayed_comp(itnim, BFA_FALSE);
1182 | |
1183 | /* |
1184 | * Start all pending IO requests. |
1185 | */ |
	while (!list_empty(&itnim->pending_q)) {
		bfa_q_deq(&itnim->pending_q, &ioim);
		list_add_tail(&ioim->qe, &itnim->io_q);
1189 | bfa_ioim_start(ioim); |
1190 | } |
1191 | } |
1192 | |
1193 | /* |
1194 | * Fail all pending IO requests |
1195 | */ |
1196 | static void |
1197 | bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim) |
1198 | { |
1199 | struct bfa_ioim_s *ioim; |
1200 | |
1201 | /* |
1202 | * Fail all inflight IO requests in the queue |
1203 | */ |
	bfa_itnim_delayed_comp(itnim, BFA_TRUE);
1205 | |
1206 | /* |
1207 | * Fail any pending IO requests. |
1208 | */ |
	while (!list_empty(&itnim->pending_q)) {
		bfa_q_deq(&itnim->pending_q, &ioim);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
1212 | bfa_ioim_tov(ioim); |
1213 | } |
1214 | } |
1215 | |
1216 | /* |
1217 | * IO TOV timer callback. Fail any pending IO requests. |
1218 | */ |
1219 | static void |
1220 | bfa_itnim_iotov(void *itnim_arg) |
1221 | { |
1222 | struct bfa_itnim_s *itnim = itnim_arg; |
1223 | |
1224 | itnim->iotov_active = BFA_FALSE; |
1225 | |
	bfa_cb_itnim_tov_begin(itnim->ditn);
	bfa_itnim_iotov_cleanup(itnim);
	bfa_cb_itnim_tov(itnim->ditn);
1229 | } |
1230 | |
1231 | /* |
1232 | * Start IO TOV timer for failing back pending IO requests in offline state. |
1233 | */ |
1234 | static void |
1235 | bfa_itnim_iotov_start(struct bfa_itnim_s *itnim) |
1236 | { |
1237 | if (itnim->fcpim->path_tov > 0) { |
1238 | |
1239 | itnim->iotov_active = BFA_TRUE; |
1240 | WARN_ON(!bfa_itnim_hold_io(itnim)); |
1241 | bfa_timer_start(itnim->bfa, &itnim->timer, |
1242 | bfa_itnim_iotov, itnim, itnim->fcpim->path_tov); |
1243 | } |
1244 | } |
1245 | |
1246 | /* |
1247 | * Stop IO TOV timer. |
1248 | */ |
1249 | static void |
1250 | bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim) |
1251 | { |
1252 | if (itnim->iotov_active) { |
1253 | itnim->iotov_active = BFA_FALSE; |
		bfa_timer_stop(&itnim->timer);
1255 | } |
1256 | } |
1257 | |
1258 | /* |
 * Stop the IO TOV timer and, if it was running, fail back pending IO requests.
1260 | */ |
1261 | static void |
1262 | bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim) |
1263 | { |
1264 | bfa_boolean_t pathtov_active = BFA_FALSE; |
1265 | |
1266 | if (itnim->iotov_active) |
1267 | pathtov_active = BFA_TRUE; |
1268 | |
1269 | bfa_itnim_iotov_stop(itnim); |
1270 | if (pathtov_active) |
		bfa_cb_itnim_tov_begin(itnim->ditn);
	bfa_itnim_iotov_cleanup(itnim);
	if (pathtov_active)
		bfa_cb_itnim_tov(itnim->ditn);
1275 | } |
1276 | |
1277 | static void |
1278 | bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim) |
1279 | { |
1280 | struct bfa_fcpim_s *fcpim = BFA_FCPIM(itnim->bfa); |
1281 | fcpim->del_itn_stats.del_itn_iocomp_aborted += |
1282 | itnim->stats.iocomp_aborted; |
1283 | fcpim->del_itn_stats.del_itn_iocomp_timedout += |
1284 | itnim->stats.iocomp_timedout; |
1285 | fcpim->del_itn_stats.del_itn_iocom_sqer_needed += |
1286 | itnim->stats.iocom_sqer_needed; |
1287 | fcpim->del_itn_stats.del_itn_iocom_res_free += |
1288 | itnim->stats.iocom_res_free; |
1289 | fcpim->del_itn_stats.del_itn_iocom_hostabrts += |
1290 | itnim->stats.iocom_hostabrts; |
1291 | fcpim->del_itn_stats.del_itn_total_ios += itnim->stats.total_ios; |
1292 | fcpim->del_itn_stats.del_io_iocdowns += itnim->stats.io_iocdowns; |
1293 | fcpim->del_itn_stats.del_tm_iocdowns += itnim->stats.tm_iocdowns; |
1294 | } |
1295 | |
1296 | /* |
1297 | * bfa_itnim_public |
1298 | */ |
1299 | |
1300 | /* |
1301 | * Itnim interrupt processing. |
1302 | */ |
1303 | void |
1304 | bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) |
1305 | { |
1306 | struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); |
1307 | union bfi_itn_i2h_msg_u msg; |
1308 | struct bfa_itnim_s *itnim; |
1309 | |
1310 | bfa_trc(bfa, m->mhdr.msg_id); |
1311 | |
1312 | msg.msg = m; |
1313 | |
1314 | switch (m->mhdr.msg_id) { |
1315 | case BFI_ITN_I2H_CREATE_RSP: |
1316 | itnim = BFA_ITNIM_FROM_TAG(fcpim, |
1317 | msg.create_rsp->bfa_handle); |
1318 | WARN_ON(msg.create_rsp->status != BFA_STATUS_OK); |
1319 | bfa_stats(itnim, create_comps); |
1320 | bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP); |
1321 | break; |
1322 | |
1323 | case BFI_ITN_I2H_DELETE_RSP: |
1324 | itnim = BFA_ITNIM_FROM_TAG(fcpim, |
1325 | msg.delete_rsp->bfa_handle); |
1326 | WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK); |
1327 | bfa_stats(itnim, delete_comps); |
1328 | bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP); |
1329 | break; |
1330 | |
1331 | case BFI_ITN_I2H_SLER_EVENT: |
1332 | itnim = BFA_ITNIM_FROM_TAG(fcpim, |
1333 | msg.sler_event->bfa_handle); |
1334 | bfa_stats(itnim, sler_events); |
1335 | bfa_sm_send_event(itnim, BFA_ITNIM_SM_SLER); |
1336 | break; |
1337 | |
1338 | default: |
1339 | bfa_trc(bfa, m->mhdr.msg_id); |
1340 | WARN_ON(1); |
1341 | } |
1342 | } |
1343 | |
1344 | /* |
1345 | * bfa_itnim_api |
1346 | */ |
1347 | |
1348 | struct bfa_itnim_s * |
1349 | bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn) |
1350 | { |
1351 | struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); |
1352 | struct bfa_itnim_s *itnim; |
1353 | |
	bfa_itn_create(bfa, rport, bfa_itnim_isr);
1355 | |
1356 | itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag); |
1357 | WARN_ON(itnim->rport != rport); |
1358 | |
1359 | itnim->ditn = ditn; |
1360 | |
1361 | bfa_stats(itnim, creates); |
1362 | bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE); |
1363 | |
1364 | return itnim; |
1365 | } |
1366 | |
1367 | void |
1368 | bfa_itnim_delete(struct bfa_itnim_s *itnim) |
1369 | { |
1370 | bfa_stats(itnim, deletes); |
1371 | bfa_sm_send_event(itnim, BFA_ITNIM_SM_DELETE); |
1372 | } |
1373 | |
1374 | void |
1375 | bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec) |
1376 | { |
1377 | itnim->seq_rec = seq_rec; |
1378 | bfa_stats(itnim, onlines); |
1379 | bfa_sm_send_event(itnim, BFA_ITNIM_SM_ONLINE); |
1380 | } |
1381 | |
1382 | void |
1383 | bfa_itnim_offline(struct bfa_itnim_s *itnim) |
1384 | { |
1385 | bfa_stats(itnim, offlines); |
1386 | bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE); |
1387 | } |
1388 | |
1389 | /* |
 * Return true if itnim is considered offline for holding off IO requests.
1391 | * IO is not held if itnim is being deleted. |
1392 | */ |
1393 | bfa_boolean_t |
1394 | bfa_itnim_hold_io(struct bfa_itnim_s *itnim) |
1395 | { |
1396 | return itnim->fcpim->path_tov && itnim->iotov_active && |
1397 | (bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) || |
1398 | bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) || |
1399 | bfa_sm_cmp_state(itnim, bfa_itnim_sm_cleanup_offline) || |
1400 | bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) || |
1401 | bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) || |
1402 | bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable)); |
1403 | } |
1404 | |
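/*
 * Latency samples are recorded in jiffies; clock_res_mul/clock_res_div
 * (1000/HZ) let the consumer convert a sample to milliseconds.
 */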
1405 | #define bfa_io_lat_clock_res_div HZ |
1406 | #define bfa_io_lat_clock_res_mul 1000 |
1407 | bfa_status_t |
1408 | bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim, |
1409 | struct bfa_itnim_ioprofile_s *ioprofile) |
1410 | { |
1411 | struct bfa_fcpim_s *fcpim; |
1412 | |
1413 | if (!itnim) |
1414 | return BFA_STATUS_NO_FCPIM_NEXUS; |
1415 | |
1416 | fcpim = BFA_FCPIM(itnim->bfa); |
1417 | |
1418 | if (!fcpim->io_profile) |
1419 | return BFA_STATUS_IOPROFILE_OFF; |
1420 | |
1421 | itnim->ioprofile.index = BFA_IOBUCKET_MAX; |
1422 | /* unsigned 32-bit time_t overflow here in y2106 */ |
1423 | itnim->ioprofile.io_profile_start_time = |
1424 | bfa_io_profile_start_time(itnim->bfa); |
1425 | itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul; |
1426 | itnim->ioprofile.clock_res_div = bfa_io_lat_clock_res_div; |
1427 | *ioprofile = itnim->ioprofile; |
1428 | |
1429 | return BFA_STATUS_OK; |
1430 | } |
1431 | |
1432 | void |
1433 | bfa_itnim_clear_stats(struct bfa_itnim_s *itnim) |
1434 | { |
1435 | int j; |
1436 | |
1437 | if (!itnim) |
1438 | return; |
1439 | |
1440 | memset(&itnim->stats, 0, sizeof(itnim->stats)); |
1441 | memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile)); |
1442 | for (j = 0; j < BFA_IOBUCKET_MAX; j++) |
1443 | itnim->ioprofile.io_latency.min[j] = ~0; |
1444 | } |
1445 | |
1446 | /* |
1447 | * BFA IO module state machine functions |
1448 | */ |
1449 | |
1450 | /* |
1451 | * IO is not started (unallocated). |
1452 | */ |
1453 | static void |
1454 | bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) |
1455 | { |
1456 | switch (event) { |
1457 | case BFA_IOIM_SM_START: |
1458 | if (!bfa_itnim_is_online(ioim->itnim)) { |
			if (!bfa_itnim_hold_io(ioim->itnim)) {
				bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					&ioim->fcpim->ioim_comp_q);
				bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
					__bfa_cb_ioim_pathtov, ioim);
			} else {
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					&ioim->itnim->pending_q);
1470 | } |
1471 | break; |
1472 | } |
1473 | |
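		/*
		 * Requests with more SG entries than fit inline in the I/O
		 * request message need SG pages allocated before sending.
		 */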
1474 | if (ioim->nsges > BFI_SGE_INLINE) { |
1475 | if (!bfa_ioim_sgpg_alloc(ioim)) { |
1476 | bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc); |
1477 | return; |
1478 | } |
1479 | } |
1480 | |
1481 | if (!bfa_ioim_send_ioreq(ioim)) { |
1482 | bfa_sm_set_state(ioim, bfa_ioim_sm_qfull); |
1483 | break; |
1484 | } |
1485 | |
1486 | bfa_sm_set_state(ioim, bfa_ioim_sm_active); |
1487 | break; |
1488 | |
1489 | case BFA_IOIM_SM_IOTOV: |
1490 | bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); |
1491 | bfa_ioim_move_to_comp_q(ioim); |
1492 | bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, |
1493 | __bfa_cb_ioim_pathtov, ioim); |
1494 | break; |
1495 | |
1496 | case BFA_IOIM_SM_ABORT: |
1497 | /* |
1498 | * IO in pending queue can get abort requests. Complete abort |
1499 | * requests immediately. |
1500 | */ |
1501 | bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); |
1502 | WARN_ON(!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim)); |
1503 | bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, |
1504 | __bfa_cb_ioim_abort, ioim); |
1505 | break; |
1506 | |
1507 | default: |
1508 | bfa_sm_fault(ioim->bfa, event); |
1509 | } |
1510 | } |
1511 | |
1512 | /* |
1513 | * IO is waiting for SG pages. |
1514 | */ |
1515 | static void |
1516 | bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) |
1517 | { |
1518 | bfa_trc(ioim->bfa, ioim->iotag); |
1519 | bfa_trc(ioim->bfa, event); |
1520 | |
1521 | switch (event) { |
1522 | case BFA_IOIM_SM_SGALLOCED: |
1523 | if (!bfa_ioim_send_ioreq(ioim)) { |
1524 | bfa_sm_set_state(ioim, bfa_ioim_sm_qfull); |
1525 | break; |
1526 | } |
1527 | bfa_sm_set_state(ioim, bfa_ioim_sm_active); |
1528 | break; |
1529 | |
1530 | case BFA_IOIM_SM_CLEANUP: |
1531 | bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); |
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
1533 | bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, |
1534 | ioim); |
1535 | bfa_ioim_notify_cleanup(ioim); |
1536 | break; |
1537 | |
1538 | case BFA_IOIM_SM_ABORT: |
1539 | bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); |
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
1541 | bfa_ioim_move_to_comp_q(ioim); |
1542 | bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, |
1543 | ioim); |
1544 | break; |
1545 | |
1546 | case BFA_IOIM_SM_HWFAIL: |
1547 | bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); |
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
1549 | bfa_ioim_move_to_comp_q(ioim); |
1550 | bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, |
1551 | ioim); |
1552 | break; |
1553 | |
1554 | default: |
1555 | bfa_sm_fault(ioim->bfa, event); |
1556 | } |
1557 | } |
1558 | |
1559 | /* |
1560 | * IO is active. |
1561 | */ |
1562 | static void |
1563 | bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) |
1564 | { |
1565 | switch (event) { |
1566 | case BFA_IOIM_SM_COMP_GOOD: |
1567 | bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); |
1568 | bfa_ioim_move_to_comp_q(ioim); |
1569 | bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, |
1570 | __bfa_cb_ioim_good_comp, ioim); |
1571 | break; |
1572 | |
1573 | case BFA_IOIM_SM_COMP: |
1574 | bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); |
1575 | bfa_ioim_move_to_comp_q(ioim); |
1576 | bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp, |
1577 | ioim); |
1578 | break; |
1579 | |
1580 | case BFA_IOIM_SM_DONE: |
1581 | bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); |
1582 | bfa_ioim_move_to_comp_q(ioim); |
1583 | bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp, |
1584 | ioim); |
1585 | break; |
1586 | |
1587 | case BFA_IOIM_SM_ABORT: |
1588 | ioim->iosp->abort_explicit = BFA_TRUE; |
1589 | ioim->io_cbfn = __bfa_cb_ioim_abort; |
1590 | |
1591 | if (bfa_ioim_send_abort(ioim)) |
1592 | bfa_sm_set_state(ioim, bfa_ioim_sm_abort); |
1593 | else { |
1594 | bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull); |
1595 | bfa_stats(ioim->itnim, qwait); |
1596 | bfa_reqq_wait(ioim->bfa, ioim->reqq, |
1597 | &ioim->iosp->reqq_wait); |
1598 | } |
1599 | break; |
1600 | |
1601 | case BFA_IOIM_SM_CLEANUP: |
1602 | ioim->iosp->abort_explicit = BFA_FALSE; |
1603 | ioim->io_cbfn = __bfa_cb_ioim_failed; |
1604 | |
1605 | if (bfa_ioim_send_abort(ioim)) |
1606 | bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup); |
1607 | else { |
1608 | bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull); |
1609 | bfa_stats(ioim->itnim, qwait); |
1610 | bfa_reqq_wait(ioim->bfa, ioim->reqq, |
1611 | &ioim->iosp->reqq_wait); |
1612 | } |
1613 | break; |
1614 | |
1615 | case BFA_IOIM_SM_HWFAIL: |
1616 | bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); |
1617 | bfa_ioim_move_to_comp_q(ioim); |
1618 | bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, |
1619 | ioim); |
1620 | break; |
1621 | |
1622 | case BFA_IOIM_SM_SQRETRY: |
1623 | if (bfa_ioim_maxretry_reached(ioim)) { |
1624 | /* max retry reached, free IO */ |
1625 | bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); |
1626 | bfa_ioim_move_to_comp_q(ioim); |
1627 | bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, |
1628 | __bfa_cb_ioim_failed, ioim); |
1629 | break; |
1630 | } |
1631 | /* waiting for IO tag resource free */ |
1632 | bfa_sm_set_state(ioim, bfa_ioim_sm_cmnd_retry); |
1633 | break; |
1634 | |
1635 | default: |
1636 | bfa_sm_fault(ioim->bfa, event); |
1637 | } |
1638 | } |
1639 | |
1640 | /* |
1641 | * IO is retried with new tag. |
1642 | */ |
1643 | static void |
1644 | bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) |
1645 | { |
1646 | switch (event) { |
1647 | case BFA_IOIM_SM_FREE: |
1648 | /* abts and rrq done. Now retry the IO with new tag */ |
1649 | bfa_ioim_update_iotag(ioim); |
1650 | if (!bfa_ioim_send_ioreq(ioim)) { |
1651 | bfa_sm_set_state(ioim, bfa_ioim_sm_qfull); |
1652 | break; |
1653 | } |
1654 | bfa_sm_set_state(ioim, bfa_ioim_sm_active); |
1655 | break; |
1656 | |
1657 | case BFA_IOIM_SM_CLEANUP: |
1658 | ioim->iosp->abort_explicit = BFA_FALSE; |
1659 | ioim->io_cbfn = __bfa_cb_ioim_failed; |
1660 | |
1661 | if (bfa_ioim_send_abort(ioim)) |
1662 | bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup); |
1663 | else { |
1664 | bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull); |
1665 | bfa_stats(ioim->itnim, qwait); |
1666 | bfa_reqq_wait(ioim->bfa, ioim->reqq, |
1667 | &ioim->iosp->reqq_wait); |
1668 | } |
1669 | break; |
1670 | |
1671 | case BFA_IOIM_SM_HWFAIL: |
1672 | bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); |
1673 | bfa_ioim_move_to_comp_q(ioim); |
1674 | bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, |
1675 | __bfa_cb_ioim_failed, ioim); |
1676 | break; |
1677 | |
1678 | case BFA_IOIM_SM_ABORT: |
1679 | /* in this state IO abort is done. |
1680 | * Waiting for IO tag resource free. |
1681 | */ |
1682 | bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); |
1683 | bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, |
1684 | ioim); |
1685 | break; |
1686 | |
1687 | default: |
1688 | bfa_sm_fault(ioim->bfa, event); |
1689 | } |
1690 | } |
1691 | |
1692 | /* |
1693 | * IO is being aborted, waiting for completion from firmware. |
1694 | */ |
1695 | static void |
1696 | bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) |
1697 | { |
1698 | bfa_trc(ioim->bfa, ioim->iotag); |
1699 | bfa_trc(ioim->bfa, event); |
1700 | |
1701 | switch (event) { |
1702 | case BFA_IOIM_SM_COMP_GOOD: |
1703 | case BFA_IOIM_SM_COMP: |
1704 | case BFA_IOIM_SM_DONE: |
1705 | case BFA_IOIM_SM_FREE: |
1706 | break; |
1707 | |
1708 | case BFA_IOIM_SM_ABORT_DONE: |
1709 | bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); |
1710 | bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, |
1711 | ioim); |
1712 | break; |
1713 | |
1714 | case BFA_IOIM_SM_ABORT_COMP: |
1715 | bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); |
1716 | bfa_ioim_move_to_comp_q(ioim); |
1717 | bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, |
1718 | ioim); |
1719 | break; |
1720 | |
1721 | case BFA_IOIM_SM_COMP_UTAG: |
1722 | bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); |
1723 | bfa_ioim_move_to_comp_q(ioim); |
1724 | bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, |
1725 | ioim); |
1726 | break; |
1727 | |
1728 | case BFA_IOIM_SM_CLEANUP: |
1729 | WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE); |
1730 | ioim->iosp->abort_explicit = BFA_FALSE; |
1731 | |
1732 | if (bfa_ioim_send_abort(ioim)) |
1733 | bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup); |
1734 | else { |
1735 | bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull); |
1736 | bfa_stats(ioim->itnim, qwait); |
1737 | bfa_reqq_wait(ioim->bfa, ioim->reqq, |
1738 | &ioim->iosp->reqq_wait); |
1739 | } |
1740 | break; |
1741 | |
1742 | case BFA_IOIM_SM_HWFAIL: |
1743 | bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); |
1744 | bfa_ioim_move_to_comp_q(ioim); |
1745 | bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, |
1746 | ioim); |
1747 | break; |
1748 | |
1749 | default: |
1750 | bfa_sm_fault(ioim->bfa, event); |
1751 | } |
1752 | } |
1753 | |
1754 | /* |
1755 | * IO is being cleaned up (implicit abort), waiting for completion from |
1756 | * firmware. |
1757 | */ |
1758 | static void |
1759 | bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) |
1760 | { |
1761 | bfa_trc(ioim->bfa, ioim->iotag); |
1762 | bfa_trc(ioim->bfa, event); |
1763 | |
1764 | switch (event) { |
1765 | case BFA_IOIM_SM_COMP_GOOD: |
1766 | case BFA_IOIM_SM_COMP: |
1767 | case BFA_IOIM_SM_DONE: |
1768 | case BFA_IOIM_SM_FREE: |
1769 | break; |
1770 | |
1771 | case BFA_IOIM_SM_ABORT: |
1772 | /* |
1773 | * IO is already being aborted implicitly |
1774 | */ |
1775 | ioim->io_cbfn = __bfa_cb_ioim_abort; |
1776 | break; |
1777 | |
1778 | case BFA_IOIM_SM_ABORT_DONE: |
1779 | bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); |
1780 | bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim); |
1781 | bfa_ioim_notify_cleanup(ioim); |
1782 | break; |
1783 | |
1784 | case BFA_IOIM_SM_ABORT_COMP: |
1785 | bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); |
1786 | bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim); |
1787 | bfa_ioim_notify_cleanup(ioim); |
1788 | break; |
1789 | |
1790 | case BFA_IOIM_SM_COMP_UTAG: |
1791 | bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); |
1792 | bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim); |
1793 | bfa_ioim_notify_cleanup(ioim); |
1794 | break; |
1795 | |
1796 | case BFA_IOIM_SM_HWFAIL: |
1797 | bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); |
1798 | bfa_ioim_move_to_comp_q(ioim); |
1799 | bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, |
1800 | ioim); |
1801 | break; |
1802 | |
1803 | case BFA_IOIM_SM_CLEANUP: |
1804 | /* |
1805 | * IO can be in cleanup state already due to TM command. |
1806 | * 2nd cleanup request comes from ITN offline event. |
1807 | */ |
1808 | break; |
1809 | |
1810 | default: |
1811 | bfa_sm_fault(ioim->bfa, event); |
1812 | } |
1813 | } |
1814 | |
1815 | /* |
1816 | * IO is waiting for room in request CQ |
1817 | */ |
1818 | static void |
1819 | bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) |
1820 | { |
1821 | bfa_trc(ioim->bfa, ioim->iotag); |
1822 | bfa_trc(ioim->bfa, event); |
1823 | |
1824 | switch (event) { |
1825 | case BFA_IOIM_SM_QRESUME: |
1826 | bfa_sm_set_state(ioim, bfa_ioim_sm_active); |
1827 | bfa_ioim_send_ioreq(ioim); |
1828 | break; |
1829 | |
1830 | case BFA_IOIM_SM_ABORT: |
1831 | bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); |
1832 | bfa_reqq_wcancel(&ioim->iosp->reqq_wait); |
1833 | bfa_ioim_move_to_comp_q(ioim); |
1834 | bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, |
1835 | ioim); |
1836 | break; |
1837 | |
1838 | case BFA_IOIM_SM_CLEANUP: |
1839 | bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); |
1840 | bfa_reqq_wcancel(&ioim->iosp->reqq_wait); |
1841 | bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, |
1842 | ioim); |
1843 | bfa_ioim_notify_cleanup(ioim); |
1844 | break; |
1845 | |
1846 | case BFA_IOIM_SM_HWFAIL: |
1847 | bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); |
1848 | bfa_reqq_wcancel(&ioim->iosp->reqq_wait); |
1849 | bfa_ioim_move_to_comp_q(ioim); |
1850 | bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, |
1851 | ioim); |
1852 | break; |
1853 | |
1854 | default: |
1855 | bfa_sm_fault(ioim->bfa, event); |
1856 | } |
1857 | } |
1858 | |
1859 | /* |
1860 | * Active IO is being aborted, waiting for room in request CQ. |
1861 | */ |
1862 | static void |
1863 | bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) |
1864 | { |
1865 | bfa_trc(ioim->bfa, ioim->iotag); |
1866 | bfa_trc(ioim->bfa, event); |
1867 | |
1868 | switch (event) { |
1869 | case BFA_IOIM_SM_QRESUME: |
1870 | bfa_sm_set_state(ioim, bfa_ioim_sm_abort); |
1871 | bfa_ioim_send_abort(ioim); |
1872 | break; |
1873 | |
1874 | case BFA_IOIM_SM_CLEANUP: |
1875 | WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE); |
1876 | ioim->iosp->abort_explicit = BFA_FALSE; |
1877 | bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull); |
1878 | break; |
1879 | |
1880 | case BFA_IOIM_SM_COMP_GOOD: |
1881 | case BFA_IOIM_SM_COMP: |
1882 | bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); |
1883 | bfa_reqq_wcancel(&ioim->iosp->reqq_wait); |
1884 | bfa_ioim_move_to_comp_q(ioim); |
1885 | bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, |
1886 | ioim); |
1887 | break; |
1888 | |
1889 | case BFA_IOIM_SM_DONE: |
1890 | bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); |
1891 | bfa_reqq_wcancel(&ioim->iosp->reqq_wait); |
1892 | bfa_ioim_move_to_comp_q(ioim); |
1893 | bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, |
1894 | ioim); |
1895 | break; |
1896 | |
1897 | case BFA_IOIM_SM_HWFAIL: |
1898 | bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); |
1899 | bfa_reqq_wcancel(&ioim->iosp->reqq_wait); |
1900 | bfa_ioim_move_to_comp_q(ioim); |
1901 | bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, |
1902 | ioim); |
1903 | break; |
1904 | |
1905 | default: |
1906 | bfa_sm_fault(ioim->bfa, event); |
1907 | } |
1908 | } |
1909 | |
1910 | /* |
1911 | * Active IO is being cleaned up, waiting for room in request CQ. |
1912 | */ |
1913 | static void |
1914 | bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) |
1915 | { |
1916 | bfa_trc(ioim->bfa, ioim->iotag); |
1917 | bfa_trc(ioim->bfa, event); |
1918 | |
1919 | switch (event) { |
1920 | case BFA_IOIM_SM_QRESUME: |
1921 | bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup); |
1922 | bfa_ioim_send_abort(ioim); |
1923 | break; |
1924 | |
1925 | case BFA_IOIM_SM_ABORT: |
1926 | /* |
1927 | * IO is already being cleaned up implicitly |
1928 | */ |
1929 | ioim->io_cbfn = __bfa_cb_ioim_abort; |
1930 | break; |
1931 | |
1932 | case BFA_IOIM_SM_COMP_GOOD: |
1933 | case BFA_IOIM_SM_COMP: |
1934 | bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); |
1935 | bfa_reqq_wcancel(&ioim->iosp->reqq_wait); |
1936 | bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim); |
1937 | bfa_ioim_notify_cleanup(ioim); |
1938 | break; |
1939 | |
1940 | case BFA_IOIM_SM_DONE: |
1941 | bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); |
1942 | bfa_reqq_wcancel(&ioim->iosp->reqq_wait); |
1943 | bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim); |
1944 | bfa_ioim_notify_cleanup(ioim); |
1945 | break; |
1946 | |
1947 | case BFA_IOIM_SM_HWFAIL: |
1948 | bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); |
1949 | bfa_reqq_wcancel(&ioim->iosp->reqq_wait); |
1950 | bfa_ioim_move_to_comp_q(ioim); |
1951 | bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, |
1952 | ioim); |
1953 | break; |
1954 | |
1955 | default: |
1956 | bfa_sm_fault(ioim->bfa, event); |
1957 | } |
1958 | } |
1959 | |
1960 | /* |
1961 | * IO bfa callback is pending. |
1962 | */ |
1963 | static void |
1964 | bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) |
1965 | { |
1966 | switch (event) { |
1967 | case BFA_IOIM_SM_HCB: |
1968 | bfa_sm_set_state(ioim, bfa_ioim_sm_uninit); |
1969 | bfa_ioim_free(ioim); |
1970 | break; |
1971 | |
1972 | case BFA_IOIM_SM_CLEANUP: |
1973 | bfa_ioim_notify_cleanup(ioim); |
1974 | break; |
1975 | |
1976 | case BFA_IOIM_SM_HWFAIL: |
1977 | break; |
1978 | |
1979 | default: |
1980 | bfa_sm_fault(ioim->bfa, event); |
1981 | } |
1982 | } |
1983 | |
1984 | /* |
1985 | * IO bfa callback is pending. IO resource cannot be freed. |
1986 | */ |
1987 | static void |
1988 | bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) |
1989 | { |
1990 | bfa_trc(ioim->bfa, ioim->iotag); |
1991 | bfa_trc(ioim->bfa, event); |
1992 | |
1993 | switch (event) { |
1994 | case BFA_IOIM_SM_HCB: |
1995 | bfa_sm_set_state(ioim, bfa_ioim_sm_resfree); |
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
1998 | break; |
1999 | |
2000 | case BFA_IOIM_SM_FREE: |
2001 | bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); |
2002 | break; |
2003 | |
2004 | case BFA_IOIM_SM_CLEANUP: |
2005 | bfa_ioim_notify_cleanup(ioim); |
2006 | break; |
2007 | |
2008 | case BFA_IOIM_SM_HWFAIL: |
2009 | bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); |
2010 | break; |
2011 | |
2012 | default: |
2013 | bfa_sm_fault(ioim->bfa, event); |
2014 | } |
2015 | } |
2016 | |
2017 | /* |
 * IO is completed, waiting for the resource free event from firmware.
2019 | */ |
2020 | static void |
2021 | bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) |
2022 | { |
2023 | bfa_trc(ioim->bfa, ioim->iotag); |
2024 | bfa_trc(ioim->bfa, event); |
2025 | |
2026 | switch (event) { |
2027 | case BFA_IOIM_SM_FREE: |
2028 | bfa_sm_set_state(ioim, bfa_ioim_sm_uninit); |
2029 | bfa_ioim_free(ioim); |
2030 | break; |
2031 | |
2032 | case BFA_IOIM_SM_CLEANUP: |
2033 | bfa_ioim_notify_cleanup(ioim); |
2034 | break; |
2035 | |
2036 | case BFA_IOIM_SM_HWFAIL: |
2037 | break; |
2038 | |
2039 | default: |
2040 | bfa_sm_fault(ioim->bfa, event); |
2041 | } |
2042 | } |
2043 | |
2044 | /* |
 * This is called from bfa_fcpim_start after the driver completes bfa_init()
 * with the flash read. Invalidate any stale lun mask content such as the
 * unit attention flag, rp tag and lp tag.
2048 | */ |
2049 | void |
2050 | bfa_ioim_lm_init(struct bfa_s *bfa) |
2051 | { |
2052 | struct bfa_lun_mask_s *lunm_list; |
2053 | int i; |
2054 | |
2055 | if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG) |
2056 | return; |
2057 | |
2058 | lunm_list = bfa_get_lun_mask_list(bfa); |
2059 | for (i = 0; i < MAX_LUN_MASK_CFG; i++) { |
2060 | lunm_list[i].ua = BFA_IOIM_LM_UA_RESET; |
2061 | lunm_list[i].lp_tag = BFA_LP_TAG_INVALID; |
2062 | lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID; |
2063 | } |
2064 | } |
2065 | |
2066 | static void |
2067 | __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete) |
2068 | { |
2069 | struct bfa_ioim_s *ioim = cbarg; |
2070 | |
2071 | if (!complete) { |
2072 | bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB); |
2073 | return; |
2074 | } |
2075 | |
	bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
2077 | } |
2078 | |
2079 | static void |
2080 | __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete) |
2081 | { |
2082 | struct bfa_ioim_s *ioim = cbarg; |
2083 | struct bfi_ioim_rsp_s *m; |
2084 | u8 *snsinfo = NULL; |
2085 | u8 sns_len = 0; |
2086 | s32 residue = 0; |
2087 | |
2088 | if (!complete) { |
2089 | bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB); |
2090 | return; |
2091 | } |
2092 | |
2093 | m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg; |
2094 | if (m->io_status == BFI_IOIM_STS_OK) { |
2095 | /* |
2096 | * setup sense information, if present |
2097 | */ |
2098 | if ((m->scsi_status == SAM_STAT_CHECK_CONDITION) && |
2099 | m->sns_len) { |
2100 | sns_len = m->sns_len; |
2101 | snsinfo = BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp, |
2102 | ioim->iotag); |
2103 | } |
2104 | |
2105 | /* |
2106 | * setup residue value correctly for normal completions |
2107 | */ |
2108 | if (m->resid_flags == FCP_RESID_UNDER) { |
2109 | residue = be32_to_cpu(m->residue); |
2110 | bfa_stats(ioim->itnim, iocomp_underrun); |
2111 | } |
2112 | if (m->resid_flags == FCP_RESID_OVER) { |
2113 | residue = be32_to_cpu(m->residue); |
2114 | residue = -residue; |
2115 | bfa_stats(ioim->itnim, iocomp_overrun); |
2116 | } |
2117 | } |
2118 | |
	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
			  m->scsi_status, sns_len, snsinfo, residue);
2121 | } |
2122 | |
2123 | void |
2124 | bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn, wwn_t rp_wwn, |
2125 | u16 rp_tag, u8 lp_tag) |
2126 | { |
2127 | struct bfa_lun_mask_s *lun_list; |
2128 | u8 i; |
2129 | |
2130 | if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG) |
2131 | return; |
2132 | |
2133 | lun_list = bfa_get_lun_mask_list(bfa); |
2134 | for (i = 0; i < MAX_LUN_MASK_CFG; i++) { |
2135 | if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) { |
2136 | if ((lun_list[i].lp_wwn == lp_wwn) && |
2137 | (lun_list[i].rp_wwn == rp_wwn)) { |
2138 | lun_list[i].rp_tag = rp_tag; |
2139 | lun_list[i].lp_tag = lp_tag; |
2140 | } |
2141 | } |
2142 | } |
2143 | } |
2144 | |
2145 | /* |
2146 | * set UA for all active luns in LM DB |
2147 | */ |
2148 | static void |
2149 | bfa_ioim_lm_set_ua(struct bfa_s *bfa) |
2150 | { |
2151 | struct bfa_lun_mask_s *lunm_list; |
2152 | int i; |
2153 | |
2154 | lunm_list = bfa_get_lun_mask_list(bfa); |
2155 | for (i = 0; i < MAX_LUN_MASK_CFG; i++) { |
2156 | if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE) |
2157 | continue; |
2158 | lunm_list[i].ua = BFA_IOIM_LM_UA_SET; |
2159 | } |
2160 | } |
2161 | |
2162 | bfa_status_t |
2163 | bfa_fcpim_lunmask_update(struct bfa_s *bfa, u32 update) |
2164 | { |
2165 | struct bfa_lunmask_cfg_s *lun_mask; |
2166 | |
2167 | bfa_trc(bfa, bfa_get_lun_mask_status(bfa)); |
2168 | if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG) |
2169 | return BFA_STATUS_FAILED; |
2170 | |
2171 | if (bfa_get_lun_mask_status(bfa) == update) |
2172 | return BFA_STATUS_NO_CHANGE; |
2173 | |
2174 | lun_mask = bfa_get_lun_mask(bfa); |
2175 | lun_mask->status = update; |
2176 | |
2177 | if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED) |
2178 | bfa_ioim_lm_set_ua(bfa); |
2179 | |
2180 | return bfa_dconf_update(bfa); |
2181 | } |
2182 | |
2183 | bfa_status_t |
2184 | bfa_fcpim_lunmask_clear(struct bfa_s *bfa) |
2185 | { |
2186 | int i; |
2187 | struct bfa_lun_mask_s *lunm_list; |
2188 | |
2189 | bfa_trc(bfa, bfa_get_lun_mask_status(bfa)); |
2190 | if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG) |
2191 | return BFA_STATUS_FAILED; |
2192 | |
2193 | lunm_list = bfa_get_lun_mask_list(bfa); |
2194 | for (i = 0; i < MAX_LUN_MASK_CFG; i++) { |
2195 | if (lunm_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) { |
2196 | if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID) |
2197 | bfa_rport_unset_lunmask(bfa, |
2198 | BFA_RPORT_FROM_TAG(bfa, lunm_list[i].rp_tag)); |
2199 | } |
2200 | } |
2201 | |
2202 | memset(lunm_list, 0, sizeof(struct bfa_lun_mask_s) * MAX_LUN_MASK_CFG); |
2203 | return bfa_dconf_update(bfa); |
2204 | } |
2205 | |
2206 | bfa_status_t |
2207 | bfa_fcpim_lunmask_query(struct bfa_s *bfa, void *buf) |
2208 | { |
2209 | struct bfa_lunmask_cfg_s *lun_mask; |
2210 | |
2211 | bfa_trc(bfa, bfa_get_lun_mask_status(bfa)); |
2212 | if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG) |
2213 | return BFA_STATUS_FAILED; |
2214 | |
2215 | lun_mask = bfa_get_lun_mask(bfa); |
2216 | memcpy(buf, lun_mask, sizeof(struct bfa_lunmask_cfg_s)); |
2217 | return BFA_STATUS_OK; |
2218 | } |
2219 | |
2220 | bfa_status_t |
2221 | bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn, |
2222 | wwn_t rpwwn, struct scsi_lun lun) |
2223 | { |
2224 | struct bfa_lun_mask_s *lunm_list; |
2225 | struct bfa_rport_s *rp = NULL; |
2226 | int i, free_index = MAX_LUN_MASK_CFG + 1; |
2227 | struct bfa_fcs_lport_s *port = NULL; |
2228 | struct bfa_fcs_rport_s *rp_fcs; |
2229 | |
2230 | bfa_trc(bfa, bfa_get_lun_mask_status(bfa)); |
2231 | if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG) |
2232 | return BFA_STATUS_FAILED; |
2233 | |
	port = bfa_fcs_lookup_port(&((struct bfad_s *)bfa->bfad)->bfa_fcs,
				vf_id, *pwwn);
2236 | if (port) { |
2237 | *pwwn = port->port_cfg.pwwn; |
		rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
2239 | if (rp_fcs) |
2240 | rp = rp_fcs->bfa_rport; |
2241 | } |
2242 | |
2243 | lunm_list = bfa_get_lun_mask_list(bfa); |
	/* bail out if the entry already exists; also note a free slot */
2245 | for (i = 0; i < MAX_LUN_MASK_CFG; i++) { |
2246 | if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE) |
2247 | free_index = i; |
2248 | if ((lunm_list[i].lp_wwn == *pwwn) && |
2249 | (lunm_list[i].rp_wwn == rpwwn) && |
2250 | (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) == |
2251 | scsilun_to_int((struct scsi_lun *)&lun))) |
2252 | return BFA_STATUS_ENTRY_EXISTS; |
2253 | } |
2254 | |
2255 | if (free_index > MAX_LUN_MASK_CFG) |
2256 | return BFA_STATUS_MAX_ENTRY_REACHED; |
2257 | |
2258 | if (rp) { |
2259 | lunm_list[free_index].lp_tag = bfa_lps_get_tag_from_pid(bfa, |
						rp->rport_info.local_pid);
2261 | lunm_list[free_index].rp_tag = rp->rport_tag; |
2262 | } else { |
2263 | lunm_list[free_index].lp_tag = BFA_LP_TAG_INVALID; |
2264 | lunm_list[free_index].rp_tag = BFA_RPORT_TAG_INVALID; |
2265 | } |
2266 | |
2267 | lunm_list[free_index].lp_wwn = *pwwn; |
2268 | lunm_list[free_index].rp_wwn = rpwwn; |
2269 | lunm_list[free_index].lun = lun; |
2270 | lunm_list[free_index].state = BFA_IOIM_LUN_MASK_ACTIVE; |
2271 | |
2272 | /* set for all luns in this rp */ |
2273 | for (i = 0; i < MAX_LUN_MASK_CFG; i++) { |
2274 | if ((lunm_list[i].lp_wwn == *pwwn) && |
2275 | (lunm_list[i].rp_wwn == rpwwn)) |
2276 | lunm_list[i].ua = BFA_IOIM_LM_UA_SET; |
2277 | } |
2278 | |
2279 | return bfa_dconf_update(bfa); |
2280 | } |
2281 | |
2282 | bfa_status_t |
2283 | bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn, |
2284 | wwn_t rpwwn, struct scsi_lun lun) |
2285 | { |
2286 | struct bfa_lun_mask_s *lunm_list; |
2287 | struct bfa_fcs_lport_s *port = NULL; |
2288 | int i; |
2289 | |
2290 | /* in min cfg lunm_list could be NULL but no commands should run. */ |
2291 | if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG) |
2292 | return BFA_STATUS_FAILED; |
2293 | |
2294 | bfa_trc(bfa, bfa_get_lun_mask_status(bfa)); |
2295 | bfa_trc(bfa, *pwwn); |
2296 | bfa_trc(bfa, rpwwn); |
2297 | bfa_trc(bfa, scsilun_to_int((struct scsi_lun *)&lun)); |
2298 | |
2299 | if (*pwwn == 0) { |
2300 | port = bfa_fcs_lookup_port( |
				&((struct bfad_s *)bfa->bfad)->bfa_fcs,
				vf_id, *pwwn);
2303 | if (port) |
2304 | *pwwn = port->port_cfg.pwwn; |
2305 | } |
2306 | |
2307 | lunm_list = bfa_get_lun_mask_list(bfa); |
2308 | for (i = 0; i < MAX_LUN_MASK_CFG; i++) { |
2309 | if ((lunm_list[i].lp_wwn == *pwwn) && |
2310 | (lunm_list[i].rp_wwn == rpwwn) && |
2311 | (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) == |
2312 | scsilun_to_int((struct scsi_lun *)&lun))) { |
2313 | lunm_list[i].lp_wwn = 0; |
2314 | lunm_list[i].rp_wwn = 0; |
2315 | int_to_scsilun(0, &lunm_list[i].lun); |
2316 | lunm_list[i].state = BFA_IOIM_LUN_MASK_INACTIVE; |
2317 | if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID) { |
2318 | lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID; |
2319 | lunm_list[i].lp_tag = BFA_LP_TAG_INVALID; |
2320 | } |
2321 | return bfa_dconf_update(bfa); |
2322 | } |
2323 | } |
2324 | |
2325 | /* set for all luns in this rp */ |
2326 | for (i = 0; i < MAX_LUN_MASK_CFG; i++) { |
2327 | if ((lunm_list[i].lp_wwn == *pwwn) && |
2328 | (lunm_list[i].rp_wwn == rpwwn)) |
2329 | lunm_list[i].ua = BFA_IOIM_LM_UA_SET; |
2330 | } |
2331 | |
2332 | return BFA_STATUS_ENTRY_NOT_EXISTS; |
2333 | } |
2334 | |
2335 | static void |
2336 | __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete) |
2337 | { |
2338 | struct bfa_ioim_s *ioim = cbarg; |
2339 | |
2340 | if (!complete) { |
2341 | bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB); |
2342 | return; |
2343 | } |
2344 | |
	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
			  0, 0, NULL, 0);
2347 | } |
2348 | |
2349 | static void |
2350 | __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete) |
2351 | { |
2352 | struct bfa_ioim_s *ioim = cbarg; |
2353 | |
2354 | bfa_stats(ioim->itnim, path_tov_expired); |
2355 | if (!complete) { |
2356 | bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB); |
2357 | return; |
2358 | } |
2359 | |
	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
			  0, 0, NULL, 0);
2362 | } |
2363 | |
2364 | static void |
2365 | __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete) |
2366 | { |
2367 | struct bfa_ioim_s *ioim = cbarg; |
2368 | |
2369 | if (!complete) { |
2370 | bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB); |
2371 | return; |
2372 | } |
2373 | |
	bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
2375 | } |
2376 | |
2377 | static void |
2378 | bfa_ioim_sgpg_alloced(void *cbarg) |
2379 | { |
2380 | struct bfa_ioim_s *ioim = cbarg; |
2381 | |
2382 | ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges); |
	list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
2384 | ioim->sgpg = bfa_q_first(&ioim->sgpg_q); |
2385 | bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED); |
2386 | } |
2387 | |
2388 | /* |
2389 | * Send I/O request to firmware. |
2390 | */ |
2391 | static bfa_boolean_t |
2392 | bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim) |
2393 | { |
2394 | struct bfa_itnim_s *itnim = ioim->itnim; |
2395 | struct bfi_ioim_req_s *m; |
2396 | static struct fcp_cmnd_s cmnd_z0 = { { { 0 } } }; |
2397 | struct bfi_sge_s *sge, *sgpge; |
2398 | u32 pgdlen = 0; |
2399 | u32 fcp_dl; |
2400 | u64 addr; |
2401 | struct scatterlist *sg; |
2402 | struct bfa_sgpg_s *sgpg; |
2403 | struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio; |
2404 | u32 i, sge_id, pgcumsz; |
2405 | enum dma_data_direction dmadir; |
2406 | |
2407 | /* |
2408 | * check for room in queue to send request now |
2409 | */ |
2410 | m = bfa_reqq_next(ioim->bfa, ioim->reqq); |
2411 | if (!m) { |
2412 | bfa_stats(ioim->itnim, qwait); |
2413 | bfa_reqq_wait(ioim->bfa, ioim->reqq, |
2414 | &ioim->iosp->reqq_wait); |
2415 | return BFA_FALSE; |
2416 | } |
2417 | |
2418 | /* |
2419 | * build i/o request message next |
2420 | */ |
2421 | m->io_tag = cpu_to_be16(ioim->iotag); |
2422 | m->rport_hdl = ioim->itnim->rport->fw_handle; |
2423 | m->io_timeout = 0; |
2424 | |
2425 | sge = &m->sges[0]; |
2426 | sgpg = ioim->sgpg; |
2427 | sge_id = 0; |
2428 | sgpge = NULL; |
2429 | pgcumsz = 0; |
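	/*
	 * The first SG element is built inline in the request message; any
	 * remaining elements go into chained SG pages. Each page is closed
	 * with either a PGDLEN terminator or a LINK element pointing to the
	 * next page.
	 */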
2430 | scsi_for_each_sg(cmnd, sg, ioim->nsges, i) { |
2431 | if (i == 0) { |
2432 | /* build inline IO SG element */ |
2433 | addr = bfa_sgaddr_le(sg_dma_address(sg)); |
2434 | sge->sga = *(union bfi_addr_u *) &addr; |
2435 | pgdlen = sg_dma_len(sg); |
2436 | sge->sg_len = pgdlen; |
2437 | sge->flags = (ioim->nsges > BFI_SGE_INLINE) ? |
2438 | BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST; |
2439 | bfa_sge_to_be(sge); |
2440 | sge++; |
2441 | } else { |
2442 | if (sge_id == 0) |
2443 | sgpge = sgpg->sgpg->sges; |
2444 | |
2445 | addr = bfa_sgaddr_le(sg_dma_address(sg)); |
2446 | sgpge->sga = *(union bfi_addr_u *) &addr; |
2447 | sgpge->sg_len = sg_dma_len(sg); |
2448 | pgcumsz += sgpge->sg_len; |
2449 | |
2450 | /* set flags */ |
2451 | if (i < (ioim->nsges - 1) && |
2452 | sge_id < (BFI_SGPG_DATA_SGES - 1)) |
2453 | sgpge->flags = BFI_SGE_DATA; |
2454 | else if (i < (ioim->nsges - 1)) |
2455 | sgpge->flags = BFI_SGE_DATA_CPL; |
2456 | else |
2457 | sgpge->flags = BFI_SGE_DATA_LAST; |
2458 | |
2459 | bfa_sge_to_le(sgpge); |
2460 | |
2461 | sgpge++; |
2462 | if (i == (ioim->nsges - 1)) { |
2463 | sgpge->flags = BFI_SGE_PGDLEN; |
2464 | sgpge->sga.a32.addr_lo = 0; |
2465 | sgpge->sga.a32.addr_hi = 0; |
2466 | sgpge->sg_len = pgcumsz; |
2467 | bfa_sge_to_le(sgpge); |
2468 | } else if (++sge_id == BFI_SGPG_DATA_SGES) { |
2469 | sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg); |
2470 | sgpge->flags = BFI_SGE_LINK; |
2471 | sgpge->sga = sgpg->sgpg_pa; |
2472 | sgpge->sg_len = pgcumsz; |
2473 | bfa_sge_to_le(sgpge); |
2474 | sge_id = 0; |
2475 | pgcumsz = 0; |
2476 | } |
2477 | } |
2478 | } |
2479 | |
2480 | if (ioim->nsges > BFI_SGE_INLINE) { |
2481 | sge->sga = ioim->sgpg->sgpg_pa; |
2482 | } else { |
2483 | sge->sga.a32.addr_lo = 0; |
2484 | sge->sga.a32.addr_hi = 0; |
2485 | } |
2486 | sge->sg_len = pgdlen; |
2487 | sge->flags = BFI_SGE_PGDLEN; |
2488 | bfa_sge_to_be(sge); |
2489 | |
2490 | /* |
2491 | * set up I/O command parameters |
2492 | */ |
2493 | m->cmnd = cmnd_z0; |
2494 | int_to_scsilun(cmnd->device->lun, &m->cmnd.lun); |
2495 | dmadir = cmnd->sc_data_direction; |
2496 | if (dmadir == DMA_TO_DEVICE) |
2497 | m->cmnd.iodir = FCP_IODIR_WRITE; |
2498 | else if (dmadir == DMA_FROM_DEVICE) |
2499 | m->cmnd.iodir = FCP_IODIR_READ; |
2500 | else |
2501 | m->cmnd.iodir = FCP_IODIR_NONE; |
2502 | |
2503 | m->cmnd.cdb = *(struct scsi_cdb_s *) cmnd->cmnd; |
	fcp_dl = scsi_bufflen(cmnd);
2505 | m->cmnd.fcp_dl = cpu_to_be32(fcp_dl); |
2506 | |
2507 | /* |
2508 | * set up I/O message header |
2509 | */ |
2510 | switch (m->cmnd.iodir) { |
2511 | case FCP_IODIR_READ: |
2512 | bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_fn_lpu(ioim->bfa)); |
2513 | bfa_stats(itnim, input_reqs); |
2514 | ioim->itnim->stats.rd_throughput += fcp_dl; |
2515 | break; |
2516 | case FCP_IODIR_WRITE: |
2517 | bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_fn_lpu(ioim->bfa)); |
2518 | bfa_stats(itnim, output_reqs); |
2519 | ioim->itnim->stats.wr_throughput += fcp_dl; |
2520 | break; |
2521 | case FCP_IODIR_RW: |
2522 | bfa_stats(itnim, input_reqs); |
2523 | bfa_stats(itnim, output_reqs); |
2524 | fallthrough; |
2525 | default: |
2526 | bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa)); |
2527 | } |
2528 | if (itnim->seq_rec || |
	    (scsi_bufflen(cmnd) & (sizeof(u32) - 1)))
2530 | bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa)); |
2531 | |
2532 | /* |
2533 | * queue I/O message to firmware |
2534 | */ |
2535 | bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh); |
2536 | return BFA_TRUE; |
2537 | } |
2538 | |
2539 | /* |
 * Set up any additional SG pages needed. The inline SG element is set up
 * at queuing time.
2542 | */ |
2543 | static bfa_boolean_t |
2544 | bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim) |
2545 | { |
2546 | u16 nsgpgs; |
2547 | |
2548 | WARN_ON(ioim->nsges <= BFI_SGE_INLINE); |
2549 | |
2550 | /* |
2551 | * allocate SG pages needed |
2552 | */ |
2553 | nsgpgs = BFA_SGPG_NPAGE(ioim->nsges); |
2554 | if (!nsgpgs) |
2555 | return BFA_TRUE; |
2556 | |
	if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
	    != BFA_STATUS_OK) {
		bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
2560 | return BFA_FALSE; |
2561 | } |
2562 | |
2563 | ioim->nsgpgs = nsgpgs; |
2564 | ioim->sgpg = bfa_q_first(&ioim->sgpg_q); |
2565 | |
2566 | return BFA_TRUE; |
2567 | } |
2568 | |
2569 | /* |
2570 | * Send I/O abort request to firmware. |
2571 | */ |
2572 | static bfa_boolean_t |
2573 | bfa_ioim_send_abort(struct bfa_ioim_s *ioim) |
2574 | { |
2575 | struct bfi_ioim_abort_req_s *m; |
2576 | enum bfi_ioim_h2i msgop; |
2577 | |
2578 | /* |
2579 | * check for room in queue to send request now |
2580 | */ |
2581 | m = bfa_reqq_next(ioim->bfa, ioim->reqq); |
2582 | if (!m) |
2583 | return BFA_FALSE; |
2584 | |
2585 | /* |
2586 | * build i/o request message next |
2587 | */ |
2588 | if (ioim->iosp->abort_explicit) |
2589 | msgop = BFI_IOIM_H2I_IOABORT_REQ; |
2590 | else |
2591 | msgop = BFI_IOIM_H2I_IOCLEANUP_REQ; |
2592 | |
2593 | bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_fn_lpu(ioim->bfa)); |
2594 | m->io_tag = cpu_to_be16(ioim->iotag); |
2595 | m->abort_tag = ++ioim->abort_tag; |
2596 | |
2597 | /* |
2598 | * queue I/O message to firmware |
2599 | */ |
2600 | bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh); |
2601 | return BFA_TRUE; |
2602 | } |
2603 | |
2604 | /* |
2605 | * Call to resume any I/O requests waiting for room in request queue. |
2606 | */ |
2607 | static void |
2608 | bfa_ioim_qresume(void *cbarg) |
2609 | { |
2610 | struct bfa_ioim_s *ioim = cbarg; |
2611 | |
2612 | bfa_stats(ioim->itnim, qresumes); |
2613 | bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME); |
2614 | } |
2615 | |
2616 | |
2617 | static void |
2618 | bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim) |
2619 | { |
2620 | /* |
2621 | * Move IO from itnim queue to fcpim global queue since itnim will be |
2622 | * freed. |
2623 | */ |
	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
2626 | |
2627 | if (!ioim->iosp->tskim) { |
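		/*
		 * If delayed completion is configured and the itnim IO TOV
		 * timer is running, park the IO on the itnim delay_comp_q
		 * instead of completing it right away.
		 */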
2628 | if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) { |
2629 | bfa_cb_dequeue(&ioim->hcb_qe); |
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q);
2632 | } |
		bfa_itnim_iodone(ioim->itnim);
2634 | } else |
		bfa_wc_down(&ioim->iosp->tskim->wc);
2636 | } |
2637 | |
2638 | static bfa_boolean_t |
2639 | bfa_ioim_is_abortable(struct bfa_ioim_s *ioim) |
2640 | { |
2641 | if ((bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit) && |
2642 | (!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim))) || |
2643 | (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort)) || |
2644 | (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort_qfull)) || |
2645 | (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb)) || |
2646 | (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb_free)) || |
2647 | (bfa_sm_cmp_state(ioim, bfa_ioim_sm_resfree))) |
2648 | return BFA_FALSE; |
2649 | |
2650 | return BFA_TRUE; |
2651 | } |
2652 | |
2653 | void |
2654 | bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov) |
2655 | { |
2656 | /* |
	 * If the path tov timer expired, fail back with PATHTOV status - these
	 * IO requests are not normally retried by the IO stack.
	 *
	 * Otherwise the device came back online; fail the IO with a normal
	 * failed status so that the IO stack retries these failed IO requests.
2662 | */ |
2663 | if (iotov) |
2664 | ioim->io_cbfn = __bfa_cb_ioim_pathtov; |
2665 | else { |
2666 | ioim->io_cbfn = __bfa_cb_ioim_failed; |
2667 | bfa_stats(ioim->itnim, iocom_nexus_abort); |
2668 | } |
2669 | bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim); |
2670 | |
2671 | /* |
2672 | * Move IO to fcpim global queue since itnim will be |
2673 | * freed. |
2674 | */ |
	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
2677 | } |
2678 | |
2679 | |
2680 | /* |
2681 | * Memory allocation and initialization. |
2682 | */ |
2683 | void |
2684 | bfa_ioim_attach(struct bfa_fcpim_s *fcpim) |
2685 | { |
2686 | struct bfa_ioim_s *ioim; |
2687 | struct bfa_fcp_mod_s *fcp = fcpim->fcp; |
2688 | struct bfa_ioim_sp_s *iosp; |
2689 | u16 i; |
2690 | |
2691 | /* |
2692 | * claim memory first |
2693 | */ |
2694 | ioim = (struct bfa_ioim_s *) bfa_mem_kva_curp(fcp); |
2695 | fcpim->ioim_arr = ioim; |
2696 | bfa_mem_kva_curp(fcp) = (u8 *) (ioim + fcpim->fcp->num_ioim_reqs); |
2697 | |
2698 | iosp = (struct bfa_ioim_sp_s *) bfa_mem_kva_curp(fcp); |
2699 | fcpim->ioim_sp_arr = iosp; |
2700 | bfa_mem_kva_curp(fcp) = (u8 *) (iosp + fcpim->fcp->num_ioim_reqs); |
2701 | |
2702 | /* |
2703 | * Initialize ioim free queues |
2704 | */ |
	INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
	INIT_LIST_HEAD(&fcpim->ioim_comp_q);
2707 | |
2708 | for (i = 0; i < fcpim->fcp->num_ioim_reqs; |
2709 | i++, ioim++, iosp++) { |
2710 | /* |
2711 | * initialize IOIM |
2712 | */ |
2713 | memset(ioim, 0, sizeof(struct bfa_ioim_s)); |
2714 | ioim->iotag = i; |
2715 | ioim->bfa = fcpim->bfa; |
2716 | ioim->fcpim = fcpim; |
2717 | ioim->iosp = iosp; |
		INIT_LIST_HEAD(&ioim->sgpg_q);
		bfa_reqq_winit(&ioim->iosp->reqq_wait,
				bfa_ioim_qresume, ioim);
		bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
				bfa_ioim_sgpg_alloced, ioim);
2723 | bfa_sm_set_state(ioim, bfa_ioim_sm_uninit); |
2724 | } |
2725 | } |
2726 | |
2727 | void |
2728 | bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) |
2729 | { |
2730 | struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); |
2731 | struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m; |
2732 | struct bfa_ioim_s *ioim; |
2733 | u16 iotag; |
2734 | enum bfa_ioim_event evt = BFA_IOIM_SM_COMP; |
2735 | |
2736 | iotag = be16_to_cpu(rsp->io_tag); |
2737 | |
2738 | ioim = BFA_IOIM_FROM_TAG(fcpim, iotag); |
2739 | WARN_ON(ioim->iotag != iotag); |
2740 | |
2741 | bfa_trc(ioim->bfa, ioim->iotag); |
2742 | bfa_trc(ioim->bfa, rsp->io_status); |
2743 | bfa_trc(ioim->bfa, rsp->reuse_io_tag); |
2744 | |
2745 | if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active)) |
2746 | ioim->iosp->comp_rspmsg = *m; |
2747 | |
2748 | switch (rsp->io_status) { |
2749 | case BFI_IOIM_STS_OK: |
2750 | bfa_stats(ioim->itnim, iocomp_ok); |
2751 | if (rsp->reuse_io_tag == 0) |
2752 | evt = BFA_IOIM_SM_DONE; |
2753 | else |
2754 | evt = BFA_IOIM_SM_COMP; |
2755 | break; |
2756 | |
2757 | case BFI_IOIM_STS_TIMEDOUT: |
2758 | bfa_stats(ioim->itnim, iocomp_timedout); |
2759 | fallthrough; |
2760 | case BFI_IOIM_STS_ABORTED: |
2761 | rsp->io_status = BFI_IOIM_STS_ABORTED; |
2762 | bfa_stats(ioim->itnim, iocomp_aborted); |
2763 | if (rsp->reuse_io_tag == 0) |
2764 | evt = BFA_IOIM_SM_DONE; |
2765 | else |
2766 | evt = BFA_IOIM_SM_COMP; |
2767 | break; |
2768 | |
2769 | case BFI_IOIM_STS_PROTO_ERR: |
2770 | bfa_stats(ioim->itnim, iocom_proto_err); |
2771 | WARN_ON(!rsp->reuse_io_tag); |
2772 | evt = BFA_IOIM_SM_COMP; |
2773 | break; |
2774 | |
2775 | case BFI_IOIM_STS_SQER_NEEDED: |
2776 | bfa_stats(ioim->itnim, iocom_sqer_needed); |
2777 | WARN_ON(rsp->reuse_io_tag != 0); |
2778 | evt = BFA_IOIM_SM_SQRETRY; |
2779 | break; |
2780 | |
2781 | case BFI_IOIM_STS_RES_FREE: |
2782 | bfa_stats(ioim->itnim, iocom_res_free); |
2783 | evt = BFA_IOIM_SM_FREE; |
2784 | break; |
2785 | |
2786 | case BFI_IOIM_STS_HOST_ABORTED: |
2787 | bfa_stats(ioim->itnim, iocom_hostabrts); |
2788 | if (rsp->abort_tag != ioim->abort_tag) { |
2789 | bfa_trc(ioim->bfa, rsp->abort_tag); |
2790 | bfa_trc(ioim->bfa, ioim->abort_tag); |
2791 | return; |
2792 | } |
2793 | |
2794 | if (rsp->reuse_io_tag) |
2795 | evt = BFA_IOIM_SM_ABORT_COMP; |
2796 | else |
2797 | evt = BFA_IOIM_SM_ABORT_DONE; |
2798 | break; |
2799 | |
2800 | case BFI_IOIM_STS_UTAG: |
2801 | bfa_stats(ioim->itnim, iocom_utags); |
2802 | evt = BFA_IOIM_SM_COMP_UTAG; |
2803 | break; |
2804 | |
2805 | default: |
2806 | WARN_ON(1); |
2807 | } |
2808 | |
2809 | bfa_sm_send_event(ioim, evt); |
2810 | } |
2811 | |
2812 | void |
2813 | bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m) |
2814 | { |
2815 | struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); |
2816 | struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m; |
2817 | struct bfa_ioim_s *ioim; |
2818 | u16 iotag; |
2819 | |
2820 | iotag = be16_to_cpu(rsp->io_tag); |
2821 | |
2822 | ioim = BFA_IOIM_FROM_TAG(fcpim, iotag); |
2823 | WARN_ON(ioim->iotag != iotag); |
2824 | |
2825 | bfa_ioim_cb_profile_comp(fcpim, ioim); |
2826 | |
2827 | bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD); |
2828 | } |
2829 | |
2830 | /* |
2831 | * Called by itnim to clean up IO while going offline. |
2832 | */ |
2833 | void |
2834 | bfa_ioim_cleanup(struct bfa_ioim_s *ioim) |
2835 | { |
2836 | bfa_trc(ioim->bfa, ioim->iotag); |
2837 | bfa_stats(ioim->itnim, io_cleanups); |
2838 | |
2839 | ioim->iosp->tskim = NULL; |
2840 | bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP); |
2841 | } |
2842 | |
2843 | void |
2844 | bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim) |
2845 | { |
2846 | bfa_trc(ioim->bfa, ioim->iotag); |
2847 | bfa_stats(ioim->itnim, io_tmaborts); |
2848 | |
2849 | ioim->iosp->tskim = tskim; |
2850 | bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP); |
2851 | } |
2852 | |
2853 | /* |
2854 | * IOC failure handling. |
2855 | */ |
2856 | void |
2857 | bfa_ioim_iocdisable(struct bfa_ioim_s *ioim) |
2858 | { |
2859 | bfa_trc(ioim->bfa, ioim->iotag); |
2860 | bfa_stats(ioim->itnim, io_iocdowns); |
2861 | bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL); |
2862 | } |
2863 | |
2864 | /* |
2865 | * IO offline TOV popped. Fail the pending IO. |
2866 | */ |
2867 | void |
2868 | bfa_ioim_tov(struct bfa_ioim_s *ioim) |
2869 | { |
2870 | bfa_trc(ioim->bfa, ioim->iotag); |
2871 | bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV); |
2872 | } |
2873 | |
2874 | |
2875 | /* |
2876 | * Allocate IOIM resource for initiator mode I/O request. |
2877 | */ |
2878 | struct bfa_ioim_s * |
2879 | bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio, |
2880 | struct bfa_itnim_s *itnim, u16 nsges) |
2881 | { |
2882 | struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); |
2883 | struct bfa_ioim_s *ioim; |
2884 | struct bfa_iotag_s *iotag = NULL; |
2885 | |
2886 | /* |
	 * allocate IOIM resource
2888 | */ |
2889 | bfa_q_deq(&fcpim->fcp->iotag_ioim_free_q, &iotag); |
2890 | if (!iotag) { |
2891 | bfa_stats(itnim, no_iotags); |
2892 | return NULL; |
2893 | } |
2894 | |
2895 | ioim = BFA_IOIM_FROM_TAG(fcpim, iotag->tag); |
2896 | |
2897 | ioim->dio = dio; |
2898 | ioim->itnim = itnim; |
2899 | ioim->nsges = nsges; |
2900 | ioim->nsgpgs = 0; |
2901 | |
2902 | bfa_stats(itnim, total_ios); |
2903 | fcpim->ios_active++; |
2904 | |
	list_add_tail(&ioim->qe, &itnim->io_q);
2906 | |
2907 | return ioim; |
2908 | } |
2909 | |
2910 | void |
2911 | bfa_ioim_free(struct bfa_ioim_s *ioim) |
2912 | { |
2913 | struct bfa_fcpim_s *fcpim = ioim->fcpim; |
2914 | struct bfa_iotag_s *iotag; |
2915 | |
2916 | if (ioim->nsgpgs > 0) |
		bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);
2918 | |
2919 | bfa_stats(ioim->itnim, io_comps); |
2920 | fcpim->ios_active--; |
2921 | |
2922 | ioim->iotag &= BFA_IOIM_IOTAG_MASK; |
2923 | |
2924 | WARN_ON(!(ioim->iotag < |
2925 | (fcpim->fcp->num_ioim_reqs + fcpim->fcp->num_fwtio_reqs))); |
2926 | iotag = BFA_IOTAG_FROM_TAG(fcpim->fcp, ioim->iotag); |
2927 | |
2928 | if (ioim->iotag < fcpim->fcp->num_ioim_reqs) |
		list_add_tail(&iotag->qe, &fcpim->fcp->iotag_ioim_free_q);
	else
		list_add_tail(&iotag->qe, &fcpim->fcp->iotag_tio_free_q);
2932 | |
	list_del(&ioim->qe);
2934 | } |
2935 | |
2936 | void |
2937 | bfa_ioim_start(struct bfa_ioim_s *ioim) |
2938 | { |
2939 | bfa_ioim_cb_profile_start(ioim->fcpim, ioim); |
2940 | |
2941 | /* |
2942 | * Obtain the queue over which this request has to be issued |
2943 | */ |
2944 | ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ? |
2945 | BFA_FALSE : bfa_itnim_get_reqq(ioim); |
2946 | |
2947 | bfa_sm_send_event(ioim, BFA_IOIM_SM_START); |
2948 | } |
2949 | |
2950 | /* |
2951 | * Driver I/O abort request. |
2952 | */ |
2953 | bfa_status_t |
2954 | bfa_ioim_abort(struct bfa_ioim_s *ioim) |
2955 | { |
2956 | |
2957 | bfa_trc(ioim->bfa, ioim->iotag); |
2958 | |
2959 | if (!bfa_ioim_is_abortable(ioim)) |
2960 | return BFA_STATUS_FAILED; |
2961 | |
2962 | bfa_stats(ioim->itnim, io_aborts); |
2963 | bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT); |
2964 | |
2965 | return BFA_STATUS_OK; |
2966 | } |
2967 | |
2968 | /* |
2969 | * BFA TSKIM state machine functions |
2970 | */ |
2971 | |
2972 | /* |
2973 | * Task management command beginning state. |
2974 | */ |
2975 | static void |
2976 | bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) |
2977 | { |
2978 | bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event); |
2979 | |
2980 | switch (event) { |
2981 | case BFA_TSKIM_SM_START: |
2982 | bfa_sm_set_state(tskim, bfa_tskim_sm_active); |
2983 | bfa_tskim_gather_ios(tskim); |
2984 | |
2985 | /* |
2986 | * If device is offline, do not send TM on wire. Just cleanup |
2987 | * any pending IO requests and complete TM request. |
2988 | */ |
2989 | if (!bfa_itnim_is_online(tskim->itnim)) { |
2990 | bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup); |
2991 | tskim->tsk_status = BFI_TSKIM_STS_OK; |
2992 | bfa_tskim_cleanup_ios(tskim); |
2993 | return; |
2994 | } |
2995 | |
2996 | if (!bfa_tskim_send(tskim)) { |
2997 | bfa_sm_set_state(tskim, bfa_tskim_sm_qfull); |
2998 | bfa_stats(tskim->itnim, tm_qwait); |
2999 | bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq, |
3000 | &tskim->reqq_wait); |
3001 | } |
3002 | break; |
3003 | |
3004 | default: |
3005 | bfa_sm_fault(tskim->bfa, event); |
3006 | } |
3007 | } |
3008 | |
3009 | /* |
3010 | * TM command is active, awaiting completion from firmware to |
3011 | * cleanup IO requests in TM scope. |
3012 | */ |
3013 | static void |
3014 | bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) |
3015 | { |
3016 | bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event); |
3017 | |
3018 | switch (event) { |
3019 | case BFA_TSKIM_SM_DONE: |
3020 | bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup); |
3021 | bfa_tskim_cleanup_ios(tskim); |
3022 | break; |
3023 | |
3024 | case BFA_TSKIM_SM_CLEANUP: |
3025 | bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup); |
3026 | if (!bfa_tskim_send_abort(tskim)) { |
3027 | bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull); |
3028 | bfa_stats(tskim->itnim, tm_qwait); |
3029 | bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq, |
3030 | &tskim->reqq_wait); |
3031 | } |
3032 | break; |
3033 | |
3034 | case BFA_TSKIM_SM_HWFAIL: |
3035 | bfa_sm_set_state(tskim, bfa_tskim_sm_hcb); |
3036 | bfa_tskim_iocdisable_ios(tskim); |
3037 | bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed); |
3038 | break; |
3039 | |
3040 | default: |
3041 | bfa_sm_fault(tskim->bfa, event); |
3042 | } |
3043 | } |
3044 | |
3045 | /* |
3046 | * An active TM is being cleaned up since ITN is offline. Awaiting cleanup |
3047 | * completion event from firmware. |
3048 | */ |
3049 | static void |
3050 | bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) |
3051 | { |
3052 | bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event); |
3053 | |
3054 | switch (event) { |
3055 | case BFA_TSKIM_SM_DONE: |
3056 | /* |
3057 | * Ignore and wait for ABORT completion from firmware. |
3058 | */ |
3059 | break; |
3060 | |
3061 | case BFA_TSKIM_SM_UTAG: |
3062 | case BFA_TSKIM_SM_CLEANUP_DONE: |
3063 | bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup); |
3064 | bfa_tskim_cleanup_ios(tskim); |
3065 | break; |
3066 | |
3067 | case BFA_TSKIM_SM_HWFAIL: |
3068 | bfa_sm_set_state(tskim, bfa_tskim_sm_hcb); |
3069 | bfa_tskim_iocdisable_ios(tskim); |
3070 | bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed); |
3071 | break; |
3072 | |
3073 | default: |
3074 | bfa_sm_fault(tskim->bfa, event); |
3075 | } |
3076 | } |
3077 | |
3078 | static void |
3079 | bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) |
3080 | { |
3081 | bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event); |
3082 | |
3083 | switch (event) { |
3084 | case BFA_TSKIM_SM_IOS_DONE: |
3085 | bfa_sm_set_state(tskim, bfa_tskim_sm_hcb); |
3086 | bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done); |
3087 | break; |
3088 | |
3089 | case BFA_TSKIM_SM_CLEANUP: |
3090 | /* |
3091 | * Ignore, TM command completed on wire. |
		 * Notify TM completion on IO cleanup completion.
3093 | */ |
3094 | break; |
3095 | |
3096 | case BFA_TSKIM_SM_HWFAIL: |
3097 | bfa_sm_set_state(tskim, bfa_tskim_sm_hcb); |
3098 | bfa_tskim_iocdisable_ios(tskim); |
3099 | bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed); |
3100 | break; |
3101 | |
3102 | default: |
3103 | bfa_sm_fault(tskim->bfa, event); |
3104 | } |
3105 | } |
3106 | |
3107 | /* |
3108 | * Task management command is waiting for room in request CQ |
3109 | */ |
3110 | static void |
3111 | bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) |
3112 | { |
3113 | bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event); |
3114 | |
3115 | switch (event) { |
3116 | case BFA_TSKIM_SM_QRESUME: |
3117 | bfa_sm_set_state(tskim, bfa_tskim_sm_active); |
3118 | bfa_tskim_send(tskim); |
3119 | break; |
3120 | |
3121 | case BFA_TSKIM_SM_CLEANUP: |
3122 | /* |
3123 | * No need to send TM on wire since ITN is offline. |
3124 | */ |
3125 | bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup); |
3126 | bfa_reqq_wcancel(&tskim->reqq_wait); |
3127 | bfa_tskim_cleanup_ios(tskim); |
3128 | break; |
3129 | |
3130 | case BFA_TSKIM_SM_HWFAIL: |
3131 | bfa_sm_set_state(tskim, bfa_tskim_sm_hcb); |
3132 | bfa_reqq_wcancel(&tskim->reqq_wait); |
3133 | bfa_tskim_iocdisable_ios(tskim); |
3134 | bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed); |
3135 | break; |
3136 | |
3137 | default: |
3138 | bfa_sm_fault(tskim->bfa, event); |
3139 | } |
3140 | } |
3141 | |
3142 | /* |
 * Task management command is active, awaiting room in the request CQ
 * to send the cleanup request.
3145 | */ |
3146 | static void |
3147 | bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim, |
3148 | enum bfa_tskim_event event) |
3149 | { |
3150 | bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event); |
3151 | |
3152 | switch (event) { |
3153 | case BFA_TSKIM_SM_DONE: |
3154 | bfa_reqq_wcancel(&tskim->reqq_wait); |
3155 | fallthrough; |
3156 | case BFA_TSKIM_SM_QRESUME: |
3157 | bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup); |
3158 | bfa_tskim_send_abort(tskim); |
3159 | break; |
3160 | |
3161 | case BFA_TSKIM_SM_HWFAIL: |
3162 | bfa_sm_set_state(tskim, bfa_tskim_sm_hcb); |
3163 | bfa_reqq_wcancel(&tskim->reqq_wait); |
3164 | bfa_tskim_iocdisable_ios(tskim); |
3165 | bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed); |
3166 | break; |
3167 | |
3168 | default: |
3169 | bfa_sm_fault(tskim->bfa, event); |
3170 | } |
3171 | } |
3172 | |
3173 | /* |
3174 | * BFA callback is pending |
3175 | */ |
3176 | static void |
3177 | bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) |
3178 | { |
3179 | bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event); |
3180 | |
3181 | switch (event) { |
3182 | case BFA_TSKIM_SM_HCB: |
3183 | bfa_sm_set_state(tskim, bfa_tskim_sm_uninit); |
3184 | bfa_tskim_free(tskim); |
3185 | break; |
3186 | |
3187 | case BFA_TSKIM_SM_CLEANUP: |
3188 | bfa_tskim_notify_comp(tskim); |
3189 | break; |
3190 | |
3191 | case BFA_TSKIM_SM_HWFAIL: |
3192 | break; |
3193 | |
3194 | default: |
3195 | bfa_sm_fault(tskim->bfa, event); |
3196 | } |
3197 | } |
3198 | |
3199 | static void |
3200 | __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete) |
3201 | { |
3202 | struct bfa_tskim_s *tskim = cbarg; |
3203 | |
3204 | if (!complete) { |
3205 | bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB); |
3206 | return; |
3207 | } |
3208 | |
3209 | bfa_stats(tskim->itnim, tm_success); |
	bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status);
3211 | } |
3212 | |
3213 | static void |
3214 | __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete) |
3215 | { |
3216 | struct bfa_tskim_s *tskim = cbarg; |
3217 | |
3218 | if (!complete) { |
3219 | bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB); |
3220 | return; |
3221 | } |
3222 | |
3223 | bfa_stats(tskim->itnim, tm_failures); |
	bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk,
				BFI_TSKIM_STS_FAILED);
3226 | } |
3227 | |
3228 | static bfa_boolean_t |
3229 | bfa_tskim_match_scope(struct bfa_tskim_s *tskim, struct scsi_lun lun) |
3230 | { |
3231 | switch (tskim->tm_cmnd) { |
3232 | case FCP_TM_TARGET_RESET: |
3233 | return BFA_TRUE; |
3234 | |
3235 | case FCP_TM_ABORT_TASK_SET: |
3236 | case FCP_TM_CLEAR_TASK_SET: |
3237 | case FCP_TM_LUN_RESET: |
3238 | case FCP_TM_CLEAR_ACA: |
		return !memcmp(&tskim->lun, &lun, sizeof(lun));
3240 | |
3241 | default: |
3242 | WARN_ON(1); |
3243 | } |
3244 | |
3245 | return BFA_FALSE; |
3246 | } |
3247 | |
3248 | /* |
 * Gather the IO requests affected by the task management command.
3250 | */ |
3251 | static void |
3252 | bfa_tskim_gather_ios(struct bfa_tskim_s *tskim) |
3253 | { |
3254 | struct bfa_itnim_s *itnim = tskim->itnim; |
3255 | struct bfa_ioim_s *ioim; |
3256 | struct list_head *qe, *qen; |
3257 | struct scsi_cmnd *cmnd; |
3258 | struct scsi_lun scsilun; |
3259 | |
	INIT_LIST_HEAD(&tskim->io_q);
3261 | |
3262 | /* |
3263 | * Gather any active IO requests first. |
3264 | */ |
3265 | list_for_each_safe(qe, qen, &itnim->io_q) { |
3266 | ioim = (struct bfa_ioim_s *) qe; |
3267 | cmnd = (struct scsi_cmnd *) ioim->dio; |
3268 | int_to_scsilun(cmnd->device->lun, &scsilun); |
		if (bfa_tskim_match_scope(tskim, scsilun)) {
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &tskim->io_q);
3272 | } |
3273 | } |
3274 | |
3275 | /* |
3276 | * Failback any pending IO requests immediately. |
3277 | */ |
3278 | list_for_each_safe(qe, qen, &itnim->pending_q) { |
3279 | ioim = (struct bfa_ioim_s *) qe; |
3280 | cmnd = (struct scsi_cmnd *) ioim->dio; |
3281 | int_to_scsilun(cmnd->device->lun, &scsilun); |
		if (bfa_tskim_match_scope(tskim, scsilun)) {
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
3285 | bfa_ioim_tov(ioim); |
3286 | } |
3287 | } |
3288 | } |
3289 | |
3290 | /* |
3291 | * IO cleanup completion |
3292 | */ |
3293 | static void |
3294 | bfa_tskim_cleanp_comp(void *tskim_cbarg) |
3295 | { |
3296 | struct bfa_tskim_s *tskim = tskim_cbarg; |
3297 | |
3298 | bfa_stats(tskim->itnim, tm_io_comps); |
3299 | bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE); |
3300 | } |
3301 | |
3302 | /* |
 * Clean up the IO requests gathered in the TM command scope.
3304 | */ |
3305 | static void |
3306 | bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim) |
3307 | { |
3308 | struct bfa_ioim_s *ioim; |
3309 | struct list_head *qe, *qen; |
3310 | |
	bfa_wc_init(&tskim->wc, bfa_tskim_cleanp_comp, tskim);
3312 | |
3313 | list_for_each_safe(qe, qen, &tskim->io_q) { |
3314 | ioim = (struct bfa_ioim_s *) qe; |
		bfa_wc_up(&tskim->wc);
3316 | bfa_ioim_cleanup_tm(ioim, tskim); |
3317 | } |
3318 | |
	bfa_wc_wait(&tskim->wc);
3320 | } |
3321 | |
3322 | /* |
3323 | * Send task management request to firmware. |
3324 | */ |
3325 | static bfa_boolean_t |
3326 | bfa_tskim_send(struct bfa_tskim_s *tskim) |
3327 | { |
3328 | struct bfa_itnim_s *itnim = tskim->itnim; |
3329 | struct bfi_tskim_req_s *m; |
3330 | |
3331 | /* |
3332 | * check for room in queue to send request now |
3333 | */ |
3334 | m = bfa_reqq_next(tskim->bfa, itnim->reqq); |
3335 | if (!m) |
3336 | return BFA_FALSE; |
3337 | |
3338 | /* |
3339 | * build i/o request message next |
3340 | */ |
3341 | bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ, |
3342 | bfa_fn_lpu(tskim->bfa)); |
3343 | |
3344 | m->tsk_tag = cpu_to_be16(tskim->tsk_tag); |
3345 | m->itn_fhdl = tskim->itnim->rport->fw_handle; |
3346 | m->t_secs = tskim->tsecs; |
3347 | m->lun = tskim->lun; |
3348 | m->tm_flags = tskim->tm_cmnd; |
3349 | |
3350 | /* |
3351 | * queue I/O message to firmware |
3352 | */ |
3353 | bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh); |
3354 | return BFA_TRUE; |
3355 | } |
3356 | |
3357 | /* |
3358 | * Send abort request to cleanup an active TM to firmware. |
3359 | */ |
3360 | static bfa_boolean_t |
3361 | bfa_tskim_send_abort(struct bfa_tskim_s *tskim) |
3362 | { |
3363 | struct bfa_itnim_s *itnim = tskim->itnim; |
3364 | struct bfi_tskim_abortreq_s *m; |
3365 | |
3366 | /* |
3367 | * check for room in queue to send request now |
3368 | */ |
3369 | m = bfa_reqq_next(tskim->bfa, itnim->reqq); |
3370 | if (!m) |
3371 | return BFA_FALSE; |
3372 | |
3373 | /* |
3374 | * build i/o request message next |
3375 | */ |
3376 | bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ, |
3377 | bfa_fn_lpu(tskim->bfa)); |
3378 | |
3379 | m->tsk_tag = cpu_to_be16(tskim->tsk_tag); |
3380 | |
3381 | /* |
3382 | * queue I/O message to firmware |
3383 | */ |
3384 | bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh); |
3385 | return BFA_TRUE; |
3386 | } |
3387 | |
3388 | /* |
3389 | * Call to resume task management cmnd waiting for room in request queue. |
3390 | */ |
3391 | static void |
3392 | bfa_tskim_qresume(void *cbarg) |
3393 | { |
3394 | struct bfa_tskim_s *tskim = cbarg; |
3395 | |
3396 | bfa_stats(tskim->itnim, tm_qresumes); |
3397 | bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME); |
3398 | } |
3399 | |
3400 | /* |
 * Cleanup IOs associated with a task management command on IOC failures.
3402 | */ |
3403 | static void |
3404 | bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim) |
3405 | { |
3406 | struct bfa_ioim_s *ioim; |
3407 | struct list_head *qe, *qen; |
3408 | |
3409 | list_for_each_safe(qe, qen, &tskim->io_q) { |
3410 | ioim = (struct bfa_ioim_s *) qe; |
3411 | bfa_ioim_iocdisable(ioim); |
3412 | } |
3413 | } |
3414 | |
3415 | /* |
3416 | * Notification on completions from related ioim. |
3417 | */ |
3418 | void |
3419 | bfa_tskim_iodone(struct bfa_tskim_s *tskim) |
3420 | { |
	bfa_wc_down(&tskim->wc);
3422 | } |
3423 | |
3424 | /* |
3425 | * Handle IOC h/w failure notification from itnim. |
3426 | */ |
3427 | void |
3428 | bfa_tskim_iocdisable(struct bfa_tskim_s *tskim) |
3429 | { |
3430 | tskim->notify = BFA_FALSE; |
3431 | bfa_stats(tskim->itnim, tm_iocdowns); |
3432 | bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL); |
3433 | } |
3434 | |
3435 | /* |
3436 | * Cleanup TM command and associated IOs as part of ITNIM offline. |
3437 | */ |
3438 | void |
3439 | bfa_tskim_cleanup(struct bfa_tskim_s *tskim) |
3440 | { |
3441 | tskim->notify = BFA_TRUE; |
3442 | bfa_stats(tskim->itnim, tm_cleanups); |
3443 | bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP); |
3444 | } |
3445 | |
3446 | /* |
3447 | * Memory allocation and initialization. |
3448 | */ |
3449 | void |
3450 | bfa_tskim_attach(struct bfa_fcpim_s *fcpim) |
3451 | { |
3452 | struct bfa_tskim_s *tskim; |
3453 | struct bfa_fcp_mod_s *fcp = fcpim->fcp; |
3454 | u16 i; |
3455 | |
	INIT_LIST_HEAD(&fcpim->tskim_free_q);
	INIT_LIST_HEAD(&fcpim->tskim_unused_q);
3458 | |
3459 | tskim = (struct bfa_tskim_s *) bfa_mem_kva_curp(fcp); |
3460 | fcpim->tskim_arr = tskim; |
3461 | |
3462 | for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) { |
3463 | /* |
3464 | * initialize TSKIM |
3465 | */ |
3466 | memset(tskim, 0, sizeof(struct bfa_tskim_s)); |
3467 | tskim->tsk_tag = i; |
3468 | tskim->bfa = fcpim->bfa; |
3469 | tskim->fcpim = fcpim; |
3470 | tskim->notify = BFA_FALSE; |
		bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume,
				tskim);
3473 | bfa_sm_set_state(tskim, bfa_tskim_sm_uninit); |
3474 | |
		list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
3476 | } |
3477 | |
3478 | bfa_mem_kva_curp(fcp) = (u8 *) tskim; |
3479 | } |
3480 | |
3481 | void |
3482 | bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) |
3483 | { |
3484 | struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); |
3485 | struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m; |
3486 | struct bfa_tskim_s *tskim; |
3487 | u16 tsk_tag = be16_to_cpu(rsp->tsk_tag); |
3488 | |
3489 | tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag); |
3490 | WARN_ON(tskim->tsk_tag != tsk_tag); |
3491 | |
3492 | tskim->tsk_status = rsp->tsk_status; |
3493 | |
3494 | /* |
3495 | * Firmware sends BFI_TSKIM_STS_ABORTED status for abort |
3496 | * requests. All other statuses are for normal completions. |
3497 | */ |
3498 | if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) { |
3499 | bfa_stats(tskim->itnim, tm_cleanup_comps); |
3500 | bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE); |
3501 | } else if (rsp->tsk_status == BFI_TSKIM_STS_UTAG) { |
3502 | bfa_sm_send_event(tskim, BFA_TSKIM_SM_UTAG); |
3503 | } else { |
3504 | bfa_stats(tskim->itnim, tm_fw_rsps); |
3505 | bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE); |
3506 | } |
3507 | } |
3508 | |
3509 | |
3510 | struct bfa_tskim_s * |
3511 | bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk) |
3512 | { |
3513 | struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); |
3514 | struct bfa_tskim_s *tskim; |
3515 | |
3516 | bfa_q_deq(&fcpim->tskim_free_q, &tskim); |
3517 | |
3518 | if (tskim) |
3519 | tskim->dtsk = dtsk; |
3520 | |
3521 | return tskim; |
3522 | } |
3523 | |
3524 | void |
3525 | bfa_tskim_free(struct bfa_tskim_s *tskim) |
3526 | { |
3527 | WARN_ON(!bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe)); |
	list_del(&tskim->qe);
	list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
3530 | } |
3531 | |
3532 | /* |
3533 | * Start a task management command. |
3534 | * |
3535 | * @param[in] tskim BFA task management command instance |
3536 | * @param[in] itnim i-t nexus for the task management command |
3537 | * @param[in] lun lun, if applicable |
3538 | * @param[in] tm_cmnd Task management command code. |
3539 | * @param[in] t_secs Timeout in seconds |
3540 | * |
3541 | * @return None. |
3542 | */ |
3543 | void |
3544 | bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim, |
3545 | struct scsi_lun lun, |
3546 | enum fcp_tm_cmnd tm_cmnd, u8 tsecs) |
3547 | { |
3548 | tskim->itnim = itnim; |
3549 | tskim->lun = lun; |
3550 | tskim->tm_cmnd = tm_cmnd; |
3551 | tskim->tsecs = tsecs; |
3552 | tskim->notify = BFA_FALSE; |
3553 | bfa_stats(itnim, tm_cmnds); |
3554 | |
	list_add_tail(&tskim->qe, &itnim->tsk_q);
3556 | bfa_sm_send_event(tskim, BFA_TSKIM_SM_START); |
3557 | } |
3558 | |
void
bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        struct list_head *qe;
        int i;

        for (i = 0; i < (fcpim->num_tskim_reqs - num_tskim_fw); i++) {
                bfa_q_deq_tail(&fcpim->tskim_free_q, &qe);
                list_add_tail(qe, &fcpim->tskim_unused_q);
        }
}

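/*
 * Compute the KVA and DMA memory requirements for the FCP module,
 * clamping the configured IO request counts to the supported limits.
 */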
void
bfa_fcp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
                struct bfa_s *bfa)
{
        struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
        struct bfa_mem_kva_s *fcp_kva = BFA_MEM_FCP_KVA(bfa);
        struct bfa_mem_dma_s *seg_ptr;
        u16 nsegs, idx, per_seg_ios, num_io_req;
        u32 km_len = 0;

        /*
         * Zero is an allowed config value for num_ioim_reqs and
         * num_fwtio_reqs. If the values are non-zero, clamp them to
         * the supported range.
         */
        if (cfg->fwcfg.num_ioim_reqs &&
            cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
                cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
        else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX)
                cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;

        if (cfg->fwcfg.num_fwtio_reqs > BFA_FWTIO_MAX)
                cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;

        num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
        if (num_io_req > BFA_IO_MAX) {
                if (cfg->fwcfg.num_ioim_reqs && cfg->fwcfg.num_fwtio_reqs) {
                        cfg->fwcfg.num_ioim_reqs = BFA_IO_MAX/2;
                        cfg->fwcfg.num_fwtio_reqs = BFA_IO_MAX/2;
                } else if (cfg->fwcfg.num_fwtio_reqs)
                        cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;
                else
                        cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
        }

        bfa_fcpim_meminfo(cfg, &km_len);

        num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
        km_len += num_io_req * sizeof(struct bfa_iotag_s);
        km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itn_s);

        /* dma memory */
        nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);
        per_seg_ios = BFI_MEM_NREQS_SEG(BFI_IOIM_SNSLEN);

        bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
                if (num_io_req >= per_seg_ios) {
                        num_io_req -= per_seg_ios;
                        bfa_mem_dma_setup(minfo, seg_ptr,
                                per_seg_ios * BFI_IOIM_SNSLEN);
                } else
                        bfa_mem_dma_setup(minfo, seg_ptr,
                                num_io_req * BFI_IOIM_SNSLEN);
        }

        /* kva memory */
        bfa_mem_kva_setup(minfo, fcp_kva, km_len);
}

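/*
 * Attach-time initialization of the FCP module: set up the sense
 * buffer (snsbase) DMA segments, the IO tag pools and the ITN array.
 */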
void
bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
                struct bfa_pcidev_s *pcidev)
{
        struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
        struct bfa_mem_dma_s *seg_ptr;
        u16 idx, nsegs, num_io_req;

        fcp->max_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
        fcp->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
        fcp->num_fwtio_reqs = cfg->fwcfg.num_fwtio_reqs;
        fcp->num_itns = cfg->fwcfg.num_rports;
        fcp->bfa = bfa;

        /*
         * Set up the pool of snsbase addresses that is passed to
         * firmware as part of bfi_iocfc_cfg_s.
         */
        num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
        nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);

        bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {

                if (!bfa_mem_dma_virt(seg_ptr))
                        break;

                fcp->snsbase[idx].pa = bfa_mem_dma_phys(seg_ptr);
                fcp->snsbase[idx].kva = bfa_mem_dma_virt(seg_ptr);
                bfa_iocfc_set_snsbase(bfa, idx, fcp->snsbase[idx].pa);
        }

        fcp->throttle_update_required = 1;
        bfa_fcpim_attach(fcp, bfad, cfg, pcidev);

        bfa_iotag_attach(fcp);

        fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
        bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
                        (fcp->num_itns * sizeof(struct bfa_itn_s));
        memset(fcp->itn_arr, 0,
                        (fcp->num_itns * sizeof(struct bfa_itn_s)));
}

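/*
 * IOC disable handling for the FCP module.
 */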
void
bfa_fcp_iocdisable(struct bfa_s *bfa)
{
        struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);

        bfa_fcpim_iocdisable(fcp);
}

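/*
 * Reconfigure IO resources to match what firmware actually supports;
 * surplus IO tags are moved to the unused queue.
 */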
void
bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw, u16 max_ioim_fw)
{
        struct bfa_fcp_mod_s *mod = BFA_FCP_MOD(bfa);
        struct list_head *qe;
        int i;

        /* Update io throttle value only once during driver load time */
        if (!mod->throttle_update_required)
                return;

        for (i = 0; i < (mod->num_ioim_reqs - num_ioim_fw); i++) {
                bfa_q_deq_tail(&mod->iotag_ioim_free_q, &qe);
                list_add_tail(qe, &mod->iotag_unused_q);
        }

        if (mod->num_ioim_reqs != num_ioim_fw) {
                bfa_trc(bfa, mod->num_ioim_reqs);
                bfa_trc(bfa, num_ioim_fw);
        }

        mod->max_ioim_reqs = max_ioim_fw;
        mod->num_ioim_reqs = num_ioim_fw;
        mod->throttle_update_required = 0;
}

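/*
 * Register the ISR callback for the ITN associated with a remote port.
 */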
void
bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
                void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
{
        struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
        struct bfa_itn_s *itn;

        itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
        itn->isr = isr;
}

/*
 * Itn interrupt processing.
 */
void
bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
        struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
        union bfi_itn_i2h_msg_u msg;
        struct bfa_itn_s *itn;

        msg.msg = m;
        itn = BFA_ITN_FROM_TAG(fcp, msg.create_rsp->bfa_handle);

        if (itn->isr)
                itn->isr(bfa, m);
        else
                WARN_ON(1);
}

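/*
 * Carve the IO tag array out of the KVA block and seed the IOIM and
 * TIO free queues.
 */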
void
bfa_iotag_attach(struct bfa_fcp_mod_s *fcp)
{
        struct bfa_iotag_s *iotag;
        u16 num_io_req, i;

        iotag = (struct bfa_iotag_s *) bfa_mem_kva_curp(fcp);
        fcp->iotag_arr = iotag;

        INIT_LIST_HEAD(&fcp->iotag_ioim_free_q);
        INIT_LIST_HEAD(&fcp->iotag_tio_free_q);
        INIT_LIST_HEAD(&fcp->iotag_unused_q);

        num_io_req = fcp->num_ioim_reqs + fcp->num_fwtio_reqs;
        for (i = 0; i < num_io_req; i++, iotag++) {
                memset(iotag, 0, sizeof(struct bfa_iotag_s));
                iotag->tag = i;
                if (i < fcp->num_ioim_reqs)
                        list_add_tail(&iotag->qe, &fcp->iotag_ioim_free_q);
                else
                        list_add_tail(&iotag->qe, &fcp->iotag_tio_free_q);
        }

        bfa_mem_kva_curp(fcp) = (u8 *) iotag;
}


/*
 * To send the config request, first try the throttle value from flash;
 * if it is zero, fall back to the driver parameter. We must use
 * min(flash_val, drv_val) because memory was allocated based on the
 * configured value.
 */
u16
bfa_fcpim_get_throttle_cfg(struct bfa_s *bfa, u16 drv_cfg_param)
{
        u16 tmp;
        struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);

        /*
         * If a throttle value from flash is already in effect after the
         * driver is loaded, then until the next load always return the
         * current value instead of the actual flash value.
         */
        if (!fcp->throttle_update_required)
                return (u16)fcp->num_ioim_reqs;

        tmp = bfa_dconf_read_data_valid(bfa) ? bfa_fcpim_read_throttle(bfa) : 0;
        if (!tmp || (tmp > drv_cfg_param))
                tmp = drv_cfg_param;

        return tmp;
}

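/*
 * Record a new IO throttle value in the dconf configuration; not
 * permitted when running in min-cfg mode.
 */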
bfa_status_t
bfa_fcpim_write_throttle(struct bfa_s *bfa, u16 value)
{
        if (!bfa_dconf_get_min_cfg(bfa)) {
                BFA_DCONF_MOD(bfa)->dconf->throttle_cfg.value = value;
                BFA_DCONF_MOD(bfa)->dconf->throttle_cfg.is_valid = 1;
                return BFA_STATUS_OK;
        }

        return BFA_STATUS_FAILED;
}

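/*
 * Return the throttle value stored in dconf, or 0 if no valid value
 * exists or the driver is running in min-cfg mode.
 */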
u16
bfa_fcpim_read_throttle(struct bfa_s *bfa)
{
        struct bfa_throttle_cfg_s *throttle_cfg =
                        &(BFA_DCONF_MOD(bfa)->dconf->throttle_cfg);

        return ((!bfa_dconf_get_min_cfg(bfa)) ?
                ((throttle_cfg->is_valid == 1) ? (throttle_cfg->value) : 0) : 0);
}

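/*
 * Set the configured IO throttle value and commit it through a
 * dconf update.
 */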
bfa_status_t
bfa_fcpim_throttle_set(struct bfa_s *bfa, u16 value)
{
        /* in min cfg no commands should run. */
        if ((bfa_dconf_get_min_cfg(bfa) == BFA_TRUE) ||
            (!bfa_dconf_read_data_valid(bfa)))
                return BFA_STATUS_FAILED;

        bfa_fcpim_write_throttle(bfa, value);

        return bfa_dconf_update(bfa);
}

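/*
 * Report the current, configured and maximum IO throttle values
 * into the caller-supplied buffer.
 */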
bfa_status_t
bfa_fcpim_throttle_get(struct bfa_s *bfa, void *buf)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        struct bfa_defs_fcpim_throttle_s throttle;

        if ((bfa_dconf_get_min_cfg(bfa) == BFA_TRUE) ||
            (!bfa_dconf_read_data_valid(bfa)))
                return BFA_STATUS_FAILED;

        memset(&throttle, 0, sizeof(struct bfa_defs_fcpim_throttle_s));

        throttle.cur_value = (u16)(fcpim->fcp->num_ioim_reqs);
        throttle.cfg_value = bfa_fcpim_read_throttle(bfa);
        if (!throttle.cfg_value)
                throttle.cfg_value = throttle.cur_value;
        throttle.max_value = (u16)(fcpim->fcp->max_ioim_reqs);
        memcpy(buf, &throttle, sizeof(struct bfa_defs_fcpim_throttle_s));

        return BFA_STATUS_OK;
}

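/*
 * Illustrative usage of the throttle query above (a sketch only, not
 * taken from this file): a management path holding a struct bfa_s
 * pointer could read the throttle settings as below; bfa is a
 * placeholder for that pointer.
 *
 *	struct bfa_defs_fcpim_throttle_s throttle;
 *
 *	if (bfa_fcpim_throttle_get(bfa, &throttle) != BFA_STATUS_OK)
 *		return;			-- dconf not valid or min-cfg mode
 *	-- throttle.cur_value, throttle.cfg_value and throttle.max_value
 *	-- now hold the current, configured and maximum IO throttle
 */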