// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "osdep.h"
#include "hmc.h"
#include "defs.h"
#include "type.h"
#include "protos.h"

/**
 * irdma_find_sd_index_limit - finds segment descriptor index limit
 * @hmc_info: pointer to the HMC configuration information structure
 * @type: type of HMC resources we're searching
 * @idx: starting index for the object
 * @cnt: number of objects we're trying to create
 * @sd_idx: pointer to return index of the segment descriptor in question
 * @sd_limit: pointer to return the maximum number of segment descriptors
 *
 * This function calculates the segment descriptor index and index limit
 * for the resource defined by irdma_hmc_rsrc_type.
 */
static void irdma_find_sd_index_limit(struct irdma_hmc_info *hmc_info, u32 type,
				      u32 idx, u32 cnt, u32 *sd_idx,
				      u32 *sd_limit)
{
	u64 fpm_addr, fpm_limit;

	fpm_addr = hmc_info->hmc_obj[type].base +
		   hmc_info->hmc_obj[type].size * idx;
	fpm_limit = fpm_addr + hmc_info->hmc_obj[type].size * cnt;
	*sd_idx = (u32)(fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE);
	*sd_limit = (u32)((fpm_limit - 1) / IRDMA_HMC_DIRECT_BP_SIZE);
	*sd_limit += 1;
}
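
/*
 * Worked example (illustrative numbers, assuming the 2M direct
 * backing-page size implied by the "2M backing page" allocated in
 * irdma_add_sd_table_entry() below): an object array with base 0 and
 * size 256, queried for idx 0 and cnt 16384, spans fpm_addr = 0 through
 * fpm_limit = 256 * 16384 = 0x400000, giving *sd_idx = 0 and
 * *sd_limit = ((0x400000 - 1) / 0x200000) + 1 = 2, i.e. SDs [0, 2).
 * The "- 1 ... + 1" form is an exclusive ceiling: a limit landing
 * exactly on a segment boundary does not claim an extra descriptor.
 */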

/**
 * irdma_find_pd_index_limit - finds page descriptor index limit
 * @hmc_info: pointer to the HMC configuration information struct
 * @type: HMC resource type we're examining
 * @idx: starting index for the object
 * @cnt: number of objects we're trying to create
 * @pd_idx: pointer to return page descriptor index
 * @pd_limit: pointer to return page descriptor index limit
 *
 * Calculates the page descriptor index and index limit for the resource
 * defined by irdma_hmc_rsrc_type.
 */
static void irdma_find_pd_index_limit(struct irdma_hmc_info *hmc_info, u32 type,
				      u32 idx, u32 cnt, u32 *pd_idx,
				      u32 *pd_limit)
{
	u64 fpm_addr, fpm_limit;

	fpm_addr = hmc_info->hmc_obj[type].base +
		   hmc_info->hmc_obj[type].size * idx;
	fpm_limit = fpm_addr + hmc_info->hmc_obj[type].size * cnt;
	*pd_idx = (u32)(fpm_addr / IRDMA_HMC_PAGED_BP_SIZE);
	*pd_limit = (u32)((fpm_limit - 1) / IRDMA_HMC_PAGED_BP_SIZE);
	*pd_limit += 1;
}
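
/*
 * Worked example (illustrative numbers, assuming the 4K paged
 * backing-page size implied by the "4K pd page" allocated below):
 * base 0, size 256, idx 0 and cnt 64 give fpm_limit = 0x4000, so
 * *pd_idx = 0 and *pd_limit = ((0x4000 - 1) / 0x1000) + 1 = 4,
 * i.e. the objects need PDs [0, 4): four 4K backing pages.
 */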

/**
 * irdma_set_sd_entry - setup entry for sd programming
 * @pa: physical addr
 * @idx: sd index
 * @type: paged or direct sd
 * @entry: sd entry ptr
 */
static void irdma_set_sd_entry(u64 pa, u32 idx, enum irdma_sd_entry_type type,
			       struct irdma_update_sd_entry *entry)
{
	entry->data = pa |
		      FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT, IRDMA_HMC_MAX_BP_COUNT) |
		      FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDTYPE,
				 type == IRDMA_SD_TYPE_PAGED ? 0 : 1) |
		      FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDVALID, 1);

	entry->cmd = idx | FIELD_PREP(IRDMA_PFHMC_SDCMD_PMSDWR, 1) | BIT(15);
}

/**
 * irdma_clr_sd_entry - setup entry for sd clear
 * @idx: sd index
 * @type: paged or direct sd
 * @entry: sd entry ptr
 */
static void irdma_clr_sd_entry(u32 idx, enum irdma_sd_entry_type type,
			       struct irdma_update_sd_entry *entry)
{
	entry->data = FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT, IRDMA_HMC_MAX_BP_COUNT) |
		      FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDTYPE,
				 type == IRDMA_SD_TYPE_PAGED ? 0 : 1);

	entry->cmd = idx | FIELD_PREP(IRDMA_PFHMC_SDCMD_PMSDWR, 1) | BIT(15);
}

/**
 * irdma_invalidate_pf_hmc_pd - Invalidates the pd cache in the hardware for PF
 * @dev: pointer to our device struct
 * @sd_idx: segment descriptor index
 * @pd_idx: page descriptor index
 */
static inline void irdma_invalidate_pf_hmc_pd(struct irdma_sc_dev *dev, u32 sd_idx,
					      u32 pd_idx)
{
	u32 val = FIELD_PREP(IRDMA_PFHMC_PDINV_PMSDIDX, sd_idx) |
		  FIELD_PREP(IRDMA_PFHMC_PDINV_PMSDPARTSEL, 1) |
		  FIELD_PREP(IRDMA_PFHMC_PDINV_PMPDIDX, pd_idx);

	writel(val, dev->hw_regs[IRDMA_PFHMC_PDINV]);
}

/**
 * irdma_hmc_sd_one - setup 1 sd entry for cqp
 * @dev: pointer to the device structure
 * @hmc_fn_id: hmc's function id
 * @pa: physical addr
 * @sd_idx: sd index
 * @type: paged or direct sd
 * @setsd: flag to set or clear sd
 */
int irdma_hmc_sd_one(struct irdma_sc_dev *dev, u8 hmc_fn_id, u64 pa, u32 sd_idx,
		     enum irdma_sd_entry_type type, bool setsd)
{
	struct irdma_update_sds_info sdinfo;

	sdinfo.cnt = 1;
	sdinfo.hmc_fn_id = hmc_fn_id;
	if (setsd)
		irdma_set_sd_entry(pa, sd_idx, type, sdinfo.entry);
	else
		irdma_clr_sd_entry(sd_idx, type, sdinfo.entry);
	return dev->cqp->process_cqp_sds(dev, &sdinfo);
}

/**
 * irdma_hmc_sd_grp - setup group of sd entries for cqp
 * @dev: pointer to the device structure
 * @hmc_info: pointer to the HMC configuration information struct
 * @sd_index: sd index
 * @sd_cnt: number of sd entries
 * @setsd: flag to set or clear sd
 */
static int irdma_hmc_sd_grp(struct irdma_sc_dev *dev,
			    struct irdma_hmc_info *hmc_info, u32 sd_index,
			    u32 sd_cnt, bool setsd)
{
	struct irdma_hmc_sd_entry *sd_entry;
	struct irdma_update_sds_info sdinfo = {};
	u64 pa;
	u32 i;
	int ret_code = 0;

	sdinfo.hmc_fn_id = hmc_info->hmc_fn_id;
	for (i = sd_index; i < sd_index + sd_cnt; i++) {
		sd_entry = &hmc_info->sd_table.sd_entry[i];
		if (!sd_entry || (!sd_entry->valid && setsd) ||
		    (sd_entry->valid && !setsd))
			continue;
		if (setsd) {
			pa = (sd_entry->entry_type == IRDMA_SD_TYPE_PAGED) ?
			     sd_entry->u.pd_table.pd_page_addr.pa :
			     sd_entry->u.bp.addr.pa;
			irdma_set_sd_entry(pa, i, sd_entry->entry_type,
					   &sdinfo.entry[sdinfo.cnt]);
		} else {
			irdma_clr_sd_entry(i, sd_entry->entry_type,
					   &sdinfo.entry[sdinfo.cnt]);
		}
		sdinfo.cnt++;
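		/* a full batch of SD updates is ready; flush it through the CQP */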
		if (sdinfo.cnt == IRDMA_MAX_SD_ENTRIES) {
			ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);
			if (ret_code) {
				ibdev_dbg(to_ibdev(dev),
					  "HMC: sd_programming failed err=%d\n",
					  ret_code);
				return ret_code;
			}

			sdinfo.cnt = 0;
		}
	}
	if (sdinfo.cnt)
		ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);

	return ret_code;
}

/**
 * irdma_hmc_finish_add_sd_reg - program sd entries for objects
 * @dev: pointer to the device structure
 * @info: create obj info
 */
static int irdma_hmc_finish_add_sd_reg(struct irdma_sc_dev *dev,
				       struct irdma_hmc_create_obj_info *info)
{
	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
		return -EINVAL;

	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt)
		return -EINVAL;

	if (!info->add_sd_cnt)
		return 0;
	return irdma_hmc_sd_grp(dev, info->hmc_info,
				info->hmc_info->sd_indexes[0], info->add_sd_cnt,
				true);
}

/**
 * irdma_sc_create_hmc_obj - allocate backing store for hmc objects
 * @dev: pointer to the device structure
 * @info: pointer to irdma_hmc_create_obj_info struct
 *
 * This will allocate memory for PDs and backing pages and populate
 * the sd and pd entries.
 */
int irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev,
			    struct irdma_hmc_create_obj_info *info)
{
	struct irdma_hmc_sd_entry *sd_entry;
	u32 sd_idx, sd_lmt;
	u32 pd_idx = 0, pd_lmt = 0;
	u32 pd_idx1 = 0, pd_lmt1 = 0;
	u32 i, j;
	bool pd_error = false;
	int ret_code = 0;

	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
		return -EINVAL;

	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ibdev_dbg(to_ibdev(dev),
			  "HMC: error type %u, start = %u, req cnt %u, cnt = %u\n",
			  info->rsrc_type, info->start_idx, info->count,
			  info->hmc_info->hmc_obj[info->rsrc_type].cnt);
		return -EINVAL;
	}

	irdma_find_sd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &sd_idx,
				  &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		return -EINVAL;
	}

	irdma_find_pd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &pd_idx,
				  &pd_lmt);

	for (j = sd_idx; j < sd_lmt; j++) {
		ret_code = irdma_add_sd_table_entry(dev->hw, info->hmc_info, j,
						    info->entry_type,
						    IRDMA_HMC_DIRECT_BP_SIZE);
		if (ret_code)
			goto exit_sd_error;

		sd_entry = &info->hmc_info->sd_table.sd_entry[j];
		if (sd_entry->entry_type == IRDMA_SD_TYPE_PAGED &&
		    (dev->hmc_info == info->hmc_info &&
		     info->rsrc_type != IRDMA_HMC_IW_PBLE)) {
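			/* clamp the object's global PD range to the PDs owned by SD j */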
			pd_idx1 = max(pd_idx, (j * IRDMA_HMC_MAX_BP_COUNT));
			pd_lmt1 = min(pd_lmt, (j + 1) * IRDMA_HMC_MAX_BP_COUNT);
			for (i = pd_idx1; i < pd_lmt1; i++) {
				/* update the pd table entry */
				ret_code = irdma_add_pd_table_entry(dev,
								    info->hmc_info,
								    i, NULL);
				if (ret_code) {
					pd_error = true;
					break;
				}
			}
			if (pd_error) {
				while (i && (i > pd_idx1)) {
					irdma_remove_pd_bp(dev, info->hmc_info,
							   i - 1);
					i--;
				}
			}
		}
		if (sd_entry->valid)
			continue;

		info->hmc_info->sd_indexes[info->add_sd_cnt] = (u16)j;
		info->add_sd_cnt++;
		sd_entry->valid = true;
	}
	return irdma_hmc_finish_add_sd_reg(dev, info);

exit_sd_error:
	while (j && (j > sd_idx)) {
		sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
		switch (sd_entry->entry_type) {
		case IRDMA_SD_TYPE_PAGED:
			pd_idx1 = max(pd_idx, (j - 1) * IRDMA_HMC_MAX_BP_COUNT);
			pd_lmt1 = min(pd_lmt, (j * IRDMA_HMC_MAX_BP_COUNT));
			for (i = pd_idx1; i < pd_lmt1; i++)
				irdma_prep_remove_pd_page(info->hmc_info, i);
			break;
		case IRDMA_SD_TYPE_DIRECT:
			irdma_prep_remove_pd_page(info->hmc_info, (j - 1));
			break;
		default:
			ret_code = -EINVAL;
			break;
		}
		j--;
	}

	return ret_code;
}

/**
 * irdma_finish_del_sd_reg - delete sd entries for objects
 * @dev: pointer to the device structure
 * @info: delete obj info
 * @reset: true if called before reset
 */
static int irdma_finish_del_sd_reg(struct irdma_sc_dev *dev,
				   struct irdma_hmc_del_obj_info *info,
				   bool reset)
{
	struct irdma_hmc_sd_entry *sd_entry;
	int ret_code = 0;
	u32 i, sd_idx;
	struct irdma_dma_mem *mem;

	if (!reset)
		ret_code = irdma_hmc_sd_grp(dev, info->hmc_info,
					    info->hmc_info->sd_indexes[0],
					    info->del_sd_cnt, false);

	if (ret_code)
		ibdev_dbg(to_ibdev(dev), "HMC: error cqp sd sd_grp\n");
	for (i = 0; i < info->del_sd_cnt; i++) {
		sd_idx = info->hmc_info->sd_indexes[i];
		sd_entry = &info->hmc_info->sd_table.sd_entry[sd_idx];
		mem = (sd_entry->entry_type == IRDMA_SD_TYPE_PAGED) ?
		      &sd_entry->u.pd_table.pd_page_addr :
		      &sd_entry->u.bp.addr;

		if (!mem || !mem->va) {
			ibdev_dbg(to_ibdev(dev), "HMC: error cqp sd mem\n");
		} else {
			dma_free_coherent(dev->hw->device, mem->size, mem->va,
					  mem->pa);
			mem->va = NULL;
		}
	}

	return ret_code;
}

/**
 * irdma_sc_del_hmc_obj - remove pe hmc objects
 * @dev: pointer to the device structure
 * @info: pointer to irdma_hmc_del_obj_info struct
 * @reset: true if called before reset
 *
 * This will de-populate the SDs and PDs. It frees the memory for PDs and
 * backing storage. After this function returns, the caller should
 * deallocate the memory previously allocated for book-keeping information
 * about the PDs and backing storage.
 */
int irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev,
			 struct irdma_hmc_del_obj_info *info, bool reset)
{
	struct irdma_hmc_pd_table *pd_table;
	u32 sd_idx, sd_lmt;
	u32 pd_idx, pd_lmt, rel_pd_idx;
	u32 i, j;
	int ret_code = 0;

	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ibdev_dbg(to_ibdev(dev),
			  "HMC: error start_idx[%04d] >= [type %04d].cnt[%04d]\n",
			  info->start_idx, info->rsrc_type,
			  info->hmc_info->hmc_obj[info->rsrc_type].cnt);
		return -EINVAL;
	}

	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ibdev_dbg(to_ibdev(dev),
			  "HMC: error start_idx[%04d] + count %04d > [type %04d].cnt[%04d]\n",
			  info->start_idx, info->count, info->rsrc_type,
			  info->hmc_info->hmc_obj[info->rsrc_type].cnt);
		return -EINVAL;
	}

	irdma_find_pd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &pd_idx,
				  &pd_lmt);

	for (j = pd_idx; j < pd_lmt; j++) {
		sd_idx = j / IRDMA_HMC_PD_CNT_IN_SD;

		if (!info->hmc_info->sd_table.sd_entry[sd_idx].valid)
			continue;

		if (info->hmc_info->sd_table.sd_entry[sd_idx].entry_type !=
		    IRDMA_SD_TYPE_PAGED)
			continue;

		rel_pd_idx = j % IRDMA_HMC_PD_CNT_IN_SD;
		pd_table = &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
		if (pd_table->pd_entry &&
		    pd_table->pd_entry[rel_pd_idx].valid) {
			ret_code = irdma_remove_pd_bp(dev, info->hmc_info, j);
			if (ret_code) {
				ibdev_dbg(to_ibdev(dev),
					  "HMC: remove_pd_bp error\n");
				return ret_code;
			}
		}
	}

	irdma_find_sd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &sd_idx,
				  &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		ibdev_dbg(to_ibdev(dev), "HMC: invalid sd_idx\n");
		return -EINVAL;
	}

	for (i = sd_idx; i < sd_lmt; i++) {
		pd_table = &info->hmc_info->sd_table.sd_entry[i].u.pd_table;
		if (!info->hmc_info->sd_table.sd_entry[i].valid)
			continue;
		switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
		case IRDMA_SD_TYPE_DIRECT:
			ret_code = irdma_prep_remove_sd_bp(info->hmc_info, i);
			if (!ret_code) {
				info->hmc_info->sd_indexes[info->del_sd_cnt] =
					(u16)i;
				info->del_sd_cnt++;
			}
			break;
		case IRDMA_SD_TYPE_PAGED:
			ret_code = irdma_prep_remove_pd_page(info->hmc_info, i);
			if (ret_code)
				break;
			if (dev->hmc_info != info->hmc_info &&
			    info->rsrc_type == IRDMA_HMC_IW_PBLE &&
			    pd_table->pd_entry) {
				kfree(pd_table->pd_entry_virt_mem.va);
				pd_table->pd_entry = NULL;
			}
			info->hmc_info->sd_indexes[info->del_sd_cnt] = (u16)i;
			info->del_sd_cnt++;
			break;
		default:
			break;
		}
	}
	return irdma_finish_del_sd_reg(dev, info, reset);
}

/**
 * irdma_add_sd_table_entry - Adds a segment descriptor to the table
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information struct
 * @sd_index: segment descriptor index to manipulate
 * @type: what type of segment descriptor we're manipulating
 * @direct_mode_sz: size to alloc in direct mode
 */
int irdma_add_sd_table_entry(struct irdma_hw *hw,
			     struct irdma_hmc_info *hmc_info, u32 sd_index,
			     enum irdma_sd_entry_type type, u64 direct_mode_sz)
{
	struct irdma_hmc_sd_entry *sd_entry;
	struct irdma_dma_mem dma_mem;
	u64 alloc_len;

	sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
	if (!sd_entry->valid) {
		if (type == IRDMA_SD_TYPE_PAGED)
			alloc_len = IRDMA_HMC_PAGED_BP_SIZE;
		else
			alloc_len = direct_mode_sz;

		/* allocate a 4K pd page or 2M backing page */
		dma_mem.size = ALIGN(alloc_len, IRDMA_HMC_PD_BP_BUF_ALIGNMENT);
		dma_mem.va = dma_alloc_coherent(hw->device, dma_mem.size,
						&dma_mem.pa, GFP_KERNEL);
		if (!dma_mem.va)
			return -ENOMEM;
		if (type == IRDMA_SD_TYPE_PAGED) {
			struct irdma_virt_mem *vmem =
				&sd_entry->u.pd_table.pd_entry_virt_mem;

			vmem->size = sizeof(struct irdma_hmc_pd_entry) * 512;
			vmem->va = kzalloc(vmem->size, GFP_KERNEL);
			if (!vmem->va) {
				dma_free_coherent(hw->device, dma_mem.size,
						  dma_mem.va, dma_mem.pa);
				dma_mem.va = NULL;
				return -ENOMEM;
			}
			sd_entry->u.pd_table.pd_entry = vmem->va;

			memcpy(&sd_entry->u.pd_table.pd_page_addr, &dma_mem,
			       sizeof(sd_entry->u.pd_table.pd_page_addr));
		} else {
			memcpy(&sd_entry->u.bp.addr, &dma_mem,
			       sizeof(sd_entry->u.bp.addr));

			sd_entry->u.bp.sd_pd_index = sd_index;
		}

		hmc_info->sd_table.sd_entry[sd_index].entry_type = type;
		hmc_info->sd_table.use_cnt++;
	}
	if (sd_entry->entry_type == IRDMA_SD_TYPE_DIRECT)
		sd_entry->u.bp.use_cnt++;

	return 0;
}
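
/*
 * Layout summary, as implied by the allocations above (sizes from the
 * "4K pd page or 2M backing page" comment, count from the 512-entry
 * pd_entry table): each SD maps one 2M window of FPM space, either as
 * a single contiguous 2M backing page (DIRECT) or as a 4K PD page whose
 * 512 descriptors each point at a 4K backing page (PAGED,
 * 512 * 4K = 2M).
 */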

/**
 * irdma_add_pd_table_entry - Adds page descriptor to the specified table
 * @dev: pointer to our device structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @pd_index: which page descriptor index to manipulate
 * @rsrc_pg: if not NULL, use preallocated page instead of allocating new one.
 *
 * This function:
 * 1. Initializes the pd entry
 * 2. Adds pd_entry in the pd_table
 * 3. Marks the entry valid in the irdma_hmc_pd_entry structure
 * 4. Initializes the pd_entry's ref count to 1
 * assumptions:
 * 1. The memory for pd should be pinned down, physically contiguous,
 *    aligned on a 4K boundary and zeroed.
 * 2. It should be 4K in size.
 */
int irdma_add_pd_table_entry(struct irdma_sc_dev *dev,
			     struct irdma_hmc_info *hmc_info, u32 pd_index,
			     struct irdma_dma_mem *rsrc_pg)
{
	struct irdma_hmc_pd_table *pd_table;
	struct irdma_hmc_pd_entry *pd_entry;
	struct irdma_dma_mem mem;
	struct irdma_dma_mem *page = &mem;
	u32 sd_idx, rel_pd_idx;
	u64 *pd_addr;
	u64 page_desc;

	if (pd_index / IRDMA_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt)
		return -EINVAL;

	sd_idx = (pd_index / IRDMA_HMC_PD_CNT_IN_SD);
	if (hmc_info->sd_table.sd_entry[sd_idx].entry_type !=
	    IRDMA_SD_TYPE_PAGED)
		return 0;

	rel_pd_idx = (pd_index % IRDMA_HMC_PD_CNT_IN_SD);
	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	if (!pd_entry->valid) {
		if (rsrc_pg) {
			pd_entry->rsrc_pg = true;
			page = rsrc_pg;
		} else {
			page->size = ALIGN(IRDMA_HMC_PAGED_BP_SIZE,
					   IRDMA_HMC_PD_BP_BUF_ALIGNMENT);
			page->va = dma_alloc_coherent(dev->hw->device,
						      page->size, &page->pa,
						      GFP_KERNEL);
			if (!page->va)
				return -ENOMEM;

			pd_entry->rsrc_pg = false;
		}

		memcpy(&pd_entry->bp.addr, page, sizeof(pd_entry->bp.addr));
		pd_entry->bp.sd_pd_index = pd_index;
		pd_entry->bp.entry_type = IRDMA_SD_TYPE_PAGED;
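		/* bit 0 of the descriptor written to the PD page marks it valid */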
		page_desc = page->pa | 0x1;
		pd_addr = pd_table->pd_page_addr.va;
		pd_addr += rel_pd_idx;
		memcpy(pd_addr, &page_desc, sizeof(*pd_addr));
		pd_entry->sd_index = sd_idx;
		pd_entry->valid = true;
		pd_table->use_cnt++;
		irdma_invalidate_pf_hmc_pd(dev, sd_idx, rel_pd_idx);
	}
	pd_entry->bp.use_cnt++;

	return 0;
}

/**
 * irdma_remove_pd_bp - remove a backing page from a page descriptor
 * @dev: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 *
 * This function:
 * 1. Marks the entry in the pd table (for paged address mode) or in the
 *    sd table (for direct address mode) invalid.
 * 2. Writes to register PMPDINV to invalidate the backing page in FV cache
 * 3. Decrements the ref count for the pd_entry
 * assumptions:
 * 1. Caller can deallocate the memory used by backing storage after this
 *    function returns.
 */
int irdma_remove_pd_bp(struct irdma_sc_dev *dev,
		       struct irdma_hmc_info *hmc_info, u32 idx)
{
	struct irdma_hmc_pd_entry *pd_entry;
	struct irdma_hmc_pd_table *pd_table;
	struct irdma_hmc_sd_entry *sd_entry;
	u32 sd_idx, rel_pd_idx;
	struct irdma_dma_mem *mem;
	u64 *pd_addr;

	sd_idx = idx / IRDMA_HMC_PD_CNT_IN_SD;
	rel_pd_idx = idx % IRDMA_HMC_PD_CNT_IN_SD;
	if (sd_idx >= hmc_info->sd_table.sd_cnt)
		return -EINVAL;

	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
	if (sd_entry->entry_type != IRDMA_SD_TYPE_PAGED)
		return -EINVAL;

	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
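	/* drop one reference; free the backing page only on last use */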
	if (--pd_entry->bp.use_cnt)
		return 0;

	pd_entry->valid = false;
	pd_table->use_cnt--;
	pd_addr = pd_table->pd_page_addr.va;
	pd_addr += rel_pd_idx;
	memset(pd_addr, 0, sizeof(u64));
	irdma_invalidate_pf_hmc_pd(dev, sd_idx, idx);

	if (!pd_entry->rsrc_pg) {
		mem = &pd_entry->bp.addr;
		if (!mem || !mem->va)
			return -EINVAL;

		dma_free_coherent(dev->hw->device, mem->size, mem->va,
				  mem->pa);
		mem->va = NULL;
	}
	if (!pd_table->use_cnt)
		kfree(pd_table->pd_entry_virt_mem.va);

	return 0;
}

/**
 * irdma_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 */
int irdma_prep_remove_sd_bp(struct irdma_hmc_info *hmc_info, u32 idx)
{
	struct irdma_hmc_sd_entry *sd_entry;

	sd_entry = &hmc_info->sd_table.sd_entry[idx];
	if (--sd_entry->u.bp.use_cnt)
		return -EBUSY;

	hmc_info->sd_table.use_cnt--;
	sd_entry->valid = false;

	return 0;
}

/**
 * irdma_prep_remove_pd_page - Prepares to remove a PD page from sd entry.
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: segment descriptor index to find the relevant page descriptor
 */
int irdma_prep_remove_pd_page(struct irdma_hmc_info *hmc_info, u32 idx)
{
	struct irdma_hmc_sd_entry *sd_entry;

	sd_entry = &hmc_info->sd_table.sd_entry[idx];

	if (sd_entry->u.pd_table.use_cnt)
		return -EBUSY;

	sd_entry->valid = false;
	hmc_info->sd_table.use_cnt--;

	return 0;
}