// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2020 Intel Corporation */
#include <linux/iopoll.h>
#include "adf_accel_devices.h"
#include "adf_cfg_services.h"
#include "adf_common_drv.h"
#include "adf_fw_config.h"
#include "adf_gen4_hw_data.h"
#include "adf_gen4_pm.h"
10 | |
/* Compose the 64-bit ring base CSR value from a DMA address and ring size. */
static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size)
{
	return BUILD_RING_BASE_ADDR(addr, size);
}
15 | |
/* Read the head pointer CSR of @ring in @bank. */
static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring)
{
	return READ_CSR_RING_HEAD(csr_base_addr, bank, ring);
}
20 | |
/* Write @value to the head pointer CSR of @ring in @bank. */
static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring,
				u32 value)
{
	WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value);
}
26 | |
/* Read the tail pointer CSR of @ring in @bank. */
static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring)
{
	return READ_CSR_RING_TAIL(csr_base_addr, bank, ring);
}
31 | |
/* Write @value to the tail pointer CSR of @ring in @bank. */
static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring,
				u32 value)
{
	WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value);
}
37 | |
/* Read the ring empty-status CSR for @bank. */
static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank)
{
	return READ_CSR_E_STAT(csr_base_addr, bank);
}
42 | |
/* Write @value to the configuration CSR of @ring in @bank. */
static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank, u32 ring,
				  u32 value)
{
	WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value);
}
48 | |
/* Program the DMA base address CSR of @ring in @bank. */
static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring,
				dma_addr_t addr)
{
	WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr);
}
54 | |
/* Write @value to the interrupt flag CSR of @bank. */
static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank,
			       u32 value)
{
	WRITE_CSR_INT_FLAG(csr_base_addr, bank, value);
}
60 | |
/* Program the interrupt source select CSR of @bank to its default value. */
static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank)
{
	WRITE_CSR_INT_SRCSEL(csr_base_addr, bank);
}
65 | |
/* Write @value to the interrupt coalescing enable CSR of @bank. */
static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank, u32 value)
{
	WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value);
}
70 | |
/* Write @value to the interrupt coalescing control CSR of @bank. */
static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank,
				  u32 value)
{
	WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value);
}
76 | |
/* Write @value to the combined interrupt flag-and-coalescing CSR of @bank. */
static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank,
				       u32 value)
{
	WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value);
}
82 | |
/* Write @value to the ring service arbiter enable CSR of @bank. */
static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank,
				      u32 value)
{
	WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value);
}
88 | |
/*
 * adf_gen4_init_hw_csr_ops() - populate the GEN4 transport CSR accessor table
 * @csr_ops: ops structure to fill with the GEN4-specific CSR helpers above
 */
void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
{
	csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr;
	csr_ops->read_csr_ring_head = read_csr_ring_head;
	csr_ops->write_csr_ring_head = write_csr_ring_head;
	csr_ops->read_csr_ring_tail = read_csr_ring_tail;
	csr_ops->write_csr_ring_tail = write_csr_ring_tail;
	csr_ops->read_csr_e_stat = read_csr_e_stat;
	csr_ops->write_csr_ring_config = write_csr_ring_config;
	csr_ops->write_csr_ring_base = write_csr_ring_base;
	csr_ops->write_csr_int_flag = write_csr_int_flag;
	csr_ops->write_csr_int_srcsel = write_csr_int_srcsel;
	csr_ops->write_csr_int_col_en = write_csr_int_col_en;
	csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl;
	csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col;
	csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en;
}
EXPORT_SYMBOL_GPL(adf_gen4_init_hw_csr_ops);
107 | |
/* Return the fixed accelerator mask for GEN4 devices. */
u32 adf_gen4_get_accel_mask(struct adf_hw_device_data *self)
{
	return ADF_GEN4_ACCELERATORS_MASK;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_accel_mask);
113 | |
/* Return the fixed number of accelerators on GEN4 devices. */
u32 adf_gen4_get_num_accels(struct adf_hw_device_data *self)
{
	return ADF_GEN4_MAX_ACCELERATORS;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_num_accels);
119 | |
120 | u32 adf_gen4_get_num_aes(struct adf_hw_device_data *self) |
121 | { |
122 | if (!self || !self->ae_mask) |
123 | return 0; |
124 | |
125 | return hweight32(self->ae_mask); |
126 | } |
127 | EXPORT_SYMBOL_GPL(adf_gen4_get_num_aes); |
128 | |
/* Return the BAR index of the PMISC (miscellaneous CSR) region. */
u32 adf_gen4_get_misc_bar_id(struct adf_hw_device_data *self)
{
	return ADF_GEN4_PMISC_BAR;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_misc_bar_id);
134 | |
/* Return the BAR index of the ETR (transport ring) region. */
u32 adf_gen4_get_etr_bar_id(struct adf_hw_device_data *self)
{
	return ADF_GEN4_ETR_BAR;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_etr_bar_id);
140 | |
/* Return the BAR index of the SRAM region. */
u32 adf_gen4_get_sram_bar_id(struct adf_hw_device_data *self)
{
	return ADF_GEN4_SRAM_BAR;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_sram_bar_id);
146 | |
/* GEN4 devices report a single SKU. */
enum dev_sku_info adf_gen4_get_sku(struct adf_hw_device_data *self)
{
	return DEV_SKU_1;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_sku);
152 | |
/* Fill @arb_info with the GEN4 arbiter configuration and CSR offsets. */
void adf_gen4_get_arb_info(struct arb_info *arb_info)
{
	arb_info->arb_cfg = ADF_GEN4_ARB_CONFIG;
	arb_info->arb_offset = ADF_GEN4_ARB_OFFSET;
	arb_info->wt2sam_offset = ADF_GEN4_ARB_WRK_2_SER_MAP_OFFSET;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_arb_info);
160 | |
/* Fill @admin_csrs_info with the GEN4 admin mailbox/message CSR offsets. */
void adf_gen4_get_admin_info(struct admin_info *admin_csrs_info)
{
	admin_csrs_info->mailbox_offset = ADF_GEN4_MAILBOX_BASE_OFFSET;
	admin_csrs_info->admin_msg_ur = ADF_GEN4_ADMINMSGUR_OFFSET;
	admin_csrs_info->admin_msg_lr = ADF_GEN4_ADMINMSGLR_OFFSET;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_admin_info);
168 | |
/* Return the clock frequency used as the heartbeat time base. */
u32 adf_gen4_get_heartbeat_clock(struct adf_hw_device_data *self)
{
	/* GEN4 uses the KPT counter as the heartbeat (HB) counter */
	return ADF_GEN4_KPT_COUNTER_FREQ;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_heartbeat_clock);
177 | |
/*
 * adf_gen4_enable_error_correction() - configure error reporting masks
 * @accel_dev: acceleration device
 *
 * Writes ERRMSK3 so that only the VFLR notification remains masked on the
 * host; all other errsou3 sources are unmasked.
 */
void adf_gen4_enable_error_correction(struct adf_accel_dev *accel_dev)
{
	struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR];
	void __iomem *csr = misc_bar->virt_addr;

	/* Enable all in errsou3 except VFLR notification on host */
	ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, ADF_GEN4_VFLNOTIFY);
}
EXPORT_SYMBOL_GPL(adf_gen4_enable_error_correction);
187 | |
/*
 * adf_gen4_enable_ints() - unmask bundle and misc interrupts
 * @accel_dev: acceleration device
 *
 * Clears the SMIAPF mask registers in the PMISC BAR (a value of 0 unmasks
 * all sources in these registers).
 */
void adf_gen4_enable_ints(struct adf_accel_dev *accel_dev)
{
	void __iomem *addr;

	addr = (&GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR])->virt_addr;

	/* Enable bundle interrupts */
	ADF_CSR_WR(addr, ADF_GEN4_SMIAPF_RP_X0_MASK_OFFSET, 0);
	ADF_CSR_WR(addr, ADF_GEN4_SMIAPF_RP_X1_MASK_OFFSET, 0);

	/* Enable misc interrupts */
	ADF_CSR_WR(addr, ADF_GEN4_SMIAPF_MASK_OFFSET, 0);
}
EXPORT_SYMBOL_GPL(adf_gen4_enable_ints);
202 | |
/*
 * adf_gen4_init_device() - power up a GEN4 device
 * @accel_dev: acceleration device
 *
 * Masks the PM interrupt source, asserts DRV_ACTIVE and then polls the PM
 * status register until the device reports the init state.
 *
 * Return: 0 on success, -ETIMEDOUT (from read_poll_timeout) on failure.
 */
int adf_gen4_init_device(struct adf_accel_dev *accel_dev)
{
	void __iomem *addr;
	u32 status;
	u32 csr;
	int ret;

	addr = (&GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR])->virt_addr;

	/* Temporarily mask PM interrupt */
	csr = ADF_CSR_RD(addr, ADF_GEN4_ERRMSK2);
	csr |= ADF_GEN4_PM_SOU;
	ADF_CSR_WR(addr, ADF_GEN4_ERRMSK2, csr);

	/* Set DRV_ACTIVE bit to power up the device */
	ADF_CSR_WR(addr, ADF_GEN4_PM_INTERRUPT, ADF_GEN4_PM_DRV_ACTIVE);

	/* Poll status register to make sure the device is powered up */
	ret = read_poll_timeout(ADF_CSR_RD, status,
				status & ADF_GEN4_PM_INIT_STATE,
				ADF_GEN4_PM_POLL_DELAY_US,
				ADF_GEN4_PM_POLL_TIMEOUT_US, true, addr,
				ADF_GEN4_PM_STATUS);
	if (ret)
		dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n");

	return ret;
}
EXPORT_SYMBOL_GPL(adf_gen4_init_device);
232 | |
233 | static inline void adf_gen4_unpack_ssm_wdtimer(u64 value, u32 *upper, |
234 | u32 *lower) |
235 | { |
236 | *lower = lower_32_bits(value); |
237 | *upper = upper_32_bits(value); |
238 | } |
239 | |
240 | void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev) |
241 | { |
242 | void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev); |
243 | u64 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE; |
244 | u64 timer_val = ADF_SSM_WDT_DEFAULT_VALUE; |
245 | u32 ssm_wdt_pke_high = 0; |
246 | u32 ssm_wdt_pke_low = 0; |
247 | u32 ssm_wdt_high = 0; |
248 | u32 ssm_wdt_low = 0; |
249 | |
250 | /* Convert 64bit WDT timer value into 32bit values for |
251 | * mmio write to 32bit CSRs. |
252 | */ |
253 | adf_gen4_unpack_ssm_wdtimer(value: timer_val, upper: &ssm_wdt_high, lower: &ssm_wdt_low); |
254 | adf_gen4_unpack_ssm_wdtimer(value: timer_val_pke, upper: &ssm_wdt_pke_high, |
255 | lower: &ssm_wdt_pke_low); |
256 | |
257 | /* Enable WDT for sym and dc */ |
258 | ADF_CSR_WR(pmisc_addr, ADF_SSMWDTL_OFFSET, ssm_wdt_low); |
259 | ADF_CSR_WR(pmisc_addr, ADF_SSMWDTH_OFFSET, ssm_wdt_high); |
260 | /* Enable WDT for pke */ |
261 | ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEL_OFFSET, ssm_wdt_pke_low); |
262 | ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEH_OFFSET, ssm_wdt_pke_high); |
263 | } |
264 | EXPORT_SYMBOL_GPL(adf_gen4_set_ssm_wdtimer); |
265 | |
266 | /* |
267 | * The vector routing table is used to select the MSI-X entry to use for each |
268 | * interrupt source. |
269 | * The first ADF_GEN4_ETR_MAX_BANKS entries correspond to ring interrupts. |
270 | * The final entry corresponds to VF2PF or error interrupts. |
271 | * This vector table could be used to configure one MSI-X entry to be shared |
272 | * between multiple interrupt sources. |
273 | * |
274 | * The default routing is set to have a one to one correspondence between the |
275 | * interrupt source and the MSI-X entry used. |
276 | */ |
/*
 * adf_gen4_set_msix_default_rttable() - program 1:1 MSI-X vector routing
 * @accel_dev: acceleration device
 *
 * Writes entry i of the routing table with value i for all bank entries
 * plus the final VF2PF/error entry (hence the inclusive <= bound).
 */
void adf_gen4_set_msix_default_rttable(struct adf_accel_dev *accel_dev)
{
	void __iomem *csr;
	int i;

	csr = (&GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR])->virt_addr;
	for (i = 0; i <= ADF_GEN4_ETR_MAX_BANKS; i++)
		ADF_CSR_WR(csr, ADF_GEN4_MSIX_RTTABLE_OFFSET(i), i);
}
EXPORT_SYMBOL_GPL(adf_gen4_set_msix_default_rttable);
287 | |
/* Stub PF/VF comms hook for devices where PFVF messaging is disabled. */
int adf_pfvf_comms_disabled(struct adf_accel_dev *accel_dev)
{
	return 0;
}
EXPORT_SYMBOL_GPL(adf_pfvf_comms_disabled);
293 | |
/*
 * reset_ring_pair() - reset one ring pair through the WQM CSRs
 * @csr: base of the ETR CSR region
 * @bank_number: ring pair (bank) to reset
 *
 * Triggers the reset and polls rpresetsts until the hardware reports
 * completion, then clears the status bit.
 *
 * Return: 0 on success, -ETIMEDOUT (from read_poll_timeout) otherwise.
 */
static int reset_ring_pair(void __iomem *csr, u32 bank_number)
{
	u32 status;
	int ret;

	/* Write rpresetctl register BIT(0) as 1
	 * Since rpresetctl registers have no RW fields, no need to preserve
	 * values for other bits. Just write directly.
	 */
	ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETCTL(bank_number),
		   ADF_WQM_CSR_RPRESETCTL_RESET);

	/* Read rpresetsts register and wait for rp reset to complete */
	ret = read_poll_timeout(ADF_CSR_RD, status,
				status & ADF_WQM_CSR_RPRESETSTS_STATUS,
				ADF_RPRESET_POLL_DELAY_US,
				ADF_RPRESET_POLL_TIMEOUT_US, true,
				csr, ADF_WQM_CSR_RPRESETSTS(bank_number));
	if (!ret) {
		/* When rp reset is done, clear rpresetsts */
		ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETSTS(bank_number),
			   ADF_WQM_CSR_RPRESETSTS_STATUS);
	}

	return ret;
}
320 | |
/*
 * adf_gen4_ring_pair_reset() - reset a ring pair on a GEN4 device
 * @accel_dev: acceleration device
 * @bank_number: ring pair (bank) to reset; must be < hw_data->num_banks
 *
 * Return: 0 on success, -EINVAL for an out-of-range bank, or the error
 * returned by reset_ring_pair() on timeout.
 */
int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 etr_bar_id = hw_data->get_etr_bar_id(hw_data);
	void __iomem *csr;
	int ret;

	if (bank_number >= hw_data->num_banks)
		return -EINVAL;

	dev_dbg(&GET_DEV(accel_dev),
		"ring pair reset for bank:%d\n", bank_number);

	csr = (&GET_BARS(accel_dev)[etr_bar_id])->virt_addr;
	ret = reset_ring_pair(csr, bank_number);
	if (ret)
		dev_err(&GET_DEV(accel_dev),
			"ring pair reset failed (timeout)\n");
	else
		dev_dbg(&GET_DEV(accel_dev), "ring pair reset successful\n");

	return ret;
}
EXPORT_SYMBOL_GPL(adf_gen4_ring_pair_reset);
345 | |
/* Per-AE thread-to-arbiter map used when the device runs in dcc mode. */
static const u32 thrd_to_arb_map_dcc[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x0
};
353 | |
/* Arbiter mask (one bit per ring pair) assigned to each ring pair group. */
static const u16 rp_group_to_arb_mask[] = {
	[RP_GROUP_0] = 0x5,
	[RP_GROUP_1] = 0xA,
};
358 | |
359 | static bool is_single_service(int service_id) |
360 | { |
361 | switch (service_id) { |
362 | case SVC_DC: |
363 | case SVC_SYM: |
364 | case SVC_ASYM: |
365 | return true; |
366 | case SVC_CY: |
367 | case SVC_CY2: |
368 | case SVC_DCC: |
369 | case SVC_ASYM_DC: |
370 | case SVC_DC_ASYM: |
371 | case SVC_SYM_DC: |
372 | case SVC_DC_SYM: |
373 | default: |
374 | return false; |
375 | } |
376 | } |
377 | |
378 | int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev) |
379 | { |
380 | struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); |
381 | u32 *thd2arb_map = hw_data->thd_to_arb_map; |
382 | unsigned int ae_cnt, worker_obj_cnt, i, j; |
383 | unsigned long ae_mask, thds_mask; |
384 | int srv_id, rp_group; |
385 | u32 thd2arb_map_base; |
386 | u16 arb_mask; |
387 | |
388 | if (!hw_data->get_rp_group || !hw_data->get_ena_thd_mask || |
389 | !hw_data->get_num_aes || !hw_data->uof_get_num_objs || |
390 | !hw_data->uof_get_ae_mask) |
391 | return -EFAULT; |
392 | |
393 | srv_id = adf_get_service_enabled(accel_dev); |
394 | if (srv_id < 0) |
395 | return srv_id; |
396 | |
397 | ae_cnt = hw_data->get_num_aes(hw_data); |
398 | worker_obj_cnt = hw_data->uof_get_num_objs(accel_dev) - |
399 | ADF_GEN4_ADMIN_ACCELENGINES; |
400 | |
401 | if (srv_id == SVC_DCC) { |
402 | if (ae_cnt > ICP_QAT_HW_AE_DELIMITER) |
403 | return -EINVAL; |
404 | |
405 | memcpy(thd2arb_map, thrd_to_arb_map_dcc, |
406 | array_size(sizeof(*thd2arb_map), ae_cnt)); |
407 | return 0; |
408 | } |
409 | |
410 | for (i = 0; i < worker_obj_cnt; i++) { |
411 | ae_mask = hw_data->uof_get_ae_mask(accel_dev, i); |
412 | rp_group = hw_data->get_rp_group(accel_dev, ae_mask); |
413 | thds_mask = hw_data->get_ena_thd_mask(accel_dev, i); |
414 | thd2arb_map_base = 0; |
415 | |
416 | if (rp_group >= RP_GROUP_COUNT || rp_group < RP_GROUP_0) |
417 | return -EINVAL; |
418 | |
419 | if (thds_mask == ADF_GEN4_ENA_THD_MASK_ERROR) |
420 | return -EINVAL; |
421 | |
422 | if (is_single_service(service_id: srv_id)) |
423 | arb_mask = rp_group_to_arb_mask[RP_GROUP_0] | |
424 | rp_group_to_arb_mask[RP_GROUP_1]; |
425 | else |
426 | arb_mask = rp_group_to_arb_mask[rp_group]; |
427 | |
428 | for_each_set_bit(j, &thds_mask, ADF_NUM_THREADS_PER_AE) |
429 | thd2arb_map_base |= arb_mask << (j * 4); |
430 | |
431 | for_each_set_bit(j, &ae_mask, ae_cnt) |
432 | thd2arb_map[j] = thd2arb_map_base; |
433 | } |
434 | return 0; |
435 | } |
436 | EXPORT_SYMBOL_GPL(adf_gen4_init_thd2arb_map); |
437 | |
/*
 * adf_gen4_get_ring_to_svc_map() - derive the ring pair to service mapping
 * @accel_dev: acceleration device
 *
 * Determines which service (SYM/ASYM/COMP) each ring pair group serves by
 * inspecting the last RP_GROUP_COUNT worker firmware objects, then encodes
 * the result with groups 0/1 alternating across the four ring pairs. In dcc
 * mode all ring pairs serve compression.
 *
 * Return: the encoded map, or 0 if required hw_data callbacks are missing
 * or a worker object maps to an invalid group.
 */
u16 adf_gen4_get_ring_to_svc_map(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	enum adf_cfg_service_type rps[RP_GROUP_COUNT] = { };
	unsigned int ae_mask, start_id, worker_obj_cnt, i;
	u16 ring_to_svc_map;
	int rp_group;

	if (!hw_data->get_rp_group || !hw_data->uof_get_ae_mask ||
	    !hw_data->uof_get_obj_type || !hw_data->uof_get_num_objs)
		return 0;

	/* If dcc, all rings handle compression requests */
	if (adf_get_service_enabled(accel_dev) == SVC_DCC) {
		for (i = 0; i < RP_GROUP_COUNT; i++)
			rps[i] = COMP;
		goto set_mask;
	}

	worker_obj_cnt = hw_data->uof_get_num_objs(accel_dev) -
			 ADF_GEN4_ADMIN_ACCELENGINES;
	start_id = worker_obj_cnt - RP_GROUP_COUNT;

	for (i = start_id; i < worker_obj_cnt; i++) {
		ae_mask = hw_data->uof_get_ae_mask(accel_dev, i);
		rp_group = hw_data->get_rp_group(accel_dev, ae_mask);
		if (rp_group >= RP_GROUP_COUNT || rp_group < RP_GROUP_0)
			return 0;

		switch (hw_data->uof_get_obj_type(accel_dev, i)) {
		case ADF_FW_SYM_OBJ:
			rps[rp_group] = SYM;
			break;
		case ADF_FW_ASYM_OBJ:
			rps[rp_group] = ASYM;
			break;
		case ADF_FW_DC_OBJ:
			rps[rp_group] = COMP;
			break;
		default:
			rps[rp_group] = 0;
			break;
		}
	}

set_mask:
	ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT |
			  rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT |
			  rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT |
			  rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_3_SHIFT;

	return ring_to_svc_map;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_ring_to_svc_map);
492 |
Definitions
- build_csr_ring_base_addr
- read_csr_ring_head
- write_csr_ring_head
- read_csr_ring_tail
- write_csr_ring_tail
- read_csr_e_stat
- write_csr_ring_config
- write_csr_ring_base
- write_csr_int_flag
- write_csr_int_srcsel
- write_csr_int_col_en
- write_csr_int_col_ctl
- write_csr_int_flag_and_col
- write_csr_ring_srv_arb_en
- adf_gen4_init_hw_csr_ops
- adf_gen4_get_accel_mask
- adf_gen4_get_num_accels
- adf_gen4_get_num_aes
- adf_gen4_get_misc_bar_id
- adf_gen4_get_etr_bar_id
- adf_gen4_get_sram_bar_id
- adf_gen4_get_sku
- adf_gen4_get_arb_info
- adf_gen4_get_admin_info
- adf_gen4_get_heartbeat_clock
- adf_gen4_enable_error_correction
- adf_gen4_enable_ints
- adf_gen4_init_device
- adf_gen4_unpack_ssm_wdtimer
- adf_gen4_set_ssm_wdtimer
- adf_gen4_set_msix_default_rttable
- adf_pfvf_comms_disabled
- reset_ring_pair
- adf_gen4_ring_pair_reset
- thrd_to_arb_map_dcc
- rp_group_to_arb_mask
- is_single_service
- adf_gen4_init_thd2arb_map
Improve your Profiling and Debugging skills
Find out more