/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __CARD_BASE_H__
#define __CARD_BASE_H__

/**
 * IBM Accelerator Family 'GenWQE'
 *
 * (C) Copyright IBM Corp. 2013
 *
 * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
 * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
 * Author: Michael Jung <mijung@gmx.net>
 * Author: Michael Ruettger <michael@ibmra.de>
 */

/*
 * Interfaces within the GenWQE module. Defines genwqe_card and
 * ddcb_queue as well as ddcb_requ.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/cdev.h>
#include <linux/stringify.h>
#include <linux/pci.h>
#include <linux/semaphore.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/debugfs.h>
#include <linux/slab.h>

#include <linux/genwqe/genwqe_card.h>
#include "genwqe_driver.h"
#define GENWQE_MSI_IRQS			4    /* Just one supported, no MSI-X */

#define GENWQE_MAX_VFS			15   /* maximum 15 VFs are possible */
#define GENWQE_MAX_FUNCS		16   /* 1 PF and 15 VFs */
#define GENWQE_CARD_NO_MAX		(16 * GENWQE_MAX_FUNCS)

/* Compile parameters, some of them appear in debugfs for later adjustment */
#define GENWQE_DDCB_MAX			32   /* DDCBs on the work-queue */
#define GENWQE_POLLING_ENABLED		0    /* in case of irqs not working */
#define GENWQE_DDCB_SOFTWARE_TIMEOUT	10   /* timeout per DDCB in seconds */
#define GENWQE_KILL_TIMEOUT		8    /* time until process gets killed */
#define GENWQE_VF_JOBTIMEOUT_MSEC	250  /* 250 msec */
#define GENWQE_PF_JOBTIMEOUT_MSEC	8000 /* 8 sec should be ok */
#define GENWQE_HEALTH_CHECK_INTERVAL	4    /* <= 0: disabled */

/* Sysfs attribute groups used when we create the genwqe device */
extern const struct attribute_group *genwqe_attribute_groups[];

/*
 * Config space for Genwqe5 A7:
 * 00:[14 10 4b 04]40 00 10 00[00 00 00 12]00 00 00 00
 * 10: 0c 00 00 f0 07 3c 00 00 00 00 00 00 00 00 00 00
 * 20: 00 00 00 00 00 00 00 00 00 00 00 00[14 10 4b 04]
 * 30: 00 00 00 00 50 00 00 00 00 00 00 00 00 00 00 00
 */
#define PCI_DEVICE_GENWQE		0x044b /* Genwqe DeviceID */

#define PCI_SUBSYSTEM_ID_GENWQE5	0x035f /* Genwqe A5 Subsystem-ID */
#define PCI_SUBSYSTEM_ID_GENWQE5_NEW	0x044b /* Genwqe A5 Subsystem-ID */
#define PCI_CLASSCODE_GENWQE5		0x1200 /* UNKNOWN */

#define PCI_SUBVENDOR_ID_IBM_SRIOV	0x0000
#define PCI_SUBSYSTEM_ID_GENWQE5_SRIOV	0x0000 /* Genwqe A5 Subsystem-ID */
#define PCI_CLASSCODE_GENWQE5_SRIOV	0x1200 /* UNKNOWN */

#define GENWQE_SLU_ARCH_REQ		2 /* Required SLU architecture level */

/**
 * struct genwqe_reg - Genwqe data dump functionality
 */
struct genwqe_reg {
	u32 addr;
	u32 idx;
	u64 val;
};

/*
 * enum genwqe_dbg_type - Specify chip unit to dump/debug
 */
enum genwqe_dbg_type {
	GENWQE_DBG_UNIT0 = 0,  /* captured before prev errs cleared */
	GENWQE_DBG_UNIT1 = 1,
	GENWQE_DBG_UNIT2 = 2,
	GENWQE_DBG_UNIT3 = 3,
	GENWQE_DBG_UNIT4 = 4,
	GENWQE_DBG_UNIT5 = 5,
	GENWQE_DBG_UNIT6 = 6,
	GENWQE_DBG_UNIT7 = 7,
	GENWQE_DBG_REGS = 8,
	GENWQE_DBG_DMA = 9,
	GENWQE_DBG_UNITS = 10, /* max number of possible debug units */
};

/* Software error injection to simulate card failures */
#define GENWQE_INJECT_HARDWARE_FAILURE	0x00000001 /* injects -1 reg reads */
#define GENWQE_INJECT_BUS_RESET_FAILURE 0x00000002 /* pci_bus_reset fail */
#define GENWQE_INJECT_GFIR_FATAL	0x00000004 /* GFIR = 0x0000ffff */
#define GENWQE_INJECT_GFIR_INFO		0x00000008 /* GFIR = 0xffff0000 */

/*
 * Genwqe card description and management data.
 *
 * Error-handling in case of card malfunction
 * ------------------------------------------
 *
 * If the card is detected to be defective, the outside environment
 * will cause the PCI layer to call deinit (the cleanup function for
 * probe). This has the same effect as an unbind/bind operation on
 * the card.
 *
 * The genwqe card driver implements a health checking thread which
 * verifies the card function. If this detects a problem, the card's
 * device is shut down and restarted, along with a reset of the card
 * and its queue.
 *
 * All functions accessing the card device return -EIO or -ENODEV to
 * indicate the malfunction to the user. The user has to close the
 * file descriptor and open a new one once the card becomes available
 * again.
 *
 * If the open file descriptor is set up to receive SIGIO, the signal
 * is generated for the application, which has to provide a handler
 * to react to it. If the application does not close the open file
 * descriptor, a SIGKILL is sent to enforce freeing the card's
 * resources.
 *
 * I did not find a different way to prevent kernel problems due to
 * the reference counters for the card's character devices getting
 * out of sync. The character device deallocation does not block,
 * even if there is still an open file descriptor pending. If this
 * pending descriptor is closed, the data structures used by the
 * character device are reinstantiated, which will lead to the
 * reference counter dropping below the allowed values.
 *
 * Card recovery
 * -------------
 *
 * To test the internal driver recovery the following command can be used:
 *   sudo sh -c 'echo 0xfffff > /sys/class/genwqe/genwqe0_card/err_inject'
 */
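
/*
 * A minimal user-space sketch of the recovery protocol described
 * above, assuming the GENWQE_EXECUTE_DDCB ioctl from
 * <linux/genwqe/genwqe_card.h> and a device node named like the
 * sysfs entry above. On -EIO/-ENODEV the descriptor is closed and
 * reopened once the card is back:
 *
 *   int fd = open("/dev/genwqe0_card", O_RDWR);
 *   ...
 *   rc = ioctl(fd, GENWQE_EXECUTE_DDCB, &cmd);
 *   if (rc < 0 && (errno == EIO || errno == ENODEV)) {
 *           close(fd);
 *           fd = open("/dev/genwqe0_card", O_RDWR);
 *   }
 */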

/**
 * enum dma_mapping_type - Mapping type definition
 *
 * To avoid copying data around we use user memory directly. To do
 * this we need to pin/swap-in the memory and request a DMA address
 * for it.
 */
enum dma_mapping_type {
	GENWQE_MAPPING_RAW = 0,		/* contiguous memory buffer */
	GENWQE_MAPPING_SGL_TEMP,	/* sglist dynamically used */
	GENWQE_MAPPING_SGL_PINNED,	/* sglist used with pinning */
};

/**
 * struct dma_mapping - Information about memory mappings done by the driver
 */
struct dma_mapping {
	enum dma_mapping_type type;

	void *u_vaddr;			/* user-space vaddr/non-aligned */
	void *k_vaddr;			/* kernel-space vaddr/non-aligned */
	dma_addr_t dma_addr;		/* physical DMA address */

	struct page **page_list;	/* list of pages used by user buff */
	dma_addr_t *dma_list;		/* list of dma addresses per page */
	unsigned int nr_pages;		/* number of pages */
	unsigned int size;		/* size in bytes */

	struct list_head card_list;	/* list of usr_maps for card */
	struct list_head pin_list;	/* list of pinned memory for dev */
	int write;			/* writable map? useful in unmapping */
};

static inline void genwqe_mapping_init(struct dma_mapping *m,
				       enum dma_mapping_type type)
{
	memset(m, 0, sizeof(*m));
	m->type = type;
	m->write = 1; /* Assume the maps we create are R/W */
}
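
/*
 * Typical life cycle of a struct dma_mapping, using the helpers
 * declared further below (a sketch; error handling omitted).
 * genwqe_user_vmap() pins and maps the user pages; the resulting
 * m.dma_addr can then be handed to the card:
 *
 *   struct dma_mapping m;
 *
 *   genwqe_mapping_init(&m, GENWQE_MAPPING_RAW);
 *   rc = genwqe_user_vmap(cd, &m, uaddr, size);
 *   ...
 *   rc = genwqe_user_vunmap(cd, &m);
 */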

/**
 * struct ddcb_queue - DDCB queue data
 * @ddcb_max: Number of DDCBs on the queue
 * @ddcb_next: Next free DDCB
 * @ddcb_act: Next DDCB supposed to finish
 * @ddcb_seq: Sequence number of last DDCB
 * @ddcbs_in_flight: Currently enqueued DDCBs
 * @ddcbs_completed: Number of already completed DDCBs
 * @ddcbs_max_in_flight: Maximum number of DDCBs in flight so far
 * @return_on_busy: Number of -EBUSY returns on full queue
 * @wait_on_busy: Number of waits on full queue
 * @ddcb_daddr: DMA address of first DDCB in the queue
 * @ddcb_vaddr: Kernel virtual address of first DDCB in the queue
 * @ddcb_req: Associated requests (one per DDCB)
 * @ddcb_waitqs: Associated wait queues (one per DDCB)
 * @ddcb_lock: Lock to protect queuing operations
 * @busy_waitq: Wait on next DDCB finishing; used when the queue is full
 */
struct ddcb_queue {
	int ddcb_max;			/* amount of DDCBs */
	int ddcb_next;			/* next available DDCB num */
	int ddcb_act;			/* DDCB to be processed */
	u16 ddcb_seq;			/* slc seq num */
	unsigned int ddcbs_in_flight;	/* number of ddcbs in processing */
	unsigned int ddcbs_completed;
	unsigned int ddcbs_max_in_flight;
	unsigned int return_on_busy;	/* how many times -EBUSY? */
	unsigned int wait_on_busy;

	dma_addr_t ddcb_daddr;		/* DMA address */
	struct ddcb *ddcb_vaddr;	/* kernel virtual addr for DDCBs */
	struct ddcb_requ **ddcb_req;	/* ddcb processing parameter */
	wait_queue_head_t *ddcb_waitqs; /* waitqueue per ddcb */

	spinlock_t ddcb_lock;		/* exclusive access to queue */
	wait_queue_head_t busy_waitq;	/* wait for ddcb processing */

	/* registers of the respective queue to be used */
	u32 IO_QUEUE_CONFIG;
	u32 IO_QUEUE_STATUS;
	u32 IO_QUEUE_SEGMENT;
	u32 IO_QUEUE_INITSQN;
	u32 IO_QUEUE_WRAP;
	u32 IO_QUEUE_OFFSET;
	u32 IO_QUEUE_WTIME;
	u32 IO_QUEUE_ERRCNTS;
	u32 IO_QUEUE_LRW;
};
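
/*
 * All index and statistics fields above are protected by ddcb_lock;
 * a sketch of the expected access pattern:
 *
 *   unsigned long flags;
 *
 *   spin_lock_irqsave(&queue->ddcb_lock, flags);
 *   ... inspect or update ddcb_next, ddcb_act, ddcbs_in_flight ...
 *   spin_unlock_irqrestore(&queue->ddcb_lock, flags);
 */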

/*
 * GFIR, SLU_UNITCFG, APP_UNITCFG,
 * plus 8 units, each with a FIR/FEC pair and 64 secondary FIR/FEC pairs.
 */
#define GENWQE_FFDC_REGS	(3 + (8 * (2 + 2 * 64)))

struct genwqe_ffdc {
	unsigned int entries;
	struct genwqe_reg *regs;
};

/**
 * struct genwqe_dev - GenWQE device information
 * @card_state: Card operation state, see above
 * @ffdc: First Failure Data Capture buffers for each unit
 * @card_thread: Working thread to operate the DDCB queue
 * @queue_waitq: Wait queue used in card_thread
 * @queue: DDCB queue
 * @health_thread: Card monitoring thread (only for PFs)
 * @health_waitq: Wait queue used in health_thread
 * @pci_dev: Associated PCI device (function)
 * @mmio: Base address of 64-bit register space
 * @mmio_len: Length of register area
 * @file_lock: Lock to protect access to file_list
 * @file_list: List of all processes with open GenWQE file descriptors
 *
 * This struct contains all information needed to communicate with a
 * GenWQE card. It is initialized when a GenWQE device is found and
 * destroyed when it goes away. It holds data to maintain the queue as
 * well as data needed to feed the user interfaces.
 */
struct genwqe_dev {
	enum genwqe_card_state card_state;
	spinlock_t print_lock;

	int card_idx;			/* card index 0..CARD_NO_MAX-1 */
	u64 flags;			/* general flags */

	/* FFDC data gathering */
	struct genwqe_ffdc ffdc[GENWQE_DBG_UNITS];

	/* DDCB workqueue */
	struct task_struct *card_thread;
	wait_queue_head_t queue_waitq;
	struct ddcb_queue queue;	/* genwqe DDCB queue */
	unsigned int irqs_processed;

	/* Card health checking thread */
	struct task_struct *health_thread;
	wait_queue_head_t health_waitq;

	int use_platform_recovery;	/* use platform recovery mechanisms */

	/* char device */
	dev_t devnum_genwqe;		/* major/minor num card */
	const struct class *class_genwqe; /* reference to class object */
	struct device *dev;		/* for device creation */
	struct cdev cdev_genwqe;	/* char device for card */

	struct dentry *debugfs_root;	/* debugfs card root directory */
	struct dentry *debugfs_genwqe;	/* debugfs driver root directory */

	/* pci resources */
	struct pci_dev *pci_dev;	/* PCI device */
	void __iomem *mmio;		/* BAR-0 MMIO start */
	unsigned long mmio_len;
	int num_vfs;
	u32 vf_jobtimeout_msec[GENWQE_MAX_VFS];
	int is_privileged;		/* access to all regs possible */

	/* config regs which we need often */
	u64 slu_unitcfg;
	u64 app_unitcfg;
	u64 softreset;
	u64 err_inject;
	u64 last_gfir;
	char app_name[5];

	spinlock_t file_lock;		/* lock for open files */
	struct list_head file_list;	/* list of open files */

	/* debugfs parameters */
	int ddcb_software_timeout;	/* wait until DDCB times out */
	int skip_recovery;		/* circumvention if recovery fails */
	int kill_timeout;		/* wait after sending SIGKILL */
};

/**
 * enum genwqe_requ_state - State of a DDCB execution request
 */
enum genwqe_requ_state {
	GENWQE_REQU_NEW = 0,
	GENWQE_REQU_ENQUEUED = 1,
	GENWQE_REQU_TAPPED = 2,
	GENWQE_REQU_FINISHED = 3,
	GENWQE_REQU_STATE_MAX,
};

/**
 * struct genwqe_sgl - Scatter gather list describing user-space memory
 * @sgl: scatter gather list; needs to be 128 byte aligned
 * @sgl_dma_addr: dma address of sgl
 * @sgl_size: size of area used for sgl
 * @user_addr: user-space address of memory area
 * @user_size: size of user-space memory area
 * @write: should we write it back to user-space?
 * @nr_pages: number of user pages covered
 * @fpage_offs: offset of the data within the first page
 * @fpage_size: number of bytes used from the first page
 * @lpage_size: number of bytes used from the last page
 * @fpage: buffer for the partial first page if needed
 * @fpage_dma_addr: dma address of the partial first page
 * @lpage: buffer for the partial last page if needed
 * @lpage_dma_addr: dma address of the partial last page
 */
struct genwqe_sgl {
	dma_addr_t sgl_dma_addr;
	struct sg_entry *sgl;
	size_t sgl_size;		/* size of sgl */

	void __user *user_addr;		/* user-space base-address */
	size_t user_size;		/* size of memory area */

	int write;

	unsigned long nr_pages;
	unsigned long fpage_offs;
	size_t fpage_size;
	size_t lpage_size;

	void *fpage;
	dma_addr_t fpage_dma_addr;

	void *lpage;
	dma_addr_t lpage_dma_addr;
};

int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
			  void __user *user_addr, size_t user_size, int write);

int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
		     dma_addr_t *dma_list);

int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl);
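
/*
 * Expected calling sequence for the sgl helpers above (a sketch;
 * error paths omitted). genwqe_setup_sgl() builds the hardware
 * sg_entry list from the per-page dma_list, genwqe_free_sync_sgl()
 * writes the data back to user-space if required and releases the
 * buffers:
 *
 *   struct genwqe_sgl sgl;
 *
 *   rc = genwqe_alloc_sync_sgl(cd, &sgl, user_addr, user_size, write);
 *   rc = genwqe_setup_sgl(cd, &sgl, dma_list);
 *   ...
 *   rc = genwqe_free_sync_sgl(cd, &sgl);
 */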

/**
 * struct ddcb_requ - Kernel internal representation of the DDCB request
 * @cmd: User space representation of the DDCB execution request
 */
struct ddcb_requ {
	/* kernel specific content */
	enum genwqe_requ_state req_state; /* request status */
	int num;			/* ddcb_no for this request */
	struct ddcb_queue *queue;	/* associated queue */

	struct dma_mapping dma_mappings[DDCB_FIXUPS];
	struct genwqe_sgl sgls[DDCB_FIXUPS];

	/* kernel/user shared content */
	struct genwqe_ddcb_cmd cmd;	/* user-space DDCB execution request */
	struct genwqe_debug_data debug_data;
};

/**
 * struct genwqe_file - Information for open GenWQE devices
 */
struct genwqe_file {
	struct genwqe_dev *cd;
	struct genwqe_driver *client;
	struct file *filp;

	struct fasync_struct *async_queue;
	struct pid *opener;
	struct list_head list;		/* entry in list of open files */

	spinlock_t map_lock;		/* lock for dma_mappings */
	struct list_head map_list;	/* list of dma_mappings */

	spinlock_t pin_lock;		/* lock for pinned memory */
	struct list_head pin_list;	/* list of pinned memory */
};

int genwqe_setup_service_layer(struct genwqe_dev *cd); /* for PF only */
int genwqe_finish_queue(struct genwqe_dev *cd);
int genwqe_release_service_layer(struct genwqe_dev *cd);

/**
 * genwqe_get_slu_id() - Read Service Layer Unit Id
 * Return: 0x00: Development code
 *         0x01: SLC1 (old)
 *         0x02: SLC2 (sept2012)
 *         0x03: SLC2 (feb2013, generic driver)
 */
static inline int genwqe_get_slu_id(struct genwqe_dev *cd)
{
	return (int)((cd->slu_unitcfg >> 32) & 0xff);
}

int genwqe_ddcbs_in_flight(struct genwqe_dev *cd);

u8 genwqe_card_type(struct genwqe_dev *cd);
int genwqe_card_reset(struct genwqe_dev *cd);
int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count);
void genwqe_reset_interrupt_capability(struct genwqe_dev *cd);

int genwqe_device_create(struct genwqe_dev *cd);
int genwqe_device_remove(struct genwqe_dev *cd);

/* debugfs */
void genwqe_init_debugfs(struct genwqe_dev *cd);
void genqwe_exit_debugfs(struct genwqe_dev *cd);

int genwqe_read_softreset(struct genwqe_dev *cd);

/* Hardware Circumventions */
int genwqe_recovery_on_fatal_gfir_required(struct genwqe_dev *cd);
int genwqe_flash_readback_fails(struct genwqe_dev *cd);

/**
 * genwqe_write_vreg() - Write register in VF window
 * @cd:   genwqe device
 * @reg:  register address
 * @val:  value to write
 * @func: 0: PF, 1: VF0, ..., 15: VF14
 */
int genwqe_write_vreg(struct genwqe_dev *cd, u32 reg, u64 val, int func);

/**
 * genwqe_read_vreg() - Read register in VF window
 * @cd:   genwqe device
 * @reg:  register address
 * @func: 0: PF, 1: VF0, ..., 15: VF14
 *
 * Return: content of the register
 */
u64 genwqe_read_vreg(struct genwqe_dev *cd, u32 reg, int func);
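
/*
 * Example: from the privileged PF, write a per-VF register of VF0
 * (func = 1). The register name here is illustrative only, not a
 * defined constant of this header:
 *
 *   genwqe_write_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT, timeout_val, 1);
 */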

/* FFDC Buffer Management */
int genwqe_ffdc_buff_size(struct genwqe_dev *cd, int unit_id);
int genwqe_ffdc_buff_read(struct genwqe_dev *cd, int unit_id,
			  struct genwqe_reg *regs, unsigned int max_regs);
int genwqe_read_ffdc_regs(struct genwqe_dev *cd, struct genwqe_reg *regs,
			  unsigned int max_regs, int all);
int genwqe_ffdc_dump_dma(struct genwqe_dev *cd,
			 struct genwqe_reg *regs, unsigned int max_regs);

int genwqe_init_debug_data(struct genwqe_dev *cd,
			   struct genwqe_debug_data *d);

void genwqe_init_crc32(void);
int genwqe_read_app_id(struct genwqe_dev *cd, char *app_name, int len);

/* Memory allocation/deallocation; dma address handling */
int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m,
		     void *uaddr, unsigned long size);

int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m);

static inline bool dma_mapping_used(struct dma_mapping *m)
{
	if (!m)
		return false;
	return m->size != 0;
}

/**
 * __genwqe_execute_ddcb() - Execute DDCB request with addr translation
 *
 * This function will do the address translation changes to the DDCBs
 * according to the definitions required by the ATS field. It looks up
 * the memory allocation buffer or does vmap/vunmap for the respective
 * user-space buffers, including page pinning and scatter gather list
 * buildup and teardown.
 */
int __genwqe_execute_ddcb(struct genwqe_dev *cd,
			  struct genwqe_ddcb_cmd *cmd, unsigned int f_flags);

/**
 * __genwqe_execute_raw_ddcb() - Execute DDCB request without addr translation
 *
 * This version will not do address translation or any modification of
 * the DDCB data. It is used e.g. for the MoveFlash DDCB which is
 * entirely prepared by the driver itself. That means the appropriate
 * DMA addresses are already in the DDCB and do not need any
 * modification.
 */
int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd,
			      struct genwqe_ddcb_cmd *cmd,
			      unsigned int f_flags);
int __genwqe_enqueue_ddcb(struct genwqe_dev *cd,
			  struct ddcb_requ *req,
			  unsigned int f_flags);

int __genwqe_wait_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req);
int __genwqe_purge_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req);

/* register access */
int __genwqe_writeq(struct genwqe_dev *cd, u64 byte_offs, u64 val);
u64 __genwqe_readq(struct genwqe_dev *cd, u64 byte_offs);
int __genwqe_writel(struct genwqe_dev *cd, u64 byte_offs, u32 val);
u32 __genwqe_readl(struct genwqe_dev *cd, u64 byte_offs);
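
/*
 * Reads from a broken or removed card return all 1s
 * (IO_ILLEGAL_VALUE), which callers should check for; a sketch,
 * using IO_SLU_BITSTREAM as in the genwqe_is_privileged() example
 * below:
 *
 *   u64 val = __genwqe_readq(cd, IO_SLU_BITSTREAM);
 *   if (val == IO_ILLEGAL_VALUE)
 *           return -EIO;
 */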

void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
				dma_addr_t *dma_handle);
void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size,
			      void *vaddr, dma_addr_t dma_handle);

/* Base clock frequency in MHz */
int genwqe_base_clock_frequency(struct genwqe_dev *cd);

/* Before FFDC is captured the traps should be stopped. */
void genwqe_stop_traps(struct genwqe_dev *cd);
void genwqe_start_traps(struct genwqe_dev *cd);
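
/*
 * A sketch of the resulting FFDC capture sequence, together with the
 * FFDC buffer management helpers above (the 0 for the 'all' argument
 * is illustrative):
 *
 *   genwqe_stop_traps(cd);
 *   genwqe_read_ffdc_regs(cd, regs, max_regs, 0);
 *   genwqe_start_traps(cd);
 */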

/* Hardware circumvention */
bool genwqe_need_err_masking(struct genwqe_dev *cd);

/**
 * genwqe_is_privileged() - Determine operation mode for PCI function
 *
 * On Intel with SRIOV support we see:
 *   PF: is_physfn = 1 is_virtfn = 0
 *   VF: is_physfn = 0 is_virtfn = 1
 *
 * On systems without SRIOV support _and_ on virtualized systems we get:
 *       is_physfn = 0 is_virtfn = 0
 *
 * Other vendors use individual PCI device ids to distinguish between
 * virtual function drivers and physical function drivers. GenWQE
 * unfortunately has just one PCI device id for both, VFs and PF.
 *
 * The following code is used to distinguish if the card is running in
 * privileged mode, either as true PF or in a virtualized system with
 * full register access e.g. currently on PowerPC:
 *
 *   if (pci_dev->is_virtfn)
 *           cd->is_privileged = 0;
 *   else
 *           cd->is_privileged = (__genwqe_readq(cd, IO_SLU_BITSTREAM)
 *                                != IO_ILLEGAL_VALUE);
 */
static inline int genwqe_is_privileged(struct genwqe_dev *cd)
{
	return cd->is_privileged;
}

#endif /* __CARD_BASE_H__ */