/* SPDX-License-Identifier: GPL-2.0-only
 *
 * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#ifndef _QAIC_H_
#define _QAIC_H_

#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/mhi.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <drm/drm_device.h>
#include <drm/drm_gem.h>

#define QAIC_DBC_BASE SZ_128K
#define QAIC_DBC_SIZE SZ_4K

#define QAIC_NO_PARTITION -1

#define QAIC_DBC_OFF(i) ((i) * QAIC_DBC_SIZE + QAIC_DBC_BASE)
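
/*
 * Illustrative sketch (not part of the driver): QAIC_DBC_OFF() gives the byte
 * offset of a channel's register window within the BAR that exposes the DBC
 * registers. For example, channel 3 maps at 128K + 3 * 4K:
 *
 *	void __iomem *dbc3_regs = dbc_bar_base + QAIC_DBC_OFF(3);
 *
 * where dbc_bar_base is assumed to be the ioremapped base of that BAR.
 */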

#define to_qaic_bo(obj) container_of(obj, struct qaic_bo, base)
#define to_qaic_drm_device(dev) container_of(dev, struct qaic_drm_device, drm)
#define to_drm(qddev) (&(qddev)->drm)
#define to_accel_kdev(qddev) (to_drm(qddev)->accel->kdev) /* Return Linux device of accel node */
#define to_qaic_device(dev) (to_qaic_drm_device((dev))->qdev)
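
/*
 * Illustrative sketch (not part of the driver): the container_of() based
 * converters above chain together, so going from a struct drm_device to the
 * owning struct qaic_device is a single step:
 *
 *	struct qaic_device *qdev = to_qaic_device(drm_dev);
 *
 * which expands to to_qaic_drm_device(drm_dev)->qdev.
 */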

enum __packed dev_states {
	/* Device is offline or will be very soon */
	QAIC_OFFLINE,
	/* Device is booting, not clear if it's in a usable state */
	QAIC_BOOT,
	/* Device is fully operational */
	QAIC_ONLINE,
};
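
/*
 * Sketch of the intended use (an illustration drawn from the state comments
 * above, not verbatim driver code): paths that talk to the device are
 * expected to bail out unless the device is fully up:
 *
 *	if (qdev->dev_state != QAIC_ONLINE)
 *		return -ENODEV;
 */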

extern bool datapath_polling;

struct qaic_user {
	/* Uniquely identifies this user for the device */
	int handle;
	struct kref ref_count;
	/* Char device opened by this user */
	struct qaic_drm_device *qddev;
	/* Node in list of users that opened this drm device */
	struct list_head node;
	/* SRCU used to synchronize this user during cleanup */
	struct srcu_struct qddev_lock;
	atomic_t chunk_id;
};
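
/*
 * Illustrative SRCU reader pattern (a sketch, not verbatim driver code):
 * qddev_lock lets cleanup wait out in-flight references to this user's
 * drm device:
 *
 *	int rcu_id = srcu_read_lock(&usr->qddev_lock);
 *
 *	if (!usr->qddev) {
 *		srcu_read_unlock(&usr->qddev_lock, rcu_id);
 *		return -ENODEV;
 *	}
 *	... use usr->qddev ...
 *	srcu_read_unlock(&usr->qddev_lock, rcu_id);
 */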

struct dma_bridge_chan {
	/* Pointer to device struct maintained by driver */
	struct qaic_device *qdev;
	/* ID of this DMA bridge channel (DBC) */
	unsigned int id;
	/* Synchronizes access to xfer_list */
	spinlock_t xfer_lock;
	/* Base address of request queue */
	void *req_q_base;
	/* Base address of response queue */
	void *rsp_q_base;
	/*
	 * Base bus address of request queue. The response queue bus address
	 * can be calculated by adding the request queue size to this variable
	 */
	dma_addr_t dma_addr;
	/* Total size of request and response queue in bytes */
	u32 total_size;
	/* Capacity of request/response queue */
	u32 nelem;
	/* The user that opened this DBC */
	struct qaic_user *usr;
	/*
	 * Request ID of the next memory handle that goes in the request
	 * queue. One memory handle can enqueue more than one request element;
	 * all request elements that belong to the same memory handle share
	 * the same request ID
	 */
	u16 next_req_id;
	/* true: DBC is in use; false: DBC not in use */
	bool in_use;
	/*
	 * Base address of device registers. Used to read/write the head and
	 * tail pointers of this DBC's request and response queues.
	 */
	void __iomem *dbc_base;
	/* Head of list where each node is a memory handle queued in request queue */
	struct list_head xfer_list;
	/* Synchronizes DBC readers during cleanup */
	struct srcu_struct ch_lock;
	/*
	 * When this DBC is released, any thread waiting on this wait queue is
	 * woken up
	 */
	wait_queue_head_t dbc_release;
	/* Head of list where each node is a BO associated with this DBC */
	struct list_head bo_lists;
	/* The IRQ line for this DBC. Used for polling */
	unsigned int irq;
	/* Polling work item to simulate interrupts */
	struct work_struct poll_work;
};
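
/*
 * Worked example (a sketch based on the queue layout described above): the
 * request and response queues share one DMA allocation, so the response
 * queue's bus address is derived rather than stored separately:
 *
 *	dma_addr_t rsp_q_dma = dbc->dma_addr +
 *			       dbc->nelem * get_dbc_req_elem_size();
 *
 * assuming total_size covers nelem request elements followed by nelem
 * response elements, as the comments above describe.
 */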

struct qaic_device {
	/* Pointer to base PCI device struct of our physical device */
	struct pci_dev *pdev;
	/* Req. ID of request that will be queued next in MHI control device */
	u32 next_seq_num;
	/* Base address of BAR 0 */
	void __iomem *bar_0;
	/* Base address of BAR 2 */
	void __iomem *bar_2;
	/* Controller structure for MHI devices */
	struct mhi_controller *mhi_cntrl;
	/* MHI control channel device */
	struct mhi_device *cntl_ch;
	/* List of requests queued in MHI control device */
	struct list_head cntl_xfer_list;
	/* Synchronizes MHI control device transactions and its xfer list */
	struct mutex cntl_mutex;
	/* Array of DBC structs of this device */
	struct dma_bridge_chan *dbc;
	/* Work queue for tasks related to MHI control device */
	struct workqueue_struct *cntl_wq;
	/* Synchronizes all the users of device during cleanup */
	struct srcu_struct dev_lock;
	/* Track the state of the device during resets */
	enum dev_states dev_state;
	/* true: single MSI is used to operate device */
	bool single_msi;
	/*
	 * true: A tx MHI transaction has failed and an rx buffer is still
	 * queued in the control device. Such a buffer is considered a lost
	 * rx buffer
	 * false: No rx buffer is lost in the control device
	 */
	bool cntl_lost_buf;
	/* Maximum number of DBCs supported by this device */
	u32 num_dbc;
	/* Reference to the drm_device for this device when it is created */
	struct qaic_drm_device *qddev;
	/* Generate the CRC of a control message */
	u32 (*gen_crc)(void *msg);
	/* Validate the CRC of a control message */
	bool (*valid_crc)(void *msg);
	/* MHI "QAIC_TIMESYNC" channel device */
	struct mhi_device *qts_ch;
	/* Work queue for tasks related to MHI "QAIC_TIMESYNC" channel */
	struct workqueue_struct *qts_wq;
};
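
/*
 * Sketch of how a caller would synchronize against device reset (an
 * illustration of dev_lock's role, not verbatim driver code):
 *
 *	int rcu_id = srcu_read_lock(&qdev->dev_lock);
 *
 *	if (qdev->dev_state != QAIC_ONLINE) {
 *		srcu_read_unlock(&qdev->dev_lock, rcu_id);
 *		return -ENODEV;
 *	}
 *	... interact with the device ...
 *	srcu_read_unlock(&qdev->dev_lock, rcu_id);
 */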

struct qaic_drm_device {
	/* The drm device struct of this drm device */
	struct drm_device drm;
	/* Pointer to the root device struct driven by this driver */
	struct qaic_device *qdev;
	/*
	 * The physical device can be partitioned into a number of logical
	 * devices, and each logical device is given a partition id. This
	 * member stores that id. QAIC_NO_PARTITION is a sentinel used to
	 * mark that this drm device is the actual physical device
	 */
	s32 partition_id;
	/* Head in list of users who have opened this drm device */
	struct list_head users;
	/* Synchronizes access to users list */
	struct mutex users_mutex;
};
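
/*
 * Illustrative check (a sketch): distinguishing the physical device's drm
 * node from a logical partition uses the sentinel defined above:
 *
 *	if (qddev->partition_id == QAIC_NO_PARTITION)
 *		... this node represents the whole physical device ...
 */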

struct qaic_bo {
	struct drm_gem_object base;
	/* Scatter/gather table for allocated/imported BO */
	struct sg_table *sgt;
	/* Head in list of slices of this BO */
	struct list_head slices;
	/* Total nents, for all slices of this BO */
	int total_slice_nents;
	/*
	 * Direction of transfer. It can assume only two values,
	 * DMA_TO_DEVICE and DMA_FROM_DEVICE.
	 */
	int dir;
	/* The pointer of the DBC which operates on this BO */
	struct dma_bridge_chan *dbc;
	/* Number of slices that belong to this buffer */
	u32 nr_slice;
	/* Number of slices that have been transferred by DMA engine */
	u32 nr_slice_xfer_done;
	/*
	 * If true then user has attached slicing information to this BO by
	 * calling DRM_IOCTL_QAIC_ATTACH_SLICE_BO ioctl.
	 */
	bool sliced;
	/* Request ID of this BO if it is queued for execution */
	u16 req_id;
	/* Handle assigned to this BO */
	u32 handle;
	/* Wait on this for completion of DMA transfer of this BO */
	struct completion xfer_done;
	/*
	 * Node in linked list where head is dbc->xfer_list.
	 * This linked list contains BOs that are queued for DMA transfer.
	 */
	struct list_head xfer_list;
	/*
	 * Node in linked list where head is dbc->bo_lists.
	 * This linked list contains BOs that are associated with the DBC
	 * they are linked to.
	 */
	struct list_head bo_list;
	struct {
		/*
		 * Latest timestamp (ns) at which kernel received a request to
		 * execute this BO
		 */
		u64 req_received_ts;
		/*
		 * Latest timestamp (ns) at which kernel enqueued requests of
		 * this BO for execution in DMA queue
		 */
		u64 req_submit_ts;
		/*
		 * Latest timestamp (ns) at which kernel received a completion
		 * interrupt for requests of this BO
		 */
		u64 req_processed_ts;
		/*
		 * Number of elements already enqueued in DMA queue before
		 * enqueuing requests of this BO
		 */
		u32 queue_level_before;
	} perf_stats;
	/* Synchronizes BO operations */
	struct mutex lock;
};
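
/*
 * Sketch of the completion handshake (illustrative, not verbatim driver
 * code): the submitter waits on xfer_done, which the DBC completion path
 * signals once nr_slice_xfer_done reaches nr_slice:
 *
 *	reinit_completion(&bo->xfer_done);
 *	... queue this BO's slices to the DBC ...
 *	if (!wait_for_completion_timeout(&bo->xfer_done, timeout))
 *		return -ETIMEDOUT;
 */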

struct bo_slice {
	/* Mapped pages */
	struct sg_table *sgt;
	/* Number of requests required to be queued in the DMA queue */
	int nents;
	/* See enum dma_data_direction */
	int dir;
	/* Actual requests that will be copied into the DMA queue */
	struct dbc_req *reqs;
	struct kref ref_count;
	/* true: No DMA transfer required */
	bool no_xfer;
	/* Pointer to the parent BO handle */
	struct qaic_bo *bo;
	/* Node in list of slices maintained by parent BO */
	struct list_head slice;
	/* Size of this slice in bytes */
	u64 size;
	/* Offset of this slice in buffer */
	u64 offset;
};
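
/*
 * Illustrative refcounting sketch (free_slice() here is a hypothetical
 * release callback name): slices are kref-counted so a slice survives while
 * requests referencing it are still in flight:
 *
 *	kref_get(&slice->ref_count);
 *	... hand the slice's requests to the DMA queue ...
 *	kref_put(&slice->ref_count, free_slice);
 */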

int get_dbc_req_elem_size(void);
int get_dbc_rsp_elem_size(void);
int get_cntl_version(struct qaic_device *qdev, struct qaic_user *usr, u16 *major, u16 *minor);
int qaic_manage_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
void qaic_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result);

void qaic_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result);

int qaic_control_open(struct qaic_device *qdev);
void qaic_control_close(struct qaic_device *qdev);
void qaic_release_usr(struct qaic_device *qdev, struct qaic_user *usr);

irqreturn_t dbc_irq_threaded_fn(int irq, void *data);
irqreturn_t dbc_irq_handler(int irq, void *data);
int disable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr);
void enable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr);
void wakeup_dbc(struct qaic_device *qdev, u32 dbc_id);
void release_dbc(struct qaic_device *qdev, u32 dbc_id);

void wake_all_cntl(struct qaic_device *qdev);
void qaic_dev_reset_clean_local_state(struct qaic_device *qdev);

struct drm_gem_object *qaic_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);

int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_partial_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_detach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
void irq_polling_work(struct work_struct *work);
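
/*
 * Sketch (an assumption about the .c side, shown for context): the ioctl
 * handlers declared above would typically be wired into the driver's
 * drm_ioctl_desc table, e.g.:
 *
 *	DRM_IOCTL_DEF_DRV(QAIC_CREATE_BO, qaic_create_bo_ioctl, 0),
 */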

#endif /* _QAIC_H_ */