1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | /* |
3 | * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. |
4 | * |
5 | */ |
6 | #ifndef _MHI_H_ |
7 | #define _MHI_H_ |
8 | |
9 | #include <linux/device.h> |
10 | #include <linux/dma-direction.h> |
11 | #include <linux/mutex.h> |
12 | #include <linux/skbuff.h> |
13 | #include <linux/slab.h> |
14 | #include <linux/spinlock.h> |
15 | #include <linux/wait.h> |
16 | #include <linux/workqueue.h> |
17 | |
18 | #define MHI_MAX_OEM_PK_HASH_SEGMENTS 16 |
19 | |
20 | struct mhi_chan; |
21 | struct mhi_event; |
22 | struct mhi_ctxt; |
23 | struct mhi_cmd; |
24 | struct mhi_buf_info; |
25 | |
26 | /** |
27 | * enum mhi_callback - MHI callback |
28 | * @MHI_CB_IDLE: MHI entered idle state |
29 | * @MHI_CB_PENDING_DATA: New data available for client to process |
30 | * @MHI_CB_LPM_ENTER: MHI host entered low power mode |
31 | * @MHI_CB_LPM_EXIT: MHI host about to exit low power mode |
32 | * @MHI_CB_EE_RDDM: MHI device entered RDDM exec env |
33 | * @MHI_CB_EE_MISSION_MODE: MHI device entered Mission Mode exec env |
34 | * @MHI_CB_SYS_ERROR: MHI device entered error state (may recover) |
35 | * @MHI_CB_FATAL_ERROR: MHI device entered fatal error state |
36 | * @MHI_CB_BW_REQ: Received a bandwidth switch request from device |
37 | */ |
38 | enum mhi_callback { |
39 | MHI_CB_IDLE, |
40 | MHI_CB_PENDING_DATA, |
41 | MHI_CB_LPM_ENTER, |
42 | MHI_CB_LPM_EXIT, |
43 | MHI_CB_EE_RDDM, |
44 | MHI_CB_EE_MISSION_MODE, |
45 | MHI_CB_SYS_ERROR, |
46 | MHI_CB_FATAL_ERROR, |
47 | MHI_CB_BW_REQ, |
48 | }; |
49 | |
50 | /** |
51 | * enum mhi_flags - Transfer flags |
52 | * @MHI_EOB: End of buffer for bulk transfer |
53 | * @MHI_EOT: End of transfer |
54 | * @MHI_CHAIN: Linked transfer |
55 | */ |
56 | enum mhi_flags { |
57 | MHI_EOB = BIT(0), |
58 | MHI_EOT = BIT(1), |
59 | MHI_CHAIN = BIT(2), |
60 | }; |
61 | |
62 | /** |
63 | * enum mhi_device_type - Device types |
64 | * @MHI_DEVICE_XFER: Handles data transfer |
65 | * @MHI_DEVICE_CONTROLLER: Control device |
66 | */ |
67 | enum mhi_device_type { |
68 | MHI_DEVICE_XFER, |
69 | MHI_DEVICE_CONTROLLER, |
70 | }; |
71 | |
72 | /** |
73 | * enum mhi_ch_type - Channel types |
74 | * @MHI_CH_TYPE_INVALID: Invalid channel type |
75 | * @MHI_CH_TYPE_OUTBOUND: Outbound channel to the device |
76 | * @MHI_CH_TYPE_INBOUND: Inbound channel from the device |
77 | * @MHI_CH_TYPE_INBOUND_COALESCED: Coalesced channel for the device to combine |
78 | * multiple packets and send them as a single |
79 | * large packet to reduce CPU consumption |
80 | */ |
81 | enum mhi_ch_type { |
82 | MHI_CH_TYPE_INVALID = 0, |
83 | MHI_CH_TYPE_OUTBOUND = DMA_TO_DEVICE, |
84 | MHI_CH_TYPE_INBOUND = DMA_FROM_DEVICE, |
85 | MHI_CH_TYPE_INBOUND_COALESCED = 3, |
86 | }; |
87 | |
88 | /** |
89 | * struct image_info - Firmware and RDDM table |
90 | * @mhi_buf: Buffer for firmware and RDDM table |
91 | * @entries: # of entries in table |
92 | */ |
93 | struct image_info { |
94 | struct mhi_buf *mhi_buf; |
95 | /* private: from internal.h */ |
96 | struct bhi_vec_entry *bhi_vec; |
97 | /* public: */ |
98 | u32 entries; |
99 | }; |
100 | |
101 | /** |
102 | * struct mhi_link_info - BW requirement |
103 | * target_link_speed - Link speed as defined by TLS bits in LinkControl reg |
104 | * target_link_width - Link width as defined by NLW bits in LinkStatus reg |
105 | */ |
106 | struct mhi_link_info { |
107 | unsigned int target_link_speed; |
108 | unsigned int target_link_width; |
109 | }; |
110 | |
111 | /** |
112 | * enum mhi_ee_type - Execution environment types |
113 | * @MHI_EE_PBL: Primary Bootloader |
114 | * @MHI_EE_SBL: Secondary Bootloader |
115 | * @MHI_EE_AMSS: Modem, aka the primary runtime EE |
116 | * @MHI_EE_RDDM: Ram dump download mode |
117 | * @MHI_EE_WFW: WLAN firmware mode |
118 | * @MHI_EE_PTHRU: Passthrough |
119 | * @MHI_EE_EDL: Embedded downloader |
120 | * @MHI_EE_FP: Flash Programmer Environment |
121 | */ |
122 | enum mhi_ee_type { |
123 | MHI_EE_PBL, |
124 | MHI_EE_SBL, |
125 | MHI_EE_AMSS, |
126 | MHI_EE_RDDM, |
127 | MHI_EE_WFW, |
128 | MHI_EE_PTHRU, |
129 | MHI_EE_EDL, |
130 | MHI_EE_FP, |
131 | MHI_EE_MAX_SUPPORTED = MHI_EE_FP, |
132 | MHI_EE_DISABLE_TRANSITION, /* local EE, not related to mhi spec */ |
133 | MHI_EE_NOT_SUPPORTED, |
134 | MHI_EE_MAX, |
135 | }; |
136 | |
137 | /** |
138 | * enum mhi_state - MHI states |
139 | * @MHI_STATE_RESET: Reset state |
140 | * @MHI_STATE_READY: Ready state |
141 | * @MHI_STATE_M0: M0 state |
142 | * @MHI_STATE_M1: M1 state |
143 | * @MHI_STATE_M2: M2 state |
144 | * @MHI_STATE_M3: M3 state |
145 | * @MHI_STATE_M3_FAST: M3 Fast state |
146 | * @MHI_STATE_BHI: BHI state |
147 | * @MHI_STATE_SYS_ERR: System Error state |
148 | */ |
149 | enum mhi_state { |
150 | MHI_STATE_RESET = 0x0, |
151 | MHI_STATE_READY = 0x1, |
152 | MHI_STATE_M0 = 0x2, |
153 | MHI_STATE_M1 = 0x3, |
154 | MHI_STATE_M2 = 0x4, |
155 | MHI_STATE_M3 = 0x5, |
156 | MHI_STATE_M3_FAST = 0x6, |
157 | MHI_STATE_BHI = 0x7, |
158 | MHI_STATE_SYS_ERR = 0xFF, |
159 | MHI_STATE_MAX, |
160 | }; |
161 | |
162 | /** |
163 | * enum mhi_ch_ee_mask - Execution environment mask for channel |
164 | * @MHI_CH_EE_PBL: Allow channel to be used in PBL EE |
165 | * @MHI_CH_EE_SBL: Allow channel to be used in SBL EE |
166 | * @MHI_CH_EE_AMSS: Allow channel to be used in AMSS EE |
167 | * @MHI_CH_EE_RDDM: Allow channel to be used in RDDM EE |
168 | * @MHI_CH_EE_PTHRU: Allow channel to be used in PTHRU EE |
169 | * @MHI_CH_EE_WFW: Allow channel to be used in WFW EE |
170 | * @MHI_CH_EE_EDL: Allow channel to be used in EDL EE |
171 | */ |
172 | enum mhi_ch_ee_mask { |
173 | MHI_CH_EE_PBL = BIT(MHI_EE_PBL), |
174 | MHI_CH_EE_SBL = BIT(MHI_EE_SBL), |
175 | MHI_CH_EE_AMSS = BIT(MHI_EE_AMSS), |
176 | MHI_CH_EE_RDDM = BIT(MHI_EE_RDDM), |
177 | MHI_CH_EE_PTHRU = BIT(MHI_EE_PTHRU), |
178 | MHI_CH_EE_WFW = BIT(MHI_EE_WFW), |
179 | MHI_CH_EE_EDL = BIT(MHI_EE_EDL), |
180 | }; |
181 | |
182 | /** |
183 | * enum mhi_er_data_type - Event ring data types |
184 | * @MHI_ER_DATA: Only client data over this ring |
185 | * @MHI_ER_CTRL: MHI control data and client data |
186 | */ |
187 | enum mhi_er_data_type { |
188 | MHI_ER_DATA, |
189 | MHI_ER_CTRL, |
190 | }; |
191 | |
192 | /** |
193 | * enum mhi_db_brst_mode - Doorbell mode |
194 | * @MHI_DB_BRST_DISABLE: Burst mode disable |
195 | * @MHI_DB_BRST_ENABLE: Burst mode enable |
196 | */ |
197 | enum mhi_db_brst_mode { |
198 | MHI_DB_BRST_DISABLE = 0x2, |
199 | MHI_DB_BRST_ENABLE = 0x3, |
200 | }; |
201 | |
202 | /** |
203 | * struct mhi_channel_config - Channel configuration structure for controller |
204 | * @name: The name of this channel |
205 | * @num: The number assigned to this channel |
206 | * @num_elements: The number of elements that can be queued to this channel |
207 | * @local_elements: The local ring length of the channel |
208 | * @event_ring: The event ring index that services this channel |
209 | * @dir: Direction that data may flow on this channel |
210 | * @type: Channel type |
211 | * @ee_mask: Execution Environment mask for this channel |
212 | * @pollcfg: Polling configuration for burst mode. 0 is default. milliseconds |
213 | for UL channels, multiple of 8 ring elements for DL channels |
214 | * @doorbell: Doorbell mode |
215 | * @lpm_notify: The channel master requires low power mode notifications |
216 | * @offload_channel: The client manages the channel completely |
217 | * @doorbell_mode_switch: Channel switches to doorbell mode on M0 transition |
218 | * @auto_queue: Framework will automatically queue buffers for DL traffic |
219 | * @wake-capable: Channel capable of waking up the system |
220 | */ |
221 | struct mhi_channel_config { |
222 | char *name; |
223 | u32 num; |
224 | u32 num_elements; |
225 | u32 local_elements; |
226 | u32 event_ring; |
227 | enum dma_data_direction dir; |
228 | enum mhi_ch_type type; |
229 | u32 ee_mask; |
230 | u32 pollcfg; |
231 | enum mhi_db_brst_mode doorbell; |
232 | bool lpm_notify; |
233 | bool offload_channel; |
234 | bool doorbell_mode_switch; |
235 | bool auto_queue; |
236 | bool wake_capable; |
237 | }; |
238 | |
239 | /** |
240 | * struct mhi_event_config - Event ring configuration structure for controller |
241 | * @num_elements: The number of elements that can be queued to this ring |
242 | * @irq_moderation_ms: Delay irq for additional events to be aggregated |
243 | * @irq: IRQ associated with this ring |
244 | * @channel: Dedicated channel number. U32_MAX indicates a non-dedicated ring |
245 | * @priority: Priority of this ring. Use 1 for now |
246 | * @mode: Doorbell mode |
247 | * @data_type: Type of data this ring will process |
248 | * @hardware_event: This ring is associated with hardware channels |
249 | * @client_managed: This ring is client managed |
250 | * @offload_channel: This ring is associated with an offloaded channel |
251 | */ |
252 | struct mhi_event_config { |
253 | u32 num_elements; |
254 | u32 irq_moderation_ms; |
255 | u32 irq; |
256 | u32 channel; |
257 | u32 priority; |
258 | enum mhi_db_brst_mode mode; |
259 | enum mhi_er_data_type data_type; |
260 | bool hardware_event; |
261 | bool client_managed; |
262 | bool offload_channel; |
263 | }; |
264 | |
265 | /** |
266 | * struct mhi_controller_config - Root MHI controller configuration |
267 | * @max_channels: Maximum number of channels supported |
268 | * @timeout_ms: Timeout value for operations. 0 means use default |
269 | * @buf_len: Size of automatically allocated buffers. 0 means use default |
270 | * @num_channels: Number of channels defined in @ch_cfg |
271 | * @ch_cfg: Array of defined channels |
272 | * @num_events: Number of event rings defined in @event_cfg |
273 | * @event_cfg: Array of defined event rings |
274 | * @use_bounce_buf: Use a bounce buffer pool due to limited DDR access |
275 | * @m2_no_db: Host is not allowed to ring DB in M2 state |
276 | */ |
277 | struct mhi_controller_config { |
278 | u32 max_channels; |
279 | u32 timeout_ms; |
280 | u32 buf_len; |
281 | u32 num_channels; |
282 | const struct mhi_channel_config *ch_cfg; |
283 | u32 num_events; |
284 | struct mhi_event_config *event_cfg; |
285 | bool use_bounce_buf; |
286 | bool m2_no_db; |
287 | }; |
288 | |
289 | /** |
290 | * struct mhi_controller - Master MHI controller structure |
291 | * @cntrl_dev: Pointer to the struct device of physical bus acting as the MHI |
292 | * controller (required) |
293 | * @mhi_dev: MHI device instance for the controller |
294 | * @debugfs_dentry: MHI controller debugfs directory |
295 | * @regs: Base address of MHI MMIO register space (required) |
296 | * @bhi: Points to base of MHI BHI register space |
297 | * @bhie: Points to base of MHI BHIe register space |
298 | * @wake_db: MHI WAKE doorbell register address |
299 | * @iova_start: IOMMU starting address for data (required) |
300 | * @iova_stop: IOMMU stop address for data (required) |
301 | * @fw_image: Firmware image name for normal booting (optional) |
302 | * @fw_data: Firmware image data content for normal booting, used only |
303 | * if fw_image is NULL and fbc_download is true (optional) |
304 | * @fw_sz: Firmware image data size for normal booting, used only if fw_image |
305 | * is NULL and fbc_download is true (optional) |
306 | * @edl_image: Firmware image name for emergency download mode (optional) |
307 | * @rddm_size: RAM dump size that host should allocate for debugging purpose |
308 | * @sbl_size: SBL image size downloaded through BHIe (optional) |
309 | * @seg_len: BHIe vector size (optional) |
310 | * @reg_len: Length of the MHI MMIO region (required) |
311 | * @fbc_image: Points to firmware image buffer |
312 | * @rddm_image: Points to RAM dump buffer |
313 | * @mhi_chan: Points to the channel configuration table |
314 | * @lpm_chans: List of channels that require LPM notifications |
315 | * @irq: base irq # to request (required) |
316 | * @max_chan: Maximum number of channels the controller supports |
317 | * @total_ev_rings: Total # of event rings allocated |
318 | * @hw_ev_rings: Number of hardware event rings |
319 | * @sw_ev_rings: Number of software event rings |
320 | * @nr_irqs: Number of IRQ allocated by bus master (required) |
321 | * @family_number: MHI controller family number |
322 | * @device_number: MHI controller device number |
323 | * @major_version: MHI controller major revision number |
324 | * @minor_version: MHI controller minor revision number |
325 | * @serial_number: MHI controller serial number obtained from BHI |
326 | * @oem_pk_hash: MHI controller OEM PK Hash obtained from BHI |
327 | * @mhi_event: MHI event ring configurations table |
328 | * @mhi_cmd: MHI command ring configurations table |
329 | * @mhi_ctxt: MHI device context, shared memory between host and device |
330 | * @pm_mutex: Mutex for suspend/resume operation |
331 | * @pm_lock: Lock for protecting MHI power management state |
332 | * @timeout_ms: Timeout in ms for state transitions |
333 | * @pm_state: MHI power management state |
334 | * @db_access: DB access states |
335 | * @ee: MHI device execution environment |
336 | * @dev_state: MHI device state |
337 | * @dev_wake: Device wakeup count |
338 | * @pending_pkts: Pending packets for the controller |
 * @M0: Counter to track number of device transitions to M0 state
 * @M2: Counter to track number of device transitions to M2 state
 * @M3: Counter to track number of device transitions to M3 state
340 | * @transition_list: List of MHI state transitions |
341 | * @transition_lock: Lock for protecting MHI state transition list |
342 | * @wlock: Lock for protecting device wakeup |
343 | * @mhi_link_info: Device bandwidth info |
344 | * @st_worker: State transition worker |
345 | * @hiprio_wq: High priority workqueue for MHI work such as state transitions |
346 | * @state_event: State change event |
347 | * @status_cb: CB function to notify power states of the device (required) |
348 | * @wake_get: CB function to assert device wake (optional) |
349 | * @wake_put: CB function to de-assert device wake (optional) |
350 | * @wake_toggle: CB function to assert and de-assert device wake (optional) |
351 | * @runtime_get: CB function to controller runtime resume (required) |
352 | * @runtime_put: CB function to decrement pm usage (required) |
353 | * @map_single: CB function to create TRE buffer |
354 | * @unmap_single: CB function to destroy TRE buffer |
355 | * @read_reg: Read a MHI register via the physical link (required) |
356 | * @write_reg: Write a MHI register via the physical link (required) |
357 | * @reset: Controller specific reset function (optional) |
358 | * @buffer_len: Bounce buffer length |
359 | * @index: Index of the MHI controller instance |
360 | * @bounce_buf: Use of bounce buffer |
361 | * @fbc_download: MHI host needs to do complete image transfer (optional) |
362 | * @wake_set: Device wakeup set flag |
363 | * @irq_flags: irq flags passed to request_irq (optional) |
364 | * @mru: the default MRU for the MHI device |
365 | * |
366 | * Fields marked as (required) need to be populated by the controller driver |
367 | * before calling mhi_register_controller(). For the fields marked as (optional) |
368 | * they can be populated depending on the usecase. |
369 | * |
370 | * The following fields are present for the purpose of implementing any device |
371 | * specific quirks or customizations for specific MHI revisions used in device |
372 | * by the controller drivers. The MHI stack will just populate these fields |
373 | * during mhi_register_controller(): |
374 | * family_number |
375 | * device_number |
376 | * major_version |
377 | * minor_version |
378 | */ |
379 | struct mhi_controller { |
380 | struct device *cntrl_dev; |
381 | struct mhi_device *mhi_dev; |
382 | struct dentry *debugfs_dentry; |
383 | void __iomem *regs; |
384 | void __iomem *bhi; |
385 | void __iomem *bhie; |
386 | void __iomem *wake_db; |
387 | |
388 | dma_addr_t iova_start; |
389 | dma_addr_t iova_stop; |
390 | const char *fw_image; |
391 | const u8 *fw_data; |
392 | size_t fw_sz; |
393 | const char *edl_image; |
394 | size_t rddm_size; |
395 | size_t sbl_size; |
396 | size_t seg_len; |
397 | size_t reg_len; |
398 | struct image_info *fbc_image; |
399 | struct image_info *rddm_image; |
400 | struct mhi_chan *mhi_chan; |
401 | struct list_head lpm_chans; |
402 | int *irq; |
403 | u32 max_chan; |
404 | u32 total_ev_rings; |
405 | u32 hw_ev_rings; |
406 | u32 sw_ev_rings; |
407 | u32 nr_irqs; |
408 | u32 family_number; |
409 | u32 device_number; |
410 | u32 major_version; |
411 | u32 minor_version; |
412 | u32 serial_number; |
413 | u32 oem_pk_hash[MHI_MAX_OEM_PK_HASH_SEGMENTS]; |
414 | |
415 | struct mhi_event *mhi_event; |
416 | struct mhi_cmd *mhi_cmd; |
417 | struct mhi_ctxt *mhi_ctxt; |
418 | |
419 | struct mutex pm_mutex; |
420 | rwlock_t pm_lock; |
421 | u32 timeout_ms; |
422 | u32 pm_state; |
423 | u32 db_access; |
424 | enum mhi_ee_type ee; |
425 | enum mhi_state dev_state; |
426 | atomic_t dev_wake; |
427 | atomic_t pending_pkts; |
428 | u32 M0, M2, M3; |
429 | struct list_head transition_list; |
430 | spinlock_t transition_lock; |
431 | spinlock_t wlock; |
432 | struct mhi_link_info mhi_link_info; |
433 | struct work_struct st_worker; |
434 | struct workqueue_struct *hiprio_wq; |
435 | wait_queue_head_t state_event; |
436 | |
437 | void (*status_cb)(struct mhi_controller *mhi_cntrl, |
438 | enum mhi_callback cb); |
439 | void (*wake_get)(struct mhi_controller *mhi_cntrl, bool override); |
440 | void (*wake_put)(struct mhi_controller *mhi_cntrl, bool override); |
441 | void (*wake_toggle)(struct mhi_controller *mhi_cntrl); |
442 | int (*runtime_get)(struct mhi_controller *mhi_cntrl); |
443 | void (*runtime_put)(struct mhi_controller *mhi_cntrl); |
444 | int (*map_single)(struct mhi_controller *mhi_cntrl, |
445 | struct mhi_buf_info *buf); |
446 | void (*unmap_single)(struct mhi_controller *mhi_cntrl, |
447 | struct mhi_buf_info *buf); |
448 | int (*read_reg)(struct mhi_controller *mhi_cntrl, void __iomem *addr, |
449 | u32 *out); |
450 | void (*write_reg)(struct mhi_controller *mhi_cntrl, void __iomem *addr, |
451 | u32 val); |
452 | void (*reset)(struct mhi_controller *mhi_cntrl); |
453 | |
454 | size_t buffer_len; |
455 | int index; |
456 | bool bounce_buf; |
457 | bool fbc_download; |
458 | bool wake_set; |
459 | unsigned long irq_flags; |
460 | u32 mru; |
461 | }; |
462 | |
463 | /** |
464 | * struct mhi_device - Structure representing an MHI device which binds |
465 | * to channels or is associated with controllers |
466 | * @id: Pointer to MHI device ID struct |
467 | * @name: Name of the associated MHI device |
468 | * @mhi_cntrl: Controller the device belongs to |
469 | * @ul_chan: UL channel for the device |
470 | * @dl_chan: DL channel for the device |
471 | * @dev: Driver model device node for the MHI device |
472 | * @dev_type: MHI device type |
473 | * @ul_chan_id: MHI channel id for UL transfer |
474 | * @dl_chan_id: MHI channel id for DL transfer |
475 | * @dev_wake: Device wakeup counter |
476 | */ |
477 | struct mhi_device { |
478 | const struct mhi_device_id *id; |
479 | const char *name; |
480 | struct mhi_controller *mhi_cntrl; |
481 | struct mhi_chan *ul_chan; |
482 | struct mhi_chan *dl_chan; |
483 | struct device dev; |
484 | enum mhi_device_type dev_type; |
485 | int ul_chan_id; |
486 | int dl_chan_id; |
487 | u32 dev_wake; |
488 | }; |
489 | |
490 | /** |
491 | * struct mhi_result - Completed buffer information |
492 | * @buf_addr: Address of data buffer |
493 | * @bytes_xferd: # of bytes transferred |
494 | * @dir: Channel direction |
495 | * @transaction_status: Status of last transaction |
496 | */ |
497 | struct mhi_result { |
498 | void *buf_addr; |
499 | size_t bytes_xferd; |
500 | enum dma_data_direction dir; |
501 | int transaction_status; |
502 | }; |
503 | |
504 | /** |
505 | * struct mhi_buf - MHI Buffer description |
506 | * @buf: Virtual address of the buffer |
507 | * @name: Buffer label. For offload channel, configurations name must be: |
508 | * ECA - Event context array data |
509 | * CCA - Channel context array data |
510 | * @dma_addr: IOMMU address of the buffer |
511 | * @len: # of bytes |
512 | */ |
513 | struct mhi_buf { |
514 | void *buf; |
515 | const char *name; |
516 | dma_addr_t dma_addr; |
517 | size_t len; |
518 | }; |
519 | |
520 | /** |
521 | * struct mhi_driver - Structure representing a MHI client driver |
522 | * @probe: CB function for client driver probe function |
523 | * @remove: CB function for client driver remove function |
524 | * @ul_xfer_cb: CB function for UL data transfer |
525 | * @dl_xfer_cb: CB function for DL data transfer |
526 | * @status_cb: CB functions for asynchronous status |
527 | * @driver: Device driver model driver |
528 | */ |
529 | struct mhi_driver { |
530 | const struct mhi_device_id *id_table; |
531 | int (*probe)(struct mhi_device *mhi_dev, |
532 | const struct mhi_device_id *id); |
533 | void (*remove)(struct mhi_device *mhi_dev); |
534 | void (*ul_xfer_cb)(struct mhi_device *mhi_dev, |
535 | struct mhi_result *result); |
536 | void (*dl_xfer_cb)(struct mhi_device *mhi_dev, |
537 | struct mhi_result *result); |
538 | void (*status_cb)(struct mhi_device *mhi_dev, enum mhi_callback mhi_cb); |
539 | struct device_driver driver; |
540 | }; |
541 | |
542 | #define to_mhi_driver(drv) container_of(drv, struct mhi_driver, driver) |
543 | #define to_mhi_device(dev) container_of(dev, struct mhi_device, dev) |
544 | |
545 | /** |
546 | * mhi_alloc_controller - Allocate the MHI Controller structure |
547 | * Allocate the mhi_controller structure using zero initialized memory |
548 | */ |
549 | struct mhi_controller *mhi_alloc_controller(void); |
550 | |
551 | /** |
552 | * mhi_free_controller - Free the MHI Controller structure |
553 | * Free the mhi_controller structure which was previously allocated |
554 | */ |
555 | void mhi_free_controller(struct mhi_controller *mhi_cntrl); |
556 | |
557 | /** |
558 | * mhi_register_controller - Register MHI controller |
559 | * @mhi_cntrl: MHI controller to register |
560 | * @config: Configuration to use for the controller |
561 | */ |
562 | int mhi_register_controller(struct mhi_controller *mhi_cntrl, |
563 | const struct mhi_controller_config *config); |
564 | |
565 | /** |
566 | * mhi_unregister_controller - Unregister MHI controller |
567 | * @mhi_cntrl: MHI controller to unregister |
568 | */ |
569 | void mhi_unregister_controller(struct mhi_controller *mhi_cntrl); |
570 | |
571 | /* |
572 | * module_mhi_driver() - Helper macro for drivers that don't do |
573 | * anything special other than using default mhi_driver_register() and |
574 | * mhi_driver_unregister(). This eliminates a lot of boilerplate. |
575 | * Each module may only use this macro once. |
576 | */ |
577 | #define module_mhi_driver(mhi_drv) \ |
578 | module_driver(mhi_drv, mhi_driver_register, \ |
579 | mhi_driver_unregister) |
580 | |
581 | /* |
582 | * Macro to avoid include chaining to get THIS_MODULE |
583 | */ |
584 | #define mhi_driver_register(mhi_drv) \ |
585 | __mhi_driver_register(mhi_drv, THIS_MODULE) |
586 | |
587 | /** |
588 | * __mhi_driver_register - Register driver with MHI framework |
589 | * @mhi_drv: Driver associated with the device |
590 | * @owner: The module owner |
591 | */ |
592 | int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner); |
593 | |
594 | /** |
595 | * mhi_driver_unregister - Unregister a driver for mhi_devices |
596 | * @mhi_drv: Driver associated with the device |
597 | */ |
598 | void mhi_driver_unregister(struct mhi_driver *mhi_drv); |
599 | |
600 | /** |
601 | * mhi_set_mhi_state - Set MHI device state |
602 | * @mhi_cntrl: MHI controller |
603 | * @state: State to set |
604 | */ |
605 | void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, |
606 | enum mhi_state state); |
607 | |
608 | /** |
609 | * mhi_notify - Notify the MHI client driver about client device status |
610 | * @mhi_dev: MHI device instance |
611 | * @cb_reason: MHI callback reason |
612 | */ |
613 | void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason); |
614 | |
615 | /** |
616 | * mhi_get_free_desc_count - Get transfer ring length |
617 | * Get # of TD available to queue buffers |
618 | * @mhi_dev: Device associated with the channels |
619 | * @dir: Direction of the channel |
620 | */ |
621 | int mhi_get_free_desc_count(struct mhi_device *mhi_dev, |
622 | enum dma_data_direction dir); |
623 | |
624 | /** |
625 | * mhi_prepare_for_power_up - Do pre-initialization before power up. |
626 | * This is optional, call this before power up if |
627 | * the controller does not want bus framework to |
628 | * automatically free any allocated memory during |
629 | * shutdown process. |
630 | * @mhi_cntrl: MHI controller |
631 | */ |
632 | int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl); |
633 | |
634 | /** |
635 | * mhi_async_power_up - Start MHI power up sequence |
636 | * @mhi_cntrl: MHI controller |
637 | */ |
638 | int mhi_async_power_up(struct mhi_controller *mhi_cntrl); |
639 | |
640 | /** |
641 | * mhi_sync_power_up - Start MHI power up sequence and wait till the device |
642 | * enters valid EE state |
643 | * @mhi_cntrl: MHI controller |
644 | */ |
645 | int mhi_sync_power_up(struct mhi_controller *mhi_cntrl); |
646 | |
647 | /** |
648 | * mhi_power_down - Start MHI power down sequence |
649 | * @mhi_cntrl: MHI controller |
650 | * @graceful: Link is still accessible, so do a graceful shutdown process |
651 | */ |
652 | void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful); |
653 | |
654 | /** |
655 | * mhi_unprepare_after_power_down - Free any allocated memory after power down |
656 | * @mhi_cntrl: MHI controller |
657 | */ |
658 | void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl); |
659 | |
660 | /** |
661 | * mhi_pm_suspend - Move MHI into a suspended state |
662 | * @mhi_cntrl: MHI controller |
663 | */ |
664 | int mhi_pm_suspend(struct mhi_controller *mhi_cntrl); |
665 | |
666 | /** |
667 | * mhi_pm_resume - Resume MHI from suspended state |
668 | * @mhi_cntrl: MHI controller |
669 | */ |
670 | int mhi_pm_resume(struct mhi_controller *mhi_cntrl); |
671 | |
672 | /** |
673 | * mhi_pm_resume_force - Force resume MHI from suspended state |
674 | * @mhi_cntrl: MHI controller |
675 | * |
676 | * Resume the device irrespective of its MHI state. As per the MHI spec, devices |
677 | * has to be in M3 state during resume. But some devices seem to be in a |
678 | * different MHI state other than M3 but they continue working fine if allowed. |
 * This API is intended to be used for such devices.
680 | * |
681 | * Return: 0 if the resume succeeds, a negative error code otherwise |
682 | */ |
683 | int mhi_pm_resume_force(struct mhi_controller *mhi_cntrl); |
684 | |
685 | /** |
686 | * mhi_download_rddm_image - Download ramdump image from device for |
687 | * debugging purpose. |
688 | * @mhi_cntrl: MHI controller |
689 | * @in_panic: Download rddm image during kernel panic |
690 | */ |
691 | int mhi_download_rddm_image(struct mhi_controller *mhi_cntrl, bool in_panic); |
692 | |
693 | /** |
694 | * mhi_force_rddm_mode - Force device into rddm mode |
695 | * @mhi_cntrl: MHI controller |
696 | */ |
697 | int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl); |
698 | |
699 | /** |
700 | * mhi_get_exec_env - Get BHI execution environment of the device |
701 | * @mhi_cntrl: MHI controller |
702 | */ |
703 | enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl); |
704 | |
705 | /** |
706 | * mhi_get_mhi_state - Get MHI state of the device |
707 | * @mhi_cntrl: MHI controller |
708 | */ |
709 | enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl); |
710 | |
711 | /** |
712 | * mhi_soc_reset - Trigger a device reset. This can be used as a last resort |
713 | * to reset and recover a device. |
714 | * @mhi_cntrl: MHI controller |
715 | */ |
716 | void mhi_soc_reset(struct mhi_controller *mhi_cntrl); |
717 | |
718 | /** |
719 | * mhi_device_get - Disable device low power mode |
720 | * @mhi_dev: Device associated with the channel |
721 | */ |
722 | void mhi_device_get(struct mhi_device *mhi_dev); |
723 | |
724 | /** |
725 | * mhi_device_get_sync - Disable device low power mode. Synchronously |
726 | * take the controller out of suspended state |
727 | * @mhi_dev: Device associated with the channel |
728 | */ |
729 | int mhi_device_get_sync(struct mhi_device *mhi_dev); |
730 | |
731 | /** |
732 | * mhi_device_put - Re-enable device low power mode |
733 | * @mhi_dev: Device associated with the channel |
734 | */ |
735 | void mhi_device_put(struct mhi_device *mhi_dev); |
736 | |
737 | /** |
738 | * mhi_prepare_for_transfer - Setup UL and DL channels for data transfer. |
739 | * @mhi_dev: Device associated with the channels |
740 | * |
741 | * Allocate and initialize the channel context and also issue the START channel |
742 | * command to both channels. Channels can be started only if both host and |
743 | * device execution environments match and channels are in a DISABLED state. |
744 | */ |
745 | int mhi_prepare_for_transfer(struct mhi_device *mhi_dev); |
746 | |
747 | /** |
748 | * mhi_prepare_for_transfer_autoqueue - Setup UL and DL channels with auto queue |
749 | * buffers for DL traffic |
750 | * @mhi_dev: Device associated with the channels |
751 | * |
752 | * Allocate and initialize the channel context and also issue the START channel |
753 | * command to both channels. Channels can be started only if both host and |
754 | * device execution environments match and channels are in a DISABLED state. |
755 | * The MHI core will automatically allocate and queue buffers for the DL traffic. |
756 | */ |
757 | int mhi_prepare_for_transfer_autoqueue(struct mhi_device *mhi_dev); |
758 | |
759 | /** |
760 | * mhi_unprepare_from_transfer - Reset UL and DL channels for data transfer. |
761 | * Issue the RESET channel command and let the |
762 | * device clean-up the context so no incoming |
763 | * transfers are seen on the host. Free memory |
764 | * associated with the context on host. If device |
765 | * is unresponsive, only perform a host side |
766 | * clean-up. Channels can be reset only if both |
767 | * host and device execution environments match |
768 | * and channels are in an ENABLED, STOPPED or |
769 | * SUSPENDED state. |
770 | * @mhi_dev: Device associated with the channels |
771 | */ |
772 | void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev); |
773 | |
774 | /** |
775 | * mhi_queue_dma - Send or receive DMA mapped buffers from client device |
776 | * over MHI channel |
777 | * @mhi_dev: Device associated with the channels |
778 | * @dir: DMA direction for the channel |
779 | * @mhi_buf: Buffer for holding the DMA mapped data |
780 | * @len: Buffer length |
781 | * @mflags: MHI transfer flags used for the transfer |
782 | */ |
783 | int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir, |
784 | struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags); |
785 | |
786 | /** |
787 | * mhi_queue_buf - Send or receive raw buffers from client device over MHI |
788 | * channel |
789 | * @mhi_dev: Device associated with the channels |
790 | * @dir: DMA direction for the channel |
791 | * @buf: Buffer for holding the data |
792 | * @len: Buffer length |
793 | * @mflags: MHI transfer flags used for the transfer |
794 | */ |
795 | int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir, |
796 | void *buf, size_t len, enum mhi_flags mflags); |
797 | |
798 | /** |
799 | * mhi_queue_skb - Send or receive SKBs from client device over MHI channel |
800 | * @mhi_dev: Device associated with the channels |
801 | * @dir: DMA direction for the channel |
802 | * @skb: Buffer for holding SKBs |
803 | * @len: Buffer length |
804 | * @mflags: MHI transfer flags used for the transfer |
805 | */ |
806 | int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir, |
807 | struct sk_buff *skb, size_t len, enum mhi_flags mflags); |
808 | |
809 | /** |
810 | * mhi_queue_is_full - Determine whether queueing new elements is possible |
811 | * @mhi_dev: Device associated with the channels |
812 | * @dir: DMA direction for the channel |
813 | */ |
814 | bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir); |
815 | |
816 | #endif /* _MHI_H_ */ |
817 | |