1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * MHI Endpoint bus stack |
4 | * |
5 | * Copyright (C) 2022 Linaro Ltd. |
6 | * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org> |
7 | */ |
8 | |
9 | #include <linux/bitfield.h> |
10 | #include <linux/delay.h> |
11 | #include <linux/dma-direction.h> |
12 | #include <linux/interrupt.h> |
13 | #include <linux/io.h> |
14 | #include <linux/irq.h> |
15 | #include <linux/mhi_ep.h> |
16 | #include <linux/mod_devicetable.h> |
17 | #include <linux/module.h> |
18 | #include "internal.h" |
19 | |
20 | #define M0_WAIT_DELAY_MS 100 |
21 | #define M0_WAIT_COUNT 100 |
22 | |
23 | static DEFINE_IDA(mhi_ep_cntrl_ida); |
24 | |
25 | static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id); |
26 | static int mhi_ep_destroy_device(struct device *dev, void *data); |
27 | |
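/*
 * Add an element to the event ring identified by ring_idx and signal the host.
 * The ring is started on first use. BEI and the ring's interrupt moderation
 * (intmodt) value decide whether the IRQ is raised immediately, deferred or
 * skipped, as described in the comment below.
 */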
28 | static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx, |
29 | struct mhi_ring_element *el, bool bei) |
30 | { |
31 | struct device *dev = &mhi_cntrl->mhi_dev->dev; |
32 | union mhi_ep_ring_ctx *ctx; |
33 | struct mhi_ep_ring *ring; |
34 | int ret; |
35 | |
36 | mutex_lock(&mhi_cntrl->event_lock); |
37 | ring = &mhi_cntrl->mhi_event[ring_idx].ring; |
38 | ctx = (union mhi_ep_ring_ctx *)&mhi_cntrl->ev_ctx_cache[ring_idx]; |
39 | if (!ring->started) { |
40 | ret = mhi_ep_ring_start(mhi_cntrl, ring, ctx); |
41 | if (ret) { |
42 | dev_err(dev, "Error starting event ring (%u)\n", ring_idx); |
43 | goto err_unlock; |
44 | } |
45 | } |
46 | |
47 | /* Add element to the event ring */ |
48 | ret = mhi_ep_ring_add_element(ring, el); |
49 | if (ret) { |
50 | dev_err(dev, "Error adding element to event ring (%u)\n", ring_idx); |
51 | goto err_unlock; |
52 | } |
53 | |
54 | mutex_unlock(&mhi_cntrl->event_lock); |
55 | |
56 | /* |
57 | * As per the MHI specification, section 4.3, Interrupt moderation: |
58 | * |
59 | * 1. If BEI flag is not set, cancel any pending intmodt work if started |
60 | * for the event ring and raise IRQ immediately. |
61 | * |
62 | * 2. If both BEI and intmodt are set, and if no IRQ is pending for the |
63 | * same event ring, start the IRQ delayed work as per the value of |
64 | * intmodt. If previous IRQ is pending, then do nothing as the pending |
65 | * IRQ is enough for the host to process the current event ring element. |
66 | * |
67 | * 3. If BEI is set and intmodt is not set, no need to raise IRQ. |
68 | */ |
69 | if (!bei) { |
70 | if (READ_ONCE(ring->irq_pending)) |
71 | cancel_delayed_work(&ring->intmodt_work); |
72 | |
73 | mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector); |
74 | } else if (ring->intmodt && !READ_ONCE(ring->irq_pending)) { |
75 | WRITE_ONCE(ring->irq_pending, true); |
76 | schedule_delayed_work(&ring->intmodt_work, msecs_to_jiffies(ring->intmodt)); |
77 | } |
78 | |
79 | return 0; |
80 | |
81 | err_unlock: |
82 | mutex_unlock(&mhi_cntrl->event_lock); |
83 | |
84 | return ret; |
85 | } |
86 | |
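/*
 * Send a transfer completion event for the TRE currently pointed to by the
 * channel ring's read offset, with the given completion code and length.
 */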
87 | static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring, |
88 | struct mhi_ring_element *tre, u32 len, enum mhi_ev_ccs code) |
89 | { |
90 | struct mhi_ring_element *event; |
91 | int ret; |
92 | |
93 | event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA); |
94 | if (!event) |
95 | return -ENOMEM; |
96 | |
97 | event->ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(*tre)); |
98 | event->dword[0] = MHI_TRE_EV_DWORD0(code, len); |
99 | event->dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT); |
100 | |
101 | ret = mhi_ep_send_event(mhi_cntrl, ring->er_index, event, MHI_TRE_DATA_GET_BEI(tre)); |
102 | kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event); |
103 | |
104 | return ret; |
105 | } |
106 | |
107 | int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state) |
108 | { |
109 | struct mhi_ring_element *event; |
110 | int ret; |
111 | |
112 | event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA); |
113 | if (!event) |
114 | return -ENOMEM; |
115 | |
116 | event->dword[0] = MHI_SC_EV_DWORD0(state); |
117 | event->dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT); |
118 | |
119 | ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0); |
120 | kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event); |
121 | |
122 | return ret; |
123 | } |
124 | |
125 | int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env) |
126 | { |
127 | struct mhi_ring_element *event; |
128 | int ret; |
129 | |
130 | event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA); |
131 | if (!event) |
132 | return -ENOMEM; |
133 | |
134 | event->dword[0] = MHI_EE_EV_DWORD0(exec_env); |
135 | event->dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT); |
136 | |
137 | ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0); |
138 | kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event); |
139 | |
140 | return ret; |
141 | } |
142 | |
143 | static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ev_ccs code) |
144 | { |
145 | struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring; |
146 | struct mhi_ring_element *event; |
147 | int ret; |
148 | |
149 | event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA); |
150 | if (!event) |
151 | return -ENOMEM; |
152 | |
153 | event->ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(struct mhi_ring_element)); |
154 | event->dword[0] = MHI_CC_EV_DWORD0(code); |
155 | event->dword[1] = MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT); |
156 | |
157 | ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0); |
158 | kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event); |
159 | |
160 | return ret; |
161 | } |
162 | |
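/*
 * Process a single command ring element (channel START/STOP/RESET) issued by
 * the host and report the status back through a command completion event.
 */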
163 | static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el) |
164 | { |
165 | struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; |
166 | struct device *dev = &mhi_cntrl->mhi_dev->dev; |
167 | struct mhi_result result = {}; |
168 | struct mhi_ep_chan *mhi_chan; |
169 | struct mhi_ep_ring *ch_ring; |
170 | u32 tmp, ch_id; |
171 | int ret; |
172 | |
173 | ch_id = MHI_TRE_GET_CMD_CHID(el); |
174 | |
175 | /* Check if the channel is supported by the controller */ |
176 | if ((ch_id >= mhi_cntrl->max_chan) || !mhi_cntrl->mhi_chan[ch_id].name) { |
177 | dev_dbg(dev, "Channel (%u) not supported!\n", ch_id); |
178 | return -ENODEV; |
179 | } |
180 | |
181 | mhi_chan = &mhi_cntrl->mhi_chan[ch_id]; |
182 | ch_ring = &mhi_cntrl->mhi_chan[ch_id].ring; |
183 | |
184 | switch (MHI_TRE_GET_CMD_TYPE(el)) { |
185 | case MHI_PKT_TYPE_START_CHAN_CMD: |
186 | dev_dbg(dev, "Received START command for channel (%u)\n", ch_id); |
187 | |
188 | mutex_lock(&mhi_chan->lock); |
189 | /* Initialize and configure the corresponding channel ring */ |
190 | if (!ch_ring->started) { |
191 | ret = mhi_ep_ring_start(mhi_cntrl, ch_ring, |
192 | (union mhi_ep_ring_ctx *)&mhi_cntrl->ch_ctx_cache[ch_id]); |
193 | if (ret) { |
194 | dev_err(dev, "Failed to start ring for channel (%u)\n", ch_id); |
195 | ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, |
196 | MHI_EV_CC_UNDEFINED_ERR); |
197 | if (ret) |
198 | dev_err(dev, "Error sending completion event: %d\n", ret); |
199 | |
200 | goto err_unlock; |
201 | } |
202 | |
203 | mhi_chan->rd_offset = ch_ring->rd_offset; |
204 | } |
205 | |
206 | /* Set channel state to RUNNING */ |
207 | mhi_chan->state = MHI_CH_STATE_RUNNING; |
208 | tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg); |
209 | tmp &= ~CHAN_CTX_CHSTATE_MASK; |
210 | tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING); |
211 | mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp); |
212 | |
213 | ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS); |
214 | if (ret) { |
215 | dev_err(dev, "Error sending command completion event (%u)\n", |
216 | MHI_EV_CC_SUCCESS); |
217 | goto err_unlock; |
218 | } |
219 | |
220 | mutex_unlock(&mhi_chan->lock); |
221 | |
222 | /* |
223 | * Create MHI device only during UL channel start. Since the MHI |
224 | * channels operate in a pair, we'll associate both UL and DL |
225 | * channels to the same device. |
226 | * |
227 | * We also need to check for mhi_dev != NULL because the host |
228 | * will issue START_CHAN command during resume and we don't |
229 | * destroy the device during suspend. |
230 | */ |
231 | if (!(ch_id % 2) && !mhi_chan->mhi_dev) { |
232 | ret = mhi_ep_create_device(mhi_cntrl, ch_id); |
233 | if (ret) { |
234 | dev_err(dev, "Error creating device for channel (%u)\n", ch_id); |
235 | mhi_ep_handle_syserr(mhi_cntrl); |
236 | return ret; |
237 | } |
238 | } |
239 | |
240 | /* Finally, enable DB for the channel */ |
241 | mhi_ep_mmio_enable_chdb(mhi_cntrl, ch_id); |
242 | |
243 | break; |
244 | case MHI_PKT_TYPE_STOP_CHAN_CMD: |
245 | dev_dbg(dev, "Received STOP command for channel (%u)\n", ch_id); |
246 | if (!ch_ring->started) { |
247 | dev_err(dev, "Channel (%u) not opened\n", ch_id); |
248 | return -ENODEV; |
249 | } |
250 | |
251 | mutex_lock(&mhi_chan->lock); |
252 | /* Disable DB for the channel */ |
253 | mhi_ep_mmio_disable_chdb(mhi_cntrl, ch_id); |
254 | |
255 | /* Send channel disconnect status to client drivers */ |
256 | if (mhi_chan->xfer_cb) { |
257 | result.transaction_status = -ENOTCONN; |
258 | result.bytes_xferd = 0; |
259 | mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); |
260 | } |
261 | |
262 | /* Set channel state to STOP */ |
263 | mhi_chan->state = MHI_CH_STATE_STOP; |
264 | tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg); |
265 | tmp &= ~CHAN_CTX_CHSTATE_MASK; |
266 | tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_STOP); |
267 | mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp); |
268 | |
269 | ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS); |
270 | if (ret) { |
271 | dev_err(dev, "Error sending command completion event (%u)\n", |
272 | MHI_EV_CC_SUCCESS); |
273 | goto err_unlock; |
274 | } |
275 | |
276 | mutex_unlock(&mhi_chan->lock); |
277 | break; |
278 | case MHI_PKT_TYPE_RESET_CHAN_CMD: |
279 | dev_dbg(dev, "Received RESET command for channel (%u)\n", ch_id); |
280 | if (!ch_ring->started) { |
281 | dev_err(dev, "Channel (%u) not opened\n", ch_id); |
282 | return -ENODEV; |
283 | } |
284 | |
285 | mutex_lock(&mhi_chan->lock); |
286 | /* Stop and reset the transfer ring */ |
287 | mhi_ep_ring_reset(mhi_cntrl, ch_ring); |
288 | |
289 | /* Send channel disconnect status to client driver */ |
290 | if (mhi_chan->xfer_cb) { |
291 | result.transaction_status = -ENOTCONN; |
292 | result.bytes_xferd = 0; |
293 | mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); |
294 | } |
295 | |
296 | /* Set channel state to DISABLED */ |
297 | mhi_chan->state = MHI_CH_STATE_DISABLED; |
298 | tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg); |
299 | tmp &= ~CHAN_CTX_CHSTATE_MASK; |
300 | tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED); |
301 | mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp); |
302 | |
303 | ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS); |
304 | if (ret) { |
305 | dev_err(dev, "Error sending command completion event (%u)\n", |
306 | MHI_EV_CC_SUCCESS); |
307 | goto err_unlock; |
308 | } |
309 | |
310 | mutex_unlock(&mhi_chan->lock); |
311 | break; |
312 | default: |
313 | dev_err(dev, "Invalid command received: %lu for channel (%u)\n", |
314 | MHI_TRE_GET_CMD_TYPE(el), ch_id); |
315 | return -EINVAL; |
316 | } |
317 | |
318 | return 0; |
319 | |
320 | err_unlock: |
321 | mutex_unlock(&mhi_chan->lock); |
322 | |
323 | return ret; |
324 | } |
325 | |
326 | bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir) |
327 | { |
328 | struct mhi_ep_chan *mhi_chan = (dir == DMA_FROM_DEVICE) ? mhi_dev->dl_chan : |
329 | mhi_dev->ul_chan; |
330 | struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl; |
331 | struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring; |
332 | |
333 | return !!(mhi_chan->rd_offset == ring->wr_offset); |
334 | } |
335 | EXPORT_SYMBOL_GPL(mhi_ep_queue_is_empty); |
336 | |
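/*
 * Completion callback for async reads from the host. Hands the data over to
 * the client driver, sends an EOB/EOT completion event if the TRE asks for one
 * and frees the TRE buffer.
 */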
337 | static void mhi_ep_read_completion(struct mhi_ep_buf_info *buf_info) |
338 | { |
339 | struct mhi_ep_device *mhi_dev = buf_info->mhi_dev; |
340 | struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl; |
341 | struct mhi_ep_chan *mhi_chan = mhi_dev->ul_chan; |
342 | struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring; |
343 | struct mhi_ring_element *el = &ring->ring_cache[ring->rd_offset]; |
344 | struct mhi_result result = {}; |
345 | int ret; |
346 | |
347 | if (mhi_chan->xfer_cb) { |
348 | result.buf_addr = buf_info->cb_buf; |
349 | result.dir = mhi_chan->dir; |
350 | result.bytes_xferd = buf_info->size; |
351 | |
352 | mhi_chan->xfer_cb(mhi_dev, &result); |
353 | } |
354 | |
355 | /* |
356 | * The host will split the data packet into multiple TREs if it can't fit |
357 | * the packet in a single TRE. In that case, CHAIN flag will be set by the |
358 | * host for all TREs except the last one. |
359 | */ |
360 | if (buf_info->code != MHI_EV_CC_OVERFLOW) { |
361 | if (MHI_TRE_DATA_GET_CHAIN(el)) { |
362 | /* |
363 | * IEOB (Interrupt on End of Block) flag will be set by the host if |
364 | * it expects the completion event for all TREs of a TD. |
365 | */ |
366 | if (MHI_TRE_DATA_GET_IEOB(el)) { |
367 | ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, |
368 | MHI_TRE_DATA_GET_LEN(el), |
369 | MHI_EV_CC_EOB); |
370 | if (ret < 0) { |
371 | dev_err(&mhi_chan->mhi_dev->dev, |
372 | "Error sending transfer compl. event\n" ); |
373 | goto err_free_tre_buf; |
374 | } |
375 | } |
376 | } else { |
377 | /* |
378 | * IEOT (Interrupt on End of Transfer) flag will be set by the host |
379 | * for the last TRE of the TD and expects the completion event for |
380 | * the same. |
381 | */ |
382 | if (MHI_TRE_DATA_GET_IEOT(el)) { |
383 | ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, |
384 | MHI_TRE_DATA_GET_LEN(el), |
385 | MHI_EV_CC_EOT); |
386 | if (ret < 0) { |
387 | dev_err(&mhi_chan->mhi_dev->dev, |
388 | "Error sending transfer compl. event\n" ); |
389 | goto err_free_tre_buf; |
390 | } |
391 | } |
392 | } |
393 | } |
394 | |
395 | mhi_ep_ring_inc_index(ring); |
396 | |
397 | err_free_tre_buf: |
398 | kmem_cache_free(mhi_cntrl->tre_buf_cache, buf_info->cb_buf); |
399 | } |
400 | |
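/*
 * Read data from the host for an UL channel by walking its transfer ring and
 * issuing async reads (completed in mhi_ep_read_completion()) until either the
 * default MTU worth of data has been queued or the TD is exhausted.
 */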
401 | static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl, |
402 | struct mhi_ep_ring *ring) |
403 | { |
404 | struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id]; |
405 | struct device *dev = &mhi_cntrl->mhi_dev->dev; |
406 | size_t tr_len, read_offset, write_offset; |
407 | struct mhi_ep_buf_info buf_info = {}; |
408 | u32 len = MHI_EP_DEFAULT_MTU; |
409 | struct mhi_ring_element *el; |
410 | bool tr_done = false; |
411 | void *buf_addr; |
412 | u32 buf_left; |
413 | int ret; |
414 | |
415 | buf_left = len; |
416 | |
417 | do { |
418 | /* Don't process the transfer ring if the channel is not in RUNNING state */ |
419 | if (mhi_chan->state != MHI_CH_STATE_RUNNING) { |
420 | dev_err(dev, "Channel not available\n"); |
421 | return -ENODEV; |
422 | } |
423 | |
424 | el = &ring->ring_cache[mhi_chan->rd_offset]; |
425 | |
426 | /* Check if there is data pending to be read from previous read operation */ |
427 | if (mhi_chan->tre_bytes_left) { |
428 | dev_dbg(dev, "TRE bytes remaining: %u\n", mhi_chan->tre_bytes_left); |
429 | tr_len = min(buf_left, mhi_chan->tre_bytes_left); |
430 | } else { |
431 | mhi_chan->tre_loc = MHI_TRE_DATA_GET_PTR(el); |
432 | mhi_chan->tre_size = MHI_TRE_DATA_GET_LEN(el); |
433 | mhi_chan->tre_bytes_left = mhi_chan->tre_size; |
434 | |
435 | tr_len = min(buf_left, mhi_chan->tre_size); |
436 | } |
437 | |
438 | read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left; |
439 | write_offset = len - buf_left; |
440 | |
441 | buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL | GFP_DMA); |
442 | if (!buf_addr) |
443 | return -ENOMEM; |
444 | |
445 | buf_info.host_addr = mhi_chan->tre_loc + read_offset; |
446 | buf_info.dev_addr = buf_addr + write_offset; |
447 | buf_info.size = tr_len; |
448 | buf_info.cb = mhi_ep_read_completion; |
449 | buf_info.cb_buf = buf_addr; |
450 | buf_info.mhi_dev = mhi_chan->mhi_dev; |
451 | |
452 | if (mhi_chan->tre_bytes_left - tr_len) |
453 | buf_info.code = MHI_EV_CC_OVERFLOW; |
454 | |
455 | dev_dbg(dev, "Reading %zd bytes from channel (%u)\n", tr_len, ring->ch_id); |
456 | ret = mhi_cntrl->read_async(mhi_cntrl, &buf_info); |
457 | if (ret < 0) { |
458 | dev_err(&mhi_chan->mhi_dev->dev, "Error reading from channel\n"); |
459 | goto err_free_buf_addr; |
460 | } |
461 | |
462 | buf_left -= tr_len; |
463 | mhi_chan->tre_bytes_left -= tr_len; |
464 | |
465 | if (!mhi_chan->tre_bytes_left) { |
466 | if (MHI_TRE_DATA_GET_IEOT(el)) |
467 | tr_done = true; |
468 | |
469 | mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size; |
470 | } |
471 | } while (buf_left && !tr_done); |
472 | |
473 | return 0; |
474 | |
475 | err_free_buf_addr: |
476 | kmem_cache_free(mhi_cntrl->tre_buf_cache, buf_addr); |
477 | |
478 | return ret; |
479 | } |
480 | |
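/*
 * Process a channel ring doorbell: DL channels are simply reported to the
 * client driver, UL channels are read until the transfer ring becomes empty.
 */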
481 | static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring) |
482 | { |
483 | struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; |
484 | struct mhi_result result = {}; |
485 | struct mhi_ep_chan *mhi_chan; |
486 | int ret; |
487 | |
488 | mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id]; |
489 | |
490 | /* |
491 | * Bail out if transfer callback is not registered for the channel. |
492 | * This is most likely due to the client driver not loaded at this point. |
493 | */ |
494 | if (!mhi_chan->xfer_cb) { |
495 | dev_err(&mhi_chan->mhi_dev->dev, "Client driver not available\n"); |
496 | return -ENODEV; |
497 | } |
498 | |
499 | if (ring->ch_id % 2) { |
500 | /* DL channel */ |
501 | result.dir = mhi_chan->dir; |
502 | mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); |
503 | } else { |
504 | /* UL channel */ |
505 | do { |
506 | ret = mhi_ep_read_channel(mhi_cntrl, ring); |
507 | if (ret < 0) { |
508 | dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n"); |
509 | return ret; |
510 | } |
511 | |
512 | /* Read until the ring becomes empty */ |
513 | } while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE)); |
514 | } |
515 | |
516 | return 0; |
517 | } |
518 | |
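/*
 * Completion callback for mhi_ep_queue_skb(): notifies the client driver and
 * sends the transfer completion event for the TRE that was just consumed.
 */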
519 | static void mhi_ep_skb_completion(struct mhi_ep_buf_info *buf_info) |
520 | { |
521 | struct mhi_ep_device *mhi_dev = buf_info->mhi_dev; |
522 | struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl; |
523 | struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan; |
524 | struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring; |
525 | struct mhi_ring_element *el = &ring->ring_cache[ring->rd_offset]; |
526 | struct device *dev = &mhi_dev->dev; |
527 | struct mhi_result result = {}; |
528 | int ret; |
529 | |
530 | if (mhi_chan->xfer_cb) { |
531 | result.buf_addr = buf_info->cb_buf; |
532 | result.dir = mhi_chan->dir; |
533 | result.bytes_xferd = buf_info->size; |
534 | |
535 | mhi_chan->xfer_cb(mhi_dev, &result); |
536 | } |
537 | |
538 | ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, buf_info->size, |
539 | buf_info->code); |
540 | if (ret) { |
541 | dev_err(dev, "Error sending transfer completion event\n"); |
542 | return; |
543 | } |
544 | |
545 | mhi_ep_ring_inc_index(ring); |
546 | } |
547 | |
548 | /* TODO: Handle partially formed TDs */ |
549 | int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb) |
550 | { |
551 | struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl; |
552 | struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan; |
553 | struct device *dev = &mhi_chan->mhi_dev->dev; |
554 | struct mhi_ep_buf_info buf_info = {}; |
555 | struct mhi_ring_element *el; |
556 | u32 buf_left, read_offset; |
557 | struct mhi_ep_ring *ring; |
558 | size_t tr_len; |
559 | u32 tre_len; |
560 | int ret; |
561 | |
562 | buf_left = skb->len; |
563 | ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring; |
564 | |
565 | mutex_lock(&mhi_chan->lock); |
566 | |
567 | do { |
568 | /* Don't process the transfer ring if the channel is not in RUNNING state */ |
569 | if (mhi_chan->state != MHI_CH_STATE_RUNNING) { |
570 | dev_err(dev, "Channel not available\n"); |
571 | ret = -ENODEV; |
572 | goto err_exit; |
573 | } |
574 | |
575 | if (mhi_ep_queue_is_empty(mhi_dev, DMA_FROM_DEVICE)) { |
576 | dev_err(dev, "TRE not available!\n"); |
577 | ret = -ENOSPC; |
578 | goto err_exit; |
579 | } |
580 | |
581 | el = &ring->ring_cache[mhi_chan->rd_offset]; |
582 | tre_len = MHI_TRE_DATA_GET_LEN(el); |
583 | |
584 | tr_len = min(buf_left, tre_len); |
585 | read_offset = skb->len - buf_left; |
586 | |
587 | buf_info.dev_addr = skb->data + read_offset; |
588 | buf_info.host_addr = MHI_TRE_DATA_GET_PTR(el); |
589 | buf_info.size = tr_len; |
590 | buf_info.cb = mhi_ep_skb_completion; |
591 | buf_info.cb_buf = skb; |
592 | buf_info.mhi_dev = mhi_dev; |
593 | |
594 | /* |
595 | * For all TREs queued by the host for DL channel, only the EOT flag will be set. |
596 | * If the packet doesn't fit into a single TRE, send the OVERFLOW event to |
597 | * the host so that the host can adjust the packet boundary to next TREs. Else send |
598 | * the EOT event to the host indicating the packet boundary. |
599 | */ |
600 | if (buf_left - tr_len) |
601 | buf_info.code = MHI_EV_CC_OVERFLOW; |
602 | else |
603 | buf_info.code = MHI_EV_CC_EOT; |
604 | |
605 | dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id); |
606 | ret = mhi_cntrl->write_async(mhi_cntrl, &buf_info); |
607 | if (ret < 0) { |
608 | dev_err(dev, "Error writing to the channel\n"); |
609 | goto err_exit; |
610 | } |
611 | |
612 | buf_left -= tr_len; |
613 | |
614 | /* |
615 | * Update the read offset cached in mhi_chan. Actual read offset |
616 | * will be updated by the completion handler. |
617 | */ |
618 | mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size; |
619 | } while (buf_left); |
620 | |
621 | mutex_unlock(&mhi_chan->lock); |
622 | |
623 | return 0; |
624 | |
625 | err_exit: |
626 | mutex_unlock(&mhi_chan->lock); |
627 | |
628 | return ret; |
629 | } |
630 | EXPORT_SYMBOL_GPL(mhi_ep_queue_skb); |
631 | |
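/*
 * Cache the channel, event and command context arrays shared by the host in
 * endpoint memory and start the command ring.
 */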
632 | static int mhi_ep_cache_host_cfg(struct mhi_ep_cntrl *mhi_cntrl) |
633 | { |
634 | size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size; |
635 | struct device *dev = &mhi_cntrl->mhi_dev->dev; |
636 | int ret; |
637 | |
638 | /* Update the number of event rings (NER) programmed by the host */ |
639 | mhi_ep_mmio_update_ner(mhi_cntrl); |
640 | |
641 | dev_dbg(dev, "Number of Event rings: %u, HW Event rings: %u\n", |
642 | mhi_cntrl->event_rings, mhi_cntrl->hw_event_rings); |
643 | |
644 | ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan; |
645 | ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings; |
646 | cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS; |
647 | |
648 | /* Get the channel context base pointer from host */ |
649 | mhi_ep_mmio_get_chc_base(mhi_cntrl); |
650 | |
651 | /* Allocate and map memory for caching host channel context */ |
652 | ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, |
653 | &mhi_cntrl->ch_ctx_cache_phys, |
654 | (void __iomem **) &mhi_cntrl->ch_ctx_cache, |
655 | ch_ctx_host_size); |
656 | if (ret) { |
657 | dev_err(dev, "Failed to allocate and map ch_ctx_cache\n"); |
658 | return ret; |
659 | } |
660 | |
661 | /* Get the event context base pointer from host */ |
662 | mhi_ep_mmio_get_erc_base(mhi_cntrl); |
663 | |
664 | /* Allocate and map memory for caching host event context */ |
665 | ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, |
666 | &mhi_cntrl->ev_ctx_cache_phys, |
667 | (void __iomem **) &mhi_cntrl->ev_ctx_cache, |
668 | ev_ctx_host_size); |
669 | if (ret) { |
670 | dev_err(dev, "Failed to allocate and map ev_ctx_cache\n"); |
671 | goto err_ch_ctx; |
672 | } |
673 | |
674 | /* Get the command context base pointer from host */ |
675 | mhi_ep_mmio_get_crc_base(mhi_cntrl); |
676 | |
677 | /* Allocate and map memory for caching host command context */ |
678 | ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, |
679 | &mhi_cntrl->cmd_ctx_cache_phys, |
680 | (void __iomem **) &mhi_cntrl->cmd_ctx_cache, |
681 | cmd_ctx_host_size); |
682 | if (ret) { |
683 | dev_err(dev, "Failed to allocate and map cmd_ctx_cache\n"); |
684 | goto err_ev_ctx; |
685 | } |
686 | |
687 | /* Initialize command ring */ |
688 | ret = mhi_ep_ring_start(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring, |
689 | (union mhi_ep_ring_ctx *)mhi_cntrl->cmd_ctx_cache); |
690 | if (ret) { |
691 | dev_err(dev, "Failed to start the command ring\n"); |
692 | goto err_cmd_ctx; |
693 | } |
694 | |
695 | return ret; |
696 | |
697 | err_cmd_ctx: |
698 | mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys, |
699 | (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size); |
700 | |
701 | err_ev_ctx: |
702 | mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys, |
703 | (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size); |
704 | |
705 | err_ch_ctx: |
706 | mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys, |
707 | (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size); |
708 | |
709 | return ret; |
710 | } |
711 | |
712 | static void mhi_ep_free_host_cfg(struct mhi_ep_cntrl *mhi_cntrl) |
713 | { |
714 | size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size; |
715 | |
716 | ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan; |
717 | ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings; |
718 | cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS; |
719 | |
720 | mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys, |
721 | (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size); |
722 | |
723 | mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys, |
724 | (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size); |
725 | |
726 | mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys, |
727 | (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size); |
728 | } |
729 | |
730 | static void mhi_ep_enable_int(struct mhi_ep_cntrl *mhi_cntrl) |
731 | { |
732 | /* |
733 | * Doorbell interrupts are enabled when the corresponding channel gets started. |
734 | * Enabling all interrupts here triggers spurious irqs as some of the interrupts |
735 | * associated with hw channels always get triggered. |
736 | */ |
737 | mhi_ep_mmio_enable_ctrl_interrupt(mhi_cntrl); |
738 | mhi_ep_mmio_enable_cmdb_interrupt(mhi_cntrl); |
739 | } |
740 | |
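/*
 * Wait for the host to move MHI into the M0 state, then cache the host
 * configuration, advertise the AMSS execution environment and enable the
 * control and command doorbell interrupts.
 */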
741 | static int mhi_ep_enable(struct mhi_ep_cntrl *mhi_cntrl) |
742 | { |
743 | struct device *dev = &mhi_cntrl->mhi_dev->dev; |
744 | enum mhi_state state; |
745 | bool mhi_reset; |
746 | u32 count = 0; |
747 | int ret; |
748 | |
749 | /* Wait for Host to set the M0 state */ |
750 | do { |
751 | msleep(M0_WAIT_DELAY_MS); |
752 | mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset); |
753 | if (mhi_reset) { |
754 | /* Clear the MHI reset if host is in reset state */ |
755 | mhi_ep_mmio_clear_reset(mhi_cntrl); |
756 | dev_info(dev, "Detected Host reset while waiting for M0\n"); |
757 | } |
758 | count++; |
759 | } while (state != MHI_STATE_M0 && count < M0_WAIT_COUNT); |
760 | |
761 | if (state != MHI_STATE_M0) { |
762 | dev_err(dev, "Host failed to enter M0\n"); |
763 | return -ETIMEDOUT; |
764 | } |
765 | |
766 | ret = mhi_ep_cache_host_cfg(mhi_cntrl); |
767 | if (ret) { |
768 | dev_err(dev, "Failed to cache host config\n"); |
769 | return ret; |
770 | } |
771 | |
772 | mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS); |
773 | |
774 | /* Enable all interrupts now */ |
775 | mhi_ep_enable_int(mhi_cntrl); |
776 | |
777 | return 0; |
778 | } |
779 | |
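/* Work item for draining the command ring after a command doorbell interrupt */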
780 | static void mhi_ep_cmd_ring_worker(struct work_struct *work) |
781 | { |
782 | struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, cmd_ring_work); |
783 | struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring; |
784 | struct device *dev = &mhi_cntrl->mhi_dev->dev; |
785 | struct mhi_ring_element *el; |
786 | int ret; |
787 | |
788 | /* Update the write offset for the ring */ |
789 | ret = mhi_ep_update_wr_offset(ring); |
790 | if (ret) { |
791 | dev_err(dev, "Error updating write offset for ring\n"); |
792 | return; |
793 | } |
794 | |
795 | /* Sanity check to make sure there are elements in the ring */ |
796 | if (ring->rd_offset == ring->wr_offset) |
797 | return; |
798 | |
799 | /* |
800 | * Process command ring element till write offset. In case of an error, just try to |
801 | * process next element. |
802 | */ |
803 | while (ring->rd_offset != ring->wr_offset) { |
804 | el = &ring->ring_cache[ring->rd_offset]; |
805 | |
806 | ret = mhi_ep_process_cmd_ring(ring, el); |
807 | if (ret && ret != -ENODEV) |
808 | dev_err(dev, "Error processing cmd ring element: %zu\n", ring->rd_offset); |
809 | |
810 | mhi_ep_ring_inc_index(ring); |
811 | } |
812 | } |
813 | |
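/* Work item for processing the channel rings queued by the channel doorbell interrupt */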
814 | static void mhi_ep_ch_ring_worker(struct work_struct *work) |
815 | { |
816 | struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, ch_ring_work); |
817 | struct device *dev = &mhi_cntrl->mhi_dev->dev; |
818 | struct mhi_ep_ring_item *itr, *tmp; |
819 | struct mhi_ep_ring *ring; |
820 | struct mhi_ep_chan *chan; |
821 | unsigned long flags; |
822 | LIST_HEAD(head); |
823 | int ret; |
824 | |
825 | spin_lock_irqsave(&mhi_cntrl->list_lock, flags); |
826 | list_splice_tail_init(&mhi_cntrl->ch_db_list, &head); |
827 | spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags); |
828 | |
829 | /* Process each queued channel ring. In case of an error, just process next element. */ |
830 | list_for_each_entry_safe(itr, tmp, &head, node) { |
831 | list_del(&itr->node); |
832 | ring = itr->ring; |
833 | |
834 | chan = &mhi_cntrl->mhi_chan[ring->ch_id]; |
835 | mutex_lock(&chan->lock); |
836 | |
837 | /* |
838 | * The ring could've stopped while we waited to grab the (chan->lock), so do |
839 | * a sanity check before going further. |
840 | */ |
841 | if (!ring->started) { |
842 | mutex_unlock(&chan->lock); |
843 | kfree(itr); |
844 | continue; |
845 | } |
846 | |
847 | /* Update the write offset for the ring */ |
848 | ret = mhi_ep_update_wr_offset(ring); |
849 | if (ret) { |
850 | dev_err(dev, "Error updating write offset for ring\n"); |
851 | mutex_unlock(&chan->lock); |
852 | kmem_cache_free(mhi_cntrl->ring_item_cache, itr); |
853 | continue; |
854 | } |
855 | |
856 | /* Sanity check to make sure there are elements in the ring */ |
857 | if (chan->rd_offset == ring->wr_offset) { |
858 | mutex_unlock(&chan->lock); |
859 | kmem_cache_free(mhi_cntrl->ring_item_cache, itr); |
860 | continue; |
861 | } |
862 | |
863 | dev_dbg(dev, "Processing the ring for channel (%u)\n", ring->ch_id); |
864 | ret = mhi_ep_process_ch_ring(ring); |
865 | if (ret) { |
866 | dev_err(dev, "Error processing ring for channel (%u): %d\n", |
867 | ring->ch_id, ret); |
868 | mutex_unlock(&chan->lock); |
869 | kmem_cache_free(mhi_cntrl->ring_item_cache, itr); |
870 | continue; |
871 | } |
872 | |
873 | mutex_unlock(&chan->lock); |
874 | kmem_cache_free(mhi_cntrl->ring_item_cache, itr); |
875 | } |
876 | } |
877 | |
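/* Work item for handling the M0/M3 state transitions requested by the host */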
878 | static void mhi_ep_state_worker(struct work_struct *work) |
879 | { |
880 | struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, state_work); |
881 | struct device *dev = &mhi_cntrl->mhi_dev->dev; |
882 | struct mhi_ep_state_transition *itr, *tmp; |
883 | unsigned long flags; |
884 | LIST_HEAD(head); |
885 | int ret; |
886 | |
887 | spin_lock_irqsave(&mhi_cntrl->list_lock, flags); |
888 | list_splice_tail_init(&mhi_cntrl->st_transition_list, &head); |
889 | spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags); |
890 | |
891 | list_for_each_entry_safe(itr, tmp, &head, node) { |
892 | list_del(&itr->node); |
893 | dev_dbg(dev, "Handling MHI state transition to %s\n", |
894 | mhi_state_str(itr->state)); |
895 | |
896 | switch (itr->state) { |
897 | case MHI_STATE_M0: |
898 | ret = mhi_ep_set_m0_state(mhi_cntrl); |
899 | if (ret) |
900 | dev_err(dev, "Failed to transition to M0 state\n"); |
901 | break; |
902 | case MHI_STATE_M3: |
903 | ret = mhi_ep_set_m3_state(mhi_cntrl); |
904 | if (ret) |
905 | dev_err(dev, "Failed to transition to M3 state\n"); |
906 | break; |
907 | default: |
908 | dev_err(dev, "Invalid MHI state transition: %d\n", itr->state); |
909 | break; |
910 | } |
911 | kfree(itr); |
912 | } |
913 | } |
914 | |
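/*
 * Queue a ring item for every channel whose doorbell bit is set in ch_int and
 * kick the channel ring worker.
 */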
915 | static void mhi_ep_queue_channel_db(struct mhi_ep_cntrl *mhi_cntrl, unsigned long ch_int, |
916 | u32 ch_idx) |
917 | { |
918 | struct mhi_ep_ring_item *item; |
919 | struct mhi_ep_ring *ring; |
920 | bool work = !!ch_int; |
921 | LIST_HEAD(head); |
922 | u32 i; |
923 | |
924 | /* First add the ring items to a local list */ |
925 | for_each_set_bit(i, &ch_int, 32) { |
926 | /* Channel index varies for each register: 0, 32, 64, 96 */ |
927 | u32 ch_id = ch_idx + i; |
928 | |
929 | ring = &mhi_cntrl->mhi_chan[ch_id].ring; |
930 | item = kmem_cache_zalloc(mhi_cntrl->ring_item_cache, GFP_ATOMIC); |
931 | if (!item) |
932 | return; |
933 | |
934 | item->ring = ring; |
935 | list_add_tail(&item->node, &head); |
936 | } |
937 | |
938 | /* Now, splice the local list into ch_db_list and queue the work item */ |
939 | if (work) { |
940 | spin_lock(&mhi_cntrl->list_lock); |
941 | list_splice_tail_init(&head, &mhi_cntrl->ch_db_list); |
942 | spin_unlock(&mhi_cntrl->list_lock); |
943 | |
944 | queue_work(mhi_cntrl->wq, &mhi_cntrl->ch_ring_work); |
945 | } |
946 | } |
947 | |
948 | /* |
949 | * Channel interrupt statuses are contained in 4 registers each of 32bit length. |
950 | * For checking all interrupts, we need to loop through each register and then |
951 | * check for bits set. |
952 | */ |
953 | static void mhi_ep_check_channel_interrupt(struct mhi_ep_cntrl *mhi_cntrl) |
954 | { |
955 | u32 ch_int, ch_idx, i; |
956 | |
957 | /* Bail out if there is no channel doorbell interrupt */ |
958 | if (!mhi_ep_mmio_read_chdb_status_interrupts(mhi_cntrl)) |
959 | return; |
960 | |
961 | for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) { |
962 | ch_idx = i * MHI_MASK_CH_LEN; |
963 | |
964 | /* Only process channel interrupt if the mask is enabled */ |
965 | ch_int = mhi_cntrl->chdb[i].status & mhi_cntrl->chdb[i].mask; |
966 | if (ch_int) { |
967 | mhi_ep_queue_channel_db(mhi_cntrl, ch_int, ch_idx); |
968 | mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_n(i), |
969 | mhi_cntrl->chdb[i].status); |
970 | } |
971 | } |
972 | } |
973 | |
974 | static void mhi_ep_process_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl, |
975 | enum mhi_state state) |
976 | { |
977 | struct mhi_ep_state_transition *item; |
978 | |
979 | item = kzalloc(sizeof(*item), GFP_ATOMIC); |
980 | if (!item) |
981 | return; |
982 | |
983 | item->state = state; |
984 | spin_lock(&mhi_cntrl->list_lock); |
985 | list_add_tail(&item->node, &mhi_cntrl->st_transition_list); |
986 | spin_unlock(&mhi_cntrl->list_lock); |
987 | |
988 | queue_work(mhi_cntrl->wq, &mhi_cntrl->state_work); |
989 | } |
990 | |
991 | /* |
992 | * Interrupt handler that services interrupts raised by the host writing to |
993 | * MHICTRL and Command ring doorbell (CRDB) registers for state change and |
994 | * channel interrupts. |
995 | */ |
996 | static irqreturn_t mhi_ep_irq(int irq, void *data) |
997 | { |
998 | struct mhi_ep_cntrl *mhi_cntrl = data; |
999 | struct device *dev = &mhi_cntrl->mhi_dev->dev; |
1000 | enum mhi_state state; |
1001 | u32 int_value; |
1002 | bool mhi_reset; |
1003 | |
1004 | /* Acknowledge the ctrl interrupt */ |
1005 | int_value = mhi_ep_mmio_read(mhi_cntrl, MHI_CTRL_INT_STATUS); |
1006 | mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR, int_value); |
1007 | |
1008 | /* Check for ctrl interrupt */ |
1009 | if (FIELD_GET(MHI_CTRL_INT_STATUS_MSK, int_value)) { |
1010 | dev_dbg(dev, "Processing ctrl interrupt\n"); |
1011 | mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset); |
1012 | if (mhi_reset) { |
1013 | dev_info(dev, "Host triggered MHI reset!\n"); |
1014 | disable_irq_nosync(mhi_cntrl->irq); |
1015 | schedule_work(&mhi_cntrl->reset_work); |
1016 | return IRQ_HANDLED; |
1017 | } |
1018 | |
1019 | mhi_ep_process_ctrl_interrupt(mhi_cntrl, state); |
1020 | } |
1021 | |
1022 | /* Check for command doorbell interrupt */ |
1023 | if (FIELD_GET(MHI_CTRL_INT_STATUS_CRDB_MSK, int_value)) { |
1024 | dev_dbg(dev, "Processing command doorbell interrupt\n"); |
1025 | queue_work(mhi_cntrl->wq, &mhi_cntrl->cmd_ring_work); |
1026 | } |
1027 | |
1028 | /* Check for channel interrupts */ |
1029 | mhi_ep_check_channel_interrupt(mhi_cntrl); |
1030 | |
1031 | return IRQ_HANDLED; |
1032 | } |
1033 | |
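/*
 * Tear down all channel, event and command rings, destroy the channel devices
 * and release the cached host context. Called while powering down the stack.
 */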
1034 | static void mhi_ep_abort_transfer(struct mhi_ep_cntrl *mhi_cntrl) |
1035 | { |
1036 | struct mhi_ep_ring *ch_ring, *ev_ring; |
1037 | struct mhi_result result = {}; |
1038 | struct mhi_ep_chan *mhi_chan; |
1039 | int i; |
1040 | |
1041 | /* Stop all the channels */ |
1042 | for (i = 0; i < mhi_cntrl->max_chan; i++) { |
1043 | mhi_chan = &mhi_cntrl->mhi_chan[i]; |
1044 | if (!mhi_chan->ring.started) |
1045 | continue; |
1046 | |
1047 | mutex_lock(&mhi_chan->lock); |
1048 | /* Send channel disconnect status to client drivers */ |
1049 | if (mhi_chan->xfer_cb) { |
1050 | result.transaction_status = -ENOTCONN; |
1051 | result.bytes_xferd = 0; |
1052 | mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); |
1053 | } |
1054 | |
1055 | mhi_chan->state = MHI_CH_STATE_DISABLED; |
1056 | mutex_unlock(&mhi_chan->lock); |
1057 | } |
1058 | |
1059 | flush_workqueue(mhi_cntrl->wq); |
1060 | |
1061 | /* Destroy devices associated with all channels */ |
1062 | device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_ep_destroy_device); |
1063 | |
1064 | /* Stop and reset the transfer rings */ |
1065 | for (i = 0; i < mhi_cntrl->max_chan; i++) { |
1066 | mhi_chan = &mhi_cntrl->mhi_chan[i]; |
1067 | if (!mhi_chan->ring.started) |
1068 | continue; |
1069 | |
1070 | ch_ring = &mhi_cntrl->mhi_chan[i].ring; |
1071 | mutex_lock(&mhi_chan->lock); |
1072 | mhi_ep_ring_reset(mhi_cntrl, ch_ring); |
1073 | mutex_unlock(&mhi_chan->lock); |
1074 | } |
1075 | |
1076 | /* Stop and reset the event rings */ |
1077 | for (i = 0; i < mhi_cntrl->event_rings; i++) { |
1078 | ev_ring = &mhi_cntrl->mhi_event[i].ring; |
1079 | if (!ev_ring->started) |
1080 | continue; |
1081 | |
1082 | mutex_lock(&mhi_cntrl->event_lock); |
1083 | mhi_ep_ring_reset(mhi_cntrl, ev_ring); |
1084 | mutex_unlock(&mhi_cntrl->event_lock); |
1085 | } |
1086 | |
1087 | /* Stop and reset the command ring */ |
1088 | mhi_ep_ring_reset(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring); |
1089 | |
1090 | mhi_ep_free_host_cfg(mhi_cntrl); |
1091 | mhi_ep_mmio_mask_interrupts(mhi_cntrl); |
1092 | |
1093 | mhi_cntrl->enabled = false; |
1094 | } |
1095 | |
1096 | static void mhi_ep_reset_worker(struct work_struct *work) |
1097 | { |
1098 | struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, reset_work); |
1099 | enum mhi_state cur_state; |
1100 | |
1101 | mhi_ep_power_down(mhi_cntrl); |
1102 | |
1103 | mutex_lock(&mhi_cntrl->state_lock); |
1104 | |
1105 | /* Reset MMIO to signal host that the MHI_RESET is completed in endpoint */ |
1106 | mhi_ep_mmio_reset(mhi_cntrl); |
1107 | cur_state = mhi_cntrl->mhi_state; |
1108 | |
1109 | /* |
1110 | * Only proceed further if the reset is due to SYS_ERR. The host will |
1111 | * issue reset during shutdown also and we don't need to do re-init in |
1112 | * that case. |
1113 | */ |
1114 | if (cur_state == MHI_STATE_SYS_ERR) |
1115 | mhi_ep_power_up(mhi_cntrl); |
1116 | |
1117 | mutex_unlock(&mhi_cntrl->state_lock); |
1118 | } |
1119 | |
1120 | /* |
1121 | * We don't need to do anything special other than setting the MHI SYS_ERR |
1122 | * state. The host will reset all contexts and issue MHI RESET so that we |
1123 | * could also recover from error state. |
1124 | */ |
1125 | void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl) |
1126 | { |
1127 | struct device *dev = &mhi_cntrl->mhi_dev->dev; |
1128 | int ret; |
1129 | |
1130 | ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR); |
1131 | if (ret) |
1132 | return; |
1133 | |
1134 | /* Signal host that the device went to SYS_ERR state */ |
1135 | ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_SYS_ERR); |
1136 | if (ret) |
1137 | dev_err(dev, "Failed sending SYS_ERR state change event: %d\n", ret); |
1138 | } |
1139 | |
1140 | int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl) |
1141 | { |
1142 | struct device *dev = &mhi_cntrl->mhi_dev->dev; |
1143 | int ret, i; |
1144 | |
1145 | /* |
1146 | * Mask all interrupts until the state machine is ready. Interrupts will |
1147 | * be enabled later with mhi_ep_enable(). |
1148 | */ |
1149 | mhi_ep_mmio_mask_interrupts(mhi_cntrl); |
1150 | mhi_ep_mmio_init(mhi_cntrl); |
1151 | |
1152 | mhi_cntrl->mhi_event = kcalloc(mhi_cntrl->event_rings, |
1153 | sizeof(*mhi_cntrl->mhi_event), |
1154 | GFP_KERNEL); |
1155 | if (!mhi_cntrl->mhi_event) |
1156 | return -ENOMEM; |
1157 | |
1158 | /* Initialize command, channel and event rings */ |
1159 | mhi_ep_ring_init(&mhi_cntrl->mhi_cmd->ring, RING_TYPE_CMD, 0); |
1160 | for (i = 0; i < mhi_cntrl->max_chan; i++) |
1161 | mhi_ep_ring_init(&mhi_cntrl->mhi_chan[i].ring, RING_TYPE_CH, i); |
1162 | for (i = 0; i < mhi_cntrl->event_rings; i++) |
1163 | mhi_ep_ring_init(&mhi_cntrl->mhi_event[i].ring, RING_TYPE_ER, i); |
1164 | |
1165 | mhi_cntrl->mhi_state = MHI_STATE_RESET; |
1166 | |
1167 | /* Set AMSS EE before signaling ready state */ |
1168 | mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS); |
1169 | |
1170 | /* All set, notify the host that we are ready */ |
1171 | ret = mhi_ep_set_ready_state(mhi_cntrl); |
1172 | if (ret) |
1173 | goto err_free_event; |
1174 | |
1175 | dev_dbg(dev, "READY state notification sent to the host\n"); |
1176 | |
1177 | ret = mhi_ep_enable(mhi_cntrl); |
1178 | if (ret) { |
1179 | dev_err(dev, "Failed to enable MHI endpoint\n"); |
1180 | goto err_free_event; |
1181 | } |
1182 | |
1183 | enable_irq(mhi_cntrl->irq); |
1184 | mhi_cntrl->enabled = true; |
1185 | |
1186 | return 0; |
1187 | |
1188 | err_free_event: |
1189 | kfree(mhi_cntrl->mhi_event); |
1190 | |
1191 | return ret; |
1192 | } |
1193 | EXPORT_SYMBOL_GPL(mhi_ep_power_up); |
1194 | |
1195 | void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl) |
1196 | { |
1197 | if (mhi_cntrl->enabled) { |
1198 | mhi_ep_abort_transfer(mhi_cntrl); |
1199 | kfree(mhi_cntrl->mhi_event); |
1200 | disable_irq(mhi_cntrl->irq); |
1201 | } |
1202 | } |
1203 | EXPORT_SYMBOL_GPL(mhi_ep_power_down); |
1204 | |
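/*
 * Mark every running channel as SUSPENDED, both in the local channel state and
 * in the cached channel context shared with the host.
 */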
1205 | void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl) |
1206 | { |
1207 | struct mhi_ep_chan *mhi_chan; |
1208 | u32 tmp; |
1209 | int i; |
1210 | |
1211 | for (i = 0; i < mhi_cntrl->max_chan; i++) { |
1212 | mhi_chan = &mhi_cntrl->mhi_chan[i]; |
1213 | |
1214 | if (!mhi_chan->mhi_dev) |
1215 | continue; |
1216 | |
1217 | mutex_lock(&mhi_chan->lock); |
1218 | /* Skip if the channel is not currently running */ |
1219 | tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg); |
1220 | if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_RUNNING) { |
1221 | mutex_unlock(&mhi_chan->lock); |
1222 | continue; |
1223 | } |
1224 | |
1225 | dev_dbg(&mhi_chan->mhi_dev->dev, "Suspending channel\n"); |
1226 | /* Set channel state to SUSPENDED */ |
1227 | mhi_chan->state = MHI_CH_STATE_SUSPENDED; |
1228 | tmp &= ~CHAN_CTX_CHSTATE_MASK; |
1229 | tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_SUSPENDED); |
1230 | mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp); |
1231 | mutex_unlock(&mhi_chan->lock); |
1232 | } |
1233 | } |
1234 | |
1235 | void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl) |
1236 | { |
1237 | struct mhi_ep_chan *mhi_chan; |
1238 | u32 tmp; |
1239 | int i; |
1240 | |
1241 | for (i = 0; i < mhi_cntrl->max_chan; i++) { |
1242 | mhi_chan = &mhi_cntrl->mhi_chan[i]; |
1243 | |
1244 | if (!mhi_chan->mhi_dev) |
1245 | continue; |
1246 | |
1247 | mutex_lock(&mhi_chan->lock); |
1248 | /* Skip if the channel is not currently suspended */ |
1249 | tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg); |
1250 | if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_SUSPENDED) { |
1251 | mutex_unlock(&mhi_chan->lock); |
1252 | continue; |
1253 | } |
1254 | |
1255 | dev_dbg(&mhi_chan->mhi_dev->dev, "Resuming channel\n"); |
1256 | /* Set channel state to RUNNING */ |
1257 | mhi_chan->state = MHI_CH_STATE_RUNNING; |
1258 | tmp &= ~CHAN_CTX_CHSTATE_MASK; |
1259 | tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING); |
1260 | mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp); |
1261 | mutex_unlock(&mhi_chan->lock); |
1262 | } |
1263 | } |
1264 | |
1265 | static void mhi_ep_release_device(struct device *dev) |
1266 | { |
1267 | struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); |
1268 | |
1269 | if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) |
1270 | mhi_dev->mhi_cntrl->mhi_dev = NULL; |
1271 | |
1272 | /* |
1273 | * We need to set the mhi_chan->mhi_dev to NULL here since the MHI |
1274 | * devices for the channels will only get created in mhi_ep_create_device() |
1275 | * if the mhi_dev associated with it is NULL. |
1276 | */ |
1277 | if (mhi_dev->ul_chan) |
1278 | mhi_dev->ul_chan->mhi_dev = NULL; |
1279 | |
1280 | if (mhi_dev->dl_chan) |
1281 | mhi_dev->dl_chan->mhi_dev = NULL; |
1282 | |
1283 | kfree(mhi_dev); |
1284 | } |
1285 | |
1286 | static struct mhi_ep_device *mhi_ep_alloc_device(struct mhi_ep_cntrl *mhi_cntrl, |
1287 | enum mhi_device_type dev_type) |
1288 | { |
1289 | struct mhi_ep_device *mhi_dev; |
1290 | struct device *dev; |
1291 | |
1292 | mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL); |
1293 | if (!mhi_dev) |
1294 | return ERR_PTR(-ENOMEM); |
1295 | |
1296 | dev = &mhi_dev->dev; |
1297 | device_initialize(dev); |
1298 | dev->bus = &mhi_ep_bus_type; |
1299 | dev->release = mhi_ep_release_device; |
1300 | |
1301 | /* Controller device is always allocated first */ |
1302 | if (dev_type == MHI_DEVICE_CONTROLLER) |
1303 | /* for MHI controller device, parent is the bus device (e.g. PCI EPF) */ |
1304 | dev->parent = mhi_cntrl->cntrl_dev; |
1305 | else |
1306 | /* for MHI client devices, parent is the MHI controller device */ |
1307 | dev->parent = &mhi_cntrl->mhi_dev->dev; |
1308 | |
1309 | mhi_dev->mhi_cntrl = mhi_cntrl; |
1310 | mhi_dev->dev_type = dev_type; |
1311 | |
1312 | return mhi_dev; |
1313 | } |
1314 | |
1315 | /* |
1316 | * MHI channels are always defined in pairs with UL as the even numbered |
1317 | * channel and DL as odd numbered one. This function gets UL channel (primary) |
1318 | * as the ch_id and always looks after the next entry in channel list for |
1319 | * the corresponding DL channel (secondary). |
1320 | */ |
1321 | static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id) |
1322 | { |
1323 | struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ch_id]; |
1324 | struct device *dev = mhi_cntrl->cntrl_dev; |
1325 | struct mhi_ep_device *mhi_dev; |
1326 | int ret; |
1327 | |
1328 | /* Check if the channel name is the same for both UL and DL */ |
1329 | if (strcmp(mhi_chan->name, mhi_chan[1].name)) { |
1330 | dev_err(dev, "UL and DL channel names are not same: (%s) != (%s)\n", |
1331 | mhi_chan->name, mhi_chan[1].name); |
1332 | return -EINVAL; |
1333 | } |
1334 | |
1335 | mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_XFER); |
1336 | if (IS_ERR(mhi_dev)) |
1337 | return PTR_ERR(mhi_dev); |
1338 | |
1339 | /* Configure primary channel */ |
1340 | mhi_dev->ul_chan = mhi_chan; |
1341 | get_device(&mhi_dev->dev); |
1342 | mhi_chan->mhi_dev = mhi_dev; |
1343 | |
1344 | /* Configure secondary channel as well */ |
1345 | mhi_chan++; |
1346 | mhi_dev->dl_chan = mhi_chan; |
1347 | get_device(&mhi_dev->dev); |
1348 | mhi_chan->mhi_dev = mhi_dev; |
1349 | |
1350 | /* Channel name is same for both UL and DL */ |
1351 | mhi_dev->name = mhi_chan->name; |
1352 | ret = dev_set_name(&mhi_dev->dev, "%s_%s", |
1353 | dev_name(&mhi_cntrl->mhi_dev->dev), |
1354 | mhi_dev->name); |
1355 | if (ret) { |
1356 | put_device(&mhi_dev->dev); |
1357 | return ret; |
1358 | } |
1359 | |
1360 | ret = device_add(&mhi_dev->dev); |
1361 | if (ret) |
1362 | put_device(&mhi_dev->dev); |
1363 | |
1364 | return ret; |
1365 | } |
1366 | |
1367 | static int mhi_ep_destroy_device(struct device *dev, void *data) |
1368 | { |
1369 | struct mhi_ep_device *mhi_dev; |
1370 | struct mhi_ep_cntrl *mhi_cntrl; |
1371 | struct mhi_ep_chan *ul_chan, *dl_chan; |
1372 | |
1373 | if (dev->bus != &mhi_ep_bus_type) |
1374 | return 0; |
1375 | |
1376 | mhi_dev = to_mhi_ep_device(dev); |
1377 | mhi_cntrl = mhi_dev->mhi_cntrl; |
1378 | |
1379 | /* Only destroy devices created for channels */ |
1380 | if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) |
1381 | return 0; |
1382 | |
1383 | ul_chan = mhi_dev->ul_chan; |
1384 | dl_chan = mhi_dev->dl_chan; |
1385 | |
1386 | if (ul_chan) |
1387 | put_device(&ul_chan->mhi_dev->dev); |
1388 | |
1389 | if (dl_chan) |
1390 | put_device(&dl_chan->mhi_dev->dev); |
1391 | |
1392 | dev_dbg(&mhi_cntrl->mhi_dev->dev, "Destroying device for chan:%s\n", |
1393 | mhi_dev->name); |
1394 | |
1395 | /* Notify the client and remove the device from MHI bus */ |
1396 | device_del(dev); |
1397 | put_device(dev); |
1398 | |
1399 | return 0; |
1400 | } |
1401 | |
1402 | static int mhi_ep_chan_init(struct mhi_ep_cntrl *mhi_cntrl, |
1403 | const struct mhi_ep_cntrl_config *config) |
1404 | { |
1405 | const struct mhi_ep_channel_config *ch_cfg; |
1406 | struct device *dev = mhi_cntrl->cntrl_dev; |
1407 | u32 chan, i; |
1408 | int ret = -EINVAL; |
1409 | |
1410 | mhi_cntrl->max_chan = config->max_channels; |
1411 | |
1412 | /* |
1413 | * Allocate max_channels supported by the MHI endpoint and populate |
1414 | * only the defined channels |
1415 | */ |
1416 | mhi_cntrl->mhi_chan = kcalloc(mhi_cntrl->max_chan, sizeof(*mhi_cntrl->mhi_chan), |
1417 | GFP_KERNEL); |
1418 | if (!mhi_cntrl->mhi_chan) |
1419 | return -ENOMEM; |
1420 | |
1421 | for (i = 0; i < config->num_channels; i++) { |
1422 | struct mhi_ep_chan *mhi_chan; |
1423 | |
1424 | ch_cfg = &config->ch_cfg[i]; |
1425 | |
1426 | chan = ch_cfg->num; |
1427 | if (chan >= mhi_cntrl->max_chan) { |
1428 | dev_err(dev, "Channel (%u) exceeds maximum available channels (%u)\n", |
1429 | chan, mhi_cntrl->max_chan); |
1430 | goto error_chan_cfg; |
1431 | } |
1432 | |
1433 | /* Bi-directional and directionless channels are not supported */ |
1434 | if (ch_cfg->dir == DMA_BIDIRECTIONAL || ch_cfg->dir == DMA_NONE) { |
1435 | dev_err(dev, "Invalid direction (%u) for channel (%u)\n", |
1436 | ch_cfg->dir, chan); |
1437 | goto error_chan_cfg; |
1438 | } |
1439 | |
1440 | mhi_chan = &mhi_cntrl->mhi_chan[chan]; |
1441 | mhi_chan->name = ch_cfg->name; |
1442 | mhi_chan->chan = chan; |
1443 | mhi_chan->dir = ch_cfg->dir; |
1444 | mutex_init(&mhi_chan->lock); |
1445 | } |
1446 | |
1447 | return 0; |
1448 | |
1449 | error_chan_cfg: |
1450 | kfree(mhi_cntrl->mhi_chan); |
1451 | |
1452 | return ret; |
1453 | } |
1454 | |
1455 | /* |
1456 | * Allocate channel and command rings here. Event rings will be allocated |
1457 | * in mhi_ep_power_up() as the config comes from the host. |
1458 | */ |
1459 | int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, |
1460 | const struct mhi_ep_cntrl_config *config) |
1461 | { |
1462 | struct mhi_ep_device *mhi_dev; |
1463 | int ret; |
1464 | |
1465 | if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio || !mhi_cntrl->irq) |
1466 | return -EINVAL; |
1467 | |
1468 | if (!mhi_cntrl->read_sync || !mhi_cntrl->write_sync || |
1469 | !mhi_cntrl->read_async || !mhi_cntrl->write_async) |
1470 | return -EINVAL; |
1471 | |
1472 | ret = mhi_ep_chan_init(mhi_cntrl, config); |
1473 | if (ret) |
1474 | return ret; |
1475 | |
1476 | mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL); |
1477 | if (!mhi_cntrl->mhi_cmd) { |
1478 | ret = -ENOMEM; |
1479 | goto err_free_ch; |
1480 | } |
1481 | |
1482 | mhi_cntrl->ev_ring_el_cache = kmem_cache_create("mhi_ep_event_ring_el", |
1483 | sizeof(struct mhi_ring_element), 0, |
1484 | SLAB_CACHE_DMA, NULL); |
1485 | if (!mhi_cntrl->ev_ring_el_cache) { |
1486 | ret = -ENOMEM; |
1487 | goto err_free_cmd; |
1488 | } |
1489 | |
1490 | mhi_cntrl->tre_buf_cache = kmem_cache_create("mhi_ep_tre_buf", MHI_EP_DEFAULT_MTU, 0, |
1491 | SLAB_CACHE_DMA, NULL); |
1492 | if (!mhi_cntrl->tre_buf_cache) { |
1493 | ret = -ENOMEM; |
1494 | goto err_destroy_ev_ring_el_cache; |
1495 | } |
1496 | |
1497 | mhi_cntrl->ring_item_cache = kmem_cache_create("mhi_ep_ring_item", |
1498 | sizeof(struct mhi_ep_ring_item), 0, |
1499 | 0, NULL); |
1500 | if (!mhi_cntrl->ring_item_cache) { |
1501 | ret = -ENOMEM; |
1502 | goto err_destroy_tre_buf_cache; |
1503 | } |
1504 | |
1505 | INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker); |
1506 | INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker); |
1507 | INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker); |
1508 | INIT_WORK(&mhi_cntrl->ch_ring_work, mhi_ep_ch_ring_worker); |
1509 | |
1510 | mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0); |
1511 | if (!mhi_cntrl->wq) { |
1512 | ret = -ENOMEM; |
1513 | goto err_destroy_ring_item_cache; |
1514 | } |
1515 | |
1516 | INIT_LIST_HEAD(&mhi_cntrl->st_transition_list); |
1517 | INIT_LIST_HEAD(&mhi_cntrl->ch_db_list); |
1518 | spin_lock_init(&mhi_cntrl->list_lock); |
1519 | mutex_init(&mhi_cntrl->state_lock); |
1520 | mutex_init(&mhi_cntrl->event_lock); |
1521 | |
1522 | /* Set MHI version and AMSS EE before enumeration */ |
1523 | mhi_ep_mmio_write(mhi_cntrl, EP_MHIVER, config->mhi_version); |
1524 | mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS); |
1525 | |
1526 | /* Set controller index */ |
1527 | ret = ida_alloc(&mhi_ep_cntrl_ida, GFP_KERNEL); |
1528 | if (ret < 0) |
1529 | goto err_destroy_wq; |
1530 | |
1531 | mhi_cntrl->index = ret; |
1532 | |
1533 | irq_set_status_flags(mhi_cntrl->irq, IRQ_NOAUTOEN); |
1534 | ret = request_irq(mhi_cntrl->irq, mhi_ep_irq, IRQF_TRIGGER_HIGH, |
1535 | "doorbell_irq", mhi_cntrl); |
1536 | if (ret) { |
1537 | dev_err(mhi_cntrl->cntrl_dev, "Failed to request Doorbell IRQ\n"); |
1538 | goto err_ida_free; |
1539 | } |
1540 | |
1541 | /* Allocate the controller device */ |
1542 | mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_CONTROLLER); |
1543 | if (IS_ERR(mhi_dev)) { |
1544 | dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate controller device\n"); |
1545 | ret = PTR_ERR(mhi_dev); |
1546 | goto err_free_irq; |
1547 | } |
1548 | |
1549 | ret = dev_set_name(&mhi_dev->dev, "mhi_ep%u", mhi_cntrl->index); |
1550 | if (ret) |
1551 | goto err_put_dev; |
1552 | |
1553 | mhi_dev->name = dev_name(&mhi_dev->dev); |
1554 | mhi_cntrl->mhi_dev = mhi_dev; |
1555 | |
1556 | ret = device_add(&mhi_dev->dev); |
1557 | if (ret) |
1558 | goto err_put_dev; |
1559 | |
1560 | dev_dbg(&mhi_dev->dev, "MHI EP Controller registered\n"); |
1561 | |
1562 | return 0; |
1563 | |
1564 | err_put_dev: |
1565 | put_device(&mhi_dev->dev); |
1566 | err_free_irq: |
1567 | free_irq(mhi_cntrl->irq, mhi_cntrl); |
1568 | err_ida_free: |
1569 | ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index); |
1570 | err_destroy_wq: |
1571 | destroy_workqueue(mhi_cntrl->wq); |
1572 | err_destroy_ring_item_cache: |
1573 | kmem_cache_destroy(mhi_cntrl->ring_item_cache); |
1574 | err_destroy_ev_ring_el_cache: |
1575 | kmem_cache_destroy(mhi_cntrl->ev_ring_el_cache); |
1576 | err_destroy_tre_buf_cache: |
1577 | kmem_cache_destroy(mhi_cntrl->tre_buf_cache); |
1578 | err_free_cmd: |
1579 | kfree(mhi_cntrl->mhi_cmd); |
1580 | err_free_ch: |
1581 | kfree(mhi_cntrl->mhi_chan); |
1582 | |
1583 | return ret; |
1584 | } |
1585 | EXPORT_SYMBOL_GPL(mhi_ep_register_controller); |
1586 | |
1587 | /* |
1588 | * It is expected that the controller drivers will power down the MHI EP stack |
1589 | * using "mhi_ep_power_down()" before calling this function to unregister themselves. |
1590 | */ |
1591 | void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl) |
1592 | { |
1593 | struct mhi_ep_device *mhi_dev = mhi_cntrl->mhi_dev; |
1594 | |
1595 | destroy_workqueue(mhi_cntrl->wq); |
1596 | |
1597 | free_irq(mhi_cntrl->irq, mhi_cntrl); |
1598 | |
1599 | kmem_cache_destroy(mhi_cntrl->tre_buf_cache); |
1600 | kmem_cache_destroy(mhi_cntrl->ev_ring_el_cache); |
1601 | kmem_cache_destroy(mhi_cntrl->ring_item_cache); |
1602 | kfree(mhi_cntrl->mhi_cmd); |
1603 | kfree(mhi_cntrl->mhi_chan); |
1604 | |
1605 | device_del(&mhi_dev->dev); |
1606 | put_device(&mhi_dev->dev); |
1607 | |
1608 | ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index); |
1609 | } |
1610 | EXPORT_SYMBOL_GPL(mhi_ep_unregister_controller); |
1611 | |
1612 | static int mhi_ep_driver_probe(struct device *dev) |
1613 | { |
1614 | struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); |
1615 | struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver); |
1616 | struct mhi_ep_chan *ul_chan = mhi_dev->ul_chan; |
1617 | struct mhi_ep_chan *dl_chan = mhi_dev->dl_chan; |
1618 | |
1619 | ul_chan->xfer_cb = mhi_drv->ul_xfer_cb; |
1620 | dl_chan->xfer_cb = mhi_drv->dl_xfer_cb; |
1621 | |
1622 | return mhi_drv->probe(mhi_dev, mhi_dev->id); |
1623 | } |
1624 | |
1625 | static int mhi_ep_driver_remove(struct device *dev) |
1626 | { |
1627 | struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); |
1628 | struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver); |
1629 | struct mhi_result result = {}; |
1630 | struct mhi_ep_chan *mhi_chan; |
1631 | int dir; |
1632 | |
1633 | /* Skip if it is a controller device */ |
1634 | if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) |
1635 | return 0; |
1636 | |
1637 | /* Disconnect the channels associated with the driver */ |
1638 | for (dir = 0; dir < 2; dir++) { |
1639 | mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; |
1640 | |
1641 | if (!mhi_chan) |
1642 | continue; |
1643 | |
1644 | mutex_lock(&mhi_chan->lock); |
1645 | /* Send channel disconnect status to the client driver */ |
1646 | if (mhi_chan->xfer_cb) { |
1647 | result.transaction_status = -ENOTCONN; |
1648 | result.bytes_xferd = 0; |
1649 | mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); |
1650 | } |
1651 | |
1652 | mhi_chan->state = MHI_CH_STATE_DISABLED; |
1653 | mhi_chan->xfer_cb = NULL; |
1654 | mutex_unlock(lock: &mhi_chan->lock); |
1655 | } |
1656 | |
1657 | /* Remove the client driver now */ |
1658 | mhi_drv->remove(mhi_dev); |
1659 | |
1660 | return 0; |
1661 | } |
1662 | |
1663 | int __mhi_ep_driver_register(struct mhi_ep_driver *mhi_drv, struct module *owner) |
1664 | { |
1665 | struct device_driver *driver = &mhi_drv->driver; |
1666 | |
1667 | if (!mhi_drv->probe || !mhi_drv->remove) |
1668 | return -EINVAL; |
1669 | |
1670 | /* Client drivers should have callbacks defined for both channels */ |
1671 | if (!mhi_drv->ul_xfer_cb || !mhi_drv->dl_xfer_cb) |
1672 | return -EINVAL; |
1673 | |
1674 | driver->bus = &mhi_ep_bus_type; |
1675 | driver->owner = owner; |
1676 | driver->probe = mhi_ep_driver_probe; |
1677 | driver->remove = mhi_ep_driver_remove; |
1678 | |
1679 | return driver_register(driver); |
1680 | } |
1681 | EXPORT_SYMBOL_GPL(__mhi_ep_driver_register); |
1682 | |
1683 | void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv) |
1684 | { |
1685 | driver_unregister(&mhi_drv->driver); |
1686 | } |
1687 | EXPORT_SYMBOL_GPL(mhi_ep_driver_unregister); |
1688 | |
1689 | static int mhi_ep_uevent(const struct device *dev, struct kobj_uevent_env *env) |
1690 | { |
1691 | const struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); |
1692 | |
1693 | return add_uevent_var(env, "MODALIAS=" MHI_EP_DEVICE_MODALIAS_FMT, |
1694 | mhi_dev->name); |
1695 | } |
1696 | |
1697 | static int mhi_ep_match(struct device *dev, struct device_driver *drv) |
1698 | { |
1699 | struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); |
1700 | struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(drv); |
1701 | const struct mhi_device_id *id; |
1702 | |
1703 | /* |
1704 | * If the device is a controller type then there is no client driver |
1705 | * associated with it |
1706 | */ |
1707 | if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) |
1708 | return 0; |
1709 | |
1710 | for (id = mhi_drv->id_table; id->chan[0]; id++) |
1711 | if (!strcmp(mhi_dev->name, id->chan)) { |
1712 | mhi_dev->id = id; |
1713 | return 1; |
1714 | } |
1715 | |
1716 | return 0; |
1717 | }; |
1718 | |
1719 | struct bus_type mhi_ep_bus_type = { |
1720 | .name = "mhi_ep", |
1721 | .dev_name = "mhi_ep", |
1722 | .match = mhi_ep_match, |
1723 | .uevent = mhi_ep_uevent, |
1724 | }; |
1725 | |
1726 | static int __init mhi_ep_init(void) |
1727 | { |
1728 | return bus_register(&mhi_ep_bus_type); |
1729 | } |
1730 | |
1731 | static void __exit mhi_ep_exit(void) |
1732 | { |
1733 | bus_unregister(&mhi_ep_bus_type); |
1734 | } |
1735 | |
1736 | postcore_initcall(mhi_ep_init); |
1737 | module_exit(mhi_ep_exit); |
1738 | |
1739 | MODULE_LICENSE("GPL v2"); |
1740 | MODULE_DESCRIPTION("MHI Bus Endpoint stack"); |
1741 | MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>"); |
1742 | |