// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Moises Veleta <moises.veleta@intel.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *
 * Contributors:
 *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
 *  Eliot Lee <eliot.lee@intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 */

#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/irqreturn.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "t7xx_dpmaif.h"
#include "t7xx_hif_dpmaif.h"
#include "t7xx_hif_dpmaif_rx.h"
#include "t7xx_hif_dpmaif_tx.h"
#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
#include "t7xx_state_monitor.h"

unsigned int t7xx_ring_buf_get_next_wr_idx(unsigned int buf_len, unsigned int buf_idx)
{
	buf_idx++;

	return buf_idx < buf_len ? buf_idx : 0;
}

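/*
 * Number of entries between the read and write pointers of a ring buffer.
 * For DPMAIF_READ this is the count of entries available to consume;
 * otherwise it is the count of free slots, keeping one slot unused so a
 * full ring can be distinguished from an empty one.
 */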
unsigned int t7xx_ring_buf_rd_wr_count(unsigned int total_cnt, unsigned int rd_idx,
				       unsigned int wr_idx, enum dpmaif_rdwr rd_wr)
{
	int pkt_cnt;

	if (rd_wr == DPMAIF_READ)
		pkt_cnt = wr_idx - rd_idx;
	else
		pkt_cnt = rd_idx - wr_idx - 1;

	if (pkt_cnt < 0)
		pkt_cnt += total_cnt;

	return (unsigned int)pkt_cnt;
}

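/*
 * Enable/disable the DPMAIF interrupt sources by setting or clearing the
 * PCIe MAC interrupt associated with each ISR parameter slot.
 */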
static void t7xx_dpmaif_enable_irq(struct dpmaif_ctrl *dpmaif_ctrl)
{
	struct dpmaif_isr_para *isr_para;
	int i;

	for (i = 0; i < ARRAY_SIZE(dpmaif_ctrl->isr_para); i++) {
		isr_para = &dpmaif_ctrl->isr_para[i];
		t7xx_pcie_mac_set_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
	}
}

static void t7xx_dpmaif_disable_irq(struct dpmaif_ctrl *dpmaif_ctrl)
{
	struct dpmaif_isr_para *isr_para;
	int i;

	for (i = 0; i < ARRAY_SIZE(dpmaif_ctrl->isr_para); i++) {
		isr_para = &dpmaif_ctrl->isr_para[i];
		t7xx_pcie_mac_clear_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
	}
}

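/*
 * Threaded interrupt work: read the pending interrupt types for this DL
 * queue from HW, acknowledge the PCIe interrupt status, then dispatch
 * TX-done, RX-done and error events to the corresponding handlers.
 */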
static void t7xx_dpmaif_irq_cb(struct dpmaif_isr_para *isr_para)
{
	struct dpmaif_ctrl *dpmaif_ctrl = isr_para->dpmaif_ctrl;
	struct dpmaif_hw_intr_st_para intr_status;
	struct device *dev = dpmaif_ctrl->dev;
	struct dpmaif_hw_info *hw_info;
	int i;

	memset(&intr_status, 0, sizeof(intr_status));
	hw_info = &dpmaif_ctrl->hw_info;

	if (t7xx_dpmaif_hw_get_intr_cnt(hw_info, &intr_status, isr_para->dlq_id) < 0) {
		dev_err(dev, "Failed to get HW interrupt count\n");
		return;
	}

	t7xx_pcie_mac_clear_int_status(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);

	for (i = 0; i < intr_status.intr_cnt; i++) {
		switch (intr_status.intr_types[i]) {
		case DPF_INTR_UL_DONE:
			t7xx_dpmaif_irq_tx_done(dpmaif_ctrl, intr_status.intr_queues[i]);
			break;

		case DPF_INTR_UL_DRB_EMPTY:
		case DPF_INTR_UL_MD_NOTREADY:
		case DPF_INTR_UL_MD_PWR_NOTREADY:
			/* No need to log an error for these */
			break;

		case DPF_INTR_DL_BATCNT_LEN_ERR:
			dev_err_ratelimited(dev, "DL interrupt: packet BAT count length error\n");
			t7xx_dpmaif_dl_unmask_batcnt_len_err_intr(hw_info);
			break;

		case DPF_INTR_DL_PITCNT_LEN_ERR:
			dev_err_ratelimited(dev, "DL interrupt: PIT count length error\n");
			t7xx_dpmaif_dl_unmask_pitcnt_len_err_intr(hw_info);
			break;

		case DPF_INTR_DL_Q0_PITCNT_LEN_ERR:
			dev_err_ratelimited(dev, "DL interrupt: DLQ0 PIT count length error\n");
			t7xx_dpmaif_dlq_unmask_pitcnt_len_err_intr(hw_info, DPF_RX_QNO_DFT);
			break;

		case DPF_INTR_DL_Q1_PITCNT_LEN_ERR:
			dev_err_ratelimited(dev, "DL interrupt: DLQ1 PIT count length error\n");
			t7xx_dpmaif_dlq_unmask_pitcnt_len_err_intr(hw_info, DPF_RX_QNO1);
			break;

		case DPF_INTR_DL_DONE:
		case DPF_INTR_DL_Q0_DONE:
		case DPF_INTR_DL_Q1_DONE:
			t7xx_dpmaif_irq_rx_done(dpmaif_ctrl, intr_status.intr_queues[i]);
			break;

		default:
			dev_err_ratelimited(dev, "DL interrupt error: unknown type: %d\n",
					    intr_status.intr_types[i]);
		}
	}
}

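/*
 * Hard IRQ handler (top half): mask the triggering PCIe interrupt and defer
 * the actual event processing to the threaded handler below. Interrupts
 * received before DPMAIF is powered on are logged and ignored.
 */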
static irqreturn_t t7xx_dpmaif_isr_handler(int irq, void *data)
{
	struct dpmaif_isr_para *isr_para = data;
	struct dpmaif_ctrl *dpmaif_ctrl;

	dpmaif_ctrl = isr_para->dpmaif_ctrl;
	if (dpmaif_ctrl->state != DPMAIF_STATE_PWRON) {
		dev_err(dpmaif_ctrl->dev, "Interrupt received before initializing DPMAIF\n");
		return IRQ_HANDLED;
	}

	t7xx_pcie_mac_clear_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);

	return IRQ_WAKE_THREAD;
}

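/* Threaded handler (bottom half): process the queue events, then re-enable the interrupt. */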
static irqreturn_t t7xx_dpmaif_isr_thread(int irq, void *data)
{
	struct dpmaif_isr_para *isr_para = data;
	struct dpmaif_ctrl *dpmaif_ctrl = isr_para->dpmaif_ctrl;

	t7xx_dpmaif_irq_cb(isr_para);
	t7xx_pcie_mac_set_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
	return IRQ_HANDLED;
}

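/*
 * Map each RX queue to its PCIe interrupt vector (DPMAIF_INT for queue 0,
 * DPMAIF2_INT for queue 1) and bind the per-queue ISR parameter block to
 * this controller instance.
 */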
static void t7xx_dpmaif_isr_parameter_init(struct dpmaif_ctrl *dpmaif_ctrl)
{
	struct dpmaif_isr_para *isr_para;
	unsigned char i;

	dpmaif_ctrl->rxq_int_mapping[DPF_RX_QNO0] = DPMAIF_INT;
	dpmaif_ctrl->rxq_int_mapping[DPF_RX_QNO1] = DPMAIF2_INT;

	for (i = 0; i < DPMAIF_RXQ_NUM; i++) {
		isr_para = &dpmaif_ctrl->isr_para[i];
		isr_para->dpmaif_ctrl = dpmaif_ctrl;
		isr_para->dlq_id = i;
		isr_para->pcie_int = dpmaif_ctrl->rxq_int_mapping[i];
	}
}

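/*
 * Hook the top/bottom-half handlers and their parameter blocks into the
 * PCIe MAC interrupt table for every RX queue, clearing any stale interrupt
 * status before the interrupt is enabled.
 */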
static void t7xx_dpmaif_register_pcie_irq(struct dpmaif_ctrl *dpmaif_ctrl)
{
	struct t7xx_pci_dev *t7xx_dev = dpmaif_ctrl->t7xx_dev;
	struct dpmaif_isr_para *isr_para;
	enum t7xx_int int_type;
	int i;

	t7xx_dpmaif_isr_parameter_init(dpmaif_ctrl);

	for (i = 0; i < DPMAIF_RXQ_NUM; i++) {
		isr_para = &dpmaif_ctrl->isr_para[i];
		int_type = isr_para->pcie_int;
		t7xx_pcie_mac_clear_int(t7xx_dev, int_type);

		t7xx_dev->intr_handler[int_type] = t7xx_dpmaif_isr_handler;
		t7xx_dev->intr_thread[int_type] = t7xx_dpmaif_isr_thread;
		t7xx_dev->callback_param[int_type] = isr_para;

		t7xx_pcie_mac_clear_int_status(t7xx_dev, int_type);
		t7xx_pcie_mac_set_int(t7xx_dev, int_type);
	}
}

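/*
 * Allocate all software resources used by the data path: normal and fragment
 * BAT tables, RX and TX queue rings, the TX dispatch thread and the BAT
 * release workqueue. On failure, everything already allocated is unwound in
 * reverse order.
 */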
static int t7xx_dpmaif_rxtx_sw_allocs(struct dpmaif_ctrl *dpmaif_ctrl)
{
	struct dpmaif_rx_queue *rx_q;
	struct dpmaif_tx_queue *tx_q;
	int ret, rx_idx, tx_idx, i;

	ret = t7xx_dpmaif_bat_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_req, BAT_TYPE_NORMAL);
	if (ret) {
		dev_err(dpmaif_ctrl->dev, "Failed to allocate normal BAT table: %d\n", ret);
		return ret;
	}

	ret = t7xx_dpmaif_bat_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_frag, BAT_TYPE_FRAG);
	if (ret) {
		dev_err(dpmaif_ctrl->dev, "Failed to allocate frag BAT table: %d\n", ret);
		goto err_free_normal_bat;
	}

	for (rx_idx = 0; rx_idx < DPMAIF_RXQ_NUM; rx_idx++) {
		rx_q = &dpmaif_ctrl->rxq[rx_idx];
		rx_q->index = rx_idx;
		rx_q->dpmaif_ctrl = dpmaif_ctrl;
		ret = t7xx_dpmaif_rxq_init(rx_q);
		if (ret)
			goto err_free_rxq;
	}

	for (tx_idx = 0; tx_idx < DPMAIF_TXQ_NUM; tx_idx++) {
		tx_q = &dpmaif_ctrl->txq[tx_idx];
		tx_q->index = tx_idx;
		tx_q->dpmaif_ctrl = dpmaif_ctrl;
		ret = t7xx_dpmaif_txq_init(tx_q);
		if (ret)
			goto err_free_txq;
	}

	ret = t7xx_dpmaif_tx_thread_init(dpmaif_ctrl);
	if (ret) {
		dev_err(dpmaif_ctrl->dev, "Failed to start TX thread\n");
		goto err_free_txq;
	}

	ret = t7xx_dpmaif_bat_rel_wq_alloc(dpmaif_ctrl);
	if (ret)
		goto err_thread_rel;

	return 0;

err_thread_rel:
	t7xx_dpmaif_tx_thread_rel(dpmaif_ctrl);

err_free_txq:
	for (i = 0; i < tx_idx; i++) {
		tx_q = &dpmaif_ctrl->txq[i];
		t7xx_dpmaif_txq_free(tx_q);
	}

err_free_rxq:
	for (i = 0; i < rx_idx; i++) {
		rx_q = &dpmaif_ctrl->rxq[i];
		t7xx_dpmaif_rxq_free(rx_q);
	}

	t7xx_dpmaif_bat_free(dpmaif_ctrl, &dpmaif_ctrl->bat_frag);

err_free_normal_bat:
	t7xx_dpmaif_bat_free(dpmaif_ctrl, &dpmaif_ctrl->bat_req);

	return ret;
}

static void t7xx_dpmaif_sw_release(struct dpmaif_ctrl *dpmaif_ctrl)
{
	struct dpmaif_rx_queue *rx_q;
	struct dpmaif_tx_queue *tx_q;
	int i;

	t7xx_dpmaif_tx_thread_rel(dpmaif_ctrl);
	t7xx_dpmaif_bat_wq_rel(dpmaif_ctrl);

	for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
		tx_q = &dpmaif_ctrl->txq[i];
		t7xx_dpmaif_txq_free(tx_q);
	}

	for (i = 0; i < DPMAIF_RXQ_NUM; i++) {
		rx_q = &dpmaif_ctrl->rxq[i];
		t7xx_dpmaif_rxq_free(rx_q);
	}
}

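/*
 * Bring up the data path: program the BAT/PIT/DRB ring addresses and sizes
 * into the HW parameters, pre-fill the RX normal and fragment buffers,
 * initialize the HW, clear stale interrupts and finally enable IRQs and wake
 * the TX thread. Returns -EFAULT if the interface is already powered on.
 */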
static int t7xx_dpmaif_start(struct dpmaif_ctrl *dpmaif_ctrl)
{
	struct dpmaif_hw_info *hw_info = &dpmaif_ctrl->hw_info;
	struct dpmaif_hw_params hw_init_para;
	struct dpmaif_rx_queue *rxq;
	struct dpmaif_tx_queue *txq;
	unsigned int buf_cnt;
	int i, ret = 0;

	if (dpmaif_ctrl->state == DPMAIF_STATE_PWRON)
		return -EFAULT;

	memset(&hw_init_para, 0, sizeof(hw_init_para));

	for (i = 0; i < DPMAIF_RXQ_NUM; i++) {
		rxq = &dpmaif_ctrl->rxq[i];
		rxq->que_started = true;
		rxq->index = i;
		rxq->budget = rxq->bat_req->bat_size_cnt - 1;

		hw_init_para.pkt_bat_base_addr[i] = rxq->bat_req->bat_bus_addr;
		hw_init_para.pkt_bat_size_cnt[i] = rxq->bat_req->bat_size_cnt;
		hw_init_para.pit_base_addr[i] = rxq->pit_bus_addr;
		hw_init_para.pit_size_cnt[i] = rxq->pit_size_cnt;
		hw_init_para.frg_bat_base_addr[i] = rxq->bat_frag->bat_bus_addr;
		hw_init_para.frg_bat_size_cnt[i] = rxq->bat_frag->bat_size_cnt;
	}

	bitmap_zero(dpmaif_ctrl->bat_req.bat_bitmap, dpmaif_ctrl->bat_req.bat_size_cnt);
	buf_cnt = dpmaif_ctrl->bat_req.bat_size_cnt - 1;
	ret = t7xx_dpmaif_rx_buf_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_req, 0, buf_cnt, true);
	if (ret) {
		dev_err(dpmaif_ctrl->dev, "Failed to allocate RX buffer: %d\n", ret);
		return ret;
	}

	buf_cnt = dpmaif_ctrl->bat_frag.bat_size_cnt - 1;
	ret = t7xx_dpmaif_rx_frag_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_frag, buf_cnt, true);
	if (ret) {
		dev_err(dpmaif_ctrl->dev, "Failed to allocate frag RX buffer: %d\n", ret);
		goto err_free_normal_bat;
	}

	for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
		txq = &dpmaif_ctrl->txq[i];
		txq->que_started = true;

		hw_init_para.drb_base_addr[i] = txq->drb_bus_addr;
		hw_init_para.drb_size_cnt[i] = txq->drb_size_cnt;
	}

	ret = t7xx_dpmaif_hw_init(hw_info, &hw_init_para);
	if (ret) {
		dev_err(dpmaif_ctrl->dev, "Failed to initialize DPMAIF HW: %d\n", ret);
		goto err_free_frag_bat;
	}

	ret = t7xx_dpmaif_dl_snd_hw_bat_cnt(hw_info, rxq->bat_req->bat_size_cnt - 1);
	if (ret)
		goto err_free_frag_bat;

	ret = t7xx_dpmaif_dl_snd_hw_frg_cnt(hw_info, rxq->bat_frag->bat_size_cnt - 1);
	if (ret)
		goto err_free_frag_bat;

	t7xx_dpmaif_ul_clr_all_intr(hw_info);
	t7xx_dpmaif_dl_clr_all_intr(hw_info);
	dpmaif_ctrl->state = DPMAIF_STATE_PWRON;
	t7xx_dpmaif_enable_irq(dpmaif_ctrl);
	wake_up(&dpmaif_ctrl->tx_wq);
	return 0;

err_free_frag_bat:
	t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_frag);

err_free_normal_bat:
	t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_req);

	return ret;
}

static void t7xx_dpmaif_stop_sw(struct dpmaif_ctrl *dpmaif_ctrl)
{
	t7xx_dpmaif_tx_stop(dpmaif_ctrl);
	t7xx_dpmaif_rx_stop(dpmaif_ctrl);
}

static void t7xx_dpmaif_stop_hw(struct dpmaif_ctrl *dpmaif_ctrl)
{
	t7xx_dpmaif_hw_stop_all_txq(&dpmaif_ctrl->hw_info);
	t7xx_dpmaif_hw_stop_all_rxq(&dpmaif_ctrl->hw_info);
}

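/*
 * Power off the data path: disable IRQs, stop the SW TX/RX paths and drop
 * any queued TX/RX buffers. The HW queues are stopped separately via
 * t7xx_dpmaif_stop_hw() on MD_STATE_WAITING_TO_STOP.
 */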
static int t7xx_dpmaif_stop(struct dpmaif_ctrl *dpmaif_ctrl)
{
	if (!dpmaif_ctrl->dpmaif_sw_init_done) {
		dev_err(dpmaif_ctrl->dev, "dpmaif SW init fail\n");
		return -EFAULT;
	}

	if (dpmaif_ctrl->state == DPMAIF_STATE_PWROFF)
		return -EFAULT;

	t7xx_dpmaif_disable_irq(dpmaif_ctrl);
	dpmaif_ctrl->state = DPMAIF_STATE_PWROFF;
	t7xx_dpmaif_stop_sw(dpmaif_ctrl);
	t7xx_dpmaif_tx_clear(dpmaif_ctrl);
	t7xx_dpmaif_rx_clear(dpmaif_ctrl);
	return 0;
}

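/* PM suspend callback: quiesce TX first, stop the HW queues, mask IRQs and then stop RX. */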
static int t7xx_dpmaif_suspend(struct t7xx_pci_dev *t7xx_dev, void *param)
{
	struct dpmaif_ctrl *dpmaif_ctrl = param;

	t7xx_dpmaif_tx_stop(dpmaif_ctrl);
	t7xx_dpmaif_hw_stop_all_txq(&dpmaif_ctrl->hw_info);
	t7xx_dpmaif_hw_stop_all_rxq(&dpmaif_ctrl->hw_info);
	t7xx_dpmaif_disable_irq(dpmaif_ctrl);
	t7xx_dpmaif_rx_stop(dpmaif_ctrl);
	return 0;
}

static void t7xx_dpmaif_unmask_dlq_intr(struct dpmaif_ctrl *dpmaif_ctrl)
{
	int qno;

	for (qno = 0; qno < DPMAIF_RXQ_NUM; qno++)
		t7xx_dpmaif_dlq_unmask_rx_done(&dpmaif_ctrl->hw_info, qno);
}

static void t7xx_dpmaif_start_txrx_qs(struct dpmaif_ctrl *dpmaif_ctrl)
{
	struct dpmaif_rx_queue *rxq;
	struct dpmaif_tx_queue *txq;
	unsigned int que_cnt;

	for (que_cnt = 0; que_cnt < DPMAIF_TXQ_NUM; que_cnt++) {
		txq = &dpmaif_ctrl->txq[que_cnt];
		txq->que_started = true;
	}

	for (que_cnt = 0; que_cnt < DPMAIF_RXQ_NUM; que_cnt++) {
		rxq = &dpmaif_ctrl->rxq[que_cnt];
		rxq->que_started = true;
	}
}

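/*
 * PM resume callback: mark all queues as started again, re-enable and unmask
 * the DPMAIF interrupts, restart the HW and wake the TX thread.
 */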
static int t7xx_dpmaif_resume(struct t7xx_pci_dev *t7xx_dev, void *param)
{
	struct dpmaif_ctrl *dpmaif_ctrl = param;

	if (!dpmaif_ctrl)
		return 0;

	t7xx_dpmaif_start_txrx_qs(dpmaif_ctrl);
	t7xx_dpmaif_enable_irq(dpmaif_ctrl);
	t7xx_dpmaif_unmask_dlq_intr(dpmaif_ctrl);
	t7xx_dpmaif_start_hw(&dpmaif_ctrl->hw_info);
	wake_up(&dpmaif_ctrl->tx_wq);
	return 0;
}

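/* Register the DPMAIF suspend/resume hooks with the t7xx PCIe PM framework. */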
static int t7xx_dpmaif_pm_entity_init(struct dpmaif_ctrl *dpmaif_ctrl)
{
	struct md_pm_entity *dpmaif_pm_entity = &dpmaif_ctrl->dpmaif_pm_entity;
	int ret;

	INIT_LIST_HEAD(&dpmaif_pm_entity->entity);
	dpmaif_pm_entity->suspend = &t7xx_dpmaif_suspend;
	dpmaif_pm_entity->suspend_late = NULL;
	dpmaif_pm_entity->resume_early = NULL;
	dpmaif_pm_entity->resume = &t7xx_dpmaif_resume;
	dpmaif_pm_entity->id = PM_ENTITY_ID_DATA;
	dpmaif_pm_entity->entity_param = dpmaif_ctrl;

	ret = t7xx_pci_pm_entity_register(dpmaif_ctrl->t7xx_dev, dpmaif_pm_entity);
	if (ret)
		dev_err(dpmaif_ctrl->dev, "dpmaif register pm_entity fail\n");

	return ret;
}

static int t7xx_dpmaif_pm_entity_release(struct dpmaif_ctrl *dpmaif_ctrl)
{
	struct md_pm_entity *dpmaif_pm_entity = &dpmaif_ctrl->dpmaif_pm_entity;
	int ret;

	ret = t7xx_pci_pm_entity_unregister(dpmaif_ctrl->t7xx_dev, dpmaif_pm_entity);
	if (ret < 0)
		dev_err(dpmaif_ctrl->dev, "dpmaif pm_entity unregister fail\n");

	return ret;
}

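/*
 * Modem state machine callback: start the data path when the handshake
 * phase begins, stop it on exception or stop, and halt only the HW queues
 * while the modem is waiting to stop.
 */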
int t7xx_dpmaif_md_state_callback(struct dpmaif_ctrl *dpmaif_ctrl, enum md_state state)
{
	int ret = 0;

	switch (state) {
	case MD_STATE_WAITING_FOR_HS1:
		ret = t7xx_dpmaif_start(dpmaif_ctrl);
		break;

	case MD_STATE_EXCEPTION:
		ret = t7xx_dpmaif_stop(dpmaif_ctrl);
		break;

	case MD_STATE_STOPPED:
		ret = t7xx_dpmaif_stop(dpmaif_ctrl);
		break;

	case MD_STATE_WAITING_TO_STOP:
		t7xx_dpmaif_stop_hw(dpmaif_ctrl);
		break;

	default:
		break;
	}

	return ret;
}

/**
 * t7xx_dpmaif_hif_init() - Initialize data path.
 * @t7xx_dev: MTK context structure.
 * @callbacks: Callbacks implemented by the network layer to handle RX skb and
 *	       event notifications.
 *
 * Allocate and initialize datapath control block.
 * Register datapath ISR, TX and RX resources.
 *
 * Return:
 * * dpmaif_ctrl pointer - Pointer to DPMAIF context structure.
 * * NULL - In case of error.
 */
struct dpmaif_ctrl *t7xx_dpmaif_hif_init(struct t7xx_pci_dev *t7xx_dev,
					 struct dpmaif_callbacks *callbacks)
{
	struct device *dev = &t7xx_dev->pdev->dev;
	struct dpmaif_ctrl *dpmaif_ctrl;
	int ret;

	if (!callbacks)
		return NULL;

	dpmaif_ctrl = devm_kzalloc(dev, sizeof(*dpmaif_ctrl), GFP_KERNEL);
	if (!dpmaif_ctrl)
		return NULL;

	dpmaif_ctrl->t7xx_dev = t7xx_dev;
	dpmaif_ctrl->callbacks = callbacks;
	dpmaif_ctrl->dev = dev;
	dpmaif_ctrl->dpmaif_sw_init_done = false;
	dpmaif_ctrl->hw_info.dev = dev;
	dpmaif_ctrl->hw_info.pcie_base = t7xx_dev->base_addr.pcie_ext_reg_base -
					 t7xx_dev->base_addr.pcie_dev_reg_trsl_addr;

	ret = t7xx_dpmaif_pm_entity_init(dpmaif_ctrl);
	if (ret)
		return NULL;

	t7xx_dpmaif_register_pcie_irq(dpmaif_ctrl);
	t7xx_dpmaif_disable_irq(dpmaif_ctrl);

	ret = t7xx_dpmaif_rxtx_sw_allocs(dpmaif_ctrl);
	if (ret) {
		t7xx_dpmaif_pm_entity_release(dpmaif_ctrl);
		dev_err(dev, "Failed to allocate RX/TX SW resources: %d\n", ret);
		return NULL;
	}

	dpmaif_ctrl->dpmaif_sw_init_done = true;
	return dpmaif_ctrl;
}

void t7xx_dpmaif_hif_exit(struct dpmaif_ctrl *dpmaif_ctrl)
{
	if (dpmaif_ctrl->dpmaif_sw_init_done) {
		t7xx_dpmaif_stop(dpmaif_ctrl);
		t7xx_dpmaif_pm_entity_release(dpmaif_ctrl);
		t7xx_dpmaif_sw_release(dpmaif_ctrl);
		dpmaif_ctrl->dpmaif_sw_init_done = false;
	}
}