/* SPDX-License-Identifier: GPL-2.0-only
 *
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Moises Veleta <moises.veleta@intel.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 *
 * Contributors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
 *  Eliot Lee <eliot.lee@intel.com>
 */

#ifndef __T7XX_HIF_CLDMA_H__
#define __T7XX_HIF_CLDMA_H__

#include <linux/bits.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "t7xx_cldma.h"
#include "t7xx_pci.h"

#define CLDMA_JUMBO_BUFF_SZ		(63 * 1024 + sizeof(struct ccci_header))
#define CLDMA_SHARED_Q_BUFF_SZ		3584
#define CLDMA_DEDICATED_Q_BUFF_SZ	2048

/**
 * enum cldma_id - Identifiers for CLDMA HW units.
 * @CLDMA_ID_MD: Modem control channel.
 * @CLDMA_ID_AP: Application Processor control channel.
 * @CLDMA_NUM: Number of CLDMA HW units available.
 */
enum cldma_id {
	CLDMA_ID_MD,
	CLDMA_ID_AP,
	CLDMA_NUM
};

struct cldma_gpd {
	u8 flags;			/* GPD_FLAGS_* ownership/interrupt bits */
	u8 not_used1;
	__le16 rx_data_allow_len;	/* Max number of bytes HW may write (RX) */
	__le32 next_gpd_ptr_h;		/* Upper 32 bits of next GPD DMA address */
	__le32 next_gpd_ptr_l;		/* Lower 32 bits of next GPD DMA address */
	__le32 data_buff_bd_ptr_h;	/* Upper 32 bits of data buffer DMA address */
	__le32 data_buff_bd_ptr_l;	/* Lower 32 bits of data buffer DMA address */
	__le16 data_buff_len;		/* Length of the data actually transferred */
	__le16 not_used2;
};
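
/*
 * Illustrative sketch only (not lifted from the driver source): chaining one
 * GPD to the next and handing it to the HW could look as follows, assuming
 * @next_dma holds the DMA address of the next GPD in the ring; setting
 * GPD_FLAGS_HWO (defined at the end of this header) transfers ownership of
 * the descriptor to the CLDMA HW:
 *
 *	gpd->next_gpd_ptr_h = cpu_to_le32(upper_32_bits(next_dma));
 *	gpd->next_gpd_ptr_l = cpu_to_le32(lower_32_bits(next_dma));
 *	gpd->data_buff_len = cpu_to_le16(skb->len);
 *	gpd->flags |= GPD_FLAGS_HWO;
 */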

enum cldma_cfg {
	CLDMA_SHARED_Q_CFG,	/* Queues use CLDMA_SHARED_Q_BUFF_SZ buffers */
	CLDMA_DEDICATED_Q_CFG,	/* Queues use CLDMA_DEDICATED_Q_BUFF_SZ buffers */
};

struct cldma_request {
	struct cldma_gpd *gpd;		/* Virtual address for CPU */
	dma_addr_t gpd_addr;		/* DMA address of the GPD for HW */
	struct sk_buff *skb;
	dma_addr_t mapped_buff;		/* DMA address of the mapped skb data */
	struct list_head entry;
};

struct cldma_ring {
	struct list_head gpd_ring;	/* Ring of struct cldma_request */
	unsigned int length;		/* Number of struct cldma_request */
	int pkt_size;
};

struct cldma_queue {
	struct cldma_ctrl *md_ctrl;
	enum mtk_txrx dir;		/* TX or RX */
	unsigned int index;		/* Queue number within its direction */
	struct cldma_ring *tr_ring;
	struct cldma_request *tr_done;
	struct cldma_request *rx_refill;
	struct cldma_request *tx_next;
	int budget;			/* Same as ring buffer size by default */
	spinlock_t ring_lock;
	wait_queue_head_t req_wq;	/* Only for TX */
	struct workqueue_struct *worker;
	struct work_struct cldma_work;
	int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb);
};
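
/*
 * Illustrative sketch: hooking an RX handler into a queue via
 * t7xx_cldma_set_recv_skb() (declared below). The callback name and body
 * here are hypothetical and only document the assumed contract: consume the
 * skb and return 0 on success.
 *
 *	static int my_recv_skb(struct cldma_queue *queue, struct sk_buff *skb)
 *	{
 *		dev_kfree_skb(skb);
 *		return 0;
 *	}
 *
 *	t7xx_cldma_set_recv_skb(queue, my_recv_skb);
 */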

struct cldma_ctrl {
	enum cldma_id hif_id;
	struct device *dev;
	struct t7xx_pci_dev *t7xx_dev;
	struct cldma_queue txq[CLDMA_TXQ_NUM];
	struct cldma_queue rxq[CLDMA_RXQ_NUM];
	unsigned short txq_active;
	unsigned short rxq_active;
	unsigned short txq_started;
	spinlock_t cldma_lock;		/* Protects CLDMA structure */
	/* Assumes T/R GPD/BD/SPD have the same size */
	struct dma_pool *gpd_dmapool;
	struct cldma_ring tx_ring[CLDMA_TXQ_NUM];
	struct cldma_ring rx_ring[CLDMA_RXQ_NUM];
	struct md_pm_entity *pm_entity;
	struct t7xx_cldma_hw hw_info;
	bool is_late_init;
};
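
/*
 * Illustrative sketch (an assumption about usage, not a quote from the
 * driver): txq_active/rxq_active are per-queue bitmasks expected to be
 * updated under cldma_lock, e.g. when marking TX queue @qno active:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
 *	md_ctrl->txq_active |= BIT(qno);
 *	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
 */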

#define CLDMA_Q_IDX_DUMP	1
#define GPD_FLAGS_HWO		BIT(0)	/* HW owns the GPD */
#define GPD_FLAGS_IOC		BIT(7)	/* Interrupt on completion */
#define GPD_DMAPOOL_ALIGN	16
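
/*
 * Illustrative sketch (assumed call site, not the driver's exact code): the
 * pool behind cldma_ctrl::gpd_dmapool could be created with the alignment
 * defined above; the pool name string here is hypothetical:
 *
 *	md_ctrl->gpd_dmapool = dma_pool_create("cldma_request_DMA",
 *					       md_ctrl->dev,
 *					       sizeof(struct cldma_gpd),
 *					       GPD_DMAPOOL_ALIGN, 0);
 *	if (!md_ctrl->gpd_dmapool)
 *		return -ENOMEM;
 */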

int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev);
void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl);
int t7xx_cldma_init(struct cldma_ctrl *md_ctrl);
void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl);
void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl, enum cldma_cfg cfg_id);
void t7xx_cldma_start(struct cldma_ctrl *md_ctrl);
int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl);
void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl);
void t7xx_cldma_set_recv_skb(struct cldma_queue *queue,
			     int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb));
int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb);
void t7xx_cldma_stop_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx);
void t7xx_cldma_clear_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx);
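
/*
 * Typical call order, as an illustrative sketch only (error handling and the
 * probe/remove context are elided; @md_ctrl is assumed to be the instance
 * set up by t7xx_cldma_alloc()):
 *
 *	ret = t7xx_cldma_alloc(CLDMA_ID_MD, t7xx_dev);
 *	ret = t7xx_cldma_init(md_ctrl);
 *	t7xx_cldma_hif_hw_init(md_ctrl);
 *	t7xx_cldma_switch_cfg(md_ctrl, CLDMA_SHARED_Q_CFG);
 *	t7xx_cldma_start(md_ctrl);
 *
 *	ret = t7xx_cldma_send_skb(md_ctrl, qno, skb);
 *
 *	t7xx_cldma_stop(md_ctrl);
 *	t7xx_cldma_exit(md_ctrl);
 */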

#endif /* __T7XX_HIF_CLDMA_H__ */