1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */ |
3 | |
4 | #include <linux/kernel.h> |
5 | #include <linux/platform_device.h> |
6 | #include <linux/slab.h> |
7 | #include <linux/module.h> |
8 | #include <linux/bitfield.h> |
9 | #include <linux/dma-mapping.h> |
10 | #include <linux/skbuff.h> |
11 | #include <linux/of_platform.h> |
12 | #include <linux/of_address.h> |
13 | #include <linux/of_reserved_mem.h> |
14 | #include <linux/mfd/syscon.h> |
15 | #include <linux/debugfs.h> |
16 | #include <linux/soc/mediatek/mtk_wed.h> |
17 | #include <net/flow_offload.h> |
18 | #include <net/pkt_cls.h> |
19 | #include "mtk_eth_soc.h" |
20 | #include "mtk_wed.h" |
21 | #include "mtk_ppe.h" |
22 | #include "mtk_wed_wo.h" |
23 | |
/* Per-slot PCIe MAC base address (one 0x2000 window per controller) */
#define MTK_PCIE_BASE(n)		(0x1a143000 + (n) * 0x2000)

/* TX buffer geometry: packet payload size and per-buffer slot size */
#define MTK_WED_PKT_SIZE		1920
#define MTK_WED_BUF_SIZE		2048
/* RX page-based buffer manager: small buffers carved out of full pages */
#define MTK_WED_PAGE_BUF_SIZE		128
#define MTK_WED_BUF_PER_PAGE		(PAGE_SIZE / 2048)
#define MTK_WED_RX_BUF_PER_PAGE	(PAGE_SIZE / MTK_WED_PAGE_BUF_SIZE)
#define MTK_WED_RX_RING_SIZE		1536
#define MTK_WED_RX_PG_BM_CNT		8192
/* AMSDU offload: 32 segments of 64K each (PAGE_SIZE << 4 with 4K pages) */
#define MTK_WED_AMSDU_BUF_SIZE		(PAGE_SIZE << 4)
#define MTK_WED_AMSDU_NPAGES		32

#define MTK_WED_TX_RING_SIZE		2048
#define MTK_WED_WDMA_RING_SIZE		1024
#define MTK_WED_MAX_GROUP_SIZE		0x100
#define MTK_WED_VLD_GROUP_SIZE		0x40
#define MTK_WED_PER_GROUP_PKT		128

#define MTK_WED_FBUF_SIZE		128
#define MTK_WED_MIOD_CNT		16
#define MTK_WED_FB_CMD_CNT		1024
#define MTK_WED_RRO_QUE_CNT		8192
#define MTK_WED_MIOD_ENTRY_CNT		128

/* v3 hardware: fixed TX buffer-manager DMA ring size and packet count */
#define MTK_WED_TX_BM_DMA_SIZE		65536
#define MTK_WED_TX_BM_PKT_CNT		32768

/* All probed WED hardware instances; index doubles as PCIe domain number.
 * Protected by hw_lock.
 */
static struct mtk_wed_hw *hw_list[3];
static DEFINE_MUTEX(hw_lock);

/* Context passed to the flow-block offload callback: which WED hw instance
 * serves which net_device.
 */
struct mtk_wed_flow_block_priv {
	struct mtk_wed_hw *hw;
	struct net_device *dev;
};
58 | |
/* SoC-specific register layout and descriptor sizes for MT7622 (WED v1) */
static const struct mtk_wed_soc_data mt7622_data = {
	.regmap = {
		.tx_bm_tkid		= 0x088,
		.wpdma_rx_ring0		= 0x770,
		.reset_idx_tx_mask	= GENMASK(3, 0),
		.reset_idx_rx_mask	= GENMASK(17, 16),
	},
	.tx_ring_desc_size = sizeof(struct mtk_wdma_desc),
	.wdma_desc_size = sizeof(struct mtk_wdma_desc),
};
69 | |
/* SoC-specific register layout and descriptor sizes for MT7986 (WED v2);
 * WDMA descriptors are twice the base size on this generation.
 */
static const struct mtk_wed_soc_data mt7986_data = {
	.regmap = {
		.tx_bm_tkid		= 0x0c8,
		.wpdma_rx_ring0		= 0x770,
		.reset_idx_tx_mask	= GENMASK(1, 0),
		.reset_idx_rx_mask	= GENMASK(7, 6),
	},
	.tx_ring_desc_size = sizeof(struct mtk_wdma_desc),
	.wdma_desc_size = 2 * sizeof(struct mtk_wdma_desc),
};
80 | |
/* SoC-specific register layout and descriptor sizes for MT7988 (WED v3);
 * the TX ring uses buffer-manager descriptors instead of WDMA descriptors.
 */
static const struct mtk_wed_soc_data mt7988_data = {
	.regmap = {
		.tx_bm_tkid		= 0x0c8,
		.wpdma_rx_ring0		= 0x7d0,
		.reset_idx_tx_mask	= GENMASK(1, 0),
		.reset_idx_rx_mask	= GENMASK(7, 6),
	},
	.tx_ring_desc_size = sizeof(struct mtk_wed_bm_desc),
	.wdma_desc_size = 2 * sizeof(struct mtk_wdma_desc),
};
91 | |
92 | static void |
93 | wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val) |
94 | { |
95 | regmap_update_bits(map: dev->hw->regs, reg, mask: mask | val, val); |
96 | } |
97 | |
98 | static void |
99 | wed_set(struct mtk_wed_device *dev, u32 reg, u32 mask) |
100 | { |
101 | return wed_m32(dev, reg, mask: 0, val: mask); |
102 | } |
103 | |
104 | static void |
105 | wed_clr(struct mtk_wed_device *dev, u32 reg, u32 mask) |
106 | { |
107 | return wed_m32(dev, reg, mask, val: 0); |
108 | } |
109 | |
110 | static void |
111 | wdma_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val) |
112 | { |
113 | wdma_w32(dev, reg, val: (wdma_r32(dev, reg) & ~mask) | val); |
114 | } |
115 | |
116 | static void |
117 | wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask) |
118 | { |
119 | wdma_m32(dev, reg, mask: 0, val: mask); |
120 | } |
121 | |
122 | static void |
123 | wdma_clr(struct mtk_wed_device *dev, u32 reg, u32 mask) |
124 | { |
125 | wdma_m32(dev, reg, mask, val: 0); |
126 | } |
127 | |
128 | static u32 |
129 | wifi_r32(struct mtk_wed_device *dev, u32 reg) |
130 | { |
131 | return readl(addr: dev->wlan.base + reg); |
132 | } |
133 | |
134 | static void |
135 | wifi_w32(struct mtk_wed_device *dev, u32 reg, u32 val) |
136 | { |
137 | writel(val, addr: dev->wlan.base + reg); |
138 | } |
139 | |
/* Read back the WED reset register; used by poll loops to wait for the
 * self-clearing reset bits to drop.
 */
static u32
mtk_wed_read_reset(struct mtk_wed_device *dev)
{
	return wed_r32(dev, MTK_WED_RESET);
}
145 | |
/* Read the WDMA global config register; poll loops use it to check the
 * TX/RX DMA busy flags.
 */
static u32
mtk_wdma_read_reset(struct mtk_wed_device *dev)
{
	return wdma_r32(dev, MTK_WDMA_GLO_CFG);
}
151 | |
152 | static void |
153 | mtk_wdma_v3_rx_reset(struct mtk_wed_device *dev) |
154 | { |
155 | u32 status; |
156 | |
157 | if (!mtk_wed_is_v3_or_greater(hw: dev->hw)) |
158 | return; |
159 | |
160 | wdma_clr(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN); |
161 | wdma_clr(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN); |
162 | |
163 | if (read_poll_timeout(wdma_r32, status, |
164 | !(status & MTK_WDMA_PREF_TX_CFG_PREF_BUSY), |
165 | 0, 10000, false, dev, MTK_WDMA_PREF_TX_CFG)) |
166 | dev_err(dev->hw->dev, "rx reset failed\n" ); |
167 | |
168 | if (read_poll_timeout(wdma_r32, status, |
169 | !(status & MTK_WDMA_PREF_RX_CFG_PREF_BUSY), |
170 | 0, 10000, false, dev, MTK_WDMA_PREF_RX_CFG)) |
171 | dev_err(dev->hw->dev, "rx reset failed\n" ); |
172 | |
173 | wdma_clr(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN); |
174 | wdma_clr(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN); |
175 | |
176 | if (read_poll_timeout(wdma_r32, status, |
177 | !(status & MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY), |
178 | 0, 10000, false, dev, MTK_WDMA_WRBK_TX_CFG)) |
179 | dev_err(dev->hw->dev, "rx reset failed\n" ); |
180 | |
181 | if (read_poll_timeout(wdma_r32, status, |
182 | !(status & MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY), |
183 | 0, 10000, false, dev, MTK_WDMA_WRBK_RX_CFG)) |
184 | dev_err(dev->hw->dev, "rx reset failed\n" ); |
185 | |
186 | /* prefetch FIFO */ |
187 | wdma_w32(dev, MTK_WDMA_PREF_RX_FIFO_CFG, |
188 | MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR | |
189 | MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR); |
190 | wdma_clr(dev, MTK_WDMA_PREF_RX_FIFO_CFG, |
191 | MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR | |
192 | MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR); |
193 | |
194 | /* core FIFO */ |
195 | wdma_w32(dev, MTK_WDMA_XDMA_RX_FIFO_CFG, |
196 | MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR | |
197 | MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR | |
198 | MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR | |
199 | MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR | |
200 | MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR | |
201 | MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR | |
202 | MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR); |
203 | wdma_clr(dev, MTK_WDMA_XDMA_RX_FIFO_CFG, |
204 | MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR | |
205 | MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR | |
206 | MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR | |
207 | MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR | |
208 | MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR | |
209 | MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR | |
210 | MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR); |
211 | |
212 | /* writeback FIFO */ |
213 | wdma_w32(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(0), |
214 | MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR); |
215 | wdma_w32(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(1), |
216 | MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR); |
217 | |
218 | wdma_clr(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(0), |
219 | MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR); |
220 | wdma_clr(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(1), |
221 | MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR); |
222 | |
223 | /* prefetch ring status */ |
224 | wdma_w32(dev, MTK_WDMA_PREF_SIDX_CFG, |
225 | MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR); |
226 | wdma_clr(dev, MTK_WDMA_PREF_SIDX_CFG, |
227 | MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR); |
228 | |
229 | /* writeback ring status */ |
230 | wdma_w32(dev, MTK_WDMA_WRBK_SIDX_CFG, |
231 | MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR); |
232 | wdma_clr(dev, MTK_WDMA_WRBK_SIDX_CFG, |
233 | MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR); |
234 | } |
235 | |
236 | static int |
237 | mtk_wdma_rx_reset(struct mtk_wed_device *dev) |
238 | { |
239 | u32 status, mask = MTK_WDMA_GLO_CFG_RX_DMA_BUSY; |
240 | int i, ret; |
241 | |
242 | wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_DMA_EN); |
243 | ret = readx_poll_timeout(mtk_wdma_read_reset, dev, status, |
244 | !(status & mask), 0, 10000); |
245 | if (ret) |
246 | dev_err(dev->hw->dev, "rx reset failed\n" ); |
247 | |
248 | mtk_wdma_v3_rx_reset(dev); |
249 | wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX); |
250 | wdma_w32(dev, MTK_WDMA_RESET_IDX, val: 0); |
251 | |
252 | for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++) { |
253 | if (dev->rx_wdma[i].desc) |
254 | continue; |
255 | |
256 | wdma_w32(dev, |
257 | MTK_WDMA_RING_RX(i) + MTK_WED_RING_OFS_CPU_IDX, val: 0); |
258 | } |
259 | |
260 | return ret; |
261 | } |
262 | |
263 | static u32 |
264 | mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask) |
265 | { |
266 | return !!(wed_r32(dev, reg) & mask); |
267 | } |
268 | |
269 | static int |
270 | mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask) |
271 | { |
272 | int sleep = 15000; |
273 | int timeout = 100 * sleep; |
274 | u32 val; |
275 | |
276 | return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep, |
277 | timeout, false, dev, reg, mask); |
278 | } |
279 | |
280 | static void |
281 | mtk_wdma_v3_tx_reset(struct mtk_wed_device *dev) |
282 | { |
283 | u32 status; |
284 | |
285 | if (!mtk_wed_is_v3_or_greater(hw: dev->hw)) |
286 | return; |
287 | |
288 | wdma_clr(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN); |
289 | wdma_clr(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN); |
290 | |
291 | if (read_poll_timeout(wdma_r32, status, |
292 | !(status & MTK_WDMA_PREF_TX_CFG_PREF_BUSY), |
293 | 0, 10000, false, dev, MTK_WDMA_PREF_TX_CFG)) |
294 | dev_err(dev->hw->dev, "tx reset failed\n" ); |
295 | |
296 | if (read_poll_timeout(wdma_r32, status, |
297 | !(status & MTK_WDMA_PREF_RX_CFG_PREF_BUSY), |
298 | 0, 10000, false, dev, MTK_WDMA_PREF_RX_CFG)) |
299 | dev_err(dev->hw->dev, "tx reset failed\n" ); |
300 | |
301 | wdma_clr(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN); |
302 | wdma_clr(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN); |
303 | |
304 | if (read_poll_timeout(wdma_r32, status, |
305 | !(status & MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY), |
306 | 0, 10000, false, dev, MTK_WDMA_WRBK_TX_CFG)) |
307 | dev_err(dev->hw->dev, "tx reset failed\n" ); |
308 | |
309 | if (read_poll_timeout(wdma_r32, status, |
310 | !(status & MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY), |
311 | 0, 10000, false, dev, MTK_WDMA_WRBK_RX_CFG)) |
312 | dev_err(dev->hw->dev, "tx reset failed\n" ); |
313 | |
314 | /* prefetch FIFO */ |
315 | wdma_w32(dev, MTK_WDMA_PREF_TX_FIFO_CFG, |
316 | MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR | |
317 | MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR); |
318 | wdma_clr(dev, MTK_WDMA_PREF_TX_FIFO_CFG, |
319 | MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR | |
320 | MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR); |
321 | |
322 | /* core FIFO */ |
323 | wdma_w32(dev, MTK_WDMA_XDMA_TX_FIFO_CFG, |
324 | MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR | |
325 | MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR | |
326 | MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR | |
327 | MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR); |
328 | wdma_clr(dev, MTK_WDMA_XDMA_TX_FIFO_CFG, |
329 | MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR | |
330 | MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR | |
331 | MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR | |
332 | MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR); |
333 | |
334 | /* writeback FIFO */ |
335 | wdma_w32(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(0), |
336 | MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR); |
337 | wdma_w32(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(1), |
338 | MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR); |
339 | |
340 | wdma_clr(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(0), |
341 | MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR); |
342 | wdma_clr(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(1), |
343 | MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR); |
344 | |
345 | /* prefetch ring status */ |
346 | wdma_w32(dev, MTK_WDMA_PREF_SIDX_CFG, |
347 | MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR); |
348 | wdma_clr(dev, MTK_WDMA_PREF_SIDX_CFG, |
349 | MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR); |
350 | |
351 | /* writeback ring status */ |
352 | wdma_w32(dev, MTK_WDMA_WRBK_SIDX_CFG, |
353 | MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR); |
354 | wdma_clr(dev, MTK_WDMA_WRBK_SIDX_CFG, |
355 | MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR); |
356 | } |
357 | |
358 | static void |
359 | mtk_wdma_tx_reset(struct mtk_wed_device *dev) |
360 | { |
361 | u32 status, mask = MTK_WDMA_GLO_CFG_TX_DMA_BUSY; |
362 | int i; |
363 | |
364 | wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN); |
365 | if (readx_poll_timeout(mtk_wdma_read_reset, dev, status, |
366 | !(status & mask), 0, 10000)) |
367 | dev_err(dev->hw->dev, "tx reset failed\n" ); |
368 | |
369 | mtk_wdma_v3_tx_reset(dev); |
370 | wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX); |
371 | wdma_w32(dev, MTK_WDMA_RESET_IDX, val: 0); |
372 | |
373 | for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++) |
374 | wdma_w32(dev, |
375 | MTK_WDMA_RING_TX(i) + MTK_WED_RING_OFS_CPU_IDX, val: 0); |
376 | } |
377 | |
378 | static void |
379 | mtk_wed_reset(struct mtk_wed_device *dev, u32 mask) |
380 | { |
381 | u32 status; |
382 | |
383 | wed_w32(dev, MTK_WED_RESET, val: mask); |
384 | if (readx_poll_timeout(mtk_wed_read_reset, dev, status, |
385 | !(status & mask), 0, 1000)) |
386 | WARN_ON_ONCE(1); |
387 | } |
388 | |
/* Read the WO (WED offload MCU) status word from the scratch register
 * reserved for it.
 */
static u32
mtk_wed_wo_read_status(struct mtk_wed_device *dev)
{
	return wed_r32(dev, MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_WO_STATUS);
}
394 | |
395 | static void |
396 | mtk_wed_wo_reset(struct mtk_wed_device *dev) |
397 | { |
398 | struct mtk_wed_wo *wo = dev->hw->wed_wo; |
399 | u8 state = MTK_WED_WO_STATE_DISABLE; |
400 | void __iomem *reg; |
401 | u32 val; |
402 | |
403 | mtk_wdma_tx_reset(dev); |
404 | mtk_wed_reset(dev, MTK_WED_RESET_WED); |
405 | |
406 | if (mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, |
407 | cmd: MTK_WED_WO_CMD_CHANGE_STATE, data: &state, |
408 | len: sizeof(state), wait_resp: false)) |
409 | return; |
410 | |
411 | if (readx_poll_timeout(mtk_wed_wo_read_status, dev, val, |
412 | val == MTK_WED_WOIF_DISABLE_DONE, |
413 | 100, MTK_WOCPU_TIMEOUT)) |
414 | dev_err(dev->hw->dev, "failed to disable wed-wo\n" ); |
415 | |
416 | reg = ioremap(MTK_WED_WO_CPU_MCUSYS_RESET_ADDR, size: 4); |
417 | |
418 | val = readl(addr: reg); |
419 | switch (dev->hw->index) { |
420 | case 0: |
421 | val |= MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK; |
422 | writel(val, addr: reg); |
423 | val &= ~MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK; |
424 | writel(val, addr: reg); |
425 | break; |
426 | case 1: |
427 | val |= MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK; |
428 | writel(val, addr: reg); |
429 | val &= ~MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK; |
430 | writel(val, addr: reg); |
431 | break; |
432 | default: |
433 | break; |
434 | } |
435 | iounmap(addr: reg); |
436 | } |
437 | |
438 | void mtk_wed_fe_reset(void) |
439 | { |
440 | int i; |
441 | |
442 | mutex_lock(&hw_lock); |
443 | |
444 | for (i = 0; i < ARRAY_SIZE(hw_list); i++) { |
445 | struct mtk_wed_hw *hw = hw_list[i]; |
446 | struct mtk_wed_device *dev; |
447 | int err; |
448 | |
449 | if (!hw) |
450 | break; |
451 | |
452 | dev = hw->wed_dev; |
453 | if (!dev || !dev->wlan.reset) |
454 | continue; |
455 | |
456 | /* reset callback blocks until WLAN reset is completed */ |
457 | err = dev->wlan.reset(dev); |
458 | if (err) |
459 | dev_err(dev->dev, "wlan reset failed: %d\n" , err); |
460 | } |
461 | |
462 | mutex_unlock(lock: &hw_lock); |
463 | } |
464 | |
465 | void mtk_wed_fe_reset_complete(void) |
466 | { |
467 | int i; |
468 | |
469 | mutex_lock(&hw_lock); |
470 | |
471 | for (i = 0; i < ARRAY_SIZE(hw_list); i++) { |
472 | struct mtk_wed_hw *hw = hw_list[i]; |
473 | struct mtk_wed_device *dev; |
474 | |
475 | if (!hw) |
476 | break; |
477 | |
478 | dev = hw->wed_dev; |
479 | if (!dev || !dev->wlan.reset_complete) |
480 | continue; |
481 | |
482 | dev->wlan.reset_complete(dev); |
483 | } |
484 | |
485 | mutex_unlock(lock: &hw_lock); |
486 | } |
487 | |
488 | static struct mtk_wed_hw * |
489 | mtk_wed_assign(struct mtk_wed_device *dev) |
490 | { |
491 | struct mtk_wed_hw *hw; |
492 | int i; |
493 | |
494 | if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) { |
495 | hw = hw_list[pci_domain_nr(bus: dev->wlan.pci_dev->bus)]; |
496 | if (!hw) |
497 | return NULL; |
498 | |
499 | if (!hw->wed_dev) |
500 | goto out; |
501 | |
502 | if (mtk_wed_is_v1(hw)) |
503 | return NULL; |
504 | |
505 | /* MT7986 WED devices do not have any pcie slot restrictions */ |
506 | } |
507 | /* MT7986 PCIE or AXI */ |
508 | for (i = 0; i < ARRAY_SIZE(hw_list); i++) { |
509 | hw = hw_list[i]; |
510 | if (hw && !hw->wed_dev) |
511 | goto out; |
512 | } |
513 | |
514 | return NULL; |
515 | |
516 | out: |
517 | hw->wed_dev = dev; |
518 | return hw; |
519 | } |
520 | |
521 | static int |
522 | mtk_wed_amsdu_buffer_alloc(struct mtk_wed_device *dev) |
523 | { |
524 | struct mtk_wed_hw *hw = dev->hw; |
525 | struct mtk_wed_amsdu *wed_amsdu; |
526 | int i; |
527 | |
528 | if (!mtk_wed_is_v3_or_greater(hw)) |
529 | return 0; |
530 | |
531 | wed_amsdu = devm_kcalloc(dev: hw->dev, MTK_WED_AMSDU_NPAGES, |
532 | size: sizeof(*wed_amsdu), GFP_KERNEL); |
533 | if (!wed_amsdu) |
534 | return -ENOMEM; |
535 | |
536 | for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++) { |
537 | void *ptr; |
538 | |
539 | /* each segment is 64K */ |
540 | ptr = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | |
541 | __GFP_ZERO | __GFP_COMP | |
542 | GFP_DMA32, |
543 | order: get_order(MTK_WED_AMSDU_BUF_SIZE)); |
544 | if (!ptr) |
545 | goto error; |
546 | |
547 | wed_amsdu[i].txd = ptr; |
548 | wed_amsdu[i].txd_phy = dma_map_single(hw->dev, ptr, |
549 | MTK_WED_AMSDU_BUF_SIZE, |
550 | DMA_TO_DEVICE); |
551 | if (dma_mapping_error(dev: hw->dev, dma_addr: wed_amsdu[i].txd_phy)) |
552 | goto error; |
553 | } |
554 | dev->hw->wed_amsdu = wed_amsdu; |
555 | |
556 | return 0; |
557 | |
558 | error: |
559 | for (i--; i >= 0; i--) |
560 | dma_unmap_single(hw->dev, wed_amsdu[i].txd_phy, |
561 | MTK_WED_AMSDU_BUF_SIZE, DMA_TO_DEVICE); |
562 | return -ENOMEM; |
563 | } |
564 | |
565 | static void |
566 | mtk_wed_amsdu_free_buffer(struct mtk_wed_device *dev) |
567 | { |
568 | struct mtk_wed_amsdu *wed_amsdu = dev->hw->wed_amsdu; |
569 | int i; |
570 | |
571 | if (!wed_amsdu) |
572 | return; |
573 | |
574 | for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++) { |
575 | dma_unmap_single(dev->hw->dev, wed_amsdu[i].txd_phy, |
576 | MTK_WED_AMSDU_BUF_SIZE, DMA_TO_DEVICE); |
577 | free_pages(addr: (unsigned long)wed_amsdu[i].txd, |
578 | order: get_order(MTK_WED_AMSDU_BUF_SIZE)); |
579 | } |
580 | } |
581 | |
582 | static int |
583 | mtk_wed_amsdu_init(struct mtk_wed_device *dev) |
584 | { |
585 | struct mtk_wed_amsdu *wed_amsdu = dev->hw->wed_amsdu; |
586 | int i, ret; |
587 | |
588 | if (!wed_amsdu) |
589 | return 0; |
590 | |
591 | for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++) |
592 | wed_w32(dev, MTK_WED_AMSDU_HIFTXD_BASE_L(i), |
593 | val: wed_amsdu[i].txd_phy); |
594 | |
595 | /* init all sta parameter */ |
596 | wed_w32(dev, MTK_WED_AMSDU_STA_INFO_INIT, MTK_WED_AMSDU_STA_RMVL | |
597 | MTK_WED_AMSDU_STA_WTBL_HDRT_MODE | |
598 | FIELD_PREP(MTK_WED_AMSDU_STA_MAX_AMSDU_LEN, |
599 | dev->wlan.amsdu_max_len >> 8) | |
600 | FIELD_PREP(MTK_WED_AMSDU_STA_MAX_AMSDU_NUM, |
601 | dev->wlan.amsdu_max_subframes)); |
602 | |
603 | wed_w32(dev, MTK_WED_AMSDU_STA_INFO, MTK_WED_AMSDU_STA_INFO_DO_INIT); |
604 | |
605 | ret = mtk_wed_poll_busy(dev, MTK_WED_AMSDU_STA_INFO, |
606 | MTK_WED_AMSDU_STA_INFO_DO_INIT); |
607 | if (ret) { |
608 | dev_err(dev->hw->dev, "amsdu initialization failed\n" ); |
609 | return ret; |
610 | } |
611 | |
612 | /* init partial amsdu offload txd src */ |
613 | wed_set(dev, MTK_WED_AMSDU_HIFTXD_CFG, |
614 | FIELD_PREP(MTK_WED_AMSDU_HIFTXD_SRC, dev->hw->index)); |
615 | |
616 | /* init qmem */ |
617 | wed_set(dev, MTK_WED_AMSDU_PSE, MTK_WED_AMSDU_PSE_RESET); |
618 | ret = mtk_wed_poll_busy(dev, MTK_WED_MON_AMSDU_QMEM_STS1, BIT(29)); |
619 | if (ret) { |
620 | pr_info("%s: amsdu qmem initialization failed\n" , __func__); |
621 | return ret; |
622 | } |
623 | |
624 | /* eagle E1 PCIE1 tx ring 22 flow control issue */ |
625 | if (dev->wlan.id == 0x7991) |
626 | wed_clr(dev, MTK_WED_AMSDU_FIFO, MTK_WED_AMSDU_IS_PRIOR0_RING); |
627 | |
628 | wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN); |
629 | |
630 | return 0; |
631 | } |
632 | |
633 | static int |
634 | mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev) |
635 | { |
636 | u32 desc_size = dev->hw->soc->tx_ring_desc_size; |
637 | int i, page_idx = 0, n_pages, ring_size; |
638 | int token = dev->wlan.token_start; |
639 | struct mtk_wed_buf *page_list; |
640 | dma_addr_t desc_phys; |
641 | void *desc_ptr; |
642 | |
643 | if (!mtk_wed_is_v3_or_greater(hw: dev->hw)) { |
644 | ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1); |
645 | dev->tx_buf_ring.size = ring_size; |
646 | } else { |
647 | dev->tx_buf_ring.size = MTK_WED_TX_BM_DMA_SIZE; |
648 | ring_size = MTK_WED_TX_BM_PKT_CNT; |
649 | } |
650 | n_pages = dev->tx_buf_ring.size / MTK_WED_BUF_PER_PAGE; |
651 | |
652 | page_list = kcalloc(n: n_pages, size: sizeof(*page_list), GFP_KERNEL); |
653 | if (!page_list) |
654 | return -ENOMEM; |
655 | |
656 | dev->tx_buf_ring.pages = page_list; |
657 | |
658 | desc_ptr = dma_alloc_coherent(dev: dev->hw->dev, |
659 | size: dev->tx_buf_ring.size * desc_size, |
660 | dma_handle: &desc_phys, GFP_KERNEL); |
661 | if (!desc_ptr) |
662 | return -ENOMEM; |
663 | |
664 | dev->tx_buf_ring.desc = desc_ptr; |
665 | dev->tx_buf_ring.desc_phys = desc_phys; |
666 | |
667 | for (i = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) { |
668 | dma_addr_t page_phys, buf_phys; |
669 | struct page *page; |
670 | void *buf; |
671 | int s; |
672 | |
673 | page = __dev_alloc_page(GFP_KERNEL); |
674 | if (!page) |
675 | return -ENOMEM; |
676 | |
677 | page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE, |
678 | DMA_BIDIRECTIONAL); |
679 | if (dma_mapping_error(dev: dev->hw->dev, dma_addr: page_phys)) { |
680 | __free_page(page); |
681 | return -ENOMEM; |
682 | } |
683 | |
684 | page_list[page_idx].p = page; |
685 | page_list[page_idx++].phy_addr = page_phys; |
686 | dma_sync_single_for_cpu(dev: dev->hw->dev, addr: page_phys, PAGE_SIZE, |
687 | dir: DMA_BIDIRECTIONAL); |
688 | |
689 | buf = page_to_virt(page); |
690 | buf_phys = page_phys; |
691 | |
692 | for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) { |
693 | struct mtk_wdma_desc *desc = desc_ptr; |
694 | u32 ctrl; |
695 | |
696 | desc->buf0 = cpu_to_le32(buf_phys); |
697 | if (!mtk_wed_is_v3_or_greater(hw: dev->hw)) { |
698 | u32 txd_size; |
699 | |
700 | txd_size = dev->wlan.init_buf(buf, buf_phys, |
701 | token++); |
702 | desc->buf1 = cpu_to_le32(buf_phys + txd_size); |
703 | ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size); |
704 | if (mtk_wed_is_v1(hw: dev->hw)) |
705 | ctrl |= MTK_WDMA_DESC_CTRL_LAST_SEG1 | |
706 | FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1, |
707 | MTK_WED_BUF_SIZE - txd_size); |
708 | else |
709 | ctrl |= MTK_WDMA_DESC_CTRL_LAST_SEG0 | |
710 | FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2, |
711 | MTK_WED_BUF_SIZE - txd_size); |
712 | desc->info = 0; |
713 | } else { |
714 | ctrl = token << 16 | TX_DMA_PREP_ADDR64(buf_phys); |
715 | } |
716 | desc->ctrl = cpu_to_le32(ctrl); |
717 | |
718 | desc_ptr += desc_size; |
719 | buf += MTK_WED_BUF_SIZE; |
720 | buf_phys += MTK_WED_BUF_SIZE; |
721 | } |
722 | |
723 | dma_sync_single_for_device(dev: dev->hw->dev, addr: page_phys, PAGE_SIZE, |
724 | dir: DMA_BIDIRECTIONAL); |
725 | } |
726 | |
727 | return 0; |
728 | } |
729 | |
730 | static void |
731 | mtk_wed_free_tx_buffer(struct mtk_wed_device *dev) |
732 | { |
733 | struct mtk_wed_buf *page_list = dev->tx_buf_ring.pages; |
734 | struct mtk_wed_hw *hw = dev->hw; |
735 | int i, page_idx = 0; |
736 | |
737 | if (!page_list) |
738 | return; |
739 | |
740 | if (!dev->tx_buf_ring.desc) |
741 | goto free_pagelist; |
742 | |
743 | for (i = 0; i < dev->tx_buf_ring.size; i += MTK_WED_BUF_PER_PAGE) { |
744 | dma_addr_t page_phy = page_list[page_idx].phy_addr; |
745 | void *page = page_list[page_idx++].p; |
746 | |
747 | if (!page) |
748 | break; |
749 | |
750 | dma_unmap_page(dev->hw->dev, page_phy, PAGE_SIZE, |
751 | DMA_BIDIRECTIONAL); |
752 | __free_page(page); |
753 | } |
754 | |
755 | dma_free_coherent(dev: dev->hw->dev, |
756 | size: dev->tx_buf_ring.size * hw->soc->tx_ring_desc_size, |
757 | cpu_addr: dev->tx_buf_ring.desc, |
758 | dma_handle: dev->tx_buf_ring.desc_phys); |
759 | |
760 | free_pagelist: |
761 | kfree(objp: page_list); |
762 | } |
763 | |
764 | static int |
765 | mtk_wed_hwrro_buffer_alloc(struct mtk_wed_device *dev) |
766 | { |
767 | int n_pages = MTK_WED_RX_PG_BM_CNT / MTK_WED_RX_BUF_PER_PAGE; |
768 | struct mtk_wed_buf *page_list; |
769 | struct mtk_wed_bm_desc *desc; |
770 | dma_addr_t desc_phys; |
771 | int i, page_idx = 0; |
772 | |
773 | if (!dev->wlan.hw_rro) |
774 | return 0; |
775 | |
776 | page_list = kcalloc(n: n_pages, size: sizeof(*page_list), GFP_KERNEL); |
777 | if (!page_list) |
778 | return -ENOMEM; |
779 | |
780 | dev->hw_rro.size = dev->wlan.rx_nbuf & ~(MTK_WED_BUF_PER_PAGE - 1); |
781 | dev->hw_rro.pages = page_list; |
782 | desc = dma_alloc_coherent(dev: dev->hw->dev, |
783 | size: dev->wlan.rx_nbuf * sizeof(*desc), |
784 | dma_handle: &desc_phys, GFP_KERNEL); |
785 | if (!desc) |
786 | return -ENOMEM; |
787 | |
788 | dev->hw_rro.desc = desc; |
789 | dev->hw_rro.desc_phys = desc_phys; |
790 | |
791 | for (i = 0; i < MTK_WED_RX_PG_BM_CNT; i += MTK_WED_RX_BUF_PER_PAGE) { |
792 | dma_addr_t page_phys, buf_phys; |
793 | struct page *page; |
794 | int s; |
795 | |
796 | page = __dev_alloc_page(GFP_KERNEL); |
797 | if (!page) |
798 | return -ENOMEM; |
799 | |
800 | page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE, |
801 | DMA_BIDIRECTIONAL); |
802 | if (dma_mapping_error(dev: dev->hw->dev, dma_addr: page_phys)) { |
803 | __free_page(page); |
804 | return -ENOMEM; |
805 | } |
806 | |
807 | page_list[page_idx].p = page; |
808 | page_list[page_idx++].phy_addr = page_phys; |
809 | dma_sync_single_for_cpu(dev: dev->hw->dev, addr: page_phys, PAGE_SIZE, |
810 | dir: DMA_BIDIRECTIONAL); |
811 | |
812 | buf_phys = page_phys; |
813 | for (s = 0; s < MTK_WED_RX_BUF_PER_PAGE; s++) { |
814 | desc->buf0 = cpu_to_le32(buf_phys); |
815 | desc->token = cpu_to_le32(RX_DMA_PREP_ADDR64(buf_phys)); |
816 | buf_phys += MTK_WED_PAGE_BUF_SIZE; |
817 | desc++; |
818 | } |
819 | |
820 | dma_sync_single_for_device(dev: dev->hw->dev, addr: page_phys, PAGE_SIZE, |
821 | dir: DMA_BIDIRECTIONAL); |
822 | } |
823 | |
824 | return 0; |
825 | } |
826 | |
827 | static int |
828 | mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev) |
829 | { |
830 | struct mtk_wed_bm_desc *desc; |
831 | dma_addr_t desc_phys; |
832 | |
833 | dev->rx_buf_ring.size = dev->wlan.rx_nbuf; |
834 | desc = dma_alloc_coherent(dev: dev->hw->dev, |
835 | size: dev->wlan.rx_nbuf * sizeof(*desc), |
836 | dma_handle: &desc_phys, GFP_KERNEL); |
837 | if (!desc) |
838 | return -ENOMEM; |
839 | |
840 | dev->rx_buf_ring.desc = desc; |
841 | dev->rx_buf_ring.desc_phys = desc_phys; |
842 | dev->wlan.init_rx_buf(dev, dev->wlan.rx_npkt); |
843 | |
844 | return mtk_wed_hwrro_buffer_alloc(dev); |
845 | } |
846 | |
847 | static void |
848 | mtk_wed_hwrro_free_buffer(struct mtk_wed_device *dev) |
849 | { |
850 | struct mtk_wed_buf *page_list = dev->hw_rro.pages; |
851 | struct mtk_wed_bm_desc *desc = dev->hw_rro.desc; |
852 | int i, page_idx = 0; |
853 | |
854 | if (!dev->wlan.hw_rro) |
855 | return; |
856 | |
857 | if (!page_list) |
858 | return; |
859 | |
860 | if (!desc) |
861 | goto free_pagelist; |
862 | |
863 | for (i = 0; i < MTK_WED_RX_PG_BM_CNT; i += MTK_WED_RX_BUF_PER_PAGE) { |
864 | dma_addr_t buf_addr = page_list[page_idx].phy_addr; |
865 | void *page = page_list[page_idx++].p; |
866 | |
867 | if (!page) |
868 | break; |
869 | |
870 | dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE, |
871 | DMA_BIDIRECTIONAL); |
872 | __free_page(page); |
873 | } |
874 | |
875 | dma_free_coherent(dev: dev->hw->dev, size: dev->hw_rro.size * sizeof(*desc), |
876 | cpu_addr: desc, dma_handle: dev->hw_rro.desc_phys); |
877 | |
878 | free_pagelist: |
879 | kfree(objp: page_list); |
880 | } |
881 | |
882 | static void |
883 | mtk_wed_free_rx_buffer(struct mtk_wed_device *dev) |
884 | { |
885 | struct mtk_wed_bm_desc *desc = dev->rx_buf_ring.desc; |
886 | |
887 | if (!desc) |
888 | return; |
889 | |
890 | dev->wlan.release_rx_buf(dev); |
891 | dma_free_coherent(dev: dev->hw->dev, size: dev->rx_buf_ring.size * sizeof(*desc), |
892 | cpu_addr: desc, dma_handle: dev->rx_buf_ring.desc_phys); |
893 | |
894 | mtk_wed_hwrro_free_buffer(dev); |
895 | } |
896 | |
897 | static void |
898 | mtk_wed_hwrro_init(struct mtk_wed_device *dev) |
899 | { |
900 | if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hw_rro) |
901 | return; |
902 | |
903 | wed_set(dev, MTK_WED_RRO_PG_BM_RX_DMAM, |
904 | FIELD_PREP(MTK_WED_RRO_PG_BM_RX_SDL0, 128)); |
905 | |
906 | wed_w32(dev, MTK_WED_RRO_PG_BM_BASE, val: dev->hw_rro.desc_phys); |
907 | |
908 | wed_w32(dev, MTK_WED_RRO_PG_BM_INIT_PTR, |
909 | MTK_WED_RRO_PG_BM_INIT_SW_TAIL_IDX | |
910 | FIELD_PREP(MTK_WED_RRO_PG_BM_SW_TAIL_IDX, |
911 | MTK_WED_RX_PG_BM_CNT)); |
912 | |
913 | /* enable rx_page_bm to fetch dmad */ |
914 | wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_PG_BM_EN); |
915 | } |
916 | |
917 | static void |
918 | mtk_wed_rx_buffer_hw_init(struct mtk_wed_device *dev) |
919 | { |
920 | wed_w32(dev, MTK_WED_RX_BM_RX_DMAD, |
921 | FIELD_PREP(MTK_WED_RX_BM_RX_DMAD_SDL0, dev->wlan.rx_size)); |
922 | wed_w32(dev, MTK_WED_RX_BM_BASE, val: dev->rx_buf_ring.desc_phys); |
923 | wed_w32(dev, MTK_WED_RX_BM_INIT_PTR, MTK_WED_RX_BM_INIT_SW_TAIL | |
924 | FIELD_PREP(MTK_WED_RX_BM_SW_TAIL, dev->wlan.rx_npkt)); |
925 | wed_w32(dev, MTK_WED_RX_BM_DYN_ALLOC_TH, |
926 | FIELD_PREP(MTK_WED_RX_BM_DYN_ALLOC_TH_H, 0xffff)); |
927 | wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN); |
928 | |
929 | mtk_wed_hwrro_init(dev); |
930 | } |
931 | |
932 | static void |
933 | mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring) |
934 | { |
935 | if (!ring->desc) |
936 | return; |
937 | |
938 | dma_free_coherent(dev: dev->hw->dev, size: ring->size * ring->desc_size, |
939 | cpu_addr: ring->desc, dma_handle: ring->desc_phys); |
940 | } |
941 | |
942 | static void |
943 | mtk_wed_free_rx_rings(struct mtk_wed_device *dev) |
944 | { |
945 | mtk_wed_free_rx_buffer(dev); |
946 | mtk_wed_free_ring(dev, ring: &dev->rro.ring); |
947 | } |
948 | |
949 | static void |
950 | mtk_wed_free_tx_rings(struct mtk_wed_device *dev) |
951 | { |
952 | int i; |
953 | |
954 | for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) |
955 | mtk_wed_free_ring(dev, ring: &dev->tx_ring[i]); |
956 | for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++) |
957 | mtk_wed_free_ring(dev, ring: &dev->rx_wdma[i]); |
958 | } |
959 | |
960 | static void |
961 | mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en) |
962 | { |
963 | u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK; |
964 | |
965 | switch (dev->hw->version) { |
966 | case 1: |
967 | mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR; |
968 | break; |
969 | case 2: |
970 | mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH | |
971 | MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH | |
972 | MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT | |
973 | MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR; |
974 | break; |
975 | case 3: |
976 | mask = MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT | |
977 | MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; |
978 | break; |
979 | default: |
980 | break; |
981 | } |
982 | |
983 | if (!dev->hw->num_flows) |
984 | mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; |
985 | |
986 | wed_w32(dev, MTK_WED_EXT_INT_MASK, val: en ? mask : 0); |
987 | wed_r32(dev, MTK_WED_EXT_INT_MASK); |
988 | } |
989 | |
/* Toggle the "512" TXP mode; only meaningful on v2 hardware.
 * Note the deliberate write ordering: on enable the DW9 overwrite bit
 * is set before the TXP value is changed, on disable the TXP value is
 * restored first and the overwrite bit cleared last.
 */
static void
mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable)
{
	if (!mtk_wed_is_v2(hw: dev->hw))
		return;

	if (enable) {
		wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
		wed_w32(dev, MTK_WED_TXP_DW1,
			FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0103));
	} else {
		wed_w32(dev, MTK_WED_TXP_DW1,
			FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0100));
		wed_clr(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
	}
}
1006 | |
1007 | static int |
1008 | mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, |
1009 | struct mtk_wed_ring *ring) |
1010 | { |
1011 | int i; |
1012 | |
1013 | for (i = 0; i < 3; i++) { |
1014 | u32 cur_idx = readl(addr: ring->wpdma + MTK_WED_RING_OFS_CPU_IDX); |
1015 | |
1016 | if (cur_idx == MTK_WED_RX_RING_SIZE - 1) |
1017 | break; |
1018 | |
1019 | usleep_range(min: 100000, max: 200000); |
1020 | } |
1021 | |
1022 | if (i == 3) { |
1023 | dev_err(dev->hw->dev, "rx dma enable failed\n" ); |
1024 | return -ETIMEDOUT; |
1025 | } |
1026 | |
1027 | return 0; |
1028 | } |
1029 | |
/* Stop every DMA engine on the WED datapath: the WPDMA (WLAN-side)
 * drivers, the WDMA (ethernet-side) RX driver, WED's own TX/RX DMA and
 * the raw WDMA block, plus version-specific extras. Finally drops the
 * v2 "512" TXP mode so a later re-enable starts clean.
 */
static void
mtk_wed_dma_disable(struct mtk_wed_device *dev)
{
	wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
		MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
		MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);

	wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);

	wed_clr(dev, MTK_WED_GLO_CFG,
		MTK_WED_GLO_CFG_TX_DMA_EN |
		MTK_WED_GLO_CFG_RX_DMA_EN);

	wdma_clr(dev, MTK_WDMA_GLO_CFG,
		 MTK_WDMA_GLO_CFG_TX_DMA_EN |
		 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
		 MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);

	if (mtk_wed_is_v1(hw: dev->hw)) {
		/* v1 only: zero this instance's slot in the mirror regmap
		 * and clear the remaining preres bit */
		regmap_write(map: dev->hw->mirror, reg: dev->hw->index * 4, val: 0);
		wdma_clr(dev, MTK_WDMA_GLO_CFG,
			 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
	} else {
		/* v2+: also stop the RX ring-0 packet processing/sync and
		 * the dedicated WPDMA RX data driver */
		wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
			MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
			MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);

		wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
			MTK_WED_WPDMA_RX_D_RX_DRV_EN);
		wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
			MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);

		/* v3+ with RX offload: disable the WDMA prefetchers too */
		if (mtk_wed_is_v3_or_greater(hw: dev->hw) &&
		    mtk_wed_get_rx_capa(dev)) {
			wdma_clr(dev, MTK_WDMA_PREF_TX_CFG,
				 MTK_WDMA_PREF_TX_CFG_PREF_EN);
			wdma_clr(dev, MTK_WDMA_PREF_RX_CFG,
				 MTK_WDMA_PREF_RX_CFG_PREF_EN);
		}
	}

	mtk_wed_set_512_support(dev, enable: false);
}
1073 | |
/* Quiesce WED: stop all DMA, then mask and clear every interrupt
 * trigger on the WPDMA/WDMA sides. RX-capable hardware carries two
 * additional external interrupt masks that are cleared as well.
 */
static void
mtk_wed_stop(struct mtk_wed_device *dev)
{
	mtk_wed_dma_disable(dev);
	mtk_wed_set_ext_int(dev, en: false);

	wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, val: 0);
	wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, val: 0);
	wdma_w32(dev, MTK_WDMA_INT_MASK, val: 0);
	wdma_w32(dev, MTK_WDMA_INT_GRP2, val: 0);

	if (!mtk_wed_get_rx_capa(dev))
		return;

	wed_w32(dev, MTK_WED_EXT_INT_MASK1, val: 0);
	wed_w32(dev, MTK_WED_EXT_INT_MASK2, val: 0);
}
1091 | |
/* Full functional teardown: stop the datapath, then disable the WED
 * agents. v1 only has the TX-side agents; v2+ additionally carries the
 * RX offload blocks and v3+ the AMSDU engine and PCIe irq filtering.
 */
static void
mtk_wed_deinit(struct mtk_wed_device *dev)
{
	mtk_wed_stop(dev);

	/* agents common to every hardware version */
	wed_clr(dev, MTK_WED_CTRL,
		MTK_WED_CTRL_WDMA_INT_AGENT_EN |
		MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
		MTK_WED_CTRL_WED_TX_BM_EN |
		MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);

	if (mtk_wed_is_v1(hw: dev->hw))
		return;

	/* v2+ RX offload blocks */
	wed_clr(dev, MTK_WED_CTRL,
		MTK_WED_CTRL_RX_ROUTE_QM_EN |
		MTK_WED_CTRL_WED_RX_BM_EN |
		MTK_WED_CTRL_RX_RRO_QM_EN);

	if (mtk_wed_is_v3_or_greater(hw: dev->hw)) {
		/* v3+: AMSDU engine off and PCIe interrupt filter cleared */
		wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN);
		wed_clr(dev, MTK_WED_RESET, MTK_WED_RESET_TX_AMSDU);
		wed_clr(dev, MTK_WED_PCIE_INT_CTRL,
			MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
			MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER);
	}
}
1119 | |
/* Core detach path: undo attach-time setup in reverse order. Caller
 * must hold hw_lock. @dev is zeroed at the end and must not be used by
 * the caller afterwards.
 */
static void
__mtk_wed_detach(struct mtk_wed_device *dev)
{
	struct mtk_wed_hw *hw = dev->hw;

	mtk_wed_deinit(dev);

	/* reset the ethernet-side WDMA RX path and the WED core, then
	 * release TX resources */
	mtk_wdma_rx_reset(dev);
	mtk_wed_reset(dev, MTK_WED_RESET_WED);
	mtk_wed_amsdu_free_buffer(dev);
	mtk_wed_free_tx_buffer(dev);
	mtk_wed_free_tx_rings(dev);

	if (mtk_wed_get_rx_capa(dev)) {
		/* reset the WO MCU before freeing the RX rings it uses,
		 * and only then tear the WO context down */
		if (hw->wed_wo)
			mtk_wed_wo_reset(dev);
		mtk_wed_free_rx_rings(dev);
		if (hw->wed_wo)
			mtk_wed_wo_deinit(hw);
	}

	if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
		struct device_node *wlan_node;

		/* re-set this instance's bit in the HIFSYS DMA agent map
		 * for DMA-coherent PCIe WLAN devices — presumably undoing
		 * attach-time configuration (attach not visible here) */
		wlan_node = dev->wlan.pci_dev->dev.of_node;
		if (of_dma_is_coherent(np: wlan_node) && hw->hifsys)
			regmap_update_bits(map: hw->hifsys, HIFSYS_DMA_AG_MAP,
					   BIT(hw->index), BIT(hw->index));
	}

	/* if the "other" WED instance (index 0 <-> 1) is absent or has no
	 * attached device, hand DMA duties back to the ethernet device */
	if ((!hw_list[!hw->index] || !hw_list[!hw->index]->wed_dev) &&
	    hw->eth->dma_dev != hw->eth->dev)
		mtk_eth_set_dma_device(eth: hw->eth, dma_dev: hw->eth->dev);

	memset(dev, 0, sizeof(*dev));
	module_put(THIS_MODULE);

	hw->wed_dev = NULL;
}
1159 | |
/* Public detach entry point: serializes __mtk_wed_detach() against
 * concurrent attach/detach via the global hw_lock.
 */
static void
mtk_wed_detach(struct mtk_wed_device *dev)
{
	mutex_lock(&hw_lock);
	__mtk_wed_detach(dev);
	mutex_unlock(lock: &hw_lock);
}
1167 | |
/* Program bus-specific interrupt plumbing for the attached WLAN device.
 * PCIe devices get interrupt config/trigger registers derived from the
 * hw's PCIe base (MSI vs legacy INTx use different offsets and trigger
 * bits); AXI devices only need interrupt source selection.
 */
static void
mtk_wed_bus_init(struct mtk_wed_device *dev)
{
	switch (dev->wlan.bus_type) {
	case MTK_WED_BUS_PCIE: {
		struct device_node *np = dev->hw->eth->dev->of_node;

		if (mtk_wed_is_v2(hw: dev->hw)) {
			struct regmap *regs;

			/* v2 only: set bit 0 in the "mediatek,wed-pcie"
			 * syscon; a missing phandle aborts the PCIe setup */
			regs = syscon_regmap_lookup_by_phandle(np,
							       property: "mediatek,wed-pcie" );
			if (IS_ERR(ptr: regs))
				break;

			regmap_update_bits(map: regs, reg: 0, BIT(0), BIT(0));
		}

		if (dev->wlan.msi) {
			/* MSI: INTM/BASE at pcie_base + 0xc08/0xc04,
			 * trigger on bit 8 */
			wed_w32(dev, MTK_WED_PCIE_CFG_INTM,
				val: dev->hw->pcie_base | 0xc08);
			wed_w32(dev, MTK_WED_PCIE_CFG_BASE,
				val: dev->hw->pcie_base | 0xc04);
			wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(8));
		} else {
			/* legacy INTx: INTM/BASE at + 0x180/0x184,
			 * trigger on bit 24 */
			wed_w32(dev, MTK_WED_PCIE_CFG_INTM,
				val: dev->hw->pcie_base | 0x180);
			wed_w32(dev, MTK_WED_PCIE_CFG_BASE,
				val: dev->hw->pcie_base | 0x184);
			wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
		}

		wed_w32(dev, MTK_WED_PCIE_INT_CTRL,
			FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2));

		/* pcie interrupt control: pola/source selection */
		wed_set(dev, MTK_WED_PCIE_INT_CTRL,
			MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
			MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER |
			FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL,
				   dev->hw->index));
		break;
	}
	case MTK_WED_BUS_AXI:
		/* in-SoC WLAN: select interrupt signal source 0 */
		wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
			MTK_WED_WPDMA_INT_CTRL_SIG_SRC |
			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_SRC_SEL, 0));
		break;
	default:
		break;
	}
}
1220 | |
/* Hand the WLAN driver's WPDMA addresses to WED. v1 only takes the
 * single physical config base; newer versions also get interrupt, TX,
 * TX-free and — when RX offload is available — RX ring info, plus the
 * hardware-RRO ring config when the WLAN device supports it.
 */
static void
mtk_wed_set_wpdma(struct mtk_wed_device *dev)
{
	int i;

	if (mtk_wed_is_v1(hw: dev->hw)) {
		wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, val: dev->wlan.wpdma_phys);
		return;
	}

	mtk_wed_bus_init(dev);

	wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, val: dev->wlan.wpdma_int);
	wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, val: dev->wlan.wpdma_mask);
	wed_w32(dev, MTK_WED_WPDMA_CFG_TX, val: dev->wlan.wpdma_tx);
	wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, val: dev->wlan.wpdma_txfree);

	if (!mtk_wed_get_rx_capa(dev))
		return;

	wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, val: dev->wlan.wpdma_rx_glo);
	wed_w32(dev, reg: dev->hw->soc->regmap.wpdma_rx_ring0, val: dev->wlan.wpdma_rx);

	if (!dev->wlan.hw_rro)
		return;

	/* hardware RRO: two RX data rings and the MSDU page rings, each
	 * page ring config spaced 0x10 apart */
	wed_w32(dev, MTK_WED_RRO_RX_D_CFG(0), val: dev->wlan.wpdma_rx_rro[0]);
	wed_w32(dev, MTK_WED_RRO_RX_D_CFG(1), val: dev->wlan.wpdma_rx_rro[1]);
	for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++)
		wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING_CFG(i),
			val: dev->wlan.wpdma_rx_pg + i * 0x10);
}
1253 | |
/* Early one-shot setup: tear down any previous state, reset the WED
 * core, pass WPDMA addresses through, and program the WDMA global
 * config and register offsets that must exist before rings/buffers do.
 */
static void
mtk_wed_hw_init_early(struct mtk_wed_device *dev)
{
	u32 set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2);
	u32 mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE;

	mtk_wed_deinit(dev);
	mtk_wed_reset(dev, MTK_WED_RESET_WED);
	mtk_wed_set_wpdma(dev);

	/* pre-v3 additionally adjusts DMAD recycle/idle behaviour */
	if (!mtk_wed_is_v3_or_greater(hw: dev->hw)) {
		mask |= MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
			MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
		set |= MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
		       MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
	}
	wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, val: set);

	if (mtk_wed_is_v1(hw: dev->hw)) {
		/* v1: hard-coded WDMA register offsets, shifted for the
		 * second WED instance */
		u32 offset = dev->hw->index ? 0x04000400 : 0;

		wdma_set(dev, MTK_WDMA_GLO_CFG,
			 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
			 MTK_WDMA_GLO_CFG_RX_INFO2_PRERES |
			 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);

		wed_w32(dev, MTK_WED_WDMA_OFFSET0, val: 0x2a042a20 + offset);
		wed_w32(dev, MTK_WED_WDMA_OFFSET1, val: 0x29002800 + offset);
		wed_w32(dev, MTK_WED_PCIE_CFG_BASE,
			MTK_PCIE_BASE(dev->hw->index));
	} else {
		/* v2+: offsets taken from the actual WDMA register layout */
		wed_w32(dev, MTK_WED_WDMA_CFG_BASE, val: dev->hw->wdma_phy);
		wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_ETH_DMAD_FMT);
		wed_w32(dev, MTK_WED_WDMA_OFFSET0,
			FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_INTS,
				   MTK_WDMA_INT_STATUS) |
			FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_CFG,
				   MTK_WDMA_GLO_CFG));

		wed_w32(dev, MTK_WED_WDMA_OFFSET1,
			FIELD_PREP(MTK_WED_WDMA_OFST1_TX_CTRL,
				   MTK_WDMA_RING_TX(0)) |
			FIELD_PREP(MTK_WED_WDMA_OFST1_RX_CTRL,
				   MTK_WDMA_RING_RX(0)));
	}
}
1300 | |
1301 | static int |
1302 | mtk_wed_rro_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, |
1303 | int size) |
1304 | { |
1305 | ring->desc = dma_alloc_coherent(dev: dev->hw->dev, |
1306 | size: size * sizeof(*ring->desc), |
1307 | dma_handle: &ring->desc_phys, GFP_KERNEL); |
1308 | if (!ring->desc) |
1309 | return -ENOMEM; |
1310 | |
1311 | ring->desc_size = sizeof(*ring->desc); |
1312 | ring->size = size; |
1313 | |
1314 | return 0; |
1315 | } |
1316 | |
1317 | #define MTK_WED_MIOD_COUNT (MTK_WED_MIOD_ENTRY_CNT * MTK_WED_MIOD_CNT) |
1318 | static int |
1319 | mtk_wed_rro_alloc(struct mtk_wed_device *dev) |
1320 | { |
1321 | struct reserved_mem *rmem; |
1322 | struct device_node *np; |
1323 | int index; |
1324 | |
1325 | index = of_property_match_string(np: dev->hw->node, propname: "memory-region-names" , |
1326 | string: "wo-dlm" ); |
1327 | if (index < 0) |
1328 | return index; |
1329 | |
1330 | np = of_parse_phandle(np: dev->hw->node, phandle_name: "memory-region" , index); |
1331 | if (!np) |
1332 | return -ENODEV; |
1333 | |
1334 | rmem = of_reserved_mem_lookup(np); |
1335 | of_node_put(node: np); |
1336 | |
1337 | if (!rmem) |
1338 | return -ENODEV; |
1339 | |
1340 | dev->rro.miod_phys = rmem->base; |
1341 | dev->rro.fdbk_phys = MTK_WED_MIOD_COUNT + dev->rro.miod_phys; |
1342 | |
1343 | return mtk_wed_rro_ring_alloc(dev, ring: &dev->rro.ring, |
1344 | MTK_WED_RRO_QUE_CNT); |
1345 | } |
1346 | |
/* Send the RRO memory layout to the WO firmware. ring[0] describes the
 * MIOD block, ring[1] the feedback command ring that follows it
 * MTK_WED_MIOD_COUNT bytes later in the WO CPU's address view; the
 * trailing "wed"/"version" fields stay zero-initialized. The struct
 * layout is the wire format — do not reorder fields.
 */
static int
mtk_wed_rro_cfg(struct mtk_wed_device *dev)
{
	struct mtk_wed_wo *wo = dev->hw->wed_wo;
	struct {
		struct {
			__le32 base;
			__le32 cnt;
			__le32 unit;
		} ring[2];
		__le32 wed;
		u8 version;
	} req = {
		.ring[0] = {
			.base = cpu_to_le32(MTK_WED_WOCPU_VIEW_MIOD_BASE),
			.cnt = cpu_to_le32(MTK_WED_MIOD_CNT),
			.unit = cpu_to_le32(MTK_WED_MIOD_ENTRY_CNT),
		},
		.ring[1] = {
			.base = cpu_to_le32(MTK_WED_WOCPU_VIEW_MIOD_BASE +
					    MTK_WED_MIOD_COUNT),
			.cnt = cpu_to_le32(MTK_WED_FB_CMD_CNT),
			.unit = cpu_to_le32(4),
		},
	};

	return mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
				    cmd: MTK_WED_WO_CMD_WED_CFG,
				    data: &req, len: sizeof(req), wait_resp: true);
}
1377 | |
/* Program the RRO queue manager: MIOD entry geometry, MIOD/feedback
 * base addresses and counts, the queue base, then pulse the index
 * reset and enable the RRO QM block.
 */
static void
mtk_wed_rro_hw_init(struct mtk_wed_device *dev)
{
	/* field offsets expressed in dwords (byte offsets 0x70/0x10 and
	 * the entry size) — exact field meaning per hardware docs not
	 * visible here */
	wed_w32(dev, MTK_WED_RROQM_MIOD_CFG,
		FIELD_PREP(MTK_WED_RROQM_MIOD_MID_DW, 0x70 >> 2) |
		FIELD_PREP(MTK_WED_RROQM_MIOD_MOD_DW, 0x10 >> 2) |
		FIELD_PREP(MTK_WED_RROQM_MIOD_ENTRY_DW,
			   MTK_WED_MIOD_ENTRY_CNT >> 2));

	wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL0, val: dev->rro.miod_phys);
	wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL1,
		FIELD_PREP(MTK_WED_RROQM_MIOD_CNT, MTK_WED_MIOD_CNT));
	wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL0, val: dev->rro.fdbk_phys);
	wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL1,
		FIELD_PREP(MTK_WED_RROQM_FDBK_CNT, MTK_WED_FB_CMD_CNT));
	wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL2, val: 0);
	wed_w32(dev, MTK_WED_RROQ_BASE_L, val: dev->rro.ring.desc_phys);

	/* pulse the MIOD/feedback index reset */
	wed_set(dev, MTK_WED_RROQM_RST_IDX,
		MTK_WED_RROQM_RST_IDX_MIOD |
		MTK_WED_RROQM_RST_IDX_FDBK);

	wed_w32(dev, MTK_WED_RROQM_RST_IDX, val: 0);
	wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL2, MTK_WED_MIOD_CNT - 1);
	wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN);
}
1404 | |
/* Reset, configure and enable the RX route queue manager. The TXDMAD
 * forward port is set to 0x3 + hw index; the register that carries it
 * differs between v2 and later versions.
 */
static void
mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev)
{
	wed_w32(dev, MTK_WED_RESET, MTK_WED_RESET_RX_ROUTE_QM);

	/* wait for the self-clearing reset bit.
	 * NOTE(review): this poll has no timeout — a stuck reset bit
	 * would hang here indefinitely.
	 */
	for (;;) {
		usleep_range(min: 100, max: 200);
		if (!(wed_r32(dev, MTK_WED_RESET) & MTK_WED_RESET_RX_ROUTE_QM))
			break;
	}

	/* configure RX_ROUTE_QM */
	if (mtk_wed_is_v2(hw: dev->hw)) {
		wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
		wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT);
		wed_set(dev, MTK_WED_RTQM_GLO_CFG,
			FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT,
				   0x3 + dev->hw->index));
		wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
	} else {
		wed_set(dev, MTK_WED_RTQM_ENQ_CFG0,
			FIELD_PREP(MTK_WED_RTQM_ENQ_CFG_TXDMAD_FPORT,
				   0x3 + dev->hw->index));
	}
	/* enable RX_ROUTE_QM */
	wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
}
1432 | |
/* One-time buffer-manager initialization, guarded by dev->init_done.
 * Programs the TX buffer manager (and on v2+ the separate token-id
 * manager) according to hardware version, sets the token range given
 * to the WLAN driver, and on RX-capable non-v1 hardware brings up the
 * RX buffer manager, RRO QM and route QM. The managers are configured
 * in PAUSE state and un-paused only at the very end.
 */
static void
mtk_wed_hw_init(struct mtk_wed_device *dev)
{
	if (dev->init_done)
		return;

	dev->init_done = true;
	mtk_wed_set_ext_int(dev, en: false);

	wed_w32(dev, MTK_WED_TX_BM_BASE, val: dev->tx_buf_ring.desc_phys);
	wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);

	if (mtk_wed_is_v1(hw: dev->hw)) {
		/* v1: single buffer manager with group-count thresholds */
		wed_w32(dev, MTK_WED_TX_BM_CTRL,
			MTK_WED_TX_BM_CTRL_PAUSE |
			FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
				   dev->tx_buf_ring.size / 128) |
			FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
				   MTK_WED_TX_RING_SIZE / 256));
		wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
			FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
			MTK_WED_TX_BM_DYN_THR_HI);
	} else if (mtk_wed_is_v2(hw: dev->hw)) {
		/* v2: buffer manager plus a separate token-id manager,
		 * each with its own dynamic thresholds */
		wed_w32(dev, MTK_WED_TX_BM_CTRL,
			MTK_WED_TX_BM_CTRL_PAUSE |
			FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
				   dev->tx_buf_ring.size / 128) |
			FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
				   MTK_WED_TX_RING_SIZE / 256));
		wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
			FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
			MTK_WED_TX_TKID_DYN_THR_HI);
		wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
			FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO_V2, 0) |
			MTK_WED_TX_BM_DYN_THR_HI_V2);
		wed_w32(dev, MTK_WED_TX_TKID_CTRL,
			MTK_WED_TX_TKID_CTRL_PAUSE |
			FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM,
				   dev->tx_buf_ring.size / 128) |
			FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM,
				   dev->tx_buf_ring.size / 128));
	}

	/* token id range handed to the WLAN driver */
	wed_w32(dev, reg: dev->hw->soc->regmap.tx_bm_tkid,
		FIELD_PREP(MTK_WED_TX_BM_TKID_START, dev->wlan.token_start) |
		FIELD_PREP(MTK_WED_TX_BM_TKID_END,
			   dev->wlan.token_start + dev->wlan.nbuf - 1));

	mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);

	if (mtk_wed_is_v3_or_greater(hw: dev->hw)) {
		/* switch to new bm architecture */
		wed_clr(dev, MTK_WED_TX_BM_CTRL,
			MTK_WED_TX_BM_CTRL_LEGACY_EN);

		wed_w32(dev, MTK_WED_TX_TKID_CTRL,
			MTK_WED_TX_TKID_CTRL_PAUSE |
			FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM_V3,
				   dev->wlan.nbuf / 128) |
			FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM_V3,
				   dev->wlan.nbuf / 128));
		/* return SKBID + SDP back to bm */
		wed_set(dev, MTK_WED_TX_TKID_CTRL,
			MTK_WED_TX_TKID_CTRL_FREE_FORMAT);

		wed_w32(dev, MTK_WED_TX_BM_INIT_PTR,
			MTK_WED_TX_BM_PKT_CNT |
			MTK_WED_TX_BM_INIT_SW_TAIL_IDX);
	}

	if (mtk_wed_is_v1(hw: dev->hw)) {
		wed_set(dev, MTK_WED_CTRL,
			MTK_WED_CTRL_WED_TX_BM_EN |
			MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
	} else if (mtk_wed_get_rx_capa(dev)) {
		/* rx hw init */
		wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
			MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
			MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
		wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, val: 0);

		/* reset prefetch index of ring */
		wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX,
			MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
		wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX,
			MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);

		wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX,
			MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
		wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX,
			MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);

		/* reset prefetch FIFO of ring */
		wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG,
			MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R0_CLR |
			MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R1_CLR);
		wed_w32(dev, MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG, val: 0);

		mtk_wed_rx_buffer_hw_init(dev);
		mtk_wed_rro_hw_init(dev);
		mtk_wed_route_qm_hw_init(dev);
	}

	/* configuration done: un-pause the buffer/token managers */
	wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
	if (!mtk_wed_is_v1(hw: dev->hw))
		wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE);
}
1540 | |
1541 | static void |
1542 | mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size, bool tx) |
1543 | { |
1544 | void *head = (void *)ring->desc; |
1545 | int i; |
1546 | |
1547 | for (i = 0; i < size; i++) { |
1548 | struct mtk_wdma_desc *desc; |
1549 | |
1550 | desc = (struct mtk_wdma_desc *)(head + i * ring->desc_size); |
1551 | desc->buf0 = 0; |
1552 | if (tx) |
1553 | desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE); |
1554 | else |
1555 | desc->ctrl = cpu_to_le32(MTK_WFDMA_DESC_CTRL_TO_HOST); |
1556 | desc->buf1 = 0; |
1557 | desc->info = 0; |
1558 | } |
1559 | } |
1560 | |
/* Full reset of the WED RX offload path. Puts the WO MCU into SER
 * reset state, tears down (in order) the hardware-RRO engines, WPDMA
 * RX driver, RRO QM, route QM, TX WDMA, WED RX DMA and RX buffer
 * managers — each with a graceful index-reset path and a hard-reset
 * fallback when the block stays busy — then returns the WO MCU to the
 * enabled state and re-initializes the software RX rings/buffers.
 *
 * Returns 0 on success or a negative errno if either WO state-change
 * message fails.
 */
static int
mtk_wed_rx_reset(struct mtk_wed_device *dev)
{
	struct mtk_wed_wo *wo = dev->hw->wed_wo;
	u8 val = MTK_WED_WO_STATE_SER_RESET;
	int i, ret;

	/* put the WO MCU into SER reset state before touching hardware */
	ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
				   cmd: MTK_WED_WO_CMD_CHANGE_STATE, data: &val,
				   len: sizeof(val), wait_resp: true);
	if (ret)
		return ret;

	if (dev->wlan.hw_rro) {
		/* stop the RX indication-command engine and reset the
		 * RRO RX-to-page block */
		wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_IND_CMD_EN);
		mtk_wed_poll_busy(dev, MTK_WED_RRO_RX_HW_STS,
				  MTK_WED_RX_IND_CMD_BUSY);
		mtk_wed_reset(dev, MTK_WED_RESET_RRO_RX_TO_PG);
	}

	/* disable the WPDMA RX data driver; hard-reset it if it stays
	 * busy, otherwise reset its indexes gracefully */
	wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RX_DRV_EN);
	ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
				MTK_WED_WPDMA_RX_D_RX_DRV_BUSY);
	if (!ret && mtk_wed_is_v3_or_greater(hw: dev->hw))
		ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
					MTK_WED_WPDMA_RX_D_PREF_BUSY);
	if (ret) {
		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_D_DRV);
	} else {
		if (mtk_wed_is_v3_or_greater(hw: dev->hw)) {
			/* 1.a. disable prefetch HW */
			wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
				MTK_WED_WPDMA_RX_D_PREF_EN);
			mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
					  MTK_WED_WPDMA_RX_D_PREF_BUSY);
			wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
				MTK_WED_WPDMA_RX_D_RST_DRV_IDX_ALL);
		}

		wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
			MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
			MTK_WED_WPDMA_RX_D_RST_DRV_IDX);

		/* pulse init-complete / FSM-return-idle */
		wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
			MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE |
			MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE);
		wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
			MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE |
			MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE);

		wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, val: 0);
	}

	/* reset rro qm */
	wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN);
	ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
				MTK_WED_CTRL_RX_RRO_QM_BUSY);
	if (ret) {
		mtk_wed_reset(dev, MTK_WED_RESET_RX_RRO_QM);
	} else {
		wed_set(dev, MTK_WED_RROQM_RST_IDX,
			MTK_WED_RROQM_RST_IDX_MIOD |
			MTK_WED_RROQM_RST_IDX_FDBK);
		wed_w32(dev, MTK_WED_RROQM_RST_IDX, val: 0);
	}

	if (dev->wlan.hw_rro) {
		/* disable rro msdu page drv */
		wed_clr(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
			MTK_WED_RRO_MSDU_PG_DRV_EN);

		/* disable rro data drv */
		wed_clr(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_RX_D_DRV_EN);

		/* rro msdu page drv reset */
		wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
			MTK_WED_RRO_MSDU_PG_DRV_CLR);
		mtk_wed_poll_busy(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
				  MTK_WED_RRO_MSDU_PG_DRV_CLR);

		/* rro data drv reset */
		wed_w32(dev, MTK_WED_RRO_RX_D_CFG(2),
			MTK_WED_RRO_RX_D_DRV_CLR);
		mtk_wed_poll_busy(dev, MTK_WED_RRO_RX_D_CFG(2),
				  MTK_WED_RRO_RX_D_DRV_CLR);
	}

	/* reset route qm */
	wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
	ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
				MTK_WED_CTRL_RX_ROUTE_QM_BUSY);
	if (ret) {
		mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM);
	} else if (mtk_wed_is_v3_or_greater(hw: dev->hw)) {
		/* v3+: pulse the dedicated RTQM reset, then hard reset */
		wed_set(dev, MTK_WED_RTQM_RST, BIT(0));
		wed_clr(dev, MTK_WED_RTQM_RST, BIT(0));
		mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM);
	} else {
		wed_set(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
	}

	/* reset tx wdma */
	mtk_wdma_tx_reset(dev);

	/* reset tx wdma drv */
	wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_TX_DRV_EN);
	if (mtk_wed_is_v3_or_greater(hw: dev->hw))
		mtk_wed_poll_busy(dev, MTK_WED_WPDMA_STATUS,
				  MTK_WED_WPDMA_STATUS_TX_DRV);
	else
		mtk_wed_poll_busy(dev, MTK_WED_CTRL,
				  MTK_WED_CTRL_WDMA_INT_AGENT_BUSY);
	mtk_wed_reset(dev, MTK_WED_RESET_WDMA_TX_DRV);

	/* reset wed rx dma */
	ret = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG,
				MTK_WED_GLO_CFG_RX_DMA_BUSY);
	wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_RX_DMA_EN);
	if (ret) {
		mtk_wed_reset(dev, MTK_WED_RESET_WED_RX_DMA);
	} else {
		wed_set(dev, MTK_WED_RESET_IDX,
			mask: dev->hw->soc->regmap.reset_idx_rx_mask);
		wed_w32(dev, MTK_WED_RESET_IDX, val: 0);
	}

	/* reset rx bm */
	wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
	mtk_wed_poll_busy(dev, MTK_WED_CTRL,
			  MTK_WED_CTRL_WED_RX_BM_BUSY);
	mtk_wed_reset(dev, MTK_WED_RESET_RX_BM);

	if (dev->wlan.hw_rro) {
		/* reset the rx page buffer manager as well */
		wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_PG_BM_EN);
		mtk_wed_poll_busy(dev, MTK_WED_CTRL,
				  MTK_WED_CTRL_WED_RX_PG_BM_BUSY);
		wed_set(dev, MTK_WED_RESET, MTK_WED_RESET_RX_PG_BM);
		wed_clr(dev, MTK_WED_RESET, MTK_WED_RESET_RX_PG_BM);
	}

	/* wo change to enable state */
	val = MTK_WED_WO_STATE_ENABLE;
	ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
				   cmd: MTK_WED_WO_CMD_CHANGE_STATE, data: &val,
				   len: sizeof(val), wait_resp: true);
	if (ret)
		return ret;

	/* wed_rx_ring_reset */
	for (i = 0; i < ARRAY_SIZE(dev->rx_ring); i++) {
		if (!dev->rx_ring[i].desc)
			continue;

		mtk_wed_ring_reset(ring: &dev->rx_ring[i], MTK_WED_RX_RING_SIZE,
				   tx: false);
	}
	mtk_wed_free_rx_buffer(dev);
	mtk_wed_hwrro_free_buffer(dev);

	return 0;
}
1723 | |
/* Reset the whole WED DMA datapath: TX rings and WED TX DMA, the
 * ethernet-side WDMA RX path, the TX free agent / buffer manager, and
 * the WPDMA drivers. Each stage prefers a graceful index reset and
 * falls back to a hard block reset when the engine reports busy.
 * Finishes by resetting the AMSDU engine (v3+) and delegating to
 * mtk_wed_rx_reset() on RX-capable hardware.
 */
static void
mtk_wed_reset_dma(struct mtk_wed_device *dev)
{
	bool busy = false;
	u32 val;
	int i;

	/* re-initialize all allocated TX descriptor rings */
	for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) {
		if (!dev->tx_ring[i].desc)
			continue;

		mtk_wed_ring_reset(ring: &dev->tx_ring[i], MTK_WED_TX_RING_SIZE,
				   tx: true);
	}

	/* 1. reset WED tx DMA */
	wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_TX_DMA_EN);
	busy = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG,
				 MTK_WED_GLO_CFG_TX_DMA_BUSY);
	if (busy) {
		mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA);
	} else {
		wed_w32(dev, MTK_WED_RESET_IDX,
			val: dev->hw->soc->regmap.reset_idx_tx_mask);
		wed_w32(dev, MTK_WED_RESET_IDX, val: 0);
	}

	/* 2. reset WDMA rx DMA */
	busy = !!mtk_wdma_rx_reset(dev);
	if (mtk_wed_is_v3_or_greater(hw: dev->hw)) {
		/* v3+: clear DRV_EN while forcing FSM auto-idle off in the
		 * same write */
		val = MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE |
		      wed_r32(dev, MTK_WED_WDMA_GLO_CFG);
		val &= ~MTK_WED_WDMA_GLO_CFG_RX_DRV_EN;
		wed_w32(dev, MTK_WED_WDMA_GLO_CFG, val);
	} else {
		wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
			MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
	}

	if (!busy)
		busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_GLO_CFG,
					 MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY);
	if (!busy && mtk_wed_is_v3_or_greater(hw: dev->hw))
		busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_RX_PREF_CFG,
					 MTK_WED_WDMA_RX_PREF_BUSY);

	if (busy) {
		mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
		mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV);
	} else {
		if (mtk_wed_is_v3_or_greater(hw: dev->hw)) {
			/* 1.a. disable prefetch HW */
			wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
				MTK_WED_WDMA_RX_PREF_EN);
			mtk_wed_poll_busy(dev, MTK_WED_WDMA_RX_PREF_CFG,
					  MTK_WED_WDMA_RX_PREF_BUSY);
			wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
				MTK_WED_WDMA_RX_PREF_DDONE2_EN);

			/* 2. Reset dma index */
			wed_w32(dev, MTK_WED_WDMA_RESET_IDX,
				MTK_WED_WDMA_RESET_IDX_RX_ALL);
		}

		wed_w32(dev, MTK_WED_WDMA_RESET_IDX,
			MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV);
		wed_w32(dev, MTK_WED_WDMA_RESET_IDX, val: 0);

		/* pulse init-complete to bring the FSM back up */
		wed_set(dev, MTK_WED_WDMA_GLO_CFG,
			MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);

		wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
			MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
	}

	/* 3. reset WED WPDMA tx */
	wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);

	/* wait (bounded, 100 polls) for the token FIFO depth to drain to
	 * 0x40; no error is raised on timeout, we just proceed */
	for (i = 0; i < 100; i++) {
		if (mtk_wed_is_v1(hw: dev->hw))
			val = FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP,
					wed_r32(dev, MTK_WED_TX_BM_INTF));
		else
			val = FIELD_GET(MTK_WED_TX_TKID_INTF_TKFIFO_FDEP,
					wed_r32(dev, MTK_WED_TX_TKID_INTF));
		if (val == 0x40)
			break;
	}

	mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT);
	wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_BM_EN);
	mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);

	/* 4. reset WED WPDMA tx */
	busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG,
				 MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY);
	wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
		MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
		MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
	if (!busy)
		busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG,
					 MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY);

	if (busy) {
		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV);
		if (mtk_wed_is_v3_or_greater(hw: dev->hw))
			wed_w32(dev, MTK_WED_RX1_CTRL2, val: 0);
	} else {
		wed_w32(dev, MTK_WED_WPDMA_RESET_IDX,
			MTK_WED_WPDMA_RESET_IDX_TX |
			MTK_WED_WPDMA_RESET_IDX_RX);
		wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, val: 0);
	}

	/* force mtk_wed_hw_init() to run again on the next start */
	dev->init_done = false;
	if (mtk_wed_is_v1(hw: dev->hw))
		return;

	if (!busy) {
		wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_WPDMA_IDX_RX);
		wed_w32(dev, MTK_WED_RESET_IDX, val: 0);
	}

	if (mtk_wed_is_v3_or_greater(hw: dev->hw)) {
		/* reset amsdu engine */
		wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN);
		mtk_wed_reset(dev, MTK_WED_RESET_TX_AMSDU);
	}

	if (mtk_wed_get_rx_capa(dev))
		mtk_wed_rx_reset(dev);
}
1858 | |
1859 | static int |
1860 | mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, |
1861 | int size, u32 desc_size, bool tx) |
1862 | { |
1863 | ring->desc = dma_alloc_coherent(dev: dev->hw->dev, size: size * desc_size, |
1864 | dma_handle: &ring->desc_phys, GFP_KERNEL); |
1865 | if (!ring->desc) |
1866 | return -ENOMEM; |
1867 | |
1868 | ring->desc_size = desc_size; |
1869 | ring->size = size; |
1870 | mtk_wed_ring_reset(ring, size, tx); |
1871 | |
1872 | return 0; |
1873 | } |
1874 | |
/* Allocate (unless @reset, when the existing descriptors are reused)
 * and program WDMA RX ring @idx on the ethernet WDMA side, then mirror
 * its base/count into WED's view of the ring.
 *
 * Returns 0 on success, -EINVAL for an out-of-range index or -ENOMEM
 * on allocation failure.
 */
static int
mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size,
			   bool reset)
{
	struct mtk_wed_ring *wdma;

	if (idx >= ARRAY_SIZE(dev->rx_wdma))
		return -EINVAL;

	wdma = &dev->rx_wdma[idx];
	if (!reset && mtk_wed_ring_alloc(dev, ring: wdma, MTK_WED_WDMA_RING_SIZE,
					 desc_size: dev->hw->soc->wdma_desc_size, tx: true))
		return -ENOMEM;

	wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
		 val: wdma->desc_phys);
	wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
		 val: size);
	wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_CPU_IDX, val: 0);

	/* mirror ring base/count into WED's shadow registers */
	wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
		val: wdma->desc_phys);
	wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
		val: size);

	return 0;
}
1902 | |
/* Set up WDMA TX ring @idx: allocate its descriptor ring (unless this is
 * a reset pass), pre-initialize v3 descriptors, and program the ring
 * base/count/indices on both the WDMA side and the WED mirror registers.
 *
 * Returns 0 on success, -EINVAL for an out-of-range index, -ENOMEM on
 * allocation failure.
 */
static int
mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size,
			   bool reset)
{
	struct mtk_wed_ring *wdma;

	if (idx >= ARRAY_SIZE(dev->tx_wdma))
		return -EINVAL;

	wdma = &dev->tx_wdma[idx];
	/* fresh setup needs a descriptor ring; on reset the existing one is
	 * reused and re-initialized below
	 */
	if (!reset && mtk_wed_ring_alloc(dev, ring: wdma, MTK_WED_WDMA_RING_SIZE,
					 desc_size: dev->hw->soc->wdma_desc_size, tx: true))
		return -ENOMEM;

	if (mtk_wed_is_v3_or_greater(hw: dev->hw)) {
		struct mtk_wdma_desc *desc = wdma->desc;
		int i;

		/* each pass initializes two consecutive descriptors (TXD0
		 * then TXD1) with their DMA_DONE bits set, i.e. the ring is
		 * marked fully completed; assumes the v3 wdma_desc_size
		 * passed to mtk_wed_ring_alloc() covers two mtk_wdma_desc
		 * entries per slot -- TODO confirm against the SoC data
		 */
		for (i = 0; i < MTK_WED_WDMA_RING_SIZE; i++) {
			desc->buf0 = 0;
			desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
			desc->buf1 = 0;
			desc->info = cpu_to_le32(MTK_WDMA_TXD0_DESC_INFO_DMA_DONE);
			desc++;
			desc->buf0 = 0;
			desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
			desc->buf1 = 0;
			desc->info = cpu_to_le32(MTK_WDMA_TXD1_DESC_INFO_DMA_DONE);
			desc++;
		}
	}

	/* program the WDMA side: base, size and both ring indices */
	wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
		 val: wdma->desc_phys);
	wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
		 val: size);
	wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, val: 0);
	wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_DMA_IDX, val: 0);

	if (reset)
		mtk_wed_ring_reset(ring: wdma, MTK_WED_WDMA_RING_SIZE, tx: true);

	/* only ring 0 has a WED-side mirror of the TX ring registers */
	if (!idx) {
		wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_BASE,
			val: wdma->desc_phys);
		wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_COUNT,
			val: size);
		wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_CPU_IDX,
			val: 0);
		wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_DMA_IDX,
			val: 0);
	}

	return 0;
}
1958 | |
1959 | static void |
1960 | mtk_wed_ppe_check(struct mtk_wed_device *dev, struct sk_buff *skb, |
1961 | u32 reason, u32 hash) |
1962 | { |
1963 | struct mtk_eth *eth = dev->hw->eth; |
1964 | struct ethhdr *eh; |
1965 | |
1966 | if (!skb) |
1967 | return; |
1968 | |
1969 | if (reason != MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED) |
1970 | return; |
1971 | |
1972 | skb_set_mac_header(skb, offset: 0); |
1973 | eh = eth_hdr(skb); |
1974 | skb->protocol = eh->h_proto; |
1975 | mtk_ppe_check_skb(ppe: eth->ppe[dev->hw->index], skb, hash); |
1976 | } |
1977 | |
/* Configure WED/WDMA/WPDMA interrupt routing and apply @irq_mask.
 *
 * The WDMA trigger mask always contains the two RX-done bits; the two
 * TX-done bits are added only when the device has RX (wifi -> ethernet)
 * capability. Trigger-bit positions for the v2+ path come from the WLAN
 * driver via dev->wlan.*_tbit.
 */
static void
mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
{
	u32 wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));

	/* wed control cr set */
	wed_set(dev, MTK_WED_CTRL,
		MTK_WED_CTRL_WDMA_INT_AGENT_EN |
		MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
		MTK_WED_CTRL_WED_TX_BM_EN |
		MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);

	if (mtk_wed_is_v1(hw: dev->hw)) {
		/* v1: fixed PCIe/WPDMA trigger sources, no per-bit routing */
		wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER,
			MTK_WED_PCIE_INT_TRIGGER_STATUS);

		wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER,
			MTK_WED_WPDMA_INT_TRIGGER_RX_DONE |
			MTK_WED_WPDMA_INT_TRIGGER_TX_DONE);

		wed_clr(dev, MTK_WED_WDMA_INT_CTRL, mask: wdma_mask);
	} else {
		if (mtk_wed_is_v3_or_greater(hw: dev->hw))
			wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_TKID_ALI_EN);

		/* initial tx interrupt trigger */
		wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
			MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
			MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR |
			MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN |
			MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR |
			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG,
				   dev->wlan.tx_tbit[0]) |
			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG,
				   dev->wlan.tx_tbit[1]));

		/* initial txfree interrupt trigger */
		wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX_FREE,
			MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN |
			MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR |
			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG,
				   dev->wlan.txfree_tbit));

		if (mtk_wed_get_rx_capa(dev)) {
			/* rx-done triggers, and extend the WDMA mask with the
			 * TX-done bits needed for the wifi -> eth direction
			 */
			wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
				MTK_WED_WPDMA_INT_CTRL_RX0_EN |
				MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
				MTK_WED_WPDMA_INT_CTRL_RX1_EN |
				MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
				FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
					   dev->wlan.rx_tbit[0]) |
				FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
					   dev->wlan.rx_tbit[1]));

			wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE,
						GENMASK(1, 0));
		}

		/* clear any pending WDMA interrupts before selecting the
		 * polling source
		 */
		wed_w32(dev, MTK_WED_WDMA_INT_CLR, val: wdma_mask);
		wed_set(dev, MTK_WED_WDMA_INT_CTRL,
			FIELD_PREP(MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL,
				   dev->wdma_idx));
	}

	wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, val: wdma_mask);

	/* apply masks on WDMA, WPDMA and WED level */
	wdma_w32(dev, MTK_WDMA_INT_MASK, val: wdma_mask);
	wdma_w32(dev, MTK_WDMA_INT_GRP2, val: wdma_mask);
	wed_w32(dev, MTK_WED_WPDMA_INT_MASK, val: irq_mask);
	wed_w32(dev, MTK_WED_INT_MASK, val: irq_mask);
}
2049 | |
#define MTK_WFMDA_RX_DMA_EN	BIT(2)
/* Enable the WED/WDMA/WPDMA DMA engines.
 *
 * Applies version-specific glue first (v1/v2 vs v3+), then enables the
 * core TX/RX paths; for RX-capable devices it also brings up the WPDMA
 * RX driver and finally flips the RX DMA enable bit in each configured
 * wifi ring through the wifi register window.
 */
static void
mtk_wed_dma_enable(struct mtk_wed_device *dev)
{
	int i;

	if (!mtk_wed_is_v3_or_greater(hw: dev->hw)) {
		/* pre-v3 path: subrt advance, fixed SDL1 and RX preres bits */
		wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
			MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
		wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
			MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
			MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
		wdma_set(dev, MTK_WDMA_GLO_CFG,
			 MTK_WDMA_GLO_CFG_TX_DMA_EN |
			 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
			 MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
		wed_set(dev, MTK_WED_WPDMA_CTRL, MTK_WED_WPDMA_CTRL_SDL1_FIXED);
	} else {
		wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
			MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
			MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN |
			MTK_WED_WPDMA_GLO_CFG_RX_DDONE2_WR);
		wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
	}

	/* core WED TX/RX DMA enable */
	wed_set(dev, MTK_WED_GLO_CFG,
		MTK_WED_GLO_CFG_TX_DMA_EN |
		MTK_WED_GLO_CFG_RX_DMA_EN);

	wed_set(dev, MTK_WED_WDMA_GLO_CFG,
		MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);

	/* v1 is done after the RX_INFO3 preres bit */
	if (mtk_wed_is_v1(hw: dev->hw)) {
		wdma_set(dev, MTK_WDMA_GLO_CFG,
			 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
		return;
	}

	wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
		MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
		MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);

	if (mtk_wed_is_v3_or_greater(hw: dev->hw)) {
		/* v3: RX prefetch engine and done-check tuning */
		wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG,
			FIELD_PREP(MTK_WED_WDMA_RX_PREF_BURST_SIZE, 0x10) |
			FIELD_PREP(MTK_WED_WDMA_RX_PREF_LOW_THRES, 0x8));
		wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
			MTK_WED_WDMA_RX_PREF_DDONE2_EN);
		wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG, MTK_WED_WDMA_RX_PREF_EN);

		wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
			MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK_LAST);
		wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
			MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK |
			MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_CHK |
			MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNS_VER_FORCE_4);

		wdma_set(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN);
		wdma_set(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN);
	}

	wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
		MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
		MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);

	/* everything below applies to the wifi -> eth (RX) direction only */
	if (!mtk_wed_get_rx_capa(dev))
		return;

	wed_set(dev, MTK_WED_WDMA_GLO_CFG,
		MTK_WED_WDMA_GLO_CFG_TX_DRV_EN |
		MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);

	wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RXD_READ_LEN);
	wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
		MTK_WED_WPDMA_RX_D_RX_DRV_EN |
		FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) |
		FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL, 0x2));

	if (mtk_wed_is_v3_or_greater(hw: dev->hw)) {
		wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
			MTK_WED_WPDMA_RX_D_PREF_EN |
			FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_BURST_SIZE, 0x10) |
			FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_LOW_THRES, 0x8));

		wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_RX_D_DRV_EN);
		wdma_set(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN);
		wdma_set(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN);
	}

	/* enable RX DMA on each wifi ring mt76 has configured, accessed
	 * through the wifi register window (offset relative to phy_base)
	 */
	for (i = 0; i < MTK_WED_RX_QUEUES; i++) {
		struct mtk_wed_ring *ring = &dev->rx_ring[i];
		u32 val;

		if (!(ring->flags & MTK_WED_RING_CONFIGURED))
			continue; /* queue is not configured by mt76 */

		if (mtk_wed_check_wfdma_rx_fill(dev, ring)) {
			dev_err(dev->hw->dev,
				"rx_ring(%d) dma enable failed\n" , i);
			continue;
		}

		val = wifi_r32(dev,
			       reg: dev->wlan.wpdma_rx_glo -
			       dev->wlan.phy_base) | MTK_WFMDA_RX_DMA_EN;
		wifi_w32(dev,
			 reg: dev->wlan.wpdma_rx_glo - dev->wlan.phy_base,
			 val);
	}
}
2160 | |
/* Start the hardware RRO (receive reordering offload) engines and apply
 * @irq_mask.
 *
 * No-op beyond the mask writes when the device lacks RX capability or
 * hw_rro support. On @reset only the MSDU page driver is re-enabled; a
 * full start clears the drivers, programs the RRO interrupt triggers and
 * verifies that each configured rro/page ring has been filled.
 */
static void
mtk_wed_start_hw_rro(struct mtk_wed_device *dev, u32 irq_mask, bool reset)
{
	int i;

	wed_w32(dev, MTK_WED_WPDMA_INT_MASK, val: irq_mask);
	wed_w32(dev, MTK_WED_INT_MASK, val: irq_mask);

	if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hw_rro)
		return;

	if (reset) {
		/* on reset the rings are already programmed; just re-enable
		 * the MSDU page driver
		 */
		wed_set(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
			MTK_WED_RRO_MSDU_PG_DRV_EN);
		return;
	}

	/* clear both RRO drivers before (re)programming triggers */
	wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_MSDU_PG_DRV_CLR);
	wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
		MTK_WED_RRO_MSDU_PG_DRV_CLR);

	/* RRO RX done triggers; bit positions supplied by the WLAN driver */
	wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_RX,
		MTK_WED_WPDMA_INT_CTRL_RRO_RX0_EN |
		MTK_WED_WPDMA_INT_CTRL_RRO_RX0_CLR |
		MTK_WED_WPDMA_INT_CTRL_RRO_RX1_EN |
		MTK_WED_WPDMA_INT_CTRL_RRO_RX1_CLR |
		FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX0_DONE_TRIG,
			   dev->wlan.rro_rx_tbit[0]) |
		FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX1_DONE_TRIG,
			   dev->wlan.rro_rx_tbit[1]));

	/* MSDU page done triggers for the three page queues */
	wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_MSDU_PG,
		MTK_WED_WPDMA_INT_CTRL_RRO_PG0_EN |
		MTK_WED_WPDMA_INT_CTRL_RRO_PG0_CLR |
		MTK_WED_WPDMA_INT_CTRL_RRO_PG1_EN |
		MTK_WED_WPDMA_INT_CTRL_RRO_PG1_CLR |
		MTK_WED_WPDMA_INT_CTRL_RRO_PG2_EN |
		MTK_WED_WPDMA_INT_CTRL_RRO_PG2_CLR |
		FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG0_DONE_TRIG,
			   dev->wlan.rx_pg_tbit[0]) |
		FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG1_DONE_TRIG,
			   dev->wlan.rx_pg_tbit[1]) |
		FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG2_DONE_TRIG,
			   dev->wlan.rx_pg_tbit[2]));

	/* RRO_MSDU_PG_RING2_CFG1_FLD_DRV_EN should be enabled after
	 * WM FWDL completed, otherwise the RRO_MSDU_PG ring may be broken
	 */
	wed_set(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
		MTK_WED_RRO_MSDU_PG_DRV_EN);

	for (i = 0; i < MTK_WED_RX_QUEUES; i++) {
		struct mtk_wed_ring *ring = &dev->rx_rro_ring[i];

		if (!(ring->flags & MTK_WED_RING_CONFIGURED))
			continue;

		if (mtk_wed_check_wfdma_rx_fill(dev, ring))
			dev_err(dev->hw->dev,
				"rx_rro_ring(%d) initialization failed\n" , i);
	}

	for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++) {
		struct mtk_wed_ring *ring = &dev->rx_page_ring[i];

		if (!(ring->flags & MTK_WED_RING_CONFIGURED))
			continue;

		if (mtk_wed_check_wfdma_rx_fill(dev, ring))
			dev_err(dev->hw->dev,
				"rx_page_ring(%d) initialization failed\n" , i);
	}
}
2234 | |
2235 | static void |
2236 | mtk_wed_rro_rx_ring_setup(struct mtk_wed_device *dev, int idx, |
2237 | void __iomem *regs) |
2238 | { |
2239 | struct mtk_wed_ring *ring = &dev->rx_rro_ring[idx]; |
2240 | |
2241 | ring->wpdma = regs; |
2242 | wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_BASE, |
2243 | readl(addr: regs)); |
2244 | wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_COUNT, |
2245 | readl(addr: regs + MTK_WED_RING_OFS_COUNT)); |
2246 | ring->flags |= MTK_WED_RING_CONFIGURED; |
2247 | } |
2248 | |
2249 | static void |
2250 | mtk_wed_msdu_pg_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs) |
2251 | { |
2252 | struct mtk_wed_ring *ring = &dev->rx_page_ring[idx]; |
2253 | |
2254 | ring->wpdma = regs; |
2255 | wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_BASE, |
2256 | readl(addr: regs)); |
2257 | wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_COUNT, |
2258 | readl(addr: regs + MTK_WED_RING_OFS_COUNT)); |
2259 | ring->flags |= MTK_WED_RING_CONFIGURED; |
2260 | } |
2261 | |
2262 | static int |
2263 | mtk_wed_ind_rx_ring_setup(struct mtk_wed_device *dev, void __iomem *regs) |
2264 | { |
2265 | struct mtk_wed_ring *ring = &dev->ind_cmd_ring; |
2266 | u32 val = readl(addr: regs + MTK_WED_RING_OFS_COUNT); |
2267 | int i, count = 0; |
2268 | |
2269 | ring->wpdma = regs; |
2270 | wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_BASE, |
2271 | readl(addr: regs) & 0xfffffff0); |
2272 | |
2273 | wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_COUNT, |
2274 | readl(addr: regs + MTK_WED_RING_OFS_COUNT)); |
2275 | |
2276 | /* ack sn cr */ |
2277 | wed_w32(dev, MTK_WED_RRO_CFG0, val: dev->wlan.phy_base + |
2278 | dev->wlan.ind_cmd.ack_sn_addr); |
2279 | wed_w32(dev, MTK_WED_RRO_CFG1, |
2280 | FIELD_PREP(MTK_WED_RRO_CFG1_MAX_WIN_SZ, |
2281 | dev->wlan.ind_cmd.win_size) | |
2282 | FIELD_PREP(MTK_WED_RRO_CFG1_PARTICL_SE_ID, |
2283 | dev->wlan.ind_cmd.particular_sid)); |
2284 | |
2285 | /* particular session addr element */ |
2286 | wed_w32(dev, MTK_WED_ADDR_ELEM_CFG0, |
2287 | val: dev->wlan.ind_cmd.particular_se_phys); |
2288 | |
2289 | for (i = 0; i < dev->wlan.ind_cmd.se_group_nums; i++) { |
2290 | wed_w32(dev, MTK_WED_RADDR_ELEM_TBL_WDATA, |
2291 | val: dev->wlan.ind_cmd.addr_elem_phys[i] >> 4); |
2292 | wed_w32(dev, MTK_WED_ADDR_ELEM_TBL_CFG, |
2293 | MTK_WED_ADDR_ELEM_TBL_WR | (i & 0x7f)); |
2294 | |
2295 | val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG); |
2296 | while (!(val & MTK_WED_ADDR_ELEM_TBL_WR_RDY) && count++ < 100) |
2297 | val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG); |
2298 | if (count >= 100) |
2299 | dev_err(dev->hw->dev, |
2300 | "write ba session base failed\n" ); |
2301 | } |
2302 | |
2303 | /* pn check init */ |
2304 | for (i = 0; i < dev->wlan.ind_cmd.particular_sid; i++) { |
2305 | wed_w32(dev, MTK_WED_PN_CHECK_WDATA_M, |
2306 | MTK_WED_PN_CHECK_IS_FIRST); |
2307 | |
2308 | wed_w32(dev, MTK_WED_PN_CHECK_CFG, MTK_WED_PN_CHECK_WR | |
2309 | FIELD_PREP(MTK_WED_PN_CHECK_SE_ID, i)); |
2310 | |
2311 | count = 0; |
2312 | val = wed_r32(dev, MTK_WED_PN_CHECK_CFG); |
2313 | while (!(val & MTK_WED_PN_CHECK_WR_RDY) && count++ < 100) |
2314 | val = wed_r32(dev, MTK_WED_PN_CHECK_CFG); |
2315 | if (count >= 100) |
2316 | dev_err(dev->hw->dev, |
2317 | "session(%d) initialization failed\n" , i); |
2318 | } |
2319 | |
2320 | wed_w32(dev, MTK_WED_RX_IND_CMD_CNT0, MTK_WED_RX_IND_CMD_DBG_CNT_EN); |
2321 | wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_IND_CMD_EN); |
2322 | |
2323 | return 0; |
2324 | } |
2325 | |
/* Bring the WED device fully online: allocate RX buffers (if RX-capable),
 * set up any not-yet-allocated WDMA RX rings, initialize the hardware,
 * configure interrupts with @irq_mask and enable the DMA engines.
 * Silently aborts (dev->running stays false) if buffer allocation or RRO
 * configuration fails.
 */
static void
mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
{
	int i;

	if (mtk_wed_get_rx_capa(dev) && mtk_wed_rx_buffer_alloc(dev))
		return;

	/* give every WDMA RX ring a minimal (16-entry) descriptor ring if
	 * it has not been set up yet
	 */
	for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
		if (!dev->rx_wdma[i].desc)
			mtk_wed_wdma_rx_ring_setup(dev, idx: i, size: 16, reset: false);

	mtk_wed_hw_init(dev);
	mtk_wed_configure_irq(dev, irq_mask);

	mtk_wed_set_ext_int(dev, en: true);

	if (mtk_wed_is_v1(hw: dev->hw)) {
		/* v1: route PCIe interrupts through the mirror regmap */
		u32 val = dev->wlan.wpdma_phys | MTK_PCIE_MIRROR_MAP_EN |
			  FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID,
				     dev->hw->index);

		val |= BIT(0) | (BIT(1) * !!dev->hw->index);
		regmap_write(map: dev->hw->mirror, reg: dev->hw->index * 4, val);
	} else if (mtk_wed_get_rx_capa(dev)) {
		/* driver set mid ready and only once */
		wed_w32(dev, MTK_WED_EXT_INT_MASK1,
			MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
		wed_w32(dev, MTK_WED_EXT_INT_MASK2,
			MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);

		/* read back to flush the posted writes */
		wed_r32(dev, MTK_WED_EXT_INT_MASK1);
		wed_r32(dev, MTK_WED_EXT_INT_MASK2);

		if (mtk_wed_is_v3_or_greater(hw: dev->hw)) {
			wed_w32(dev, MTK_WED_EXT_INT_MASK3,
				MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
			wed_r32(dev, MTK_WED_EXT_INT_MASK3);
		}

		if (mtk_wed_rro_cfg(dev))
			return;
	}

	mtk_wed_set_512_support(dev, enable: dev->wlan.wcid_512);
	mtk_wed_amsdu_init(dev);

	mtk_wed_dma_enable(dev);
	dev->running = true;
}
2376 | |
/* Attach a WLAN driver's mtk_wed_device to a free WED hardware instance.
 *
 * Called with the RCU read lock held (released here, hence __releases);
 * takes a module reference that is dropped on failure. Allocates the TX
 * buffer pool, AMSDU buffers and - for RX-capable devices - the RRO
 * state and the WO (wifi offload) MCU. Any failure after assignment is
 * unwound through __mtk_wed_detach(). Returns 0 or a negative errno.
 */
static int
mtk_wed_attach(struct mtk_wed_device *dev)
	__releases(RCU)
{
	struct mtk_wed_hw *hw;
	struct device *device;
	int ret = 0;

	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "mtk_wed_attach without holding the RCU read lock" );

	/* only PCIe domain 0/1 devices are supported */
	if ((dev->wlan.bus_type == MTK_WED_BUS_PCIE &&
	     pci_domain_nr(bus: dev->wlan.pci_dev->bus) > 1) ||
	    !try_module_get(THIS_MODULE))
		ret = -ENODEV;

	rcu_read_unlock();

	if (ret)
		return ret;

	mutex_lock(&hw_lock);

	hw = mtk_wed_assign(dev);
	if (!hw) {
		module_put(THIS_MODULE);
		ret = -ENODEV;
		goto unlock;
	}

	device = dev->wlan.bus_type == MTK_WED_BUS_PCIE
		 ? &dev->wlan.pci_dev->dev
		 : &dev->wlan.platform_dev->dev;
	dev_info(device, "attaching wed device %d version %d\n" ,
		 hw->index, hw->version);

	/* publish the assigned hw instance on the wed device */
	dev->hw = hw;
	dev->dev = hw->dev;
	dev->irq = hw->irq;
	dev->wdma_idx = hw->index;
	dev->version = hw->version;
	dev->hw->pcie_base = mtk_wed_get_pcie_base(dev);

	/* switch the ethernet DMA device over to the WED device when the
	 * eth node is DMA-coherent
	 */
	if (hw->eth->dma_dev == hw->eth->dev &&
	    of_dma_is_coherent(np: hw->eth->dev->of_node))
		mtk_eth_set_dma_device(eth: hw->eth, dma_dev: hw->dev);

	ret = mtk_wed_tx_buffer_alloc(dev);
	if (ret)
		goto out;

	ret = mtk_wed_amsdu_buffer_alloc(dev);
	if (ret)
		goto out;

	if (mtk_wed_get_rx_capa(dev)) {
		ret = mtk_wed_rro_alloc(dev);
		if (ret)
			goto out;
	}

	mtk_wed_hw_init_early(dev);
	if (mtk_wed_is_v1(hw))
		regmap_update_bits(map: hw->hifsys, HIFSYS_DMA_AG_MAP,
				   BIT(hw->index), val: 0);
	else
		dev->rev_id = wed_r32(dev, MTK_WED_REV_ID);

	if (mtk_wed_get_rx_capa(dev))
		ret = mtk_wed_wo_init(hw);
out:
	if (ret) {
		dev_err(dev->hw->dev, "failed to attach wed device\n" );
		__mtk_wed_detach(dev);
	}
unlock:
	mutex_unlock(lock: &hw_lock);

	return ret;
}
2457 | |
/* Set up WED TX ring @idx and its companion WDMA RX ring.
 *
 * Returns 0 on success, -EINVAL for an out-of-range index, -ENOMEM on
 * allocation failure.
 */
static int
mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs,
		      bool reset)
{
	struct mtk_wed_ring *ring = &dev->tx_ring[idx];

	/*
	 * Tx ring redirection:
	 * Instead of configuring the WLAN PDMA TX ring directly, the WLAN
	 * driver allocated DMA ring gets configured into WED MTK_WED_RING_TX(n)
	 * registers.
	 *
	 * WED driver posts its own DMA ring as WLAN PDMA TX and configures it
	 * into MTK_WED_WPDMA_RING_TX(n) registers.
	 * It gets filled with packets picked up from WED TX ring and from
	 * WDMA RX.
	 */

	if (WARN_ON(idx >= ARRAY_SIZE(dev->tx_ring)))
		return -EINVAL;

	if (!reset && mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE,
					 desc_size: sizeof(*ring->desc), tx: true))
		return -ENOMEM;

	/* every TX ring needs a matching WDMA RX ring to pull packets from */
	if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE,
				       reset))
		return -ENOMEM;

	ring->reg_base = MTK_WED_RING_TX(idx);
	ring->wpdma = regs;

	if (mtk_wed_is_v3_or_greater(hw: dev->hw) && idx == 1) {
		/* reset prefetch index */
		wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG,
			MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR |
			MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR);

		wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
			MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR |
			MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR);

		/* reset prefetch FIFO */
		wed_w32(dev, MTK_WED_WDMA_RX_PREF_FIFO_CFG,
			MTK_WED_WDMA_RX_PREF_FIFO_RX0_CLR |
			MTK_WED_WDMA_RX_PREF_FIFO_RX1_CLR);
		wed_w32(dev, MTK_WED_WDMA_RX_PREF_FIFO_CFG, val: 0);
	}

	/* WED -> WPDMA */
	wpdma_tx_w32(dev, ring: idx, MTK_WED_RING_OFS_BASE, val: ring->desc_phys);
	wpdma_tx_w32(dev, ring: idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE);
	wpdma_tx_w32(dev, ring: idx, MTK_WED_RING_OFS_CPU_IDX, val: 0);

	wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
		val: ring->desc_phys);
	wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
		MTK_WED_TX_RING_SIZE);
	wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, val: 0);

	return 0;
}
2520 | |
2521 | static int |
2522 | mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs) |
2523 | { |
2524 | struct mtk_wed_ring *ring = &dev->txfree_ring; |
2525 | int i, index = mtk_wed_is_v1(hw: dev->hw); |
2526 | |
2527 | /* |
2528 | * For txfree event handling, the same DMA ring is shared between WED |
2529 | * and WLAN. The WLAN driver accesses the ring index registers through |
2530 | * WED |
2531 | */ |
2532 | ring->reg_base = MTK_WED_RING_RX(index); |
2533 | ring->wpdma = regs; |
2534 | |
2535 | for (i = 0; i < 12; i += 4) { |
2536 | u32 val = readl(addr: regs + i); |
2537 | |
2538 | wed_w32(dev, MTK_WED_RING_RX(index) + i, val); |
2539 | wed_w32(dev, MTK_WED_WPDMA_RING_RX(index) + i, val); |
2540 | } |
2541 | |
2542 | return 0; |
2543 | } |
2544 | |
2545 | static int |
2546 | mtk_wed_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs, |
2547 | bool reset) |
2548 | { |
2549 | struct mtk_wed_ring *ring = &dev->rx_ring[idx]; |
2550 | |
2551 | if (WARN_ON(idx >= ARRAY_SIZE(dev->rx_ring))) |
2552 | return -EINVAL; |
2553 | |
2554 | if (!reset && mtk_wed_ring_alloc(dev, ring, MTK_WED_RX_RING_SIZE, |
2555 | desc_size: sizeof(*ring->desc), tx: false)) |
2556 | return -ENOMEM; |
2557 | |
2558 | if (mtk_wed_wdma_tx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE, |
2559 | reset)) |
2560 | return -ENOMEM; |
2561 | |
2562 | ring->reg_base = MTK_WED_RING_RX_DATA(idx); |
2563 | ring->wpdma = regs; |
2564 | ring->flags |= MTK_WED_RING_CONFIGURED; |
2565 | |
2566 | /* WPDMA -> WED */ |
2567 | wpdma_rx_w32(dev, ring: idx, MTK_WED_RING_OFS_BASE, val: ring->desc_phys); |
2568 | wpdma_rx_w32(dev, ring: idx, MTK_WED_RING_OFS_COUNT, MTK_WED_RX_RING_SIZE); |
2569 | |
2570 | wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_BASE, |
2571 | val: ring->desc_phys); |
2572 | wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_COUNT, |
2573 | MTK_WED_RX_RING_SIZE); |
2574 | |
2575 | return 0; |
2576 | } |
2577 | |
2578 | static u32 |
2579 | mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask) |
2580 | { |
2581 | u32 val, ext_mask; |
2582 | |
2583 | if (mtk_wed_is_v3_or_greater(hw: dev->hw)) |
2584 | ext_mask = MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT | |
2585 | MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; |
2586 | else |
2587 | ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK; |
2588 | |
2589 | val = wed_r32(dev, MTK_WED_EXT_INT_STATUS); |
2590 | wed_w32(dev, MTK_WED_EXT_INT_STATUS, val); |
2591 | val &= ext_mask; |
2592 | if (!dev->hw->num_flows) |
2593 | val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; |
2594 | if (val && net_ratelimit()) |
2595 | pr_err("mtk_wed%d: error status=%08x\n" , dev->hw->index, val); |
2596 | |
2597 | val = wed_r32(dev, MTK_WED_INT_STATUS); |
2598 | val &= mask; |
2599 | wed_w32(dev, MTK_WED_INT_STATUS, val); /* ACK */ |
2600 | |
2601 | return val; |
2602 | } |
2603 | |
2604 | static void |
2605 | mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask) |
2606 | { |
2607 | mtk_wed_set_ext_int(dev, en: !!mask); |
2608 | wed_w32(dev, MTK_WED_INT_MASK, val: mask); |
2609 | } |
2610 | |
2611 | int mtk_wed_flow_add(int index) |
2612 | { |
2613 | struct mtk_wed_hw *hw = hw_list[index]; |
2614 | int ret = 0; |
2615 | |
2616 | mutex_lock(&hw_lock); |
2617 | |
2618 | if (!hw || !hw->wed_dev) { |
2619 | ret = -ENODEV; |
2620 | goto out; |
2621 | } |
2622 | |
2623 | if (!hw->wed_dev->wlan.offload_enable) |
2624 | goto out; |
2625 | |
2626 | if (hw->num_flows) { |
2627 | hw->num_flows++; |
2628 | goto out; |
2629 | } |
2630 | |
2631 | ret = hw->wed_dev->wlan.offload_enable(hw->wed_dev); |
2632 | if (!ret) |
2633 | hw->num_flows++; |
2634 | mtk_wed_set_ext_int(dev: hw->wed_dev, en: true); |
2635 | |
2636 | out: |
2637 | mutex_unlock(lock: &hw_lock); |
2638 | |
2639 | return ret; |
2640 | } |
2641 | |
2642 | void mtk_wed_flow_remove(int index) |
2643 | { |
2644 | struct mtk_wed_hw *hw = hw_list[index]; |
2645 | |
2646 | mutex_lock(&hw_lock); |
2647 | |
2648 | if (!hw || !hw->wed_dev) |
2649 | goto out; |
2650 | |
2651 | if (!hw->wed_dev->wlan.offload_disable) |
2652 | goto out; |
2653 | |
2654 | if (--hw->num_flows) |
2655 | goto out; |
2656 | |
2657 | hw->wed_dev->wlan.offload_disable(hw->wed_dev); |
2658 | mtk_wed_set_ext_int(dev: hw->wed_dev, en: true); |
2659 | |
2660 | out: |
2661 | mutex_unlock(lock: &hw_lock); |
2662 | } |
2663 | |
2664 | static int |
2665 | mtk_wed_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) |
2666 | { |
2667 | struct mtk_wed_flow_block_priv *priv = cb_priv; |
2668 | struct flow_cls_offload *cls = type_data; |
2669 | struct mtk_wed_hw *hw = priv->hw; |
2670 | |
2671 | if (!tc_can_offload(dev: priv->dev)) |
2672 | return -EOPNOTSUPP; |
2673 | |
2674 | if (type != TC_SETUP_CLSFLOWER) |
2675 | return -EOPNOTSUPP; |
2676 | |
2677 | return mtk_flow_offload_cmd(eth: hw->eth, cls, ppe_index: hw->index); |
2678 | } |
2679 | |
/* Bind/unbind a TC flow block for @dev on WED hw @hw.
 *
 * flow_block_cb entries are reference counted: repeated BINDs for the
 * same (cb, dev) pair only increment the refcount, and the callback
 * state (mtk_wed_flow_block_priv) is freed when the last UNBIND drops
 * the count to zero. Returns 0 on success or a negative errno.
 */
static int
mtk_wed_setup_tc_block(struct mtk_wed_hw *hw, struct net_device *dev,
		       struct flow_block_offload *f)
{
	struct mtk_wed_flow_block_priv *priv;
	static LIST_HEAD(block_cb_list);
	struct flow_block_cb *block_cb;
	struct mtk_eth *eth = hw->eth;
	flow_setup_cb_t *cb;

	/* the SoC must support hardware flow offload at all */
	if (!eth->soc->offload_version)
		return -EOPNOTSUPP;

	/* only ingress clsact blocks can be offloaded */
	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	cb = mtk_wed_setup_tc_block_cb;
	f->driver_block_list = &block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		/* already bound for this device: just take a reference */
		block_cb = flow_block_cb_lookup(block: f->block, cb, cb_ident: dev);
		if (block_cb) {
			flow_block_cb_incref(block_cb);
			return 0;
		}

		priv = kzalloc(size: sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		priv->hw = hw;
		priv->dev = dev;
		block_cb = flow_block_cb_alloc(cb, cb_ident: dev, cb_priv: priv, NULL);
		if (IS_ERR(ptr: block_cb)) {
			kfree(objp: priv);
			return PTR_ERR(ptr: block_cb);
		}

		flow_block_cb_incref(block_cb);
		flow_block_cb_add(block_cb, offload: f);
		list_add_tail(new: &block_cb->driver_list, head: &block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(block: f->block, cb, cb_ident: dev);
		if (!block_cb)
			return -ENOENT;

		/* free only when the last reference is gone */
		if (!flow_block_cb_decref(block_cb)) {
			flow_block_cb_remove(block_cb, offload: f);
			list_del(entry: &block_cb->driver_list);
			kfree(objp: block_cb->cb_priv);
		}
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
2738 | |
2739 | static int |
2740 | mtk_wed_setup_tc(struct mtk_wed_device *wed, struct net_device *dev, |
2741 | enum tc_setup_type type, void *type_data) |
2742 | { |
2743 | struct mtk_wed_hw *hw = wed->hw; |
2744 | |
2745 | if (mtk_wed_is_v1(hw)) |
2746 | return -EOPNOTSUPP; |
2747 | |
2748 | switch (type) { |
2749 | case TC_SETUP_BLOCK: |
2750 | case TC_SETUP_FT: |
2751 | return mtk_wed_setup_tc_block(hw, dev, f: type_data); |
2752 | default: |
2753 | return -EOPNOTSUPP; |
2754 | } |
2755 | } |
2756 | |
/* Register WED hardware instance @index described by device node @np.
 *
 * Looks up the WED platform device and its syscon regmap, fills in a
 * mtk_wed_hw descriptor (selecting the per-version SoC data), publishes
 * the wed_ops vtable and stores the instance in hw_list[]. On any
 * failure the acquired device/of_node references are dropped and the
 * function returns silently - WED support is optional.
 */
void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
		    void __iomem *wdma, phys_addr_t wdma_phy,
		    int index)
{
	static const struct mtk_wed_ops wed_ops = {
		.attach = mtk_wed_attach,
		.tx_ring_setup = mtk_wed_tx_ring_setup,
		.rx_ring_setup = mtk_wed_rx_ring_setup,
		.txfree_ring_setup = mtk_wed_txfree_ring_setup,
		.msg_update = mtk_wed_mcu_msg_update,
		.start = mtk_wed_start,
		.stop = mtk_wed_stop,
		.reset_dma = mtk_wed_reset_dma,
		.reg_read = wed_r32,
		.reg_write = wed_w32,
		.irq_get = mtk_wed_irq_get,
		.irq_set_mask = mtk_wed_irq_set_mask,
		.detach = mtk_wed_detach,
		.ppe_check = mtk_wed_ppe_check,
		.setup_tc = mtk_wed_setup_tc,
		.start_hw_rro = mtk_wed_start_hw_rro,
		.rro_rx_ring_setup = mtk_wed_rro_rx_ring_setup,
		.msdu_pg_rx_ring_setup = mtk_wed_msdu_pg_rx_ring_setup,
		.ind_rx_ring_setup = mtk_wed_ind_rx_ring_setup,
	};
	struct device_node *eth_np = eth->dev->of_node;
	struct platform_device *pdev;
	struct mtk_wed_hw *hw;
	struct regmap *regs;
	int irq;

	if (!np)
		return;

	pdev = of_find_device_by_node(np);
	if (!pdev)
		goto err_of_node_put;

	/* hold the WED platform device for the lifetime of the hw entry */
	get_device(dev: &pdev->dev);
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		goto err_put_device;

	regs = syscon_regmap_lookup_by_phandle(np, NULL);
	if (IS_ERR(ptr: regs))
		goto err_put_device;

	rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops);

	mutex_lock(&hw_lock);

	if (WARN_ON(hw_list[index]))
		goto unlock;

	hw = kzalloc(size: sizeof(*hw), GFP_KERNEL);
	if (!hw)
		goto unlock;

	hw->node = np;
	hw->regs = regs;
	hw->eth = eth;
	hw->dev = &pdev->dev;
	hw->wdma_phy = wdma_phy;
	hw->wdma = wdma;
	hw->index = index;
	hw->irq = irq;
	hw->version = eth->soc->version;

	/* pick SoC data by version; v1 additionally needs the PCIe mirror
	 * and hifsys syscons from the ethernet node
	 */
	switch (hw->version) {
	case 2:
		hw->soc = &mt7986_data;
		break;
	case 3:
		hw->soc = &mt7988_data;
		break;
	default:
	case 1:
		hw->mirror = syscon_regmap_lookup_by_phandle(np: eth_np,
							     property: "mediatek,pcie-mirror" );
		hw->hifsys = syscon_regmap_lookup_by_phandle(np: eth_np,
							     property: "mediatek,hifsys" );
		if (IS_ERR(ptr: hw->mirror) || IS_ERR(ptr: hw->hifsys)) {
			kfree(objp: hw);
			goto unlock;
		}

		/* first instance clears the PCIe mirror mapping */
		if (!index) {
			regmap_write(map: hw->mirror, reg: 0, val: 0);
			regmap_write(map: hw->mirror, reg: 4, val: 0);
		}
		hw->soc = &mt7622_data;
		break;
	}

	mtk_wed_hw_add_debugfs(hw);

	hw_list[index] = hw;

	mutex_unlock(lock: &hw_lock);

	return;

unlock:
	mutex_unlock(lock: &hw_lock);
err_put_device:
	put_device(dev: &pdev->dev);
err_of_node_put:
	of_node_put(node: np);
}
2866 | |
2867 | void mtk_wed_exit(void) |
2868 | { |
2869 | int i; |
2870 | |
2871 | rcu_assign_pointer(mtk_soc_wed_ops, NULL); |
2872 | |
2873 | synchronize_rcu(); |
2874 | |
2875 | for (i = 0; i < ARRAY_SIZE(hw_list); i++) { |
2876 | struct mtk_wed_hw *hw; |
2877 | |
2878 | hw = hw_list[i]; |
2879 | if (!hw) |
2880 | continue; |
2881 | |
2882 | hw_list[i] = NULL; |
2883 | debugfs_remove(dentry: hw->debugfs_dir); |
2884 | put_device(dev: hw->dev); |
2885 | of_node_put(node: hw->node); |
2886 | kfree(objp: hw); |
2887 | } |
2888 | } |
2889 | |