// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2022 MediaTek Inc.
 *
 * Author: Lorenzo Bianconi <lorenzo@kernel.org>
 *         Sujuan Chen <sujuan.chen@mediatek.com>
 */

#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/bitfield.h>

#include "mtk_wed.h"
#include "mtk_wed_regs.h"
#include "mtk_wed_wo.h"

static u32
mtk_wed_mmio_r32(struct mtk_wed_wo *wo, u32 reg)
{
        u32 val;

        if (regmap_read(wo->mmio.regs, reg, &val))
                val = ~0;

        return val;
}

static void
mtk_wed_mmio_w32(struct mtk_wed_wo *wo, u32 reg, u32 val)
{
        regmap_write(wo->mmio.regs, reg, val);
}

static u32
mtk_wed_wo_get_isr(struct mtk_wed_wo *wo)
{
        u32 val = mtk_wed_mmio_r32(wo, MTK_WED_WO_CCIF_RCHNUM);

        return val & MTK_WED_WO_CCIF_RCHNUM_MASK;
}

static void
mtk_wed_wo_set_isr(struct mtk_wed_wo *wo, u32 mask)
{
        mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_IRQ0_MASK, mask);
}

static void
mtk_wed_wo_set_ack(struct mtk_wed_wo *wo, u32 mask)
{
        mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_ACK, mask);
}

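/* Update the cached interrupt mask under the mmio lock: clear the bits in
 * @mask, set the bits in @val and, when @set is true, propagate the new
 * mask to the WO CCIF interrupt mask register.
 */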
static void
mtk_wed_wo_set_isr_mask(struct mtk_wed_wo *wo, u32 mask, u32 val, bool set)
{
        unsigned long flags;

        spin_lock_irqsave(&wo->mmio.lock, flags);
        wo->mmio.irq_mask &= ~mask;
        wo->mmio.irq_mask |= val;
        if (set)
                mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);
        spin_unlock_irqrestore(&wo->mmio.lock, flags);
}

static void
mtk_wed_wo_irq_enable(struct mtk_wed_wo *wo, u32 mask)
{
        mtk_wed_wo_set_isr_mask(wo, 0, mask, false);
        tasklet_schedule(&wo->mmio.irq_tasklet);
}

static void
mtk_wed_wo_irq_disable(struct mtk_wed_wo *wo, u32 mask)
{
        mtk_wed_wo_set_isr_mask(wo, mask, 0, true);
}

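/* Notify the WO firmware that new commands are pending on the TX channel:
 * mark the channel busy, then ring the CCIF channel doorbell.
 */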
static void
mtk_wed_wo_kickout(struct mtk_wed_wo *wo)
{
        mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_BUSY, 1 << MTK_WED_WO_TXCH_NUM);
        mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_TCHNUM, MTK_WED_WO_TXCH_NUM);
}

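/* Make the descriptor updates visible to the device before updating the
 * queue CPU index register.
 */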
static void
mtk_wed_wo_queue_kick(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
                      u32 val)
{
        wmb();
        mtk_wed_mmio_w32(wo, q->regs.cpu_idx, val);
}

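/* Pop the next completed buffer from the ring. Unless @flush forces
 * completion, stop at the first descriptor the WO firmware has not marked
 * as DMA done. The buffer is unmapped and, when @len is provided, its
 * length is extracted from the descriptor control word.
 */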
static void *
mtk_wed_wo_dequeue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, u32 *len,
                   bool flush)
{
        int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
        int index = (q->tail + 1) % q->n_desc;
        struct mtk_wed_wo_queue_entry *entry;
        struct mtk_wed_wo_queue_desc *desc;
        void *buf;

        if (!q->queued)
                return NULL;

        if (flush)
                q->desc[index].ctrl |= cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE);
        else if (!(q->desc[index].ctrl & cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE)))
                return NULL;

        q->tail = index;
        q->queued--;

        desc = &q->desc[index];
        entry = &q->entry[index];
        buf = entry->buf;
        if (len)
                *len = FIELD_GET(MTK_WED_WO_CTL_SD_LEN0,
                                 le32_to_cpu(READ_ONCE(desc->ctrl)));
        if (buf)
                dma_unmap_single(wo->hw->dev, entry->addr, buf_len,
                                 DMA_FROM_DEVICE);
        entry->buf = NULL;

        return buf;
}

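/* Fill all free ring slots with page-fragment buffers mapped for DMA. For
 * RX queues the descriptors are also initialized so the WO firmware can
 * write incoming messages into the new buffers. Returns the number of
 * buffers that were queued.
 */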
static int
mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
                        bool rx)
{
        enum dma_data_direction dir = rx ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
        int n_buf = 0;

        while (q->queued < q->n_desc) {
                struct mtk_wed_wo_queue_entry *entry;
                dma_addr_t addr;
                void *buf;

                buf = page_frag_alloc(&q->cache, q->buf_size,
                                      GFP_ATOMIC | GFP_DMA32);
                if (!buf)
                        break;

                addr = dma_map_single(wo->hw->dev, buf, q->buf_size, dir);
                if (unlikely(dma_mapping_error(wo->hw->dev, addr))) {
                        skb_free_frag(buf);
                        break;
                }

                q->head = (q->head + 1) % q->n_desc;
                entry = &q->entry[q->head];
                entry->addr = addr;
                entry->len = q->buf_size;
                q->entry[q->head].buf = buf;

                if (rx) {
                        struct mtk_wed_wo_queue_desc *desc = &q->desc[q->head];
                        u32 ctrl = MTK_WED_WO_CTL_LAST_SEC0 |
                                   FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0,
                                              entry->len);

                        WRITE_ONCE(desc->buf0, cpu_to_le32(addr));
                        WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
                }
                q->queued++;
                n_buf++;
        }

        return n_buf;
}

static void
mtk_wed_wo_rx_complete(struct mtk_wed_wo *wo)
{
        mtk_wed_wo_set_ack(wo, MTK_WED_WO_RXCH_INT_MASK);
        mtk_wed_wo_irq_enable(wo, MTK_WED_WO_RXCH_INT_MASK);
}

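/* Drain completed RX buffers: build an skb around each message, validate
 * it and hand it to the MCU layer as either a command response or an
 * unsolicited event. Finally refill the ring and notify the WO firmware
 * through the queue kick register.
 */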
static void
mtk_wed_wo_rx_run_queue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
        for (;;) {
                struct mtk_wed_mcu_hdr *hdr;
                struct sk_buff *skb;
                void *data;
                u32 len;

                data = mtk_wed_wo_dequeue(wo, q, &len, false);
                if (!data)
                        break;

                skb = build_skb(data, q->buf_size);
                if (!skb) {
                        skb_free_frag(data);
                        continue;
                }

                __skb_put(skb, len);
                if (mtk_wed_mcu_check_msg(wo, skb)) {
                        dev_kfree_skb(skb);
                        continue;
                }

                hdr = (struct mtk_wed_mcu_hdr *)skb->data;
                if (hdr->flag & cpu_to_le16(MTK_WED_WARP_CMD_FLAG_RSP))
                        mtk_wed_mcu_rx_event(wo, skb);
                else
                        mtk_wed_mcu_rx_unsolicited_event(wo, skb);
        }

        if (mtk_wed_wo_queue_refill(wo, q, true)) {
                u32 index = (q->head - 1) % q->n_desc;

                mtk_wed_wo_queue_kick(wo, q, index);
        }
}

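/* Hard IRQ handler: mask all WO interrupts and defer the actual
 * processing to the tasklet.
 */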
static irqreturn_t
mtk_wed_wo_irq_handler(int irq, void *data)
{
        struct mtk_wed_wo *wo = data;

        mtk_wed_wo_set_isr(wo, 0);
        tasklet_schedule(&wo->mmio.irq_tasklet);

        return IRQ_HANDLED;
}

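/* Tasklet counterpart of the IRQ handler: read the pending interrupt
 * status, keep the serviced sources masked, and process the RX channel,
 * which is acked and re-enabled once the queue has been drained.
 */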
static void mtk_wed_wo_irq_tasklet(struct tasklet_struct *t)
{
        struct mtk_wed_wo *wo = from_tasklet(wo, t, mmio.irq_tasklet);
        u32 intr, mask;

        /* disable interrupts */
        mtk_wed_wo_set_isr(wo, 0);

        intr = mtk_wed_wo_get_isr(wo);
        intr &= wo->mmio.irq_mask;
        mask = intr & (MTK_WED_WO_RXCH_INT_MASK | MTK_WED_WO_EXCEPTION_INT_MASK);
        mtk_wed_wo_irq_disable(wo, mask);

        if (intr & MTK_WED_WO_RXCH_INT_MASK) {
                mtk_wed_wo_rx_run_queue(wo, &wo->q_rx);
                mtk_wed_wo_rx_complete(wo);
        }
}

/* mtk wed wo hw queues */

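/* Allocate the DMA-coherent descriptor ring and the per-descriptor entry
 * bookkeeping for a WO queue. Both allocations are device-managed.
 */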
static int
mtk_wed_wo_queue_alloc(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
                       int n_desc, int buf_size, int index,
                       struct mtk_wed_wo_queue_regs *regs)
{
        q->regs = *regs;
        q->n_desc = n_desc;
        q->buf_size = buf_size;

        q->desc = dmam_alloc_coherent(wo->hw->dev, n_desc * sizeof(*q->desc),
                                      &q->desc_dma, GFP_KERNEL);
        if (!q->desc)
                return -ENOMEM;

        q->entry = devm_kzalloc(wo->hw->dev, n_desc * sizeof(*q->entry),
                                GFP_KERNEL);
        if (!q->entry)
                return -ENOMEM;

        return 0;
}

static void
mtk_wed_wo_queue_free(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
        mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
        dma_free_coherent(wo->hw->dev, q->n_desc * sizeof(*q->desc), q->desc,
                          q->desc_dma);
}

static void
mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
        int i;

        for (i = 0; i < q->n_desc; i++) {
                struct mtk_wed_wo_queue_entry *entry = &q->entry[i];

                if (!entry->buf)
                        continue;

                dma_unmap_single(wo->hw->dev, entry->addr, entry->len,
                                 DMA_TO_DEVICE);
                skb_free_frag(entry->buf);
                entry->buf = NULL;
        }

        page_frag_cache_drain(&q->cache);
}

static void
mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
        for (;;) {
                void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true);

                if (!buf)
                        break;

                skb_free_frag(buf);
        }

        page_frag_cache_drain(&q->cache);
}

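/* Program the descriptor base, ring size and CPU index registers so the
 * WO firmware sees an empty, freshly initialized ring.
 */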
static void
mtk_wed_wo_queue_reset(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
        mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
        mtk_wed_mmio_w32(wo, q->regs.desc_base, q->desc_dma);
        mtk_wed_mmio_w32(wo, q->regs.ring_size, q->n_desc);
}

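/* Copy an MCU command skb into the next free preallocated TX buffer,
 * publish the descriptor and notify the WO firmware. The skb is always
 * consumed; -ENOMEM is returned when the ring is full or the command does
 * not fit in a TX buffer.
 */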
int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
                            struct sk_buff *skb)
{
        struct mtk_wed_wo_queue_entry *entry;
        struct mtk_wed_wo_queue_desc *desc;
        int ret = 0, index;
        u32 ctrl;

        q->tail = mtk_wed_mmio_r32(wo, q->regs.dma_idx);
        index = (q->head + 1) % q->n_desc;
        if (q->tail == index) {
                ret = -ENOMEM;
                goto out;
        }

        entry = &q->entry[index];
        if (skb->len > entry->len) {
                ret = -ENOMEM;
                goto out;
        }

        desc = &q->desc[index];
        q->head = index;

        dma_sync_single_for_cpu(wo->hw->dev, entry->addr, skb->len,
                                DMA_TO_DEVICE);
        memcpy(entry->buf, skb->data, skb->len);
        dma_sync_single_for_device(wo->hw->dev, entry->addr, skb->len,
                                   DMA_TO_DEVICE);

        ctrl = FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0, skb->len) |
               MTK_WED_WO_CTL_LAST_SEC0 | MTK_WED_WO_CTL_DMA_DONE;
        WRITE_ONCE(desc->buf0, cpu_to_le32(entry->addr));
        WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

        mtk_wed_wo_queue_kick(wo, q, q->head);
        mtk_wed_wo_kickout(wo);
out:
        dev_kfree_skb(skb);

        return ret;
}

static int
mtk_wed_wo_exception_init(struct mtk_wed_wo *wo)
{
        return 0;
}

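/* Look up the WO CCIF register space and interrupt through the
 * "mediatek,wo-ccif" phandle, then allocate, refill and reset the TX and
 * RX command rings before unmasking the WO interrupts.
 */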
static int
mtk_wed_wo_hardware_init(struct mtk_wed_wo *wo)
{
        struct mtk_wed_wo_queue_regs regs;
        struct device_node *np;
        int ret;

        np = of_parse_phandle(wo->hw->node, "mediatek,wo-ccif", 0);
        if (!np)
                return -ENODEV;

        wo->mmio.regs = syscon_regmap_lookup_by_phandle(np, NULL);
        if (IS_ERR(wo->mmio.regs)) {
                ret = PTR_ERR(wo->mmio.regs);
                goto error_put;
        }

        wo->mmio.irq = irq_of_parse_and_map(np, 0);
        wo->mmio.irq_mask = MTK_WED_WO_ALL_INT_MASK;
        spin_lock_init(&wo->mmio.lock);
        tasklet_setup(&wo->mmio.irq_tasklet, mtk_wed_wo_irq_tasklet);

        ret = devm_request_irq(wo->hw->dev, wo->mmio.irq,
                               mtk_wed_wo_irq_handler, IRQF_TRIGGER_HIGH,
                               KBUILD_MODNAME, wo);
        if (ret)
                goto error;

        regs.desc_base = MTK_WED_WO_CCIF_DUMMY1;
        regs.ring_size = MTK_WED_WO_CCIF_DUMMY2;
        regs.dma_idx = MTK_WED_WO_CCIF_SHADOW4;
        regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY3;

        ret = mtk_wed_wo_queue_alloc(wo, &wo->q_tx, MTK_WED_WO_RING_SIZE,
                                     MTK_WED_WO_CMD_LEN, MTK_WED_WO_TXCH_NUM,
                                     &regs);
        if (ret)
                goto error;

        mtk_wed_wo_queue_refill(wo, &wo->q_tx, false);
        mtk_wed_wo_queue_reset(wo, &wo->q_tx);

        regs.desc_base = MTK_WED_WO_CCIF_DUMMY5;
        regs.ring_size = MTK_WED_WO_CCIF_DUMMY6;
        regs.dma_idx = MTK_WED_WO_CCIF_SHADOW8;
        regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY7;

        ret = mtk_wed_wo_queue_alloc(wo, &wo->q_rx, MTK_WED_WO_RING_SIZE,
                                     MTK_WED_WO_CMD_LEN, MTK_WED_WO_RXCH_NUM,
                                     &regs);
        if (ret)
                goto error;

        mtk_wed_wo_queue_refill(wo, &wo->q_rx, true);
        mtk_wed_wo_queue_reset(wo, &wo->q_rx);

        /* rx queue irqmask */
        mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);

        return 0;

error:
        devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);
error_put:
        of_node_put(np);
        return ret;
}

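/* Tear down the WO communication channel: mask and free the interrupt,
 * release all queued buffers and free both rings.
 */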
static void
mtk_wed_wo_hw_deinit(struct mtk_wed_wo *wo)
{
        /* disable interrupts */
        mtk_wed_wo_set_isr(wo, 0);

        tasklet_disable(&wo->mmio.irq_tasklet);

        disable_irq(wo->mmio.irq);
        devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);

        mtk_wed_wo_queue_tx_clean(wo, &wo->q_tx);
        mtk_wed_wo_queue_rx_clean(wo, &wo->q_rx);
        mtk_wed_wo_queue_free(wo, &wo->q_tx);
        mtk_wed_wo_queue_free(wo, &wo->q_rx);
}

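/* Allocate the WO context and bring up the communication channel with
 * the WO MCU: hardware rings and IRQ first, then the MCU layer.
 */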
int mtk_wed_wo_init(struct mtk_wed_hw *hw)
{
        struct mtk_wed_wo *wo;
        int ret;

        wo = devm_kzalloc(hw->dev, sizeof(*wo), GFP_KERNEL);
        if (!wo)
                return -ENOMEM;

        hw->wed_wo = wo;
        wo->hw = hw;

        ret = mtk_wed_wo_hardware_init(wo);
        if (ret)
                return ret;

        ret = mtk_wed_mcu_init(wo);
        if (ret)
                return ret;

        return mtk_wed_wo_exception_init(wo);
}

void mtk_wed_wo_deinit(struct mtk_wed_hw *hw)
{
        struct mtk_wed_wo *wo = hw->wed_wo;

        mtk_wed_wo_hw_deinit(wo);
}
