// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/dma-mapping.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/soc/qcom/smem_state.h>
#include "ahb.h"
#include "debug.h"
#include "hif.h"

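/*
 * Illustrative (hypothetical) device tree node this driver binds against;
 * only the compatible string is taken from the match table below, the node
 * name and unit address are made up for the example:
 *
 *	wifi@c000000 {
 *		compatible = "qcom,ipq5332-wifi";
 *		...
 *	};
 */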
static const struct of_device_id ath12k_ahb_of_match[] = {
	{ .compatible = "qcom,ipq5332-wifi",
	  .data = (void *)ATH12K_HW_IPQ5332_HW10,
	},
	{ }
};

MODULE_DEVICE_TABLE(of, ath12k_ahb_of_match);

#define ATH12K_IRQ_CE0_OFFSET 4
#define ATH12K_MAX_UPDS 1
#define ATH12K_UPD_IRQ_WRD_LEN 18
static const char ath12k_userpd_irq[][9] = {"spawn",
					    "ready",
					    "stop-ack"};

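/*
 * Interrupt names used to look up IRQs via platform_get_irq_byname(). The
 * indexes line up with enum ext_irq_num below (e.g. "host2wbm-desc-feed"
 * at index 16) and with ATH12K_IRQ_CE0_OFFSET for the CE interrupts, so
 * irq_name[irq_idx] resolves a logical interrupt to its "interrupt-names"
 * entry in the device tree.
 */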
static const char *irq_name[ATH12K_IRQ_NUM_MAX] = {
	"misc-pulse1",
	"misc-latch",
	"sw-exception",
	"watchdog",
	"ce0",
	"ce1",
	"ce2",
	"ce3",
	"ce4",
	"ce5",
	"ce6",
	"ce7",
	"ce8",
	"ce9",
	"ce10",
	"ce11",
	"host2wbm-desc-feed",
	"host2reo-re-injection",
	"host2reo-command",
	"host2rxdma-monitor-ring3",
	"host2rxdma-monitor-ring2",
	"host2rxdma-monitor-ring1",
	"reo2ost-exception",
	"wbm2host-rx-release",
	"reo2host-status",
	"reo2host-destination-ring4",
	"reo2host-destination-ring3",
	"reo2host-destination-ring2",
	"reo2host-destination-ring1",
	"rxdma2host-monitor-destination-mac3",
	"rxdma2host-monitor-destination-mac2",
	"rxdma2host-monitor-destination-mac1",
	"ppdu-end-interrupts-mac3",
	"ppdu-end-interrupts-mac2",
	"ppdu-end-interrupts-mac1",
	"rxdma2host-monitor-status-ring-mac3",
	"rxdma2host-monitor-status-ring-mac2",
	"rxdma2host-monitor-status-ring-mac1",
	"host2rxdma-host-buf-ring-mac3",
	"host2rxdma-host-buf-ring-mac2",
	"host2rxdma-host-buf-ring-mac1",
	"rxdma2host-destination-ring-mac3",
	"rxdma2host-destination-ring-mac2",
	"rxdma2host-destination-ring-mac1",
	"host2tcl-input-ring4",
	"host2tcl-input-ring3",
	"host2tcl-input-ring2",
	"host2tcl-input-ring1",
	"wbm2host-tx-completions-ring4",
	"wbm2host-tx-completions-ring3",
	"wbm2host-tx-completions-ring2",
	"wbm2host-tx-completions-ring1",
	"tcl2host-status-ring",
};

enum ext_irq_num {
	host2wbm_desc_feed = 16,
	host2reo_re_injection,
	host2reo_command,
	host2rxdma_monitor_ring3,
	host2rxdma_monitor_ring2,
	host2rxdma_monitor_ring1,
	reo2host_exception,
	wbm2host_rx_release,
	reo2host_status,
	reo2host_destination_ring4,
	reo2host_destination_ring3,
	reo2host_destination_ring2,
	reo2host_destination_ring1,
	rxdma2host_monitor_destination_mac3,
	rxdma2host_monitor_destination_mac2,
	rxdma2host_monitor_destination_mac1,
	ppdu_end_interrupts_mac3,
	ppdu_end_interrupts_mac2,
	ppdu_end_interrupts_mac1,
	rxdma2host_monitor_status_ring_mac3,
	rxdma2host_monitor_status_ring_mac2,
	rxdma2host_monitor_status_ring_mac1,
	host2rxdma_host_buf_ring_mac3,
	host2rxdma_host_buf_ring_mac2,
	host2rxdma_host_buf_ring_mac1,
	rxdma2host_destination_ring_mac3,
	rxdma2host_destination_ring_mac2,
	rxdma2host_destination_ring_mac1,
	host2tcl_input_ring4,
	host2tcl_input_ring3,
	host2tcl_input_ring2,
	host2tcl_input_ring1,
	wbm2host_tx_completions_ring4,
	wbm2host_tx_completions_ring3,
	wbm2host_tx_completions_ring2,
	wbm2host_tx_completions_ring1,
	tcl2host_status_ring,
};

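/*
 * On platforms with CE remapping the CE registers live in a separate,
 * non-contiguous window (see ath12k_ahb_resource_init()), so register
 * accesses below HAL_SEQ_WCSS_CMEM_OFFSET are steered to the dedicated
 * ab->mem_ce mapping instead of the main WCSS mapping.
 */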
static u32 ath12k_ahb_read32(struct ath12k_base *ab, u32 offset)
{
	if (ab->ce_remap && offset < HAL_SEQ_WCSS_CMEM_OFFSET)
		return ioread32(ab->mem_ce + offset);
	return ioread32(ab->mem + offset);
}

static void ath12k_ahb_write32(struct ath12k_base *ab, u32 offset,
			       u32 value)
{
	if (ab->ce_remap && offset < HAL_SEQ_WCSS_CMEM_OFFSET)
		iowrite32(value, ab->mem_ce + offset);
	else
		iowrite32(value, ab->mem + offset);
}

static void ath12k_ahb_cancel_workqueue(struct ath12k_base *ab)
{
	int i;

	for (i = 0; i < ab->hw_params->ce_count; i++) {
		struct ath12k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];

		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		cancel_work_sync(&ce_pipe->intr_wq);
	}
}

static void ath12k_ahb_ext_grp_disable(struct ath12k_ext_irq_grp *irq_grp)
{
	int i;

	for (i = 0; i < irq_grp->num_irq; i++)
		disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}

static void __ath12k_ahb_ext_irq_disable(struct ath12k_base *ab)
{
	int i;

	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		ath12k_ahb_ext_grp_disable(irq_grp);
		if (irq_grp->napi_enabled) {
			napi_synchronize(&irq_grp->napi);
			napi_disable(&irq_grp->napi);
			irq_grp->napi_enabled = false;
		}
	}
}

static void ath12k_ahb_ext_grp_enable(struct ath12k_ext_irq_grp *irq_grp)
{
	int i;

	for (i = 0; i < irq_grp->num_irq; i++)
		enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}

static void ath12k_ahb_setbit32(struct ath12k_base *ab, u8 bit, u32 offset)
{
	u32 val;

	val = ath12k_ahb_read32(ab, offset);
	ath12k_ahb_write32(ab, offset, val | BIT(bit));
}

static void ath12k_ahb_clearbit32(struct ath12k_base *ab, u8 bit, u32 offset)
{
	u32 val;

	val = ath12k_ahb_read32(ab, offset);
	ath12k_ahb_write32(ab, offset, val & ~BIT(bit));
}

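/*
 * CE interrupts are gated per pipe through three interrupt-enable
 * registers supplied by hw_params: IE1 is toggled for pipes with source
 * rings and IE2 for pipes with destination rings; IE3 is toggled together
 * with IE2, using the pipe id shifted by CE_HOST_IE_3_SHIFT as the bit
 * position.
 */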
static void ath12k_ahb_ce_irq_enable(struct ath12k_base *ab, u16 ce_id)
{
	const struct ce_attr *ce_attr;
	const struct ce_ie_addr *ce_ie_addr = ab->hw_params->ce_ie_addr;
	u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;

	ie1_reg_addr = ce_ie_addr->ie1_reg_addr;
	ie2_reg_addr = ce_ie_addr->ie2_reg_addr;
	ie3_reg_addr = ce_ie_addr->ie3_reg_addr;

	ce_attr = &ab->hw_params->host_ce_config[ce_id];
	if (ce_attr->src_nentries)
		ath12k_ahb_setbit32(ab, ce_id, ie1_reg_addr);

	if (ce_attr->dest_nentries) {
		ath12k_ahb_setbit32(ab, ce_id, ie2_reg_addr);
		ath12k_ahb_setbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
				    ie3_reg_addr);
	}
}

static void ath12k_ahb_ce_irq_disable(struct ath12k_base *ab, u16 ce_id)
{
	const struct ce_attr *ce_attr;
	const struct ce_ie_addr *ce_ie_addr = ab->hw_params->ce_ie_addr;
	u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;

	ie1_reg_addr = ce_ie_addr->ie1_reg_addr;
	ie2_reg_addr = ce_ie_addr->ie2_reg_addr;
	ie3_reg_addr = ce_ie_addr->ie3_reg_addr;

	ce_attr = &ab->hw_params->host_ce_config[ce_id];
	if (ce_attr->src_nentries)
		ath12k_ahb_clearbit32(ab, ce_id, ie1_reg_addr);

	if (ce_attr->dest_nentries) {
		ath12k_ahb_clearbit32(ab, ce_id, ie2_reg_addr);
		ath12k_ahb_clearbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
				      ie3_reg_addr);
	}
}

static void ath12k_ahb_sync_ce_irqs(struct ath12k_base *ab)
{
	int i;
	int irq_idx;

	for (i = 0; i < ab->hw_params->ce_count; i++) {
		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		irq_idx = ATH12K_IRQ_CE0_OFFSET + i;
		synchronize_irq(ab->irq_num[irq_idx]);
	}
}

static void ath12k_ahb_sync_ext_irqs(struct ath12k_base *ab)
{
	int i, j;
	int irq_idx;

	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		for (j = 0; j < irq_grp->num_irq; j++) {
			irq_idx = irq_grp->irqs[j];
			synchronize_irq(ab->irq_num[irq_idx]);
		}
	}
}

static void ath12k_ahb_ce_irqs_enable(struct ath12k_base *ab)
{
	int i;

	for (i = 0; i < ab->hw_params->ce_count; i++) {
		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;
		ath12k_ahb_ce_irq_enable(ab, i);
	}
}

static void ath12k_ahb_ce_irqs_disable(struct ath12k_base *ab)
{
	int i;

	for (i = 0; i < ab->hw_params->ce_count; i++) {
		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;
		ath12k_ahb_ce_irq_disable(ab, i);
	}
}

static int ath12k_ahb_start(struct ath12k_base *ab)
{
	ath12k_ahb_ce_irqs_enable(ab);
	ath12k_ce_rx_post_buf(ab);

	return 0;
}

static void ath12k_ahb_ext_irq_enable(struct ath12k_base *ab)
{
	struct ath12k_ext_irq_grp *irq_grp;
	int i;

	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
		irq_grp = &ab->ext_irq_grp[i];
		if (!irq_grp->napi_enabled) {
			napi_enable(&irq_grp->napi);
			irq_grp->napi_enabled = true;
		}
		ath12k_ahb_ext_grp_enable(irq_grp);
	}
}

static void ath12k_ahb_ext_irq_disable(struct ath12k_base *ab)
{
	__ath12k_ahb_ext_irq_disable(ab);
	ath12k_ahb_sync_ext_irqs(ab);
}

static void ath12k_ahb_stop(struct ath12k_base *ab)
{
	if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
		ath12k_ahb_ce_irqs_disable(ab);
	ath12k_ahb_sync_ce_irqs(ab);
	ath12k_ahb_cancel_workqueue(ab);
	timer_delete_sync(&ab->rx_replenish_retry);
	ath12k_ce_cleanup_pipes(ab);
}

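/*
 * Bring up the WiFi user PD: map the reserved firmware region, load the
 * two MDT firmware images into it, authenticate them through SCM PAS,
 * then raise the "spawn" SMEM state bit and wait for the remote Q6 to
 * fire the spawn and ready interrupts handled in
 * ath12k_userpd_irq_handler().
 */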
static int ath12k_ahb_power_up(struct ath12k_base *ab)
{
	struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
	char fw_name[ATH12K_USERPD_FW_NAME_LEN];
	char fw2_name[ATH12K_USERPD_FW_NAME_LEN];
	struct device *dev = ab->dev;
	const struct firmware *fw, *fw2;
	struct reserved_mem *rmem = NULL;
	unsigned long time_left;
	phys_addr_t mem_phys;
	void *mem_region;
	size_t mem_size;
	u32 pasid;
	int ret;

	rmem = ath12k_core_get_reserved_mem(ab, 0);
	if (!rmem)
		return -ENODEV;

	mem_phys = rmem->base;
	mem_size = rmem->size;
	mem_region = devm_memremap(dev, mem_phys, mem_size, MEMREMAP_WC);
	if (IS_ERR(mem_region)) {
		ath12k_err(ab, "unable to map memory region: %pa+%pa\n",
			   &rmem->base, &rmem->size);
		return PTR_ERR(mem_region);
	}

	snprintf(fw_name, sizeof(fw_name), "%s/%s/%s%d%s", ATH12K_FW_DIR,
		 ab->hw_params->fw.dir, ATH12K_AHB_FW_PREFIX, ab_ahb->userpd_id,
		 ATH12K_AHB_FW_SUFFIX);

	ret = request_firmware(&fw, fw_name, dev);
	if (ret < 0) {
		ath12k_err(ab, "request_firmware failed\n");
		return ret;
	}

	ath12k_dbg(ab, ATH12K_DBG_AHB, "Booting fw image %s, size %zd\n", fw_name,
		   fw->size);

	if (!fw->size) {
		ath12k_err(ab, "Invalid firmware size\n");
		ret = -EINVAL;
		goto err_fw;
	}

	pasid = (u32_encode_bits(ab_ahb->userpd_id, ATH12K_USERPD_ID_MASK)) |
		ATH12K_AHB_UPD_SWID;

	/* Load FW image to a reserved memory location */
	ret = qcom_mdt_load(dev, fw, fw_name, pasid, mem_region, mem_phys, mem_size,
			    &mem_phys);
	if (ret) {
		ath12k_err(ab, "Failed to load MDT segments: %d\n", ret);
		goto err_fw;
	}

	snprintf(fw2_name, sizeof(fw2_name), "%s/%s/%s", ATH12K_FW_DIR,
		 ab->hw_params->fw.dir, ATH12K_AHB_FW2);

	ret = request_firmware(&fw2, fw2_name, dev);
	if (ret < 0) {
		ath12k_err(ab, "request_firmware failed\n");
		goto err_fw;
	}

	ath12k_dbg(ab, ATH12K_DBG_AHB, "Booting fw image %s, size %zd\n", fw2_name,
		   fw2->size);

	if (!fw2->size) {
		ath12k_err(ab, "Invalid firmware size\n");
		ret = -EINVAL;
		goto err_fw2;
	}

	ret = qcom_mdt_load_no_init(dev, fw2, fw2_name, pasid, mem_region, mem_phys,
				    mem_size, &mem_phys);
	if (ret) {
		ath12k_err(ab, "Failed to load MDT segments: %d\n", ret);
		goto err_fw2;
	}

	/* Authenticate FW image using peripheral ID */
	ret = qcom_scm_pas_auth_and_reset(pasid);
	if (ret) {
		ath12k_err(ab, "failed to boot the remote processor %d\n", ret);
		goto err_fw2;
	}

	/* Instruct Q6 to spawn userPD thread */
	ret = qcom_smem_state_update_bits(ab_ahb->spawn_state, BIT(ab_ahb->spawn_bit),
					  BIT(ab_ahb->spawn_bit));
	if (ret) {
		ath12k_err(ab, "Failed to update spawn state %d\n", ret);
		goto err_fw2;
	}

	time_left = wait_for_completion_timeout(&ab_ahb->userpd_spawned,
						ATH12K_USERPD_SPAWN_TIMEOUT);
	if (!time_left) {
		ath12k_err(ab, "UserPD spawn wait timed out\n");
		ret = -ETIMEDOUT;
		goto err_fw2;
	}

	time_left = wait_for_completion_timeout(&ab_ahb->userpd_ready,
						ATH12K_USERPD_READY_TIMEOUT);
	if (!time_left) {
		ath12k_err(ab, "UserPD ready wait timed out\n");
		ret = -ETIMEDOUT;
		goto err_fw2;
	}

	qcom_smem_state_update_bits(ab_ahb->spawn_state, BIT(ab_ahb->spawn_bit), 0);

	ath12k_dbg(ab, ATH12K_DBG_AHB, "UserPD%d is now UP\n", ab_ahb->userpd_id);

err_fw2:
	release_firmware(fw2);
err_fw:
	release_firmware(fw);
	return ret;
}

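/*
 * Tear down the user PD: raise the "stop" SMEM state bit, wait for the
 * stop-ack interrupt from the remote side, then release the firmware
 * through an SCM PAS shutdown for this PD's pasid.
 */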
static void ath12k_ahb_power_down(struct ath12k_base *ab, bool is_suspend)
{
	struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
	unsigned long time_left;
	u32 pasid;
	int ret;

	qcom_smem_state_update_bits(ab_ahb->stop_state, BIT(ab_ahb->stop_bit),
				    BIT(ab_ahb->stop_bit));

	time_left = wait_for_completion_timeout(&ab_ahb->userpd_stopped,
						ATH12K_USERPD_STOP_TIMEOUT);
	if (!time_left) {
		ath12k_err(ab, "UserPD stop wait timed out\n");
		return;
	}

	qcom_smem_state_update_bits(ab_ahb->stop_state, BIT(ab_ahb->stop_bit), 0);

	pasid = (u32_encode_bits(ab_ahb->userpd_id, ATH12K_USERPD_ID_MASK)) |
		ATH12K_AHB_UPD_SWID;
	/* Release the firmware */
	ret = qcom_scm_pas_shutdown(pasid);
	if (ret)
		ath12k_err(ab, "scm pas shutdown failed for userPD%d: %d\n",
			   ab_ahb->userpd_id, ret);
}

static void ath12k_ahb_init_qmi_ce_config(struct ath12k_base *ab)
{
	struct ath12k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;

	cfg->tgt_ce_len = ab->hw_params->target_ce_count;
	cfg->tgt_ce = ab->hw_params->target_ce_config;
	cfg->svc_to_ce_map_len = ab->hw_params->svc_to_ce_map_len;
	cfg->svc_to_ce_map = ab->hw_params->svc_to_ce_map;
	ab->qmi.service_ins_id = ab->hw_params->qmi_service_ins_id;
}

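/*
 * CE interrupts follow a classic top-half/bottom-half split: the hard IRQ
 * handler below masks the pipe's interrupt and queues work on
 * system_bh_wq, and the work item services the pipe before unmasking the
 * interrupt again.
 */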
static void ath12k_ahb_ce_workqueue(struct work_struct *work)
{
	struct ath12k_ce_pipe *ce_pipe = from_work(ce_pipe, work, intr_wq);

	ath12k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);

	ath12k_ahb_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num);
}

static irqreturn_t ath12k_ahb_ce_interrupt_handler(int irq, void *arg)
{
	struct ath12k_ce_pipe *ce_pipe = arg;

	/* last interrupt received for this CE */
	ce_pipe->timestamp = jiffies;

	ath12k_ahb_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);

	queue_work(system_bh_wq, &ce_pipe->intr_wq);

	return IRQ_HANDLED;
}

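/*
 * NAPI poll for an external interrupt group: service the DP SRNG rings
 * and, once less than the budget was consumed, complete NAPI and
 * re-enable the group's interrupts. work_done is clamped to the budget
 * because the NAPI contract forbids returning more than was granted.
 */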
static int ath12k_ahb_ext_grp_napi_poll(struct napi_struct *napi, int budget)
{
	struct ath12k_ext_irq_grp *irq_grp = container_of(napi,
							  struct ath12k_ext_irq_grp,
							  napi);
	struct ath12k_base *ab = irq_grp->ab;
	int work_done;

	work_done = ath12k_dp_service_srng(ab, irq_grp, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		ath12k_ahb_ext_grp_enable(irq_grp);
	}

	if (work_done > budget)
		work_done = budget;

	return work_done;
}

static irqreturn_t ath12k_ahb_ext_interrupt_handler(int irq, void *arg)
{
	struct ath12k_ext_irq_grp *irq_grp = arg;

	/* last interrupt received for this group */
	irq_grp->timestamp = jiffies;

	ath12k_ahb_ext_grp_disable(irq_grp);

	napi_schedule(&irq_grp->napi);

	return IRQ_HANDLED;
}

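/*
 * Build the external interrupt groups: walk the per-group ring masks from
 * hw_params, translate each set bit into the matching ext_irq_num entry,
 * then request the corresponding named platform IRQs, with a dummy netdev
 * backing each group's NAPI instance.
 */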
static int ath12k_ahb_config_ext_irq(struct ath12k_base *ab)
{
	const struct ath12k_hw_ring_mask *ring_mask;
	struct ath12k_ext_irq_grp *irq_grp;
	const struct hal_ops *hal_ops;
	int i, j, irq, irq_idx, ret;
	u32 num_irq;

	ring_mask = ab->hw_params->ring_mask;
	hal_ops = ab->hw_params->hal_ops;
	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
		irq_grp = &ab->ext_irq_grp[i];
		num_irq = 0;

		irq_grp->ab = ab;
		irq_grp->grp_id = i;

		irq_grp->napi_ndev = alloc_netdev_dummy(0);
		if (!irq_grp->napi_ndev)
			return -ENOMEM;

		netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi,
			       ath12k_ahb_ext_grp_napi_poll);

		for (j = 0; j < ATH12K_EXT_IRQ_NUM_MAX; j++) {
			/* For TX ring, ensure that the ring mask and the
			 * tcl_to_wbm_rbm_map point to the same ring number.
			 */
			if (ring_mask->tx[i] &
			    BIT(hal_ops->tcl_to_wbm_rbm_map[j].wbm_ring_num)) {
				irq_grp->irqs[num_irq++] =
					wbm2host_tx_completions_ring1 - j;
			}

			if (ring_mask->rx[i] & BIT(j)) {
				irq_grp->irqs[num_irq++] =
					reo2host_destination_ring1 - j;
			}

			if (ring_mask->rx_err[i] & BIT(j))
				irq_grp->irqs[num_irq++] = reo2host_exception;

			if (ring_mask->rx_wbm_rel[i] & BIT(j))
				irq_grp->irqs[num_irq++] = wbm2host_rx_release;

			if (ring_mask->reo_status[i] & BIT(j))
				irq_grp->irqs[num_irq++] = reo2host_status;

			if (ring_mask->rx_mon_dest[i] & BIT(j))
				irq_grp->irqs[num_irq++] =
					rxdma2host_monitor_destination_mac1;
		}

		irq_grp->num_irq = num_irq;

		for (j = 0; j < irq_grp->num_irq; j++) {
			irq_idx = irq_grp->irqs[j];

			irq = platform_get_irq_byname(ab->pdev,
						      irq_name[irq_idx]);
			ab->irq_num[irq_idx] = irq;
			irq_set_status_flags(irq, IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY);
			ret = devm_request_irq(ab->dev, irq,
					       ath12k_ahb_ext_interrupt_handler,
					       IRQF_TRIGGER_RISING,
					       irq_name[irq_idx], irq_grp);
			if (ret)
				ath12k_warn(ab, "failed request_irq for %d\n", irq);
		}
	}

	return 0;
}

static int ath12k_ahb_config_irq(struct ath12k_base *ab)
{
	int irq, irq_idx, i;
	int ret;

	/* Configure CE irqs */
	for (i = 0; i < ab->hw_params->ce_count; i++) {
		struct ath12k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];

		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		irq_idx = ATH12K_IRQ_CE0_OFFSET + i;

		INIT_WORK(&ce_pipe->intr_wq, ath12k_ahb_ce_workqueue);
		irq = platform_get_irq_byname(ab->pdev, irq_name[irq_idx]);
		ret = devm_request_irq(ab->dev, irq, ath12k_ahb_ce_interrupt_handler,
				       IRQF_TRIGGER_RISING, irq_name[irq_idx],
				       ce_pipe);
		if (ret)
			return ret;

		ab->irq_num[irq_idx] = irq;
	}

	/* Configure external interrupts */
	ret = ath12k_ahb_config_ext_irq(ab);

	return ret;
}

static int ath12k_ahb_map_service_to_pipe(struct ath12k_base *ab, u16 service_id,
					  u8 *ul_pipe, u8 *dl_pipe)
{
	const struct service_to_pipe *entry;
	bool ul_set = false, dl_set = false;
	u32 pipedir;
	int i;

	for (i = 0; i < ab->hw_params->svc_to_ce_map_len; i++) {
		entry = &ab->hw_params->svc_to_ce_map[i];

		if (__le32_to_cpu(entry->service_id) != service_id)
			continue;

		pipedir = __le32_to_cpu(entry->pipedir);
		if (pipedir == PIPEDIR_IN || pipedir == PIPEDIR_INOUT) {
			WARN_ON(dl_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
		}

		if (pipedir == PIPEDIR_OUT || pipedir == PIPEDIR_INOUT) {
			WARN_ON(ul_set);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			ul_set = true;
		}
	}

	if (WARN_ON(!ul_set || !dl_set))
		return -ENOENT;

	return 0;
}

static const struct ath12k_hif_ops ath12k_ahb_hif_ops_ipq5332 = {
	.start = ath12k_ahb_start,
	.stop = ath12k_ahb_stop,
	.read32 = ath12k_ahb_read32,
	.write32 = ath12k_ahb_write32,
	.irq_enable = ath12k_ahb_ext_irq_enable,
	.irq_disable = ath12k_ahb_ext_irq_disable,
	.map_service_to_pipe = ath12k_ahb_map_service_to_pipe,
	.power_up = ath12k_ahb_power_up,
	.power_down = ath12k_ahb_power_down,
};

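/*
 * The three user PD lifecycle interrupts ("spawn", "ready", "stop-ack")
 * share this handler; each one simply completes the matching completion
 * that the power up/down paths are blocked on.
 */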
static irqreturn_t ath12k_userpd_irq_handler(int irq, void *data)
{
	struct ath12k_base *ab = data;
	struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);

	if (irq == ab_ahb->userpd_irq_num[ATH12K_USERPD_SPAWN_IRQ]) {
		complete(&ab_ahb->userpd_spawned);
	} else if (irq == ab_ahb->userpd_irq_num[ATH12K_USERPD_READY_IRQ]) {
		complete(&ab_ahb->userpd_ready);
	} else if (irq == ab_ahb->userpd_irq_num[ATH12K_USERPD_STOP_ACK_IRQ]) {
		complete(&ab_ahb->userpd_stopped);
	} else {
		ath12k_err(ab, "Invalid userpd interrupt\n");
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}

static int ath12k_ahb_config_rproc_irq(struct ath12k_base *ab)
{
	struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
	int i, ret;
	char *upd_irq_name;

	for (i = 0; i < ATH12K_USERPD_MAX_IRQ; i++) {
		ab_ahb->userpd_irq_num[i] = platform_get_irq_byname(ab->pdev,
								    ath12k_userpd_irq[i]);
		if (ab_ahb->userpd_irq_num[i] < 0)
			return ab_ahb->userpd_irq_num[i];

		upd_irq_name = devm_kzalloc(&ab->pdev->dev, ATH12K_UPD_IRQ_WRD_LEN,
					    GFP_KERNEL);
		if (!upd_irq_name)
			return -ENOMEM;

		scnprintf(upd_irq_name, ATH12K_UPD_IRQ_WRD_LEN, "UserPD%u-%s",
			  ab_ahb->userpd_id, ath12k_userpd_irq[i]);
		ret = devm_request_threaded_irq(&ab->pdev->dev, ab_ahb->userpd_irq_num[i],
						NULL, ath12k_userpd_irq_handler,
						IRQF_TRIGGER_RISING | IRQF_ONESHOT,
						upd_irq_name, ab);
		if (ret)
			return dev_err_probe(&ab->pdev->dev, ret,
					     "Request %s irq failed: %d\n",
					     ath12k_userpd_irq[i], ret);
	}

	ab_ahb->spawn_state = devm_qcom_smem_state_get(&ab->pdev->dev, "spawn",
						       &ab_ahb->spawn_bit);
	if (IS_ERR(ab_ahb->spawn_state))
		return dev_err_probe(&ab->pdev->dev, PTR_ERR(ab_ahb->spawn_state),
				     "Failed to acquire spawn state\n");

	ab_ahb->stop_state = devm_qcom_smem_state_get(&ab->pdev->dev, "stop",
						      &ab_ahb->stop_bit);
	if (IS_ERR(ab_ahb->stop_state))
		return dev_err_probe(&ab->pdev->dev, PTR_ERR(ab_ahb->stop_state),
				     "Failed to acquire stop state\n");

	init_completion(&ab_ahb->userpd_spawned);
	init_completion(&ab_ahb->userpd_ready);
	init_completion(&ab_ahb->userpd_stopped);
	return 0;
}

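/*
 * SSR notifier callback for the root PD remoteproc: the only event acted
 * on is ATH12K_RPROC_AFTER_POWERUP, which signals rootpd_ready so that
 * ath12k_ahb_boot_root_pd() can stop waiting.
 */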
static int ath12k_ahb_root_pd_state_notifier(struct notifier_block *nb,
					     const unsigned long event, void *data)
{
	struct ath12k_ahb *ab_ahb = container_of(nb, struct ath12k_ahb, root_pd_nb);
	struct ath12k_base *ab = ab_ahb->ab;

	if (event == ATH12K_RPROC_AFTER_POWERUP) {
		ath12k_dbg(ab, ATH12K_DBG_AHB, "Root PD is UP\n");
		complete(&ab_ahb->rootpd_ready);
	}

	return 0;
}

static int ath12k_ahb_register_rproc_notifier(struct ath12k_base *ab)
{
	struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);

	ab_ahb->root_pd_nb.notifier_call = ath12k_ahb_root_pd_state_notifier;
	init_completion(&ab_ahb->rootpd_ready);

	ab_ahb->root_pd_notifier = qcom_register_ssr_notifier(ab_ahb->tgt_rproc->name,
							      &ab_ahb->root_pd_nb);
	if (IS_ERR(ab_ahb->root_pd_notifier))
		return PTR_ERR(ab_ahb->root_pd_notifier);

	return 0;
}

static void ath12k_ahb_unregister_rproc_notifier(struct ath12k_base *ab)
{
	struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);

	if (!ab_ahb->root_pd_notifier) {
		ath12k_err(ab, "Rproc notifier not registered\n");
		return;
	}

	qcom_unregister_ssr_notifier(ab_ahb->root_pd_notifier,
				     &ab_ahb->root_pd_nb);
	ab_ahb->root_pd_notifier = NULL;
}

static int ath12k_ahb_get_rproc(struct ath12k_base *ab)
{
	struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
	struct device *dev = ab->dev;
	struct device_node *np;
	struct rproc *prproc;

	np = of_parse_phandle(dev->of_node, "qcom,rproc", 0);
	if (!np) {
		ath12k_err(ab, "failed to get q6_rproc handle\n");
		return -ENOENT;
	}

	prproc = rproc_get_by_phandle(np->phandle);
	of_node_put(np);
	if (!prproc)
		return dev_err_probe(&ab->pdev->dev, -EPROBE_DEFER,
				     "failed to get rproc\n");

	ab_ahb->tgt_rproc = prproc;

	return 0;
}

static int ath12k_ahb_boot_root_pd(struct ath12k_base *ab)
{
	struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
	unsigned long time_left;
	int ret;

	ret = rproc_boot(ab_ahb->tgt_rproc);
	if (ret < 0) {
		ath12k_err(ab, "RootPD boot failed\n");
		return ret;
	}

	time_left = wait_for_completion_timeout(&ab_ahb->rootpd_ready,
						ATH12K_ROOTPD_READY_TIMEOUT);
	if (!time_left) {
		ath12k_err(ab, "RootPD ready wait timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int ath12k_ahb_configure_rproc(struct ath12k_base *ab)
{
	struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
	int ret;

	ret = ath12k_ahb_get_rproc(ab);
	if (ret < 0)
		return ret;

	ret = ath12k_ahb_register_rproc_notifier(ab);
	if (ret < 0) {
		ret = dev_err_probe(&ab->pdev->dev, ret,
				    "failed to register rproc notifier\n");
		goto err_put_rproc;
	}

	if (ab_ahb->tgt_rproc->state != RPROC_RUNNING) {
		ret = ath12k_ahb_boot_root_pd(ab);
		if (ret < 0) {
			ath12k_err(ab, "failed to boot the remote processor Q6\n");
			goto err_unreg_notifier;
		}
	}

	return ath12k_ahb_config_rproc_irq(ab);

err_unreg_notifier:
	ath12k_ahb_unregister_rproc_notifier(ab);

err_put_rproc:
	rproc_put(ab_ahb->tgt_rproc);
	return ret;
}

static void ath12k_ahb_deconfigure_rproc(struct ath12k_base *ab)
{
	struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);

	ath12k_ahb_unregister_rproc_notifier(ab);
	rproc_put(ab_ahb->tgt_rproc);
}

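/*
 * Acquire the per-bus resources: the main WCSS MMIO region, an optional
 * separately mapped CE register window (see the ce_remap handling below)
 * and the "xo" reference clock, which is prepared and enabled here.
 */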
static int ath12k_ahb_resource_init(struct ath12k_base *ab)
{
	struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
	struct platform_device *pdev = ab->pdev;
	struct resource *mem_res;
	int ret;

	ab->mem = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res);
	if (IS_ERR(ab->mem)) {
		ret = dev_err_probe(&pdev->dev, PTR_ERR(ab->mem), "ioremap error\n");
		goto out;
	}

	ab->mem_len = resource_size(mem_res);

	if (ab->hw_params->ce_remap) {
		const struct ce_remap *ce_remap = ab->hw_params->ce_remap;

		/* CE register space is moved out of WCSS and is not
		 * contiguous with it, hence remap the CE registers
		 * to a separate space to access them.
		 */
		ab->mem_ce = ioremap(ce_remap->base, ce_remap->size);
		if (!ab->mem_ce) {
			dev_err(&pdev->dev, "ce ioremap error\n");
			ret = -ENOMEM;
			goto err_mem_unmap;
		}
		ab->ce_remap = true;
		ab->ce_remap_base_addr = HAL_IPQ5332_CE_WFSS_REG_BASE;
	}

	ab_ahb->xo_clk = devm_clk_get(ab->dev, "xo");
	if (IS_ERR(ab_ahb->xo_clk)) {
		ret = dev_err_probe(&pdev->dev, PTR_ERR(ab_ahb->xo_clk),
				    "failed to get xo clock\n");
		goto err_mem_ce_unmap;
	}

	ret = clk_prepare_enable(ab_ahb->xo_clk);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable gcc_xo_clk: %d\n", ret);
		goto err_clock_deinit;
	}

	return 0;

err_clock_deinit:
	devm_clk_put(ab->dev, ab_ahb->xo_clk);

err_mem_ce_unmap:
	ab_ahb->xo_clk = NULL;
	if (ab->hw_params->ce_remap)
		iounmap(ab->mem_ce);

err_mem_unmap:
	ab->mem_ce = NULL;
	devm_iounmap(ab->dev, ab->mem);

out:
	ab->mem = NULL;
	return ret;
}

static void ath12k_ahb_resource_deinit(struct ath12k_base *ab)
{
	struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);

	if (ab->mem)
		devm_iounmap(ab->dev, ab->mem);

	if (ab->mem_ce)
		iounmap(ab->mem_ce);

	ab->mem = NULL;
	ab->mem_ce = NULL;

	clk_disable_unprepare(ab_ahb->xo_clk);
	devm_clk_put(ab->dev, ab_ahb->xo_clk);
	ab_ahb->xo_clk = NULL;
}

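/*
 * Probe order: set the DMA mask, allocate the core, pick per-SoC hif ops
 * from the match data, map resources, set up HAL SRNG and CE pipes,
 * configure the root PD remoteproc and IRQs, then hand off to
 * ath12k_core_init(); the error labels unwind in reverse order.
 */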
static int ath12k_ahb_probe(struct platform_device *pdev)
{
	struct ath12k_base *ab;
	const struct ath12k_hif_ops *hif_ops;
	struct ath12k_ahb *ab_ahb;
	enum ath12k_hw_rev hw_rev;
	u32 addr, userpd_id;
	int ret;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(&pdev->dev, "Failed to set 32-bit coherent dma\n");
		return ret;
	}

	ab = ath12k_core_alloc(&pdev->dev, sizeof(struct ath12k_ahb),
			       ATH12K_BUS_AHB);
	if (!ab)
		return -ENOMEM;

	hw_rev = (enum ath12k_hw_rev)(kernel_ulong_t)of_device_get_match_data(&pdev->dev);
	switch (hw_rev) {
	case ATH12K_HW_IPQ5332_HW10:
		hif_ops = &ath12k_ahb_hif_ops_ipq5332;
		userpd_id = ATH12K_IPQ5332_USERPD_ID;
		break;
	default:
		ret = -EOPNOTSUPP;
		goto err_core_free;
	}

	ab->hif.ops = hif_ops;
	ab->pdev = pdev;
	ab->hw_rev = hw_rev;
	platform_set_drvdata(pdev, ab);
	ab_ahb = ath12k_ab_to_ahb(ab);
	ab_ahb->ab = ab;
	ab_ahb->userpd_id = userpd_id;

	/* Set fixed_mem_region to true for platforms that support fixed memory
	 * reservation from DT. If memory is reserved from DT for FW, the
	 * ath12k driver does not need to allocate memory.
	 */
	if (!of_property_read_u32(ab->dev->of_node, "memory-region", &addr))
		set_bit(ATH12K_FLAG_FIXED_MEM_REGION, &ab->dev_flags);

	ret = ath12k_core_pre_init(ab);
	if (ret)
		goto err_core_free;

	ret = ath12k_ahb_resource_init(ab);
	if (ret)
		goto err_core_free;

	ret = ath12k_hal_srng_init(ab);
	if (ret)
		goto err_resource_deinit;

	ret = ath12k_ce_alloc_pipes(ab);
	if (ret) {
		ath12k_err(ab, "failed to allocate ce pipes: %d\n", ret);
		goto err_hal_srng_deinit;
	}

	ath12k_ahb_init_qmi_ce_config(ab);

	ret = ath12k_ahb_configure_rproc(ab);
	if (ret)
		goto err_ce_free;

	ret = ath12k_ahb_config_irq(ab);
	if (ret) {
		ath12k_err(ab, "failed to configure irq: %d\n", ret);
		goto err_rproc_deconfigure;
	}

	ret = ath12k_core_init(ab);
	if (ret) {
		ath12k_err(ab, "failed to init core: %d\n", ret);
		goto err_rproc_deconfigure;
	}

	return 0;

err_rproc_deconfigure:
	ath12k_ahb_deconfigure_rproc(ab);

err_ce_free:
	ath12k_ce_free_pipes(ab);

err_hal_srng_deinit:
	ath12k_hal_srng_deinit(ab);

err_resource_deinit:
	ath12k_ahb_resource_deinit(ab);

err_core_free:
	ath12k_core_free(ab);
	platform_set_drvdata(pdev, NULL);

	return ret;
}

static void ath12k_ahb_remove_prepare(struct ath12k_base *ab)
{
	unsigned long left;

	if (test_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags)) {
		left = wait_for_completion_timeout(&ab->driver_recovery,
						   ATH12K_AHB_RECOVERY_TIMEOUT);
		if (!left)
			ath12k_warn(ab, "failed to receive recovery response completion\n");
	}

	set_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags);
	cancel_work_sync(&ab->restart_work);
	cancel_work_sync(&ab->qmi.event_work);
}

static void ath12k_ahb_free_resources(struct ath12k_base *ab)
{
	struct platform_device *pdev = ab->pdev;

	ath12k_hal_srng_deinit(ab);
	ath12k_ce_free_pipes(ab);
	ath12k_ahb_resource_deinit(ab);
	ath12k_ahb_deconfigure_rproc(ab);
	ath12k_core_free(ab);
	platform_set_drvdata(pdev, NULL);
}

static void ath12k_ahb_remove(struct platform_device *pdev)
{
	struct ath12k_base *ab = platform_get_drvdata(pdev);

	if (test_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags)) {
		ath12k_ahb_power_down(ab, false);
		goto qmi_fail;
	}

	ath12k_ahb_remove_prepare(ab);
	ath12k_core_hw_group_cleanup(ab->ag);
qmi_fail:
	ath12k_core_deinit(ab);
	ath12k_ahb_free_resources(ab);
}

static struct platform_driver ath12k_ahb_driver = {
	.driver = {
		.name = "ath12k_ahb",
		.of_match_table = ath12k_ahb_of_match,
	},
	.probe = ath12k_ahb_probe,
	.remove = ath12k_ahb_remove,
};

int ath12k_ahb_init(void)
{
	return platform_driver_register(&ath12k_ahb_driver);
}

void ath12k_ahb_exit(void)
{
	platform_driver_unregister(&ath12k_ahb_driver);
}

/* Source: linux/drivers/net/wireless/ath/ath12k/ahb.c */