// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <crypto/hash.h>
#include "core.h"
#include "dp_tx.h"
#include "hal_tx.h"
#include "hif.h"
#include "debug.h"
#include "dp_rx.h"
#include "peer.h"
#include "dp_mon.h"

enum ath12k_dp_desc_type {
	ATH12K_DP_TX_DESC,
	ATH12K_DP_RX_DESC,
};

static void ath12k_dp_htt_htc_tx_complete(struct ath12k_base *ab,
					  struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

void ath12k_dp_peer_cleanup(struct ath12k *ar, int vdev_id, const u8 *addr)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_peer *peer;

	/* TODO: Any other peer specific DP cleanup */

	spin_lock_bh(&ab->base_lock);
	peer = ath12k_peer_find(ab, vdev_id, addr);
	if (!peer) {
		ath12k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
			    addr, vdev_id);
		spin_unlock_bh(&ab->base_lock);
		return;
	}

	if (!peer->primary_link) {
		spin_unlock_bh(&ab->base_lock);
		return;
	}

	ath12k_dp_rx_peer_tid_cleanup(ar, peer);
	crypto_free_shash(peer->tfm_mmic);
	peer->dp_setup_done = false;
	spin_unlock_bh(&ab->base_lock);
}

int ath12k_dp_peer_setup(struct ath12k *ar, int vdev_id, const u8 *addr)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_peer *peer;
	u32 reo_dest;
	int ret = 0, tid;

	/* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
	reo_dest = ar->dp.mac_id + 1;
	ret = ath12k_wmi_set_peer_param(ar, addr, vdev_id,
					WMI_PEER_SET_DEFAULT_ROUTING,
					DP_RX_HASH_ENABLE | (reo_dest << 1));

	if (ret) {
		ath12k_warn(ab, "failed to set default routing %d for peer %pM vdev_id %d\n",
			    ret, addr, vdev_id);
		return ret;
	}

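	/* Set up one RX TID queue per QoS TID plus one extra; TID
	 * IEEE80211_NUM_TIDS (16) is used for non-QoS traffic.
	 */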
	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
		ret = ath12k_dp_rx_peer_tid_setup(ar, addr, vdev_id, tid, 1, 0,
						  HAL_PN_TYPE_NONE);
		if (ret) {
			ath12k_warn(ab, "failed to setup rx tid queue for tid %d: %d\n",
				    tid, ret);
			goto peer_clean;
		}
	}

	ret = ath12k_dp_rx_peer_frag_setup(ar, addr, vdev_id);
	if (ret) {
		ath12k_warn(ab, "failed to setup rx defrag context\n");
		goto peer_clean;
	}

	/* TODO: Setup other peer specific resource used in data path */

	return 0;

peer_clean:
	spin_lock_bh(&ab->base_lock);

	peer = ath12k_peer_find(ab, vdev_id, addr);
	if (!peer) {
		ath12k_warn(ab, "failed to find the peer to del rx tid\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	for (; tid >= 0; tid--)
		ath12k_dp_rx_peer_tid_delete(ar, peer, tid);

	spin_unlock_bh(&ab->base_lock);

	return ret;
}

void ath12k_dp_srng_cleanup(struct ath12k_base *ab, struct dp_srng *ring)
{
	if (!ring->vaddr_unaligned)
		return;

	dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
			  ring->paddr_unaligned);

	ring->vaddr_unaligned = NULL;
}

static int ath12k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
{
	int ext_group_num;
	u8 mask = 1 << ring_num;

	for (ext_group_num = 0; ext_group_num < ATH12K_EXT_IRQ_GRP_NUM_MAX;
	     ext_group_num++) {
		if (mask & grp_mask[ext_group_num])
			return ext_group_num;
	}

	return -ENOENT;
}

static int ath12k_dp_srng_calculate_msi_group(struct ath12k_base *ab,
					      enum hal_ring_type type, int ring_num)
{
	const struct ath12k_hal_tcl_to_wbm_rbm_map *map;
	const u8 *grp_mask;
	int i;

	switch (type) {
	case HAL_WBM2SW_RELEASE:
		if (ring_num == HAL_WBM2SW_REL_ERR_RING_NUM) {
			grp_mask = &ab->hw_params->ring_mask->rx_wbm_rel[0];
			ring_num = 0;
		} else {
			map = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map;
			for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
				if (ring_num == map[i].wbm_ring_num) {
					ring_num = i;
					break;
				}
			}

			grp_mask = &ab->hw_params->ring_mask->tx[0];
		}
		break;
	case HAL_REO_EXCEPTION:
		grp_mask = &ab->hw_params->ring_mask->rx_err[0];
		break;
	case HAL_REO_DST:
		grp_mask = &ab->hw_params->ring_mask->rx[0];
		break;
	case HAL_REO_STATUS:
		grp_mask = &ab->hw_params->ring_mask->reo_status[0];
		break;
	case HAL_RXDMA_MONITOR_STATUS:
		grp_mask = &ab->hw_params->ring_mask->rx_mon_status[0];
		break;
	case HAL_RXDMA_MONITOR_DST:
		grp_mask = &ab->hw_params->ring_mask->rx_mon_dest[0];
		break;
	case HAL_TX_MONITOR_DST:
		grp_mask = &ab->hw_params->ring_mask->tx_mon_dest[0];
		break;
	case HAL_RXDMA_BUF:
		grp_mask = &ab->hw_params->ring_mask->host2rxdma[0];
		break;
	case HAL_RXDMA_MONITOR_BUF:
	case HAL_TCL_DATA:
	case HAL_TCL_CMD:
	case HAL_REO_CMD:
	case HAL_SW2WBM_RELEASE:
	case HAL_WBM_IDLE_LINK:
	case HAL_TCL_STATUS:
	case HAL_REO_REINJECT:
	case HAL_CE_SRC:
	case HAL_CE_DST:
	case HAL_CE_DST_STATUS:
	default:
		return -ENOENT;
	}

	return ath12k_dp_srng_find_ring_in_mask(ring_num, grp_mask);
}

static void ath12k_dp_srng_msi_setup(struct ath12k_base *ab,
				     struct hal_srng_params *ring_params,
				     enum hal_ring_type type, int ring_num)
{
	int msi_group_number, msi_data_count;
	u32 msi_data_start, msi_irq_start, addr_lo, addr_hi;
	int ret;

	ret = ath12k_hif_get_user_msi_vector(ab, "DP",
					     &msi_data_count, &msi_data_start,
					     &msi_irq_start);
	if (ret)
		return;

	msi_group_number = ath12k_dp_srng_calculate_msi_group(ab, type,
							      ring_num);
	if (msi_group_number < 0) {
		ath12k_dbg(ab, ATH12K_DBG_PCI,
			   "ring not part of an ext_group; ring_type %d ring_num %d",
			   type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		return;
	}

	if (msi_group_number > msi_data_count) {
		ath12k_dbg(ab, ATH12K_DBG_PCI,
			   "multiple msi_groups share one msi, msi_group_num %d",
			   msi_group_number);
	}

	ath12k_hif_get_msi_address(ab, &addr_lo, &addr_hi);

	ring_params->msi_addr = addr_lo;
	ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
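	/* With more interrupt groups than available MSI vectors, the modulo
	 * wraps around so several groups end up sharing one vector.
	 */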
	ring_params->msi_data = (msi_group_number % msi_data_count)
		+ msi_data_start;
	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}

int ath12k_dp_srng_setup(struct ath12k_base *ab, struct dp_srng *ring,
			 enum hal_ring_type type, int ring_num,
			 int mac_id, int num_entries)
{
	struct hal_srng_params params = { 0 };
	int entry_sz = ath12k_hal_srng_get_entrysize(ab, type);
	int max_entries = ath12k_hal_srng_get_max_entries(ab, type);
	int ret;

	if (max_entries < 0 || entry_sz < 0)
		return -EINVAL;

	if (num_entries > max_entries)
		num_entries = max_entries;

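	/* Over-allocate by HAL_RING_BASE_ALIGN - 1 bytes so the ring base
	 * address can be rounded up to the required alignment below.
	 */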
	ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;
	ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
						   &ring->paddr_unaligned,
						   GFP_KERNEL);
	if (!ring->vaddr_unaligned)
		return -ENOMEM;

	ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
	ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
		      (unsigned long)ring->vaddr_unaligned);

	params.ring_base_vaddr = ring->vaddr;
	params.ring_base_paddr = ring->paddr;
	params.num_entries = num_entries;
	ath12k_dp_srng_msi_setup(ab, &params, type, ring_num + mac_id);

	switch (type) {
	case HAL_REO_DST:
		params.intr_batch_cntr_thres_entries =
			HAL_SRNG_INT_BATCH_THRESHOLD_RX;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_RXDMA_BUF:
	case HAL_RXDMA_MONITOR_BUF:
		params.low_threshold = num_entries >> 3;
		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
		params.intr_batch_cntr_thres_entries = 0;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_RXDMA_MONITOR_STATUS:
		params.low_threshold = num_entries >> 3;
		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
		params.intr_batch_cntr_thres_entries = 1;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_TX_MONITOR_DST:
		params.low_threshold = DP_TX_MONITOR_BUF_SIZE_MAX >> 3;
		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
		params.intr_batch_cntr_thres_entries = 0;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_WBM2SW_RELEASE:
		if (ab->hw_params->hw_ops->dp_srng_is_tx_comp_ring(ring_num)) {
			params.intr_batch_cntr_thres_entries =
				HAL_SRNG_INT_BATCH_THRESHOLD_TX;
			params.intr_timer_thres_us =
				HAL_SRNG_INT_TIMER_THRESHOLD_TX;
			break;
		}
		/* fall through for rings that are not TX completion rings */
		fallthrough;
	case HAL_REO_EXCEPTION:
	case HAL_REO_REINJECT:
	case HAL_REO_CMD:
	case HAL_REO_STATUS:
	case HAL_TCL_DATA:
	case HAL_TCL_CMD:
	case HAL_TCL_STATUS:
	case HAL_WBM_IDLE_LINK:
	case HAL_SW2WBM_RELEASE:
	case HAL_RXDMA_DST:
	case HAL_RXDMA_MONITOR_DST:
	case HAL_RXDMA_MONITOR_DESC:
		params.intr_batch_cntr_thres_entries =
			HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
		break;
	case HAL_RXDMA_DIR_BUF:
		break;
	default:
		ath12k_warn(ab, "not a valid ring type in dp: %d\n", type);
		return -EINVAL;
	}

	ret = ath12k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
	if (ret < 0) {
		ath12k_warn(ab, "failed to setup srng: %d ring_id %d\n",
			    ret, ring_num);
		return ret;
	}

	ring->ring_id = ret;

	return 0;
}

static
u32 ath12k_dp_tx_get_vdev_bank_config(struct ath12k_base *ab,
				      struct ath12k_link_vif *arvif)
{
	u32 bank_config = 0;
	struct ath12k_vif *ahvif = arvif->ahvif;

	/* Only valid for raw frames with HW crypto enabled.
	 * With SW crypto, mac80211 sets the key per packet.
	 */
	if (ahvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW &&
	    test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags))
		bank_config |=
			u32_encode_bits(ath12k_dp_tx_get_encrypt_type(ahvif->key_cipher),
					HAL_TX_BANK_CONFIG_ENCRYPT_TYPE);

	bank_config |= u32_encode_bits(ahvif->tx_encap_type,
				       HAL_TX_BANK_CONFIG_ENCAP_TYPE);
	bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_SRC_BUFFER_SWAP) |
		       u32_encode_bits(0, HAL_TX_BANK_CONFIG_LINK_META_SWAP) |
		       u32_encode_bits(0, HAL_TX_BANK_CONFIG_EPD);

	/* only valid if idx_lookup_override is not set in tcl_data_cmd */
	if (ahvif->vdev_type == WMI_VDEV_TYPE_STA)
		bank_config |= u32_encode_bits(1, HAL_TX_BANK_CONFIG_INDEX_LOOKUP_EN);
	else
		bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_INDEX_LOOKUP_EN);

	bank_config |= u32_encode_bits(arvif->hal_addr_search_flags & HAL_TX_ADDRX_EN,
				       HAL_TX_BANK_CONFIG_ADDRX_EN) |
		       u32_encode_bits(!!(arvif->hal_addr_search_flags &
					  HAL_TX_ADDRY_EN),
				       HAL_TX_BANK_CONFIG_ADDRY_EN);

	bank_config |= u32_encode_bits(ieee80211_vif_is_mesh(ahvif->vif) ? 3 : 0,
				       HAL_TX_BANK_CONFIG_MESH_EN) |
		       u32_encode_bits(arvif->vdev_id_check_en,
				       HAL_TX_BANK_CONFIG_VDEV_ID_CHECK_EN);

	bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_DSCP_TIP_MAP_ID);

	return bank_config;
}

static int ath12k_dp_tx_get_bank_profile(struct ath12k_base *ab,
					 struct ath12k_link_vif *arvif,
					 struct ath12k_dp *dp)
{
	int bank_id = DP_INVALID_BANK_ID;
	int i;
	u32 bank_config;
	bool configure_register = false;

	/* convert vdev params into hal_tx_bank_config */
	bank_config = ath12k_dp_tx_get_vdev_bank_config(ab, arvif);

	spin_lock_bh(&dp->tx_bank_lock);
	/* TODO: implement using idr kernel framework */
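	/* Prefer a bank already programmed with an identical config;
	 * otherwise claim the first bank that is unconfigured or no longer
	 * referenced and program it below.
	 */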
	for (i = 0; i < dp->num_bank_profiles; i++) {
		if (dp->bank_profiles[i].is_configured &&
		    (dp->bank_profiles[i].bank_config ^ bank_config) == 0) {
			bank_id = i;
			goto inc_ref_and_return;
		}
		if (!dp->bank_profiles[i].is_configured ||
		    !dp->bank_profiles[i].num_users) {
			bank_id = i;
			goto configure_and_return;
		}
	}

	if (bank_id == DP_INVALID_BANK_ID) {
		spin_unlock_bh(&dp->tx_bank_lock);
		ath12k_err(ab, "unable to find TX bank!");
		return bank_id;
	}

configure_and_return:
	dp->bank_profiles[bank_id].is_configured = true;
	dp->bank_profiles[bank_id].bank_config = bank_config;
	configure_register = true;
inc_ref_and_return:
	dp->bank_profiles[bank_id].num_users++;
	spin_unlock_bh(&dp->tx_bank_lock);

	if (configure_register)
		ath12k_hal_tx_configure_bank_register(ab, bank_config, bank_id);

	ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt tcl bank_id %d input 0x%x match 0x%x num_users %u",
		   bank_id, bank_config, dp->bank_profiles[bank_id].bank_config,
		   dp->bank_profiles[bank_id].num_users);

	return bank_id;
}

void ath12k_dp_tx_put_bank_profile(struct ath12k_dp *dp, u8 bank_id)
{
	spin_lock_bh(&dp->tx_bank_lock);
	dp->bank_profiles[bank_id].num_users--;
	spin_unlock_bh(&dp->tx_bank_lock);
}

static void ath12k_dp_deinit_bank_profiles(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;

	kfree(dp->bank_profiles);
	dp->bank_profiles = NULL;
}

static int ath12k_dp_init_bank_profiles(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	u32 num_tcl_banks = ab->hw_params->num_tcl_banks;
	int i;

	dp->num_bank_profiles = num_tcl_banks;
	dp->bank_profiles = kmalloc_array(num_tcl_banks,
					  sizeof(struct ath12k_dp_tx_bank_profile),
					  GFP_KERNEL);
	if (!dp->bank_profiles)
		return -ENOMEM;

	spin_lock_init(&dp->tx_bank_lock);

	for (i = 0; i < num_tcl_banks; i++) {
		dp->bank_profiles[i].is_configured = false;
		dp->bank_profiles[i].num_users = 0;
	}

	return 0;
}

static void ath12k_dp_srng_common_cleanup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	int i;

	ath12k_dp_srng_cleanup(ab, &dp->reo_status_ring);
	ath12k_dp_srng_cleanup(ab, &dp->reo_cmd_ring);
	ath12k_dp_srng_cleanup(ab, &dp->reo_except_ring);
	ath12k_dp_srng_cleanup(ab, &dp->rx_rel_ring);
	ath12k_dp_srng_cleanup(ab, &dp->reo_reinject_ring);
	for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
		ath12k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
		ath12k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
	}
	ath12k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
}

static int ath12k_dp_srng_common_setup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	const struct ath12k_hal_tcl_to_wbm_rbm_map *map;
	struct hal_srng *srng;
	int i, ret, tx_comp_ring_num;
	u32 ring_hash_map;

	ret = ath12k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
				   HAL_SW2WBM_RELEASE, 0, 0,
				   DP_WBM_RELEASE_RING_SIZE);
	if (ret) {
		ath12k_warn(ab, "failed to set up wbm2sw_release ring: %d\n",
			    ret);
		goto err;
	}

	for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
		map = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map;
		tx_comp_ring_num = map[i].wbm_ring_num;

		ret = ath12k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
					   HAL_TCL_DATA, i, 0,
					   DP_TCL_DATA_RING_SIZE);
		if (ret) {
			ath12k_warn(ab, "failed to set up tcl_data ring (%d): %d\n",
				    i, ret);
			goto err;
		}

		ret = ath12k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
					   HAL_WBM2SW_RELEASE, tx_comp_ring_num, 0,
					   DP_TX_COMP_RING_SIZE);
		if (ret) {
			ath12k_warn(ab, "failed to set up tcl_comp ring (%d): %d\n",
				    tx_comp_ring_num, ret);
			goto err;
		}
	}

	ret = ath12k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
				   0, 0, DP_REO_REINJECT_RING_SIZE);
	if (ret) {
		ath12k_warn(ab, "failed to set up reo_reinject ring: %d\n",
			    ret);
		goto err;
	}

	ret = ath12k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
				   HAL_WBM2SW_REL_ERR_RING_NUM, 0,
				   DP_RX_RELEASE_RING_SIZE);
	if (ret) {
		ath12k_warn(ab, "failed to set up rx_rel ring: %d\n", ret);
		goto err;
	}

	ret = ath12k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION,
				   0, 0, DP_REO_EXCEPTION_RING_SIZE);
	if (ret) {
		ath12k_warn(ab, "failed to set up reo_exception ring: %d\n",
			    ret);
		goto err;
	}

	ret = ath12k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
				   0, 0, DP_REO_CMD_RING_SIZE);
	if (ret) {
		ath12k_warn(ab, "failed to set up reo_cmd ring: %d\n", ret);
		goto err;
	}

	srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
	ath12k_hal_reo_init_cmd_ring(ab, srng);

	ret = ath12k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
				   0, 0, DP_REO_STATUS_RING_SIZE);
	if (ret) {
		ath12k_warn(ab, "failed to set up reo_status ring: %d\n", ret);
		goto err;
	}

	/* When hash based routing of rx packets is enabled, 32 entries
	 * mapping hash values to rings are configured. Each hash entry
	 * uses four bits to select a particular ring. The ring mapping is
	 * 0:TCL, 1:SW1, 2:SW2, 3:SW3, 4:SW4, 5:Release, 6:FW, 7:SW5,
	 * 8:SW6, 9:SW7, 10:SW8, 11:Not used.
	 */
	ring_hash_map = HAL_HASH_ROUTING_RING_SW1 |
			HAL_HASH_ROUTING_RING_SW2 << 4 |
			HAL_HASH_ROUTING_RING_SW3 << 8 |
			HAL_HASH_ROUTING_RING_SW4 << 12 |
			HAL_HASH_ROUTING_RING_SW1 << 16 |
			HAL_HASH_ROUTING_RING_SW2 << 20 |
			HAL_HASH_ROUTING_RING_SW3 << 24 |
			HAL_HASH_ROUTING_RING_SW4 << 28;

	ath12k_hal_reo_hw_setup(ab, ring_hash_map);

	return 0;

err:
	ath12k_dp_srng_common_cleanup(ab);

	return ret;
}

static void ath12k_dp_scatter_idle_link_desc_cleanup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
	int i;

	for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
		if (!slist[i].vaddr)
			continue;

		dma_free_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
				  slist[i].vaddr, slist[i].paddr);
		slist[i].vaddr = NULL;
	}
}

static int ath12k_dp_scatter_idle_link_desc_setup(struct ath12k_base *ab,
						  int size,
						  u32 n_link_desc_bank,
						  u32 n_link_desc,
						  u32 last_bank_sz)
{
	struct ath12k_dp *dp = &ab->dp;
	struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
	u32 n_entries_per_buf;
	int num_scatter_buf, scatter_idx;
	struct hal_wbm_link_desc *scatter_buf;
	int align_bytes, n_entries;
	dma_addr_t paddr;
	int rem_entries;
	int i;
	int ret = 0;
	u32 end_offset, cookie;
	enum hal_rx_buf_return_buf_manager rbm = dp->idle_link_rbm;

	n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
		ath12k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK);
	num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);

	if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
		return -EINVAL;

	for (i = 0; i < num_scatter_buf; i++) {
		slist[i].vaddr = dma_alloc_coherent(ab->dev,
						    HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
						    &slist[i].paddr, GFP_KERNEL);
		if (!slist[i].vaddr) {
			ret = -ENOMEM;
			goto err;
		}
	}

	scatter_idx = 0;
	scatter_buf = slist[scatter_idx].vaddr;
	rem_entries = n_entries_per_buf;

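	/* Walk every link descriptor bank and add one idle-list entry per
	 * HW link descriptor; the cookie packs the remaining-entry count
	 * and the bank index so the descriptor can be located when the HW
	 * hands it back.
	 */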
	for (i = 0; i < n_link_desc_bank; i++) {
		align_bytes = link_desc_banks[i].vaddr -
			link_desc_banks[i].vaddr_unaligned;
		n_entries = (DP_LINK_DESC_ALLOC_SIZE_THRESH - align_bytes) /
			HAL_LINK_DESC_SIZE;
		paddr = link_desc_banks[i].paddr;
		while (n_entries) {
			cookie = DP_LINK_DESC_COOKIE_SET(n_entries, i);
			ath12k_hal_set_link_desc_addr(scatter_buf, cookie,
						      paddr, rbm);
			n_entries--;
			paddr += HAL_LINK_DESC_SIZE;
			if (rem_entries) {
				rem_entries--;
				scatter_buf++;
				continue;
			}

			rem_entries = n_entries_per_buf;
			scatter_idx++;
			scatter_buf = slist[scatter_idx].vaddr;
		}
	}

	end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
		sizeof(struct hal_wbm_link_desc);
	ath12k_hal_setup_link_idle_list(ab, slist, num_scatter_buf,
					n_link_desc, end_offset);

	return 0;

err:
	ath12k_dp_scatter_idle_link_desc_cleanup(ab);

	return ret;
}

static void
ath12k_dp_link_desc_bank_free(struct ath12k_base *ab,
			      struct dp_link_desc_bank *link_desc_banks)
{
	int i;

	for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
		if (link_desc_banks[i].vaddr_unaligned) {
			dma_free_coherent(ab->dev,
					  link_desc_banks[i].size,
					  link_desc_banks[i].vaddr_unaligned,
					  link_desc_banks[i].paddr_unaligned);
			link_desc_banks[i].vaddr_unaligned = NULL;
		}
	}
}

static int ath12k_dp_link_desc_bank_alloc(struct ath12k_base *ab,
					  struct dp_link_desc_bank *desc_bank,
					  int n_link_desc_bank,
					  int last_bank_sz)
{
	struct ath12k_dp *dp = &ab->dp;
	int i;
	int ret = 0;
	int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;

	for (i = 0; i < n_link_desc_bank; i++) {
		if (i == (n_link_desc_bank - 1) && last_bank_sz)
			desc_sz = last_bank_sz;

		desc_bank[i].vaddr_unaligned =
			dma_alloc_coherent(ab->dev, desc_sz,
					   &desc_bank[i].paddr_unaligned,
					   GFP_KERNEL);
		if (!desc_bank[i].vaddr_unaligned) {
			ret = -ENOMEM;
			goto err;
		}

		desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned,
					       HAL_LINK_DESC_ALIGN);
		desc_bank[i].paddr = desc_bank[i].paddr_unaligned +
				     ((unsigned long)desc_bank[i].vaddr -
				      (unsigned long)desc_bank[i].vaddr_unaligned);
		desc_bank[i].size = desc_sz;
	}

	return 0;

err:
	ath12k_dp_link_desc_bank_free(ab, dp->link_desc_banks);

	return ret;
}

void ath12k_dp_link_desc_cleanup(struct ath12k_base *ab,
				 struct dp_link_desc_bank *desc_bank,
				 u32 ring_type, struct dp_srng *ring)
{
	ath12k_dp_link_desc_bank_free(ab, desc_bank);

	if (ring_type != HAL_RXDMA_MONITOR_DESC) {
		ath12k_dp_srng_cleanup(ab, ring);
		ath12k_dp_scatter_idle_link_desc_cleanup(ab);
	}
}

static int ath12k_wbm_idle_ring_setup(struct ath12k_base *ab, u32 *n_link_desc)
{
	struct ath12k_dp *dp = &ab->dp;
	u32 n_mpdu_link_desc, n_mpdu_queue_desc;
	u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc;
	int ret = 0;

	n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
			   HAL_NUM_MPDUS_PER_LINK_DESC;

	n_mpdu_queue_desc = n_mpdu_link_desc /
			    HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;

	n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
			       DP_AVG_MSDUS_PER_FLOW) /
			      HAL_NUM_TX_MSDUS_PER_LINK_DESC;

	n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
			       DP_AVG_MSDUS_PER_MPDU) /
			      HAL_NUM_RX_MSDUS_PER_LINK_DESC;

	*n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
		       n_tx_msdu_link_desc + n_rx_msdu_link_desc;

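	/* For a non-power-of-two count, 1 << fls(n) is the next power of
	 * two above n, so this rounds the descriptor count up to a power
	 * of two.
	 */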
	if (*n_link_desc & (*n_link_desc - 1))
		*n_link_desc = 1 << fls(*n_link_desc);

	ret = ath12k_dp_srng_setup(ab, &dp->wbm_idle_ring,
				   HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
	if (ret) {
		ath12k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
		return ret;
	}
	return ret;
}

int ath12k_dp_link_desc_setup(struct ath12k_base *ab,
			      struct dp_link_desc_bank *link_desc_banks,
			      u32 ring_type, struct hal_srng *srng,
			      u32 n_link_desc)
{
	u32 tot_mem_sz;
	u32 n_link_desc_bank, last_bank_sz;
	u32 entry_sz, align_bytes, n_entries;
	struct hal_wbm_link_desc *desc;
	u32 paddr;
	int i, ret;
	u32 cookie;
	enum hal_rx_buf_return_buf_manager rbm = ab->dp.idle_link_rbm;

	tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
	tot_mem_sz += HAL_LINK_DESC_ALIGN;

	if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
		n_link_desc_bank = 1;
		last_bank_sz = tot_mem_sz;
	} else {
		n_link_desc_bank = tot_mem_sz /
				   (DP_LINK_DESC_ALLOC_SIZE_THRESH -
				    HAL_LINK_DESC_ALIGN);
		last_bank_sz = tot_mem_sz %
			       (DP_LINK_DESC_ALLOC_SIZE_THRESH -
				HAL_LINK_DESC_ALIGN);

		if (last_bank_sz)
			n_link_desc_bank += 1;
	}

	if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
		return -EINVAL;

	ret = ath12k_dp_link_desc_bank_alloc(ab, link_desc_banks,
					     n_link_desc_bank, last_bank_sz);
	if (ret)
		return ret;

	/* Setup link desc idle list for HW internal usage */
	entry_sz = ath12k_hal_srng_get_entrysize(ab, ring_type);
	tot_mem_sz = entry_sz * n_link_desc;

	/* Setup scatter desc list when the total memory requirement is more */
	if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
	    ring_type != HAL_RXDMA_MONITOR_DESC) {
		ret = ath12k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz,
							     n_link_desc_bank,
							     n_link_desc,
							     last_bank_sz);
		if (ret) {
			ath12k_warn(ab, "failed to setup scatter idle list descriptor: %d\n",
				    ret);
			goto fail_desc_bank_free;
		}

		return 0;
	}

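	/* The idle list fits in the ring itself; fill it with one entry
	 * per link descriptor directly under the SRNG lock.
	 */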
	spin_lock_bh(&srng->lock);

	ath12k_hal_srng_access_begin(ab, srng);

	for (i = 0; i < n_link_desc_bank; i++) {
		align_bytes = link_desc_banks[i].vaddr -
			link_desc_banks[i].vaddr_unaligned;
		n_entries = (link_desc_banks[i].size - align_bytes) /
			HAL_LINK_DESC_SIZE;
		paddr = link_desc_banks[i].paddr;
		while (n_entries &&
		       (desc = ath12k_hal_srng_src_get_next_entry(ab, srng))) {
			cookie = DP_LINK_DESC_COOKIE_SET(n_entries, i);
			ath12k_hal_set_link_desc_addr(desc, cookie, paddr, rbm);
			n_entries--;
			paddr += HAL_LINK_DESC_SIZE;
		}
	}

	ath12k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return 0;

fail_desc_bank_free:
	ath12k_dp_link_desc_bank_free(ab, link_desc_banks);

	return ret;
}

int ath12k_dp_service_srng(struct ath12k_base *ab,
			   struct ath12k_ext_irq_grp *irq_grp,
			   int budget)
{
	struct napi_struct *napi = &irq_grp->napi;
	int grp_id = irq_grp->grp_id;
	int work_done = 0;
	int i = 0, j;
	int tot_work_done = 0;
	enum dp_monitor_mode monitor_mode;
	u8 ring_mask;

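	/* Service every ring type mapped to this interrupt group, charging
	 * the work done against the NAPI budget and stopping once it is
	 * exhausted.
	 */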
	if (ab->hw_params->ring_mask->tx[grp_id]) {
		i = fls(ab->hw_params->ring_mask->tx[grp_id]) - 1;
		ath12k_dp_tx_completion_handler(ab, i);
	}

	if (ab->hw_params->ring_mask->rx_err[grp_id]) {
		work_done = ath12k_dp_rx_process_err(ab, napi, budget);
		budget -= work_done;
		tot_work_done += work_done;
		if (budget <= 0)
			goto done;
	}

	if (ab->hw_params->ring_mask->rx_wbm_rel[grp_id]) {
		work_done = ath12k_dp_rx_process_wbm_err(ab,
							 napi,
							 budget);
		budget -= work_done;
		tot_work_done += work_done;

		if (budget <= 0)
			goto done;
	}

	if (ab->hw_params->ring_mask->rx[grp_id]) {
		i = fls(ab->hw_params->ring_mask->rx[grp_id]) - 1;
		work_done = ath12k_dp_rx_process(ab, i, napi,
						 budget);
		budget -= work_done;
		tot_work_done += work_done;
		if (budget <= 0)
			goto done;
	}

	if (ab->hw_params->ring_mask->rx_mon_status[grp_id]) {
		ring_mask = ab->hw_params->ring_mask->rx_mon_status[grp_id];
		for (i = 0; i < ab->num_radios; i++) {
			for (j = 0; j < ab->hw_params->num_rxdma_per_pdev; j++) {
				int id = i * ab->hw_params->num_rxdma_per_pdev + j;

				if (ring_mask & BIT(id)) {
					work_done =
					ath12k_dp_mon_process_ring(ab, id, napi, budget,
								   0);
					budget -= work_done;
					tot_work_done += work_done;
					if (budget <= 0)
						goto done;
				}
			}
		}
	}

	if (ab->hw_params->ring_mask->rx_mon_dest[grp_id]) {
		monitor_mode = ATH12K_DP_RX_MONITOR_MODE;
		ring_mask = ab->hw_params->ring_mask->rx_mon_dest[grp_id];
		for (i = 0; i < ab->num_radios; i++) {
			for (j = 0; j < ab->hw_params->num_rxdma_per_pdev; j++) {
				int id = i * ab->hw_params->num_rxdma_per_pdev + j;

				if (ring_mask & BIT(id)) {
					work_done =
					ath12k_dp_mon_process_ring(ab, id, napi, budget,
								   monitor_mode);
					budget -= work_done;
					tot_work_done += work_done;

					if (budget <= 0)
						goto done;
				}
			}
		}
	}

	if (ab->hw_params->ring_mask->tx_mon_dest[grp_id]) {
		monitor_mode = ATH12K_DP_TX_MONITOR_MODE;
		ring_mask = ab->hw_params->ring_mask->tx_mon_dest[grp_id];
		for (i = 0; i < ab->num_radios; i++) {
			for (j = 0; j < ab->hw_params->num_rxdma_per_pdev; j++) {
				int id = i * ab->hw_params->num_rxdma_per_pdev + j;

				if (ring_mask & BIT(id)) {
					work_done =
					ath12k_dp_mon_process_ring(ab, id, napi, budget,
								   monitor_mode);
					budget -= work_done;
					tot_work_done += work_done;

					if (budget <= 0)
						goto done;
				}
			}
		}
	}

	if (ab->hw_params->ring_mask->reo_status[grp_id])
		ath12k_dp_rx_process_reo_status(ab);

	if (ab->hw_params->ring_mask->host2rxdma[grp_id]) {
		struct ath12k_dp *dp = &ab->dp;
		struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
		LIST_HEAD(list);

		ath12k_dp_rx_bufs_replenish(ab, rx_ring, &list, 0);
	}

	/* TODO: Implement handler for other interrupts */

done:
	return tot_work_done;
}

void ath12k_dp_pdev_free(struct ath12k_base *ab)
{
	int i;

	for (i = 0; i < ab->num_radios; i++)
		ath12k_dp_rx_pdev_free(ab, i);
}

void ath12k_dp_pdev_pre_alloc(struct ath12k *ar)
{
	struct ath12k_pdev_dp *dp = &ar->dp;

	dp->mac_id = ar->pdev_idx;
	atomic_set(&dp->num_tx_pending, 0);
	init_waitqueue_head(&dp->tx_empty_waitq);
	/* TODO: Add any RXDMA setup required per pdev */
}

bool ath12k_dp_wmask_compaction_rx_tlv_supported(struct ath12k_base *ab)
{
	if (test_bit(WMI_TLV_SERVICE_WMSK_COMPACTION_RX_TLVS, ab->wmi_ab.svc_map) &&
	    ab->hw_params->hal_ops->rxdma_ring_wmask_rx_mpdu_start &&
	    ab->hw_params->hal_ops->rxdma_ring_wmask_rx_msdu_end &&
	    ab->hw_params->hal_ops->get_hal_rx_compact_ops) {
		return true;
	}
	return false;
}

void ath12k_dp_hal_rx_desc_init(struct ath12k_base *ab)
{
	if (ath12k_dp_wmask_compaction_rx_tlv_supported(ab)) {
		/* RX TLVs compaction is supported, hence switch hal_rx_ops
		 * to the compact hal_rx_ops.
		 */
		ab->hal_rx_ops = ab->hw_params->hal_ops->get_hal_rx_compact_ops();
	}
	ab->hal.hal_desc_sz =
		ab->hal_rx_ops->rx_desc_get_desc_size();
}

int ath12k_dp_pdev_alloc(struct ath12k_base *ab)
{
	struct ath12k *ar;
	int ret;
	int i;

	ret = ath12k_dp_rx_htt_setup(ab);
	if (ret)
		goto out;

	/* TODO: Per-pdev rx ring unlike tx ring which is mapped to different AC's */
	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		ret = ath12k_dp_rx_pdev_alloc(ab, i);
		if (ret) {
			ath12k_warn(ab, "failed to allocate pdev rx for pdev_id %d\n",
				    i);
			goto err;
		}
		ret = ath12k_dp_rx_pdev_mon_attach(ar);
		if (ret) {
			ath12k_warn(ab, "failed to initialize mon pdev %d\n", i);
			goto err;
		}
	}

	return 0;
err:
	ath12k_dp_pdev_free(ab);
out:
	return ret;
}

int ath12k_dp_htt_connect(struct ath12k_dp *dp)
{
	struct ath12k_htc_svc_conn_req conn_req = {0};
	struct ath12k_htc_svc_conn_resp conn_resp = {0};
	int status;

	conn_req.ep_ops.ep_tx_complete = ath12k_dp_htt_htc_tx_complete;
	conn_req.ep_ops.ep_rx_complete = ath12k_dp_htt_htc_t2h_msg_handler;

	/* connect to control service */
	conn_req.service_id = ATH12K_HTC_SVC_ID_HTT_DATA_MSG;

	status = ath12k_htc_connect_service(&dp->ab->htc, &conn_req,
					    &conn_resp);

	if (status)
		return status;

	dp->eid = conn_resp.eid;

	return 0;
}

static void ath12k_dp_update_vdev_search(struct ath12k_link_vif *arvif)
{
	switch (arvif->ahvif->vdev_type) {
	case WMI_VDEV_TYPE_STA:
		arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
		arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
		break;
	case WMI_VDEV_TYPE_AP:
	case WMI_VDEV_TYPE_IBSS:
		arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
		arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
		break;
	case WMI_VDEV_TYPE_MONITOR:
	default:
		return;
	}
}

void ath12k_dp_vdev_tx_attach(struct ath12k *ar, struct ath12k_link_vif *arvif)
{
	struct ath12k_base *ab = ar->ab;

	arvif->tcl_metadata |= u32_encode_bits(1, HTT_TCL_META_DATA_TYPE) |
			       u32_encode_bits(arvif->vdev_id,
					       HTT_TCL_META_DATA_VDEV_ID) |
			       u32_encode_bits(ar->pdev->pdev_id,
					       HTT_TCL_META_DATA_PDEV_ID);

	/* set HTT extension valid bit to 0 by default */
	arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;

	ath12k_dp_update_vdev_search(arvif);
	arvif->vdev_id_check_en = true;
	arvif->bank_id = ath12k_dp_tx_get_bank_profile(ab, arvif, &ab->dp);

	/* TODO: error path for bank id failure */
	if (arvif->bank_id == DP_INVALID_BANK_ID) {
		ath12k_err(ar->ab, "Failed to initialize DP TX Banks");
		return;
	}
}

static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
{
	struct ath12k_rx_desc_info *desc_info;
	struct ath12k_tx_desc_info *tx_desc_info, *tmp1;
	struct ath12k_dp *dp = &ab->dp;
	struct ath12k_skb_cb *skb_cb;
	struct sk_buff *skb;
	struct ath12k *ar;
	int i, j;
	u32 pool_id, tx_spt_page;

	if (!dp->spt_info)
		return;

	/* RX Descriptor cleanup */
	spin_lock_bh(&dp->rx_desc_lock);

	for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
		desc_info = dp->rxbaddr[i];

		for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
			if (!desc_info[j].in_use) {
				list_del(&desc_info[j].list);
				continue;
			}

			skb = desc_info[j].skb;
			if (!skb)
				continue;

			dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
					 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
		}
	}

	for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
		if (!dp->rxbaddr[i])
			continue;

		kfree(dp->rxbaddr[i]);
		dp->rxbaddr[i] = NULL;
	}

	spin_unlock_bh(&dp->rx_desc_lock);

	/* TX Descriptor cleanup */
	for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) {
		spin_lock_bh(&dp->tx_desc_lock[i]);

		list_for_each_entry_safe(tx_desc_info, tmp1, &dp->tx_desc_used_list[i],
					 list) {
			list_del(&tx_desc_info->list);
			skb = tx_desc_info->skb;

			if (!skb)
				continue;

			skb_cb = ATH12K_SKB_CB(skb);
			if (skb_cb->paddr_ext_desc) {
				dma_unmap_single(ab->dev,
						 skb_cb->paddr_ext_desc,
						 tx_desc_info->skb_ext_desc->len,
						 DMA_TO_DEVICE);
				dev_kfree_skb_any(tx_desc_info->skb_ext_desc);
			}

			/* if we are unregistering, hw would've been destroyed and
			 * ar is no longer valid.
			 */
			if (!(test_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags))) {
				ar = skb_cb->ar;

				if (atomic_dec_and_test(&ar->dp.num_tx_pending))
					wake_up(&ar->dp.tx_empty_waitq);
			}

			dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr,
					 skb->len, DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
		}

		spin_unlock_bh(&dp->tx_desc_lock[i]);
	}

	for (pool_id = 0; pool_id < ATH12K_HW_MAX_QUEUES; pool_id++) {
		spin_lock_bh(&dp->tx_desc_lock[pool_id]);

		for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL; i++) {
			tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
			if (!dp->txbaddr[tx_spt_page])
				continue;

			kfree(dp->txbaddr[tx_spt_page]);
			dp->txbaddr[tx_spt_page] = NULL;
		}

		spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
	}

	/* free SPT pages */
	for (i = 0; i < dp->num_spt_pages; i++) {
		if (!dp->spt_info[i].vaddr)
			continue;

		dma_free_coherent(ab->dev, ATH12K_PAGE_SIZE,
				  dp->spt_info[i].vaddr, dp->spt_info[i].paddr);
		dp->spt_info[i].vaddr = NULL;
	}

	kfree(dp->spt_info);
	dp->spt_info = NULL;
}

static void ath12k_dp_reoq_lut_cleanup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;

	if (!ab->hw_params->reoq_lut_support)
		return;

	if (dp->reoq_lut.vaddr_unaligned) {
		ath12k_hif_write32(ab,
				   HAL_SEQ_WCSS_UMAC_REO_REG +
				   HAL_REO1_QDESC_LUT_BASE0(ab), 0);
		dma_free_coherent(ab->dev, dp->reoq_lut.size,
				  dp->reoq_lut.vaddr_unaligned,
				  dp->reoq_lut.paddr_unaligned);
		dp->reoq_lut.vaddr_unaligned = NULL;
	}

	if (dp->ml_reoq_lut.vaddr_unaligned) {
		ath12k_hif_write32(ab,
				   HAL_SEQ_WCSS_UMAC_REO_REG +
				   HAL_REO1_QDESC_LUT_BASE1(ab), 0);
		dma_free_coherent(ab->dev, dp->ml_reoq_lut.size,
				  dp->ml_reoq_lut.vaddr_unaligned,
				  dp->ml_reoq_lut.paddr_unaligned);
		dp->ml_reoq_lut.vaddr_unaligned = NULL;
	}
}

void ath12k_dp_free(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	int i;

	if (!dp->ab)
		return;

	ath12k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);

	ath12k_dp_cc_cleanup(ab);
	ath12k_dp_reoq_lut_cleanup(ab);
	ath12k_dp_deinit_bank_profiles(ab);
	ath12k_dp_srng_common_cleanup(ab);

	ath12k_dp_rx_reo_cmd_list_cleanup(ab);

	for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
		kfree(dp->tx_ring[i].tx_status);
		dp->tx_ring[i].tx_status = NULL;
	}

	ath12k_dp_rx_free(ab);
	/* Deinit any SOC level resource */
	dp->ab = NULL;
}

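/* HW cookie conversion: the primary page table (PPT) lives in CMEM and
 * points to secondary page table (SPT) pages in host memory that hold
 * the virtual addresses of the SW descriptors, letting the HW hand back
 * a buffer's SW descriptor directly from its cookie.
 */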
void ath12k_dp_cc_config(struct ath12k_base *ab)
{
	u32 cmem_base = ab->qmi.dev_mem[ATH12K_QMI_DEVMEM_CMEM_INDEX].start;
	u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
	u32 wbm_base = HAL_SEQ_WCSS_UMAC_WBM_REG;
	u32 val = 0;

	if (ath12k_ftm_mode)
		return;

	ath12k_hif_write32(ab, reo_base + HAL_REO1_SW_COOKIE_CFG0(ab), cmem_base);

	val |= u32_encode_bits(ATH12K_CMEM_ADDR_MSB,
			       HAL_REO1_SW_COOKIE_CFG_CMEM_BASE_ADDR_MSB) |
	       u32_encode_bits(ATH12K_CC_PPT_MSB,
			       HAL_REO1_SW_COOKIE_CFG_COOKIE_PPT_MSB) |
	       u32_encode_bits(ATH12K_CC_SPT_MSB,
			       HAL_REO1_SW_COOKIE_CFG_COOKIE_SPT_MSB) |
	       u32_encode_bits(1, HAL_REO1_SW_COOKIE_CFG_ALIGN) |
	       u32_encode_bits(1, HAL_REO1_SW_COOKIE_CFG_ENABLE) |
	       u32_encode_bits(1, HAL_REO1_SW_COOKIE_CFG_GLOBAL_ENABLE);

	ath12k_hif_write32(ab, reo_base + HAL_REO1_SW_COOKIE_CFG1(ab), val);

	/* Enable HW CC for WBM */
	ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG0, cmem_base);

	val = u32_encode_bits(ATH12K_CMEM_ADDR_MSB,
			      HAL_WBM_SW_COOKIE_CFG_CMEM_BASE_ADDR_MSB) |
	      u32_encode_bits(ATH12K_CC_PPT_MSB,
			      HAL_WBM_SW_COOKIE_CFG_COOKIE_PPT_MSB) |
	      u32_encode_bits(ATH12K_CC_SPT_MSB,
			      HAL_WBM_SW_COOKIE_CFG_COOKIE_SPT_MSB) |
	      u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_ALIGN);

	ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG1, val);

	/* Enable conversion complete indication */
	val = ath12k_hif_read32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG2);
	val |= u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_RELEASE_PATH_EN) |
	       u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_ERR_PATH_EN) |
	       u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_CONV_IND_EN);

	ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG2, val);

	/* Enable Cookie conversion for WBM2SW Rings */
	val = ath12k_hif_read32(ab, wbm_base + HAL_WBM_SW_COOKIE_CONVERT_CFG);
	val |= u32_encode_bits(1, HAL_WBM_SW_COOKIE_CONV_CFG_GLOBAL_EN) |
	       ab->hw_params->hal_params->wbm2sw_cc_enable;

	ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CONVERT_CFG, val);
}

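/* A cookie packs the PPT index in the upper bits and the SPT entry
 * index in the lower bits.
 */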
static u32 ath12k_dp_cc_cookie_gen(u16 ppt_idx, u16 spt_idx)
{
	return (u32)ppt_idx << ATH12K_CC_PPT_SHIFT | spt_idx;
}

static inline void *ath12k_dp_cc_get_desc_addr_ptr(struct ath12k_base *ab,
						   u16 ppt_idx, u16 spt_idx)
{
	struct ath12k_dp *dp = &ab->dp;

	return dp->spt_info[ppt_idx].vaddr + spt_idx;
}

struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_base *ab,
						  u32 cookie)
{
	struct ath12k_dp *dp = &ab->dp;
	struct ath12k_rx_desc_info **desc_addr_ptr;
	u16 start_ppt_idx, end_ppt_idx, ppt_idx, spt_idx;

	ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT);
	spt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_SPT);

	start_ppt_idx = dp->rx_ppt_base + ATH12K_RX_SPT_PAGE_OFFSET;
	end_ppt_idx = start_ppt_idx + ATH12K_NUM_RX_SPT_PAGES;

	if (ppt_idx < start_ppt_idx ||
	    ppt_idx >= end_ppt_idx ||
	    spt_idx > ATH12K_MAX_SPT_ENTRIES)
		return NULL;

	ppt_idx = ppt_idx - dp->rx_ppt_base;
	desc_addr_ptr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, spt_idx);

	return *desc_addr_ptr;
}

struct ath12k_tx_desc_info *ath12k_dp_get_tx_desc(struct ath12k_base *ab,
						  u32 cookie)
{
	struct ath12k_tx_desc_info **desc_addr_ptr;
	u16 start_ppt_idx, end_ppt_idx, ppt_idx, spt_idx;

	ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT);
	spt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_SPT);

	start_ppt_idx = ATH12K_TX_SPT_PAGE_OFFSET;
	end_ppt_idx = start_ppt_idx +
		      (ATH12K_TX_SPT_PAGES_PER_POOL * ATH12K_HW_MAX_QUEUES);

	if (ppt_idx < start_ppt_idx ||
	    ppt_idx >= end_ppt_idx ||
	    spt_idx > ATH12K_MAX_SPT_ENTRIES)
		return NULL;

	desc_addr_ptr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, spt_idx);

	return *desc_addr_ptr;
}

static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	struct ath12k_rx_desc_info *rx_descs, **rx_desc_addr;
	struct ath12k_tx_desc_info *tx_descs, **tx_desc_addr;
	u32 i, j, pool_id, tx_spt_page;
	u32 ppt_idx, cookie_ppt_idx;

	spin_lock_bh(&dp->rx_desc_lock);

	/* First ATH12K_NUM_RX_SPT_PAGES of allocated SPT pages are used for RX */
	for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
		rx_descs = kcalloc(ATH12K_MAX_SPT_ENTRIES, sizeof(*rx_descs),
				   GFP_ATOMIC);

		if (!rx_descs) {
			spin_unlock_bh(&dp->rx_desc_lock);
			return -ENOMEM;
		}

		ppt_idx = ATH12K_RX_SPT_PAGE_OFFSET + i;
		cookie_ppt_idx = dp->rx_ppt_base + ppt_idx;
		dp->rxbaddr[i] = &rx_descs[0];

		for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
			rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(cookie_ppt_idx, j);
			rx_descs[j].magic = ATH12K_DP_RX_DESC_MAGIC;
			rx_descs[j].device_id = ab->device_id;
			list_add_tail(&rx_descs[j].list, &dp->rx_desc_free_list);

			/* Update descriptor VA in SPT */
			rx_desc_addr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, j);
			*rx_desc_addr = &rx_descs[j];
		}
	}

	spin_unlock_bh(&dp->rx_desc_lock);

	for (pool_id = 0; pool_id < ATH12K_HW_MAX_QUEUES; pool_id++) {
		spin_lock_bh(&dp->tx_desc_lock[pool_id]);
		for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL; i++) {
			tx_descs = kcalloc(ATH12K_MAX_SPT_ENTRIES, sizeof(*tx_descs),
					   GFP_ATOMIC);

			if (!tx_descs) {
				spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
				/* Caller takes care of TX pending and RX desc cleanup */
				return -ENOMEM;
			}

			tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
			ppt_idx = ATH12K_TX_SPT_PAGE_OFFSET + tx_spt_page;

			dp->txbaddr[tx_spt_page] = &tx_descs[0];

			for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
				tx_descs[j].desc_id = ath12k_dp_cc_cookie_gen(ppt_idx, j);
				tx_descs[j].pool_id = pool_id;
				list_add_tail(&tx_descs[j].list,
					      &dp->tx_desc_free_list[pool_id]);

				/* Update descriptor VA in SPT */
				tx_desc_addr =
					ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, j);
				*tx_desc_addr = &tx_descs[j];
			}
		}
		spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
	}
	return 0;
}

static int ath12k_dp_cmem_init(struct ath12k_base *ab,
			       struct ath12k_dp *dp,
			       enum ath12k_dp_desc_type type)
{
	u32 cmem_base;
	int i, start, end;

	cmem_base = ab->qmi.dev_mem[ATH12K_QMI_DEVMEM_CMEM_INDEX].start;

	switch (type) {
	case ATH12K_DP_TX_DESC:
		start = ATH12K_TX_SPT_PAGE_OFFSET;
		end = start + ATH12K_NUM_TX_SPT_PAGES;
		break;
	case ATH12K_DP_RX_DESC:
		cmem_base += ATH12K_PPT_ADDR_OFFSET(dp->rx_ppt_base);
		start = ATH12K_RX_SPT_PAGE_OFFSET;
		end = start + ATH12K_NUM_RX_SPT_PAGES;
		break;
	default:
		ath12k_err(ab, "invalid descriptor type %d in cmem init\n", type);
		return -EINVAL;
	}

	/* Write to PPT in CMEM */
	for (i = start; i < end; i++)
		ath12k_hif_write32(ab, cmem_base + ATH12K_PPT_ADDR_OFFSET(i),
				   dp->spt_info[i].paddr >> ATH12K_SPT_4K_ALIGN_OFFSET);

	return 0;
}

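/* Program the RX page tables of every partner device in the hw group
 * into this device's CMEM, so RX cookies generated by partner devices
 * can be converted locally.
 */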
void ath12k_dp_partner_cc_init(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ab->ag;
	int i;

	for (i = 0; i < ag->num_devices; i++) {
		if (ag->ab[i] == ab)
			continue;

		ath12k_dp_cmem_init(ab, &ag->ab[i]->dp, ATH12K_DP_RX_DESC);
	}
}

static int ath12k_dp_cc_init(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	int i, ret = 0;

	INIT_LIST_HEAD(&dp->rx_desc_free_list);
	spin_lock_init(&dp->rx_desc_lock);

	for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) {
		INIT_LIST_HEAD(&dp->tx_desc_free_list[i]);
		INIT_LIST_HEAD(&dp->tx_desc_used_list[i]);
		spin_lock_init(&dp->tx_desc_lock[i]);
	}

	dp->num_spt_pages = ATH12K_NUM_SPT_PAGES;
	if (dp->num_spt_pages > ATH12K_MAX_PPT_ENTRIES)
		dp->num_spt_pages = ATH12K_MAX_PPT_ENTRIES;

	dp->spt_info = kcalloc(dp->num_spt_pages, sizeof(struct ath12k_spt_info),
			       GFP_KERNEL);

	if (!dp->spt_info) {
		ath12k_warn(ab, "SPT page allocation failure");
		return -ENOMEM;
	}

	dp->rx_ppt_base = ab->device_id * ATH12K_NUM_RX_SPT_PAGES;

	for (i = 0; i < dp->num_spt_pages; i++) {
		dp->spt_info[i].vaddr = dma_alloc_coherent(ab->dev,
							   ATH12K_PAGE_SIZE,
							   &dp->spt_info[i].paddr,
							   GFP_KERNEL);

		if (!dp->spt_info[i].vaddr) {
			ret = -ENOMEM;
			goto free;
		}

		if (dp->spt_info[i].paddr & ATH12K_SPT_4K_ALIGN_CHECK) {
			ath12k_warn(ab, "SPT allocated memory is not 4K aligned");
			ret = -EINVAL;
			goto free;
		}
	}

	ret = ath12k_dp_cmem_init(ab, dp, ATH12K_DP_TX_DESC);
	if (ret) {
		ath12k_warn(ab, "HW CC Tx cmem init failed %d", ret);
		goto free;
	}

	ret = ath12k_dp_cmem_init(ab, dp, ATH12K_DP_RX_DESC);
	if (ret) {
		ath12k_warn(ab, "HW CC Rx cmem init failed %d", ret);
		goto free;
	}

	ret = ath12k_dp_cc_desc_init(ab);
	if (ret) {
		ath12k_warn(ab, "HW CC desc init failed %d", ret);
		goto free;
	}

	return 0;
free:
	ath12k_dp_cc_cleanup(ab);
	return ret;
}

static int ath12k_dp_alloc_reoq_lut(struct ath12k_base *ab,
				    struct ath12k_reo_q_addr_lut *lut)
{
	lut->size = DP_REOQ_LUT_SIZE + HAL_REO_QLUT_ADDR_ALIGN - 1;
	lut->vaddr_unaligned = dma_alloc_coherent(ab->dev, lut->size,
						  &lut->paddr_unaligned,
						  GFP_KERNEL | __GFP_ZERO);
	if (!lut->vaddr_unaligned)
		return -ENOMEM;

	lut->vaddr = PTR_ALIGN(lut->vaddr_unaligned, HAL_REO_QLUT_ADDR_ALIGN);
	lut->paddr = lut->paddr_unaligned +
		     ((unsigned long)lut->vaddr - (unsigned long)lut->vaddr_unaligned);
	return 0;
}

static int ath12k_dp_reoq_lut_setup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	u32 val;
	int ret;

	if (!ab->hw_params->reoq_lut_support)
		return 0;

	ret = ath12k_dp_alloc_reoq_lut(ab, &dp->reoq_lut);
	if (ret) {
		ath12k_warn(ab, "failed to allocate memory for reoq table");
		return ret;
	}

	ret = ath12k_dp_alloc_reoq_lut(ab, &dp->ml_reoq_lut);
	if (ret) {
		ath12k_warn(ab, "failed to allocate memory for ML reoq table");
		dma_free_coherent(ab->dev, dp->reoq_lut.size,
				  dp->reoq_lut.vaddr_unaligned,
				  dp->reoq_lut.paddr_unaligned);
		dp->reoq_lut.vaddr_unaligned = NULL;
		return ret;
	}

	/* The register holds bits [39:8] of the LUT base address; the low
	 * eight bits are assumed to be zero. The current design supports
	 * physical addresses only up to 4 GB, so the value fits in a
	 * 32-bit register.
	 */

	ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab),
			   dp->reoq_lut.paddr >> 8);

	ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE1(ab),
			   dp->ml_reoq_lut.paddr >> 8);

	val = ath12k_hif_read32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_ADDR(ab));

	ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_ADDR(ab),
			   val | HAL_REO_QDESC_ADDR_READ_LUT_ENABLE);

	ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_MAX_PEERID(ab),
			   HAL_REO_QDESC_MAX_PEERID);

	return 0;
}

static enum hal_rx_buf_return_buf_manager
ath12k_dp_get_idle_link_rbm(struct ath12k_base *ab)
{
	switch (ab->device_id) {
	case 0:
		return HAL_RX_BUF_RBM_WBM_DEV0_IDLE_DESC_LIST;
	case 1:
		return HAL_RX_BUF_RBM_WBM_DEV1_IDLE_DESC_LIST;
	case 2:
		return HAL_RX_BUF_RBM_WBM_DEV2_IDLE_DESC_LIST;
	default:
		ath12k_warn(ab, "invalid device id %d, falling back to default rbm\n",
			    ab->device_id);
		WARN_ON(1);
		return HAL_RX_BUF_RBM_WBM_DEV0_IDLE_DESC_LIST;
	}
}

int ath12k_dp_alloc(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	struct hal_srng *srng = NULL;
	size_t size = 0;
	u32 n_link_desc = 0;
	int ret;
	int i;

	dp->ab = ab;

	INIT_LIST_HEAD(&dp->reo_cmd_list);
	INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
	spin_lock_init(&dp->reo_cmd_lock);

	dp->reo_cmd_cache_flush_count = 0;
	dp->idle_link_rbm = ath12k_dp_get_idle_link_rbm(ab);

	ret = ath12k_wbm_idle_ring_setup(ab, &n_link_desc);
	if (ret) {
		ath12k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
		return ret;
	}

	srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id];

	ret = ath12k_dp_link_desc_setup(ab, dp->link_desc_banks,
					HAL_WBM_IDLE_LINK, srng, n_link_desc);
	if (ret) {
		ath12k_warn(ab, "failed to setup link desc: %d\n", ret);
		return ret;
	}

	ret = ath12k_dp_cc_init(ab);

	if (ret) {
		ath12k_warn(ab, "failed to setup cookie converter %d\n", ret);
		goto fail_link_desc_cleanup;
	}
	ret = ath12k_dp_init_bank_profiles(ab);
	if (ret) {
		ath12k_warn(ab, "failed to setup bank profiles %d\n", ret);
		goto fail_hw_cc_cleanup;
	}

	ret = ath12k_dp_srng_common_setup(ab);
	if (ret)
		goto fail_dp_bank_profiles_cleanup;

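	/* Each TX ring keeps a host-side buffer of completion status
	 * entries, sized to match the completion ring.
	 */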
	size = sizeof(struct hal_wbm_release_ring_tx) * DP_TX_COMP_RING_SIZE;

	ret = ath12k_dp_reoq_lut_setup(ab);
	if (ret) {
		ath12k_warn(ab, "failed to setup reoq table %d\n", ret);
		goto fail_cmn_srng_cleanup;
	}

	for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
		dp->tx_ring[i].tcl_data_ring_id = i;

		dp->tx_ring[i].tx_status_head = 0;
		dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
		dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
		if (!dp->tx_ring[i].tx_status) {
			ret = -ENOMEM;
			/* FIXME: The allocated tx status is not freed
			 * properly here
			 */
			goto fail_cmn_reoq_cleanup;
		}
	}

	for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
		ath12k_hal_tx_set_dscp_tid_map(ab, i);

	ret = ath12k_dp_rx_alloc(ab);
	if (ret)
		goto fail_dp_rx_free;

	/* Init any SOC level resource for DP */

	return 0;

fail_dp_rx_free:
	ath12k_dp_rx_free(ab);

fail_cmn_reoq_cleanup:
	ath12k_dp_reoq_lut_cleanup(ab);

fail_cmn_srng_cleanup:
	ath12k_dp_srng_common_cleanup(ab);

fail_dp_bank_profiles_cleanup:
	ath12k_dp_deinit_bank_profiles(ab);

fail_hw_cc_cleanup:
	ath12k_dp_cc_cleanup(ab);

fail_link_desc_cleanup:
	ath12k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);

	return ret;
}
