// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include "core.h"
#include "dp_tx.h"
#include "debug.h"
#include "debugfs.h"
#include "hw.h"
#include "peer.h"
#include "mac.h"

static enum hal_tcl_encap_type
ath12k_dp_tx_get_encap_type(struct ath12k_link_vif *arvif, struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath12k_base *ab = arvif->ar->ab;

	if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
		return HAL_TCL_ENCAP_TYPE_RAW;

	if (tx_info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
		return HAL_TCL_ENCAP_TYPE_ETHERNET;

	return HAL_TCL_ENCAP_TYPE_NATIVE_WIFI;
}

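/* Convert an 802.11 QoS data frame into the "native wifi" layout expected by
 * the hardware: shift the header forward over the QoS control field and clear
 * the QoS subtype bit, leaving a plain data frame header.
 */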
static void ath12k_dp_tx_encap_nwifi(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	u8 *qos_ctl;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return;

	qos_ctl = ieee80211_get_qos_ctl(hdr);
	memmove(skb->data + IEEE80211_QOS_CTL_LEN,
		skb->data, (void *)qos_ctl - (void *)skb->data);
	skb_pull(skb, IEEE80211_QOS_CTL_LEN);

	hdr = (void *)skb->data;
	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
}

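/* For QoS and hw-80211-encapsulated frames the TID comes from skb->priority;
 * non-QoS frames are queued on the dedicated non-QoS TID.
 */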
static u8 ath12k_dp_tx_get_tid(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct ath12k_skb_cb *cb = ATH12K_SKB_CB(skb);

	if (cb->flags & ATH12K_SKB_HW_80211_ENCAP)
		return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	else if (!ieee80211_is_data_qos(hdr->frame_control))
		return HAL_DESC_REO_NON_QOS_TID;
	else
		return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
}

enum hal_encrypt_type ath12k_dp_tx_get_encrypt_type(u32 cipher)
{
	switch (cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
		return HAL_ENCRYPT_TYPE_WEP_40;
	case WLAN_CIPHER_SUITE_WEP104:
		return HAL_ENCRYPT_TYPE_WEP_104;
	case WLAN_CIPHER_SUITE_TKIP:
		return HAL_ENCRYPT_TYPE_TKIP_MIC;
	case WLAN_CIPHER_SUITE_CCMP:
		return HAL_ENCRYPT_TYPE_CCMP_128;
	case WLAN_CIPHER_SUITE_CCMP_256:
		return HAL_ENCRYPT_TYPE_CCMP_256;
	case WLAN_CIPHER_SUITE_GCMP:
		return HAL_ENCRYPT_TYPE_GCMP_128;
	case WLAN_CIPHER_SUITE_GCMP_256:
		return HAL_ENCRYPT_TYPE_AES_GCMP_256;
	default:
		return HAL_ENCRYPT_TYPE_OPEN;
	}
}

static void ath12k_dp_tx_release_txbuf(struct ath12k_dp *dp,
				       struct ath12k_tx_desc_info *tx_desc,
				       u8 pool_id)
{
	spin_lock_bh(&dp->tx_desc_lock[pool_id]);
	tx_desc->skb_ext_desc = NULL;
	list_move_tail(&tx_desc->list, &dp->tx_desc_free_list[pool_id]);
	spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
}

static struct ath12k_tx_desc_info *ath12k_dp_tx_assign_buffer(struct ath12k_dp *dp,
							      u8 pool_id)
{
	struct ath12k_tx_desc_info *desc;

	spin_lock_bh(&dp->tx_desc_lock[pool_id]);
	desc = list_first_entry_or_null(&dp->tx_desc_free_list[pool_id],
					struct ath12k_tx_desc_info,
					list);
	if (!desc) {
		spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
		ath12k_warn(dp->ab, "failed to allocate data Tx buffer\n");
		return NULL;
	}

	list_move_tail(&desc->list, &dp->tx_desc_used_list[pool_id]);
	spin_unlock_bh(&dp->tx_desc_lock[pool_id]);

	return desc;
}

static void ath12k_hal_tx_cmd_ext_desc_setup(struct ath12k_base *ab,
					     struct hal_tx_msdu_ext_desc *tcl_ext_cmd,
					     struct hal_tx_info *ti)
{
	tcl_ext_cmd->info0 = le32_encode_bits(ti->paddr,
					      HAL_TX_MSDU_EXT_INFO0_BUF_PTR_LO);
	tcl_ext_cmd->info1 = le32_encode_bits(0x0,
					      HAL_TX_MSDU_EXT_INFO1_BUF_PTR_HI) |
			     le32_encode_bits(ti->data_len,
					      HAL_TX_MSDU_EXT_INFO1_BUF_LEN);

	tcl_ext_cmd->info1 |= le32_encode_bits(1, HAL_TX_MSDU_EXT_INFO1_EXTN_OVERRIDE) |
			      le32_encode_bits(ti->encap_type,
					       HAL_TX_MSDU_EXT_INFO1_ENCAP_TYPE) |
			      le32_encode_bits(ti->encrypt_type,
					       HAL_TX_MSDU_EXT_INFO1_ENCRYPT_TYPE);
}

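/* HTT metadata appended to the MSDU ext descriptor skb is padded to a
 * multiple of 8 bytes.
 */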
#define HTT_META_DATA_ALIGNMENT	0x8

static void *ath12k_dp_metadata_align_skb(struct sk_buff *skb, u8 tail_len)
{
	struct sk_buff *tail;
	void *metadata;

	if (unlikely(skb_cow_data(skb, tail_len, &tail) < 0))
		return NULL;

	metadata = pskb_put(skb, tail, tail_len);
	memset(metadata, 0, tail_len);
	return metadata;
}

/* Prepare the HTT metadata that accompanies an ext MSDU descriptor */
static int ath12k_dp_prepare_htt_metadata(struct sk_buff *skb)
{
	struct hal_tx_msdu_metadata *desc_ext;
	u8 htt_desc_size;
	/* Size rounded up to a multiple of 8 bytes */
	u8 htt_desc_size_aligned;

	htt_desc_size = sizeof(struct hal_tx_msdu_metadata);
	htt_desc_size_aligned = ALIGN(htt_desc_size, HTT_META_DATA_ALIGNMENT);

	desc_ext = ath12k_dp_metadata_align_skb(skb, htt_desc_size_aligned);
	if (!desc_ext)
		return -ENOMEM;

	desc_ext->info0 = le32_encode_bits(1, HAL_TX_MSDU_METADATA_INFO0_ENCRYPT_FLAG) |
			  le32_encode_bits(0, HAL_TX_MSDU_METADATA_INFO0_ENCRYPT_TYPE) |
			  le32_encode_bits(1,
					   HAL_TX_MSDU_METADATA_INFO0_HOST_TX_DESC_POOL);

	return 0;
}

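/* skb->data must have the address bits covered by hw_params->iova_mask
 * cleared before it can be DMA mapped. The two helpers below shift the
 * payload within the existing head/tail room, or reallocate the skb, until
 * the start address satisfies the mask.
 */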
static void ath12k_dp_tx_move_payload(struct sk_buff *skb,
				      unsigned long delta,
				      bool head)
{
	unsigned long len = skb->len;

	if (head) {
		skb_push(skb, delta);
		memmove(skb->data, skb->data + delta, len);
		skb_trim(skb, len);
	} else {
		skb_put(skb, delta);
		memmove(skb->data + delta, skb->data, len);
		skb_pull(skb, delta);
	}
}

static int ath12k_dp_tx_align_payload(struct ath12k_base *ab,
				      struct sk_buff **pskb)
{
	u32 iova_mask = ab->hw_params->iova_mask;
	unsigned long offset, delta1, delta2;
	struct sk_buff *skb2, *skb = *pskb;
	unsigned int headroom = skb_headroom(skb);
	int tailroom = skb_tailroom(skb);
	int ret = 0;

	offset = (unsigned long)skb->data & iova_mask;
	delta1 = offset;
	delta2 = iova_mask - offset + 1;

	if (headroom >= delta1) {
		ath12k_dp_tx_move_payload(skb, delta1, true);
	} else if (tailroom >= delta2) {
		ath12k_dp_tx_move_payload(skb, delta2, false);
	} else {
		skb2 = skb_realloc_headroom(skb, iova_mask);
		if (!skb2) {
			ret = -ENOMEM;
			goto out;
		}

		dev_kfree_skb_any(skb);

		offset = (unsigned long)skb2->data & iova_mask;
		if (offset)
			ath12k_dp_tx_move_payload(skb2, offset, true);
		*pskb = skb2;
	}

out:
	return ret;
}

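/* Main data-path transmit: pick a TCL ring and tx descriptor, encapsulate
 * the frame as needed, DMA map it (plus an optional MSDU ext descriptor) and
 * post a TCL data command to the hardware.
 */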
int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif,
		 struct sk_buff *skb, bool gsn_valid, int mcbc_gsn,
		 bool is_mcast)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_dp *dp = &ab->dp;
	struct hal_tx_info ti = {0};
	struct ath12k_tx_desc_info *tx_desc;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
	struct hal_tcl_data_cmd *hal_tcl_desc;
	struct hal_tx_msdu_ext_desc *msg;
	struct sk_buff *skb_ext_desc = NULL;
	struct hal_srng *tcl_ring;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct ath12k_vif *ahvif = arvif->ahvif;
	struct dp_tx_ring *tx_ring;
	u8 pool_id;
	u8 hal_ring_id;
	int ret;
	u8 ring_selector, ring_map = 0;
	bool tcl_ring_retry;
	bool msdu_ext_desc = false;
	bool add_htt_metadata = false;
	u32 iova_mask = ab->hw_params->iova_mask;

	if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
		return -ESHUTDOWN;

	if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
	    !ieee80211_is_data(hdr->frame_control))
		return -EOPNOTSUPP;

	pool_id = skb_get_queue_mapping(skb) & (ATH12K_HW_MAX_QUEUES - 1);

	/* By default select the TCL ring based on the current processor
	 * number: one of the TCL rings is chosen from smp_processor_id().
	 * If that ring is full/busy, fall back to the other available
	 * rings; if all rings are full, drop the packet.
	 * TODO: Add throttling logic when all rings are full
	 */
	ring_selector = ab->hw_params->hw_ops->get_ring_selector(skb);

tcl_ring_sel:
	tcl_ring_retry = false;
	ti.ring_id = ring_selector % ab->hw_params->max_tx_ring;

	ring_map |= BIT(ti.ring_id);
	ti.rbm_id = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map[ti.ring_id].rbm_id;

	tx_ring = &dp->tx_ring[ti.ring_id];

	tx_desc = ath12k_dp_tx_assign_buffer(dp, pool_id);
	if (!tx_desc)
		return -ENOMEM;

	ti.bank_id = arvif->bank_id;
	ti.meta_data_flags = arvif->tcl_metadata;

	if (ahvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW &&
	    test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags)) {
		if (skb_cb->flags & ATH12K_SKB_CIPHER_SET) {
			ti.encrypt_type =
				ath12k_dp_tx_get_encrypt_type(skb_cb->cipher);

			if (ieee80211_has_protected(hdr->frame_control))
				skb_put(skb, IEEE80211_CCMP_MIC_LEN);
		} else {
			ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
		}

		msdu_ext_desc = true;
	}

	if (gsn_valid) {
		/* Reset and Initialize meta_data_flags with Global Sequence
		 * Number (GSN) info.
		 */
		ti.meta_data_flags =
			u32_encode_bits(HTT_TCL_META_DATA_TYPE_GLOBAL_SEQ_NUM,
					HTT_TCL_META_DATA_TYPE) |
			u32_encode_bits(mcbc_gsn, HTT_TCL_META_DATA_GLOBAL_SEQ_NUM);
	}

	ti.encap_type = ath12k_dp_tx_get_encap_type(arvif, skb);
	ti.addr_search_flags = arvif->hal_addr_search_flags;
	ti.search_type = arvif->search_type;
	ti.type = HAL_TCL_DESC_TYPE_BUFFER;
	ti.pkt_offset = 0;
	ti.lmac_id = ar->lmac_id;

	ti.vdev_id = arvif->vdev_id;
	if (gsn_valid)
		ti.vdev_id += HTT_TX_MLO_MCAST_HOST_REINJECT_BASE_VDEV_ID;

	ti.bss_ast_hash = arvif->ast_hash;
	ti.bss_ast_idx = arvif->ast_idx;
	ti.dscp_tid_tbl_idx = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    ti.encap_type != HAL_TCL_ENCAP_TYPE_RAW) {
		ti.flags0 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_IP4_CKSUM_EN) |
			     u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_UDP4_CKSUM_EN) |
			     u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_UDP6_CKSUM_EN) |
			     u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TCP4_CKSUM_EN) |
			     u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TCP6_CKSUM_EN);
	}

	ti.flags1 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO3_TID_OVERWRITE);

	ti.tid = ath12k_dp_tx_get_tid(skb);

	switch (ti.encap_type) {
	case HAL_TCL_ENCAP_TYPE_NATIVE_WIFI:
		ath12k_dp_tx_encap_nwifi(skb);
		break;
	case HAL_TCL_ENCAP_TYPE_RAW:
		if (!test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags)) {
			ret = -EINVAL;
			goto fail_remove_tx_buf;
		}
		break;
	case HAL_TCL_ENCAP_TYPE_ETHERNET:
		/* no need to encap */
		break;
	case HAL_TCL_ENCAP_TYPE_802_3:
	default:
		/* TODO: Take care of other encap modes as well */
		ret = -EINVAL;
		atomic_inc(&ab->device_stats.tx_err.misc_fail);
		goto fail_remove_tx_buf;
	}

	if (iova_mask &&
	    (unsigned long)skb->data & iova_mask) {
		ret = ath12k_dp_tx_align_payload(ab, &skb);
		if (ret) {
			ath12k_warn(ab, "failed to align TX buffer %d\n", ret);
			/* don't bail out, give original buffer
			 * a chance even unaligned.
			 */
			goto map;
		}

		/* hdr is pointing to a wrong place after alignment,
		 * so refresh it for later use.
		 */
		hdr = (void *)skb->data;
	}
map:
	ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(ab->dev, ti.paddr)) {
		atomic_inc(&ab->device_stats.tx_err.misc_fail);
		ath12k_warn(ab, "failed to DMA map data Tx buffer\n");
		ret = -ENOMEM;
		goto fail_remove_tx_buf;
	}

	if (!test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags) &&
	    !(skb_cb->flags & ATH12K_SKB_HW_80211_ENCAP) &&
	    !(skb_cb->flags & ATH12K_SKB_CIPHER_SET) &&
	    ieee80211_has_protected(hdr->frame_control)) {
		/* Add metadata for sw encrypted vlan group traffic */
		add_htt_metadata = true;
		msdu_ext_desc = true;
		ti.flags0 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TO_FW);
		ti.meta_data_flags |= HTT_TCL_META_DATA_VALID_HTT;
		ti.encap_type = HAL_TCL_ENCAP_TYPE_RAW;
		ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
	}

	tx_desc->skb = skb;
	tx_desc->mac_id = ar->pdev_idx;
	ti.desc_id = tx_desc->desc_id;
	ti.data_len = skb->len;
	skb_cb->paddr = ti.paddr;
	skb_cb->vif = ahvif->vif;
	skb_cb->ar = ar;

	if (msdu_ext_desc) {
		skb_ext_desc = dev_alloc_skb(sizeof(struct hal_tx_msdu_ext_desc));
		if (!skb_ext_desc) {
			ret = -ENOMEM;
			goto fail_unmap_dma;
		}

		skb_put(skb_ext_desc, sizeof(struct hal_tx_msdu_ext_desc));
		memset(skb_ext_desc->data, 0, skb_ext_desc->len);

		msg = (struct hal_tx_msdu_ext_desc *)skb_ext_desc->data;
		ath12k_hal_tx_cmd_ext_desc_setup(ab, msg, &ti);

		if (add_htt_metadata) {
			ret = ath12k_dp_prepare_htt_metadata(skb_ext_desc);
			if (ret < 0) {
				ath12k_dbg(ab, ATH12K_DBG_DP_TX,
					   "Failed to add HTT meta data, dropping packet\n");
				goto fail_free_ext_skb;
			}
		}

		ti.paddr = dma_map_single(ab->dev, skb_ext_desc->data,
					  skb_ext_desc->len, DMA_TO_DEVICE);
		ret = dma_mapping_error(ab->dev, ti.paddr);
		if (ret)
			goto fail_free_ext_skb;

		ti.data_len = skb_ext_desc->len;
		ti.type = HAL_TCL_DESC_TYPE_EXT_DESC;

		skb_cb->paddr_ext_desc = ti.paddr;
		tx_desc->skb_ext_desc = skb_ext_desc;
	}

	hal_ring_id = tx_ring->tcl_data_ring.ring_id;
	tcl_ring = &ab->hal.srng_list[hal_ring_id];

	spin_lock_bh(&tcl_ring->lock);

	ath12k_hal_srng_access_begin(ab, tcl_ring);

	hal_tcl_desc = ath12k_hal_srng_src_get_next_entry(ab, tcl_ring);
	if (!hal_tcl_desc) {
		/* NOTE: It is highly unlikely we'll be running out of tcl_ring
		 * desc because the desc is directly enqueued onto hw queue.
		 */
		ath12k_hal_srng_access_end(ab, tcl_ring);
		ab->device_stats.tx_err.desc_na[ti.ring_id]++;
		spin_unlock_bh(&tcl_ring->lock);
		ret = -ENOMEM;

		/* Checking for available tcl descriptors in another ring in
		 * case of failure due to full tcl ring now, is better than
		 * checking this ring earlier for each pkt tx.
		 * Restart ring selection if some rings are not checked yet.
		 */
		if (ring_map != (BIT(ab->hw_params->max_tx_ring) - 1) &&
		    ab->hw_params->tcl_ring_retry) {
			tcl_ring_retry = true;
			ring_selector++;
		}

		goto fail_unmap_dma_ext;
	}

	spin_lock_bh(&arvif->link_stats_lock);
	arvif->link_stats.tx_encap_type[ti.encap_type]++;
	arvif->link_stats.tx_encrypt_type[ti.encrypt_type]++;
	arvif->link_stats.tx_desc_type[ti.type]++;

	if (is_mcast)
		arvif->link_stats.tx_bcast_mcast++;
	else
		arvif->link_stats.tx_enqueued++;
	spin_unlock_bh(&arvif->link_stats_lock);

	ab->device_stats.tx_enqueued[ti.ring_id]++;

	ath12k_hal_tx_cmd_desc_setup(ab, hal_tcl_desc, &ti);

	ath12k_hal_srng_access_end(ab, tcl_ring);

	spin_unlock_bh(&tcl_ring->lock);

	ath12k_dbg_dump(ab, ATH12K_DBG_DP_TX, NULL, "dp tx msdu: ",
			skb->data, skb->len);

	atomic_inc(&ar->dp.num_tx_pending);

	return 0;

fail_unmap_dma_ext:
	if (skb_cb->paddr_ext_desc)
		dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
				 skb_ext_desc->len,
				 DMA_TO_DEVICE);
fail_free_ext_skb:
	kfree_skb(skb_ext_desc);

fail_unmap_dma:
	dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);

fail_remove_tx_buf:
	ath12k_dp_tx_release_txbuf(dp, tx_desc, pool_id);

	spin_lock_bh(&arvif->link_stats_lock);
	arvif->link_stats.tx_dropped++;
	spin_unlock_bh(&arvif->link_stats_lock);

	if (tcl_ring_retry)
		goto tcl_ring_sel;

	return ret;
}

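/* Free a tx descriptor's skb (and any ext descriptor) for completions that
 * carry no status worth reporting to mac80211.
 */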
static void ath12k_dp_tx_free_txbuf(struct ath12k_base *ab,
				    struct dp_tx_ring *tx_ring,
				    struct ath12k_tx_desc_params *desc_params)
{
	struct ath12k *ar;
	struct sk_buff *msdu = desc_params->skb;
	struct ath12k_skb_cb *skb_cb;
	u8 pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, desc_params->mac_id);

	skb_cb = ATH12K_SKB_CB(msdu);
	ar = ab->pdevs[pdev_id].ar;

	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
	if (skb_cb->paddr_ext_desc) {
		dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
				 desc_params->skb_ext_desc->len, DMA_TO_DEVICE);
		dev_kfree_skb_any(desc_params->skb_ext_desc);
	}

	ieee80211_free_txskb(ar->ah->hw, msdu);

	if (atomic_dec_and_test(&ar->dp.num_tx_pending))
		wake_up(&ar->dp.tx_empty_waitq);
}

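/* Report tx status to mac80211 for a frame completed by the firmware via the
 * HTT path rather than by TQM.
 */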
static void
ath12k_dp_tx_htt_tx_complete_buf(struct ath12k_base *ab,
				 struct ath12k_tx_desc_params *desc_params,
				 struct dp_tx_ring *tx_ring,
				 struct ath12k_dp_htt_wbm_tx_status *ts)
{
	struct ieee80211_tx_info *info;
	struct ath12k_link_vif *arvif;
	struct ath12k_skb_cb *skb_cb;
	struct ieee80211_vif *vif;
	struct ath12k_vif *ahvif;
	struct ath12k *ar;
	struct sk_buff *msdu = desc_params->skb;

	skb_cb = ATH12K_SKB_CB(msdu);
	info = IEEE80211_SKB_CB(msdu);

	ar = skb_cb->ar;
	ab->device_stats.tx_completed[tx_ring->tcl_data_ring_id]++;

	if (atomic_dec_and_test(&ar->dp.num_tx_pending))
		wake_up(&ar->dp.tx_empty_waitq);

	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
	if (skb_cb->paddr_ext_desc) {
		dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
				 desc_params->skb_ext_desc->len, DMA_TO_DEVICE);
		dev_kfree_skb_any(desc_params->skb_ext_desc);
	}

	vif = skb_cb->vif;
	if (vif) {
		ahvif = ath12k_vif_to_ahvif(vif);
		rcu_read_lock();
		arvif = rcu_dereference(ahvif->link[skb_cb->link_id]);
		if (arvif) {
			spin_lock_bh(&arvif->link_stats_lock);
			arvif->link_stats.tx_completed++;
			spin_unlock_bh(&arvif->link_stats_lock);
		}
		rcu_read_unlock();
	}

	memset(&info->status, 0, sizeof(info->status));

	if (ts->acked) {
		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
			info->flags |= IEEE80211_TX_STAT_ACK;
			info->status.ack_signal = ts->ack_rssi;

			if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
				      ab->wmi_ab.svc_map))
				info->status.ack_signal += ATH12K_DEFAULT_NOISE_FLOOR;

			info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
		} else {
			info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
		}
	}

	ieee80211_tx_status_skb(ath12k_ar_to_hw(ar), msdu);
}

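/* Dispatch an HTT (firmware generated) tx completion based on the WBM status
 * word: OK completions are reported to mac80211, drop/inspect style
 * completions just free the frame.
 */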
static void
ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab, void *desc,
				     struct dp_tx_ring *tx_ring,
				     struct ath12k_tx_desc_params *desc_params)
{
	struct htt_tx_wbm_completion *status_desc;
	struct ath12k_dp_htt_wbm_tx_status ts = {0};
	enum hal_wbm_htt_tx_comp_status wbm_status;

	status_desc = desc;

	wbm_status = le32_get_bits(status_desc->info0,
				   HTT_TX_WBM_COMP_INFO0_STATUS);
	ab->device_stats.fw_tx_status[wbm_status]++;

	switch (wbm_status) {
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK:
		ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK);
		ts.ack_rssi = le32_get_bits(status_desc->info2,
					    HTT_TX_WBM_COMP_INFO2_ACK_RSSI);
		ath12k_dp_tx_htt_tx_complete_buf(ab, desc_params, tx_ring, &ts);
		break;
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ:
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT:
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_VDEVID_MISMATCH:
		ath12k_dp_tx_free_txbuf(ab, tx_ring, desc_params);
		break;
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY:
		/* This event is to be handled only when the driver decides to
		 * use WDS offload functionality.
		 */
		break;
	default:
		ath12k_warn(ab, "Unknown htt wbm tx status %d\n", wbm_status);
		break;
	}
}

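/* Rebuild a struct rate_info from the rate stats carried in the WBM tx
 * completion and cache it in the station's deflink stats.
 */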
static void ath12k_dp_tx_update_txcompl(struct ath12k *ar, struct hal_tx_status *ts)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_peer *peer;
	struct ieee80211_sta *sta;
	struct ath12k_sta *ahsta;
	struct ath12k_link_sta *arsta;
	struct rate_info txrate = {0};
	u16 rate, ru_tones;
	u8 rate_idx = 0;
	int ret;

	spin_lock_bh(&ab->base_lock);
	peer = ath12k_peer_find_by_id(ab, ts->peer_id);
	if (!peer || !peer->sta) {
		ath12k_dbg(ab, ATH12K_DBG_DP_TX,
			   "failed to find the peer by id %u\n", ts->peer_id);
		spin_unlock_bh(&ab->base_lock);
		return;
	}
	sta = peer->sta;
	ahsta = ath12k_sta_to_ahsta(sta);
	arsta = &ahsta->deflink;

	/* Prefer the real NSS value from arsta->last_txrate.nss; if that is
	 * invalid, fall back to the NSS value negotiated at assoc.
	 */
	if (arsta->last_txrate.nss)
		txrate.nss = arsta->last_txrate.nss;
	else
		txrate.nss = arsta->peer_nss;
	spin_unlock_bh(&ab->base_lock);

	switch (ts->pkt_type) {
	case HAL_TX_RATE_STATS_PKT_TYPE_11A:
	case HAL_TX_RATE_STATS_PKT_TYPE_11B:
		ret = ath12k_mac_hw_ratecode_to_legacy_rate(ts->mcs,
							    ts->pkt_type,
							    &rate_idx,
							    &rate);
		if (ret < 0) {
			ath12k_warn(ab, "Invalid tx legacy rate %d\n", ret);
			return;
		}

		txrate.legacy = rate;
		break;
	case HAL_TX_RATE_STATS_PKT_TYPE_11N:
		if (ts->mcs > ATH12K_HT_MCS_MAX) {
			ath12k_warn(ab, "Invalid HT mcs index %d\n", ts->mcs);
			return;
		}

		if (txrate.nss != 0)
			txrate.mcs = ts->mcs + 8 * (txrate.nss - 1);

		txrate.flags = RATE_INFO_FLAGS_MCS;

		if (ts->sgi)
			txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case HAL_TX_RATE_STATS_PKT_TYPE_11AC:
		if (ts->mcs > ATH12K_VHT_MCS_MAX) {
			ath12k_warn(ab, "Invalid VHT mcs index %d\n", ts->mcs);
			return;
		}

		txrate.mcs = ts->mcs;
		txrate.flags = RATE_INFO_FLAGS_VHT_MCS;

		if (ts->sgi)
			txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case HAL_TX_RATE_STATS_PKT_TYPE_11AX:
		if (ts->mcs > ATH12K_HE_MCS_MAX) {
			ath12k_warn(ab, "Invalid HE mcs index %d\n", ts->mcs);
			return;
		}

		txrate.mcs = ts->mcs;
		txrate.flags = RATE_INFO_FLAGS_HE_MCS;
		txrate.he_gi = ath12k_he_gi_to_nl80211_he_gi(ts->sgi);
		break;
	case HAL_TX_RATE_STATS_PKT_TYPE_11BE:
		if (ts->mcs > ATH12K_EHT_MCS_MAX) {
			ath12k_warn(ab, "Invalid EHT mcs index %d\n", ts->mcs);
			return;
		}

		txrate.mcs = ts->mcs;
		txrate.flags = RATE_INFO_FLAGS_EHT_MCS;
		txrate.eht_gi = ath12k_mac_eht_gi_to_nl80211_eht_gi(ts->sgi);
		break;
	default:
		ath12k_warn(ab, "Invalid tx pkt type: %d\n", ts->pkt_type);
		return;
	}

	txrate.bw = ath12k_mac_bw_to_mac80211_bw(ts->bw);

	if (ts->ofdma && ts->pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AX) {
		txrate.bw = RATE_INFO_BW_HE_RU;
		ru_tones = ath12k_mac_he_convert_tones_to_ru_tones(ts->tones);
		txrate.he_ru_alloc =
			ath12k_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones);
	}

	if (ts->ofdma && ts->pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11BE) {
		txrate.bw = RATE_INFO_BW_EHT_RU;
		txrate.eht_ru_alloc =
			ath12k_mac_eht_ru_tones_to_nl80211_eht_ru_alloc(ts->tones);
	}

	spin_lock_bh(&ab->base_lock);
	arsta->txrate = txrate;
	spin_unlock_bh(&ab->base_lock);
}

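/* Complete an MSDU released by TQM: unmap buffers, update per-link stats and
 * translate the TQM release reason into a mac80211 tx status.
 */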
static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
				       struct ath12k_tx_desc_params *desc_params,
				       struct hal_tx_status *ts,
				       int ring)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_hw *ah = ar->ah;
	struct ieee80211_tx_info *info;
	struct ath12k_link_vif *arvif;
	struct ath12k_skb_cb *skb_cb;
	struct ieee80211_vif *vif;
	struct ath12k_vif *ahvif;
	struct sk_buff *msdu = desc_params->skb;

	if (WARN_ON_ONCE(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)) {
		/* Must not happen */
		return;
	}

	skb_cb = ATH12K_SKB_CB(msdu);
	ab->device_stats.tx_completed[ring]++;

	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
	if (skb_cb->paddr_ext_desc) {
		dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
				 desc_params->skb_ext_desc->len, DMA_TO_DEVICE);
		dev_kfree_skb_any(desc_params->skb_ext_desc);
	}

	rcu_read_lock();

	if (!rcu_dereference(ab->pdevs_active[ar->pdev_idx])) {
		ieee80211_free_txskb(ah->hw, msdu);
		goto exit;
	}

	if (!skb_cb->vif) {
		ieee80211_free_txskb(ah->hw, msdu);
		goto exit;
	}

	vif = skb_cb->vif;
	if (vif) {
		ahvif = ath12k_vif_to_ahvif(vif);
		arvif = rcu_dereference(ahvif->link[skb_cb->link_id]);
		if (arvif) {
			spin_lock_bh(&arvif->link_stats_lock);
			arvif->link_stats.tx_completed++;
			spin_unlock_bh(&arvif->link_stats_lock);
		}
	}

	info = IEEE80211_SKB_CB(msdu);
	memset(&info->status, 0, sizeof(info->status));

	/* skip tx rate update from ieee80211_status */
	info->status.rates[0].idx = -1;

	switch (ts->status) {
	case HAL_WBM_TQM_REL_REASON_FRAME_ACKED:
		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
			info->flags |= IEEE80211_TX_STAT_ACK;
			info->status.ack_signal = ts->ack_rssi;

			if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
				      ab->wmi_ab.svc_map))
				info->status.ack_signal += ATH12K_DEFAULT_NOISE_FLOOR;

			info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
		}
		break;
	case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX:
		if (info->flags & IEEE80211_TX_CTL_NO_ACK) {
			info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
			break;
		}
		fallthrough;
	case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_MPDU:
	case HAL_WBM_TQM_REL_REASON_DROP_THRESHOLD:
	case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_AGED_FRAMES:
		/* The failure status is due to internal firmware tx failure
		 * hence drop the frame; do not update the status of frame to
		 * the upper layer
		 */
		ieee80211_free_txskb(ah->hw, msdu);
		goto exit;
	default:
		ath12k_dbg(ab, ATH12K_DBG_DP_TX, "tx frame is not acked status %d\n",
			   ts->status);
		break;
	}

	/* NOTE: Tx rate status reporting. Tx completion status does not have
	 * necessary information (for example nss) to build the tx rate.
	 * Might end up reporting it out-of-band from HTT stats.
	 */

	ath12k_dp_tx_update_txcompl(ar, ts);

	ieee80211_tx_status_skb(ath12k_ar_to_hw(ar), msdu);

exit:
	rcu_read_unlock();
}

static void ath12k_dp_tx_status_parse(struct ath12k_base *ab,
				      struct hal_wbm_completion_ring_tx *desc,
				      struct hal_tx_status *ts)
{
	u32 info0 = le32_to_cpu(desc->rate_stats.info0);

	ts->buf_rel_source =
		le32_get_bits(desc->info0, HAL_WBM_COMPL_TX_INFO0_REL_SRC_MODULE);
	if (ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW &&
	    ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)
		return;

	if (ts->buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)
		return;

	ts->status = le32_get_bits(desc->info0,
				   HAL_WBM_COMPL_TX_INFO0_TQM_RELEASE_REASON);

	ts->ppdu_id = le32_get_bits(desc->info1,
				    HAL_WBM_COMPL_TX_INFO1_TQM_STATUS_NUMBER);

	ts->peer_id = le32_get_bits(desc->info3, HAL_WBM_COMPL_TX_INFO3_PEER_ID);

	if (info0 & HAL_TX_RATE_STATS_INFO0_VALID) {
		ts->pkt_type = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_PKT_TYPE);
		ts->mcs = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_MCS);
		ts->sgi = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_SGI);
		ts->bw = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_BW);
		ts->tones = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_TONES_IN_RU);
		ts->ofdma = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_OFDMA_TX);
	}
}

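/* Tx completion handler: first drain the WBM completion ring into a software
 * status FIFO while holding the srng lock, then process the buffered entries
 * after dropping the lock.
 */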
void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id)
{
	struct ath12k *ar;
	struct ath12k_dp *dp = &ab->dp;
	int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
	struct hal_srng *status_ring = &ab->hal.srng_list[hal_ring_id];
	struct ath12k_tx_desc_info *tx_desc = NULL;
	struct hal_tx_status ts = { 0 };
	struct ath12k_tx_desc_params desc_params;
	struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id];
	struct hal_wbm_release_ring *desc;
	u8 pdev_id;
	u64 desc_va;
	enum hal_wbm_rel_src_module buf_rel_source;
	enum hal_wbm_tqm_rel_reason rel_status;

	spin_lock_bh(&status_ring->lock);

	ath12k_hal_srng_access_begin(ab, status_ring);

	while (ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_head) != tx_ring->tx_status_tail) {
		desc = ath12k_hal_srng_dst_get_next_entry(ab, status_ring);
		if (!desc)
			break;

		memcpy(&tx_ring->tx_status[tx_ring->tx_status_head],
		       desc, sizeof(*desc));
		tx_ring->tx_status_head =
			ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_head);
	}

	if (ath12k_hal_srng_dst_peek(ab, status_ring) &&
	    (ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_head) == tx_ring->tx_status_tail)) {
		/* TODO: Process pending tx_status messages when kfifo_is_full() */
		ath12k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n");
	}

	ath12k_hal_srng_access_end(ab, status_ring);

	spin_unlock_bh(&status_ring->lock);

	while (ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_tail) != tx_ring->tx_status_head) {
		struct hal_wbm_completion_ring_tx *tx_status;
		u32 desc_id;

		tx_ring->tx_status_tail =
			ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_tail);
		tx_status = &tx_ring->tx_status[tx_ring->tx_status_tail];
		ath12k_dp_tx_status_parse(ab, tx_status, &ts);

		if (le32_get_bits(tx_status->info0, HAL_WBM_COMPL_TX_INFO0_CC_DONE)) {
			/* HW done cookie conversion */
			desc_va = ((u64)le32_to_cpu(tx_status->buf_va_hi) << 32 |
				   le32_to_cpu(tx_status->buf_va_lo));
			tx_desc = (struct ath12k_tx_desc_info *)((unsigned long)desc_va);
		} else {
			/* SW does cookie conversion to VA */
			desc_id = le32_get_bits(tx_status->buf_va_hi,
						BUFFER_ADDR_INFO1_SW_COOKIE);

			tx_desc = ath12k_dp_get_tx_desc(ab, desc_id);
		}
		if (!tx_desc) {
			ath12k_warn(ab, "unable to retrieve tx_desc!");
			continue;
		}

		desc_params.mac_id = tx_desc->mac_id;
		desc_params.skb = tx_desc->skb;
		desc_params.skb_ext_desc = tx_desc->skb_ext_desc;

		/* Find the HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE value */
		buf_rel_source = le32_get_bits(tx_status->info0,
					       HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE);
		ab->device_stats.tx_wbm_rel_source[buf_rel_source]++;

		rel_status = le32_get_bits(tx_status->info0,
					   HAL_WBM_COMPL_TX_INFO0_TQM_RELEASE_REASON);
		ab->device_stats.tqm_rel_reason[rel_status]++;

		/* Release descriptor as soon as extracting necessary info
		 * to reduce contention
		 */
		ath12k_dp_tx_release_txbuf(dp, tx_desc, tx_desc->pool_id);
		if (ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW) {
			ath12k_dp_tx_process_htt_tx_complete(ab, (void *)tx_status,
							     tx_ring, &desc_params);
			continue;
		}

		pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, desc_params.mac_id);
		ar = ab->pdevs[pdev_id].ar;

		if (atomic_dec_and_test(&ar->dp.num_tx_pending))
			wake_up(&ar->dp.tx_empty_waitq);

		ath12k_dp_tx_complete_msdu(ar, &desc_params, &ts,
					   tx_ring->tcl_data_ring_id);
	}
}

static int
ath12k_dp_tx_get_ring_id_type(struct ath12k_base *ab,
			      int mac_id, u32 ring_id,
			      enum hal_ring_type ring_type,
			      enum htt_srng_ring_type *htt_ring_type,
			      enum htt_srng_ring_id *htt_ring_id)
{
	int ret = 0;

	switch (ring_type) {
	case HAL_RXDMA_BUF:
		/* On some targets the host fills the rx buffer ring directly;
		 * on others the host hands rx buffers to the fw, which fills
		 * the rxbuf ring of each rxdma.
		 */
		if (!ab->hw_params->rx_mac_buf_ring) {
			if (!(ring_id == HAL_SRNG_SW2RXDMA_BUF0 ||
			      ring_id == HAL_SRNG_SW2RXDMA_BUF1)) {
				ret = -EINVAL;
			}
			*htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
			*htt_ring_type = HTT_SW_TO_HW_RING;
		} else {
			if (ring_id == HAL_SRNG_SW2RXDMA_BUF0) {
				*htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
				*htt_ring_type = HTT_SW_TO_SW_RING;
			} else {
				*htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
				*htt_ring_type = HTT_SW_TO_HW_RING;
			}
		}
		break;
	case HAL_RXDMA_DST:
		*htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
		*htt_ring_type = HTT_HW_TO_SW_RING;
		break;
	case HAL_RXDMA_MONITOR_BUF:
		*htt_ring_id = HTT_RX_MON_HOST2MON_BUF_RING;
		*htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case HAL_RXDMA_MONITOR_STATUS:
		*htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
		*htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case HAL_RXDMA_MONITOR_DST:
		*htt_ring_id = HTT_RX_MON_MON2HOST_DEST_RING;
		*htt_ring_type = HTT_HW_TO_SW_RING;
		break;
	case HAL_RXDMA_MONITOR_DESC:
		*htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
		*htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	default:
		ath12k_warn(ab, "Unsupported ring type in DP :%d\n", ring_type);
		ret = -EINVAL;
	}
	return ret;
}

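/* Describe a host SRNG to the firmware via an HTT_H2T_MSG_TYPE_SRING_SETUP
 * message: ring base address, entry size (in 32-bit words, hence the >>= 2
 * below), head/tail pointer addresses, MSI target and interrupt thresholds.
 */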
int ath12k_dp_tx_htt_srng_setup(struct ath12k_base *ab, u32 ring_id,
				int mac_id, enum hal_ring_type ring_type)
{
	struct htt_srng_setup_cmd *cmd;
	struct hal_srng *srng = &ab->hal.srng_list[ring_id];
	struct hal_srng_params params;
	struct sk_buff *skb;
	u32 ring_entry_sz;
	int len = sizeof(*cmd);
	dma_addr_t hp_addr, tp_addr;
	enum htt_srng_ring_type htt_ring_type;
	enum htt_srng_ring_id htt_ring_id;
	int ret;

	skb = ath12k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	memset(&params, 0, sizeof(params));
	ath12k_hal_srng_get_params(ab, srng, &params);

	hp_addr = ath12k_hal_srng_get_hp_addr(ab, srng);
	tp_addr = ath12k_hal_srng_get_tp_addr(ab, srng);

	ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
					    ring_type, &htt_ring_type,
					    &htt_ring_id);
	if (ret)
		goto err_free;

	skb_put(skb, len);
	cmd = (struct htt_srng_setup_cmd *)skb->data;
	cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_SRING_SETUP,
				      HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE);
	if (htt_ring_type == HTT_SW_TO_HW_RING ||
	    htt_ring_type == HTT_HW_TO_SW_RING)
		cmd->info0 |= le32_encode_bits(DP_SW2HW_MACID(mac_id),
					       HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID);
	else
		cmd->info0 |= le32_encode_bits(mac_id,
					       HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID);
	cmd->info0 |= le32_encode_bits(htt_ring_type,
				       HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE);
	cmd->info0 |= le32_encode_bits(htt_ring_id,
				       HTT_SRNG_SETUP_CMD_INFO0_RING_ID);

	cmd->ring_base_addr_lo = cpu_to_le32(params.ring_base_paddr &
					     HAL_ADDR_LSB_REG_MASK);

	cmd->ring_base_addr_hi = cpu_to_le32((u64)params.ring_base_paddr >>
					     HAL_ADDR_MSB_REG_SHIFT);

	ret = ath12k_hal_srng_get_entrysize(ab, ring_type);
	if (ret < 0)
		goto err_free;

	ring_entry_sz = ret;

	ring_entry_sz >>= 2;
	cmd->info1 = le32_encode_bits(ring_entry_sz,
				      HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE);
	cmd->info1 |= le32_encode_bits(params.num_entries * ring_entry_sz,
				       HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE);
	cmd->info1 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP),
				       HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP);
	cmd->info1 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
				       HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP);
	cmd->info1 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_RING_PTR_SWAP),
				       HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP);
	if (htt_ring_type == HTT_SW_TO_HW_RING)
		cmd->info1 |= cpu_to_le32(HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS);

	cmd->ring_head_off32_remote_addr_lo = cpu_to_le32(lower_32_bits(hp_addr));
	cmd->ring_head_off32_remote_addr_hi = cpu_to_le32(upper_32_bits(hp_addr));

	cmd->ring_tail_off32_remote_addr_lo = cpu_to_le32(lower_32_bits(tp_addr));
	cmd->ring_tail_off32_remote_addr_hi = cpu_to_le32(upper_32_bits(tp_addr));

	cmd->ring_msi_addr_lo = cpu_to_le32(lower_32_bits(params.msi_addr));
	cmd->ring_msi_addr_hi = cpu_to_le32(upper_32_bits(params.msi_addr));
	cmd->msi_data = cpu_to_le32(params.msi_data);

	cmd->intr_info =
		le32_encode_bits(params.intr_batch_cntr_thres_entries * ring_entry_sz,
				 HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH);
	cmd->intr_info |=
		le32_encode_bits(params.intr_timer_thres_us >> 3,
				 HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH);

	cmd->info2 = 0;
	if (params.flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
		cmd->info2 = le32_encode_bits(params.low_threshold,
					      HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH);
	}

	ath12k_dbg(ab, ATH12K_DBG_HAL,
		   "%s msi_addr_lo:0x%x, msi_addr_hi:0x%x, msi_data:0x%x\n",
		   __func__, cmd->ring_msi_addr_lo, cmd->ring_msi_addr_hi,
		   cmd->msi_data);

	ath12k_dbg(ab, ATH12K_DBG_HAL,
		   "ring_id:%d, ring_type:%d, intr_info:0x%x, flags:0x%x\n",
		   ring_id, ring_type, cmd->intr_info, cmd->info2);

	ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb);
	if (ret)
		goto err_free;

	return 0;

err_free:
	dev_kfree_skb_any(skb);

	return ret;
}

#define HTT_TARGET_VERSION_TIMEOUT_HZ (3 * HZ)

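/* HTT version handshake: request the target's HTT version and advertise
 * support for v2 TCL metadata.
 */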
int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	struct sk_buff *skb;
	struct htt_ver_req_cmd *cmd;
	int len = sizeof(*cmd);
	int ret;

	init_completion(&dp->htt_tgt_version_received);

	skb = ath12k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_ver_req_cmd *)skb->data;
	cmd->ver_reg_info = le32_encode_bits(HTT_H2T_MSG_TYPE_VERSION_REQ,
					     HTT_OPTION_TAG);

	cmd->tcl_metadata_version = le32_encode_bits(HTT_TAG_TCL_METADATA_VERSION,
						     HTT_OPTION_TAG) |
				    le32_encode_bits(HTT_TCL_METADATA_VER_SZ,
						     HTT_OPTION_LEN) |
				    le32_encode_bits(HTT_OPTION_TCL_METADATA_VER_V2,
						     HTT_OPTION_VALUE);

	ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	ret = wait_for_completion_timeout(&dp->htt_tgt_version_received,
					  HTT_TARGET_VERSION_TIMEOUT_HZ);
	if (ret == 0) {
		ath12k_warn(ab, "htt target version request timed out\n");
		return -ETIMEDOUT;
	}

	if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) {
		ath12k_err(ab, "unsupported htt major version %d supported version is %d\n",
			   dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR);
		return -EOPNOTSUPP;
	}

	return 0;
}

int ath12k_dp_tx_htt_h2t_ppdu_stats_req(struct ath12k *ar, u32 mask)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_dp *dp = &ab->dp;
	struct sk_buff *skb;
	struct htt_ppdu_stats_cfg_cmd *cmd;
	int len = sizeof(*cmd);
	u8 pdev_mask;
	int ret;
	int i;

	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
		skb = ath12k_htc_alloc_skb(ab, len);
		if (!skb)
			return -ENOMEM;

		skb_put(skb, len);
		cmd = (struct htt_ppdu_stats_cfg_cmd *)skb->data;
		cmd->msg = le32_encode_bits(HTT_H2T_MSG_TYPE_PPDU_STATS_CFG,
					    HTT_PPDU_STATS_CFG_MSG_TYPE);

		pdev_mask = 1 << (i + 1);
		cmd->msg |= le32_encode_bits(pdev_mask, HTT_PPDU_STATS_CFG_PDEV_ID);
		cmd->msg |= le32_encode_bits(mask, HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK);

		ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
		if (ret) {
			dev_kfree_skb_any(skb);
			return ret;
		}
	}

	return 0;
}

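/* Program the rx ring selection config (packet type filters, TLV filters,
 * buffer size and optional packet/TLV offsets) for one rx ring via HTT.
 */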
int ath12k_dp_tx_htt_rx_filter_setup(struct ath12k_base *ab, u32 ring_id,
				     int mac_id, enum hal_ring_type ring_type,
				     int rx_buf_size,
				     struct htt_rx_ring_tlv_filter *tlv_filter)
{
	struct htt_rx_ring_selection_cfg_cmd *cmd;
	struct hal_srng *srng = &ab->hal.srng_list[ring_id];
	struct hal_srng_params params;
	struct sk_buff *skb;
	int len = sizeof(*cmd);
	enum htt_srng_ring_type htt_ring_type;
	enum htt_srng_ring_id htt_ring_id;
	int ret;

	skb = ath12k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	memset(&params, 0, sizeof(params));
	ath12k_hal_srng_get_params(ab, srng, &params);

	ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
					    ring_type, &htt_ring_type,
					    &htt_ring_id);
	if (ret)
		goto err_free;

	skb_put(skb, len);
	cmd = (struct htt_rx_ring_selection_cfg_cmd *)skb->data;
	cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG,
				      HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE);
	if (htt_ring_type == HTT_SW_TO_HW_RING ||
	    htt_ring_type == HTT_HW_TO_SW_RING)
		cmd->info0 |=
			le32_encode_bits(DP_SW2HW_MACID(mac_id),
					 HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
	else
		cmd->info0 |=
			le32_encode_bits(mac_id,
					 HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
	cmd->info0 |= le32_encode_bits(htt_ring_id,
				       HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID);
	cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP),
				       HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS);
	cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
				       HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS);
	cmd->info0 |= le32_encode_bits(tlv_filter->offset_valid,
				       HTT_RX_RING_SELECTION_CFG_CMD_INFO0_OFFSET_VALID);
	cmd->info0 |=
		le32_encode_bits(tlv_filter->drop_threshold_valid,
				 HTT_RX_RING_SELECTION_CFG_CMD_INFO0_DROP_THRES_VAL);
	cmd->info0 |= le32_encode_bits(!tlv_filter->rxmon_disable,
				       HTT_RX_RING_SELECTION_CFG_CMD_INFO0_EN_RXMON);

	cmd->info1 = le32_encode_bits(rx_buf_size,
				      HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE);
	cmd->info1 |= le32_encode_bits(tlv_filter->conf_len_mgmt,
				       HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT);
	cmd->info1 |= le32_encode_bits(tlv_filter->conf_len_ctrl,
				       HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL);
	cmd->info1 |= le32_encode_bits(tlv_filter->conf_len_data,
				       HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA);
	cmd->pkt_type_en_flags0 = cpu_to_le32(tlv_filter->pkt_filter_flags0);
	cmd->pkt_type_en_flags1 = cpu_to_le32(tlv_filter->pkt_filter_flags1);
	cmd->pkt_type_en_flags2 = cpu_to_le32(tlv_filter->pkt_filter_flags2);
	cmd->pkt_type_en_flags3 = cpu_to_le32(tlv_filter->pkt_filter_flags3);
	cmd->rx_filter_tlv = cpu_to_le32(tlv_filter->rx_filter);

	cmd->info2 = le32_encode_bits(tlv_filter->rx_drop_threshold,
				      HTT_RX_RING_SELECTION_CFG_CMD_INFO2_DROP_THRESHOLD);
	cmd->info2 |=
		le32_encode_bits(tlv_filter->enable_log_mgmt_type,
				 HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_MGMT_TYPE);
	cmd->info2 |=
		le32_encode_bits(tlv_filter->enable_log_ctrl_type,
				 HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_CTRL_TYPE);
	cmd->info2 |=
		le32_encode_bits(tlv_filter->enable_log_data_type,
				 HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_DATA_TYPE);

	cmd->info3 =
		le32_encode_bits(tlv_filter->enable_rx_tlv_offset,
				 HTT_RX_RING_SELECTION_CFG_CMD_INFO3_EN_TLV_PKT_OFFSET);
	cmd->info3 |=
		le32_encode_bits(tlv_filter->rx_tlv_offset,
				 HTT_RX_RING_SELECTION_CFG_CMD_INFO3_PKT_TLV_OFFSET);

	if (tlv_filter->offset_valid) {
		cmd->rx_packet_offset =
			le32_encode_bits(tlv_filter->rx_packet_offset,
					 HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET);

		cmd->rx_packet_offset |=
			le32_encode_bits(tlv_filter->rx_header_offset,
					 HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET);

		cmd->rx_mpdu_offset =
			le32_encode_bits(tlv_filter->rx_mpdu_end_offset,
					 HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET);

		cmd->rx_mpdu_offset |=
			le32_encode_bits(tlv_filter->rx_mpdu_start_offset,
					 HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET);

		cmd->rx_msdu_offset =
			le32_encode_bits(tlv_filter->rx_msdu_end_offset,
					 HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET);

		cmd->rx_msdu_offset |=
			le32_encode_bits(tlv_filter->rx_msdu_start_offset,
					 HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET);

		cmd->rx_attn_offset =
			le32_encode_bits(tlv_filter->rx_attn_offset,
					 HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET);
	}

	if (tlv_filter->rx_mpdu_start_wmask > 0 &&
	    tlv_filter->rx_msdu_end_wmask > 0) {
		cmd->info2 |=
			le32_encode_bits(true,
					 HTT_RX_RING_SELECTION_CFG_WORD_MASK_COMPACT_SET);
		cmd->rx_mpdu_start_end_mask =
			le32_encode_bits(tlv_filter->rx_mpdu_start_wmask,
					 HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_MASK);
		/* mpdu_end is not used by any hardware so far; assign it
		 * through hal ops in the future if a chip starts using it.
		 */
		cmd->rx_mpdu_start_end_mask |=
			le32_encode_bits(tlv_filter->rx_mpdu_end_wmask,
					 HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_MASK);
		cmd->rx_msdu_end_word_mask =
			le32_encode_bits(tlv_filter->rx_msdu_end_wmask,
					 HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_MASK);
	}

	ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb);
	if (ret)
		goto err_free;

	return 0;

err_free:
	dev_kfree_skb_any(skb);

	return ret;
}

int
ath12k_dp_tx_htt_h2t_ext_stats_req(struct ath12k *ar, u8 type,
				   struct htt_ext_stats_cfg_params *cfg_params,
				   u64 cookie)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_dp *dp = &ab->dp;
	struct sk_buff *skb;
	struct htt_ext_stats_cfg_cmd *cmd;
	int len = sizeof(*cmd);
	int ret;
	u32 pdev_id;

	skb = ath12k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_ext_stats_cfg_cmd *)skb->data;
	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG;

	pdev_id = ath12k_mac_get_target_pdev_id(ar);
	cmd->hdr.pdev_mask = 1 << pdev_id;

	cmd->hdr.stats_type = type;
	cmd->cfg_param0 = cpu_to_le32(cfg_params->cfg0);
	cmd->cfg_param1 = cpu_to_le32(cfg_params->cfg1);
	cmd->cfg_param2 = cpu_to_le32(cfg_params->cfg2);
	cmd->cfg_param3 = cpu_to_le32(cfg_params->cfg3);
	cmd->cookie_lsb = cpu_to_le32(lower_32_bits(cookie));
	cmd->cookie_msb = cpu_to_le32(upper_32_bits(cookie));

	ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
	if (ret) {
		ath12k_warn(ab, "failed to send htt type stats request: %d",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath12k_dp_tx_htt_monitor_mode_ring_config(struct ath12k *ar, bool reset)
{
	struct ath12k_base *ab = ar->ab;
	int ret;

	ret = ath12k_dp_tx_htt_rx_monitor_mode_ring_config(ar, reset);
	if (ret) {
		ath12k_err(ab, "failed to setup rx monitor filter %d\n", ret);
		return ret;
	}

	return 0;
}

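/* Configure (or, on reset, restore the default) rx monitor mode filters on
 * the monitor destination/buffer/status rings of every rxdma.
 */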
int ath12k_dp_tx_htt_rx_monitor_mode_ring_config(struct ath12k *ar, bool reset)
{
	struct ath12k_base *ab = ar->ab;
	struct htt_rx_ring_tlv_filter tlv_filter = {0};
	int ret, ring_id, i;

	tlv_filter.offset_valid = false;

	if (!reset) {
		tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_DEST_RING;

		tlv_filter.drop_threshold_valid = true;
		tlv_filter.rx_drop_threshold = HTT_RX_RING_TLV_DROP_THRESHOLD_VALUE;

		tlv_filter.enable_log_mgmt_type = true;
		tlv_filter.enable_log_ctrl_type = true;
		tlv_filter.enable_log_data_type = true;

		tlv_filter.conf_len_ctrl = HTT_RX_RING_DEFAULT_DMA_LENGTH;
		tlv_filter.conf_len_mgmt = HTT_RX_RING_DEFAULT_DMA_LENGTH;
		tlv_filter.conf_len_data = HTT_RX_RING_DEFAULT_DMA_LENGTH;

		tlv_filter.enable_rx_tlv_offset = true;
		tlv_filter.rx_tlv_offset = HTT_RX_RING_PKT_TLV_OFFSET;

		tlv_filter.pkt_filter_flags0 =
			HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 |
			HTT_RX_MON_MO_MGMT_FILTER_FLAGS0;
		tlv_filter.pkt_filter_flags1 =
			HTT_RX_MON_FP_MGMT_FILTER_FLAGS1 |
			HTT_RX_MON_MO_MGMT_FILTER_FLAGS1;
		tlv_filter.pkt_filter_flags2 =
			HTT_RX_MON_FP_CTRL_FILTER_FLASG2 |
			HTT_RX_MON_MO_CTRL_FILTER_FLASG2;
		tlv_filter.pkt_filter_flags3 =
			HTT_RX_MON_FP_CTRL_FILTER_FLASG3 |
			HTT_RX_MON_MO_CTRL_FILTER_FLASG3 |
			HTT_RX_MON_FP_DATA_FILTER_FLASG3 |
			HTT_RX_MON_MO_DATA_FILTER_FLASG3;
	} else {
		tlv_filter = ath12k_mac_mon_status_filter_default;

		if (ath12k_debugfs_is_extd_rx_stats_enabled(ar))
			tlv_filter.rx_filter = ath12k_debugfs_rx_filter(ar);
	}

	if (ab->hw_params->rxdma1_enable) {
		for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
			ring_id = ar->dp.rxdma_mon_dst_ring[i].ring_id;
			ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
							       ar->dp.mac_id + i,
							       HAL_RXDMA_MONITOR_DST,
							       DP_RXDMA_REFILL_RING_SIZE,
							       &tlv_filter);
			if (ret) {
				ath12k_err(ab,
					   "failed to setup filter for monitor buf %d\n",
					   ret);
				return ret;
			}
		}
		return 0;
	}

	if (!reset) {
		for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
			ring_id = ab->dp.rx_mac_buf_ring[i].ring_id;
			ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
							       i,
							       HAL_RXDMA_BUF,
							       DP_RXDMA_REFILL_RING_SIZE,
							       &tlv_filter);
			if (ret) {
				ath12k_err(ab,
					   "failed to setup filter for mon rx buf %d\n",
					   ret);
				return ret;
			}
		}
	}

	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
		ring_id = ab->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
		if (!reset) {
			tlv_filter.rx_filter =
				HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING;
		}

		ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id,
						       i,
						       HAL_RXDMA_MONITOR_STATUS,
						       RX_MON_STATUS_BUF_SIZE,
						       &tlv_filter);
		if (ret) {
			ath12k_err(ab,
				   "failed to setup filter for mon status buf %d\n",
				   ret);
			return ret;
		}
	}

	return 0;
}

int ath12k_dp_tx_htt_tx_filter_setup(struct ath12k_base *ab, u32 ring_id,
				     int mac_id, enum hal_ring_type ring_type,
				     int tx_buf_size,
				     struct htt_tx_ring_tlv_filter *htt_tlv_filter)
{
	struct htt_tx_ring_selection_cfg_cmd *cmd;
	struct hal_srng *srng = &ab->hal.srng_list[ring_id];
	struct hal_srng_params params;
	struct sk_buff *skb;
	int len = sizeof(*cmd);
	enum htt_srng_ring_type htt_ring_type;
	enum htt_srng_ring_id htt_ring_id;
	int ret;

	skb = ath12k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	memset(&params, 0, sizeof(params));
	ath12k_hal_srng_get_params(ab, srng, &params);

	ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
					    ring_type, &htt_ring_type,
					    &htt_ring_id);

	if (ret)
		goto err_free;

	skb_put(skb, len);
	cmd = (struct htt_tx_ring_selection_cfg_cmd *)skb->data;
	cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_TX_MONITOR_CFG,
				      HTT_TX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE);
	if (htt_ring_type == HTT_SW_TO_HW_RING ||
	    htt_ring_type == HTT_HW_TO_SW_RING)
		cmd->info0 |=
			le32_encode_bits(DP_SW2HW_MACID(mac_id),
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
	else
		cmd->info0 |=
			le32_encode_bits(mac_id,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
	cmd->info0 |= le32_encode_bits(htt_ring_id,
				       HTT_TX_RING_SELECTION_CFG_CMD_INFO0_RING_ID);
	cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP),
				       HTT_TX_RING_SELECTION_CFG_CMD_INFO0_SS);
	cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
				       HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PS);

	cmd->info1 |=
		le32_encode_bits(tx_buf_size,
				 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_RING_BUFF_SIZE);

	if (htt_tlv_filter->tx_mon_mgmt_filter) {
		cmd->info1 |=
			le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_MGMT,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE);
		cmd->info1 |=
			le32_encode_bits(htt_tlv_filter->tx_mon_pkt_dma_len,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT);
		cmd->info2 |=
			le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_MGMT,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG);
	}

	if (htt_tlv_filter->tx_mon_data_filter) {
		cmd->info1 |=
			le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_CTRL,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE);
		cmd->info1 |=
			le32_encode_bits(htt_tlv_filter->tx_mon_pkt_dma_len,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL);
		cmd->info2 |=
			le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_CTRL,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG);
	}

	if (htt_tlv_filter->tx_mon_ctrl_filter) {
		cmd->info1 |=
			le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_DATA,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE);
		cmd->info1 |=
			le32_encode_bits(htt_tlv_filter->tx_mon_pkt_dma_len,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA);
		cmd->info2 |=
			le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_DATA,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG);
	}

	cmd->tlv_filter_mask_in0 =
		cpu_to_le32(htt_tlv_filter->tx_mon_downstream_tlv_flags);
	cmd->tlv_filter_mask_in1 =
		cpu_to_le32(htt_tlv_filter->tx_mon_upstream_tlv_flags0);
	cmd->tlv_filter_mask_in2 =
		cpu_to_le32(htt_tlv_filter->tx_mon_upstream_tlv_flags1);
	cmd->tlv_filter_mask_in3 =
		cpu_to_le32(htt_tlv_filter->tx_mon_upstream_tlv_flags2);

	ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb);
	if (ret)
		goto err_free;

	return 0;

err_free:
	dev_kfree_skb_any(skb);
	return ret;
}
