// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc. */

#include "mt7915.h"
#include "../dma.h"
#include "mac.h"

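/* Set up the TX descriptor rings for a phy. When WED (Wireless Ethernet
 * Dispatch) is active, the ring base and queue index are remapped so the
 * WED hardware can drive the data TX rings; the MT_WED_Q_TX() flag is
 * passed through to mt76_connac_init_tx_queues() below.
 */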
static int
mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc, int ring_base)
{
	struct mt7915_dev *dev = phy->dev;
	struct mtk_wed_device *wed = NULL;

	if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
		if (is_mt798x(&dev->mt76))
			ring_base += MT_TXQ_ID(0) * MT_RING_SIZE;
		else
			ring_base = MT_WED_TX_RING_BASE;

		idx -= MT_TXQ_ID(0);
		wed = &dev->mt76.mmio.wed;
	}

	return mt76_connac_init_tx_queues(phy->mt76, idx, n_desc, ring_base,
					  wed, MT_WED_Q_TX(idx));
}

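/* NAPI poll handler for TX completions: reap finished descriptors and,
 * once polling is done, re-enable the MCU TX-done interrupt.
 */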
static int mt7915_poll_tx(struct napi_struct *napi, int budget)
{
	struct mt7915_dev *dev;

	dev = container_of(napi, struct mt7915_dev, mt76.tx_napi);

	mt76_connac_tx_cleanup(&dev->mt76);
	if (napi_complete_done(napi, 0))
		mt7915_irq_enable(dev, MT_INT_TX_DONE_MCU);

	return 0;
}

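/* Build the per-chip queue layout: for each software queue, record which
 * WFDMA engine serves it, its interrupt mask and its hardware ring id.
 * mt7915 splits the queues across WFDMA0/WFDMA1; the later chips handled
 * by this driver (mt7916/mt798x) run everything on WFDMA0.
 */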
static void mt7915_dma_config(struct mt7915_dev *dev)
{
#define Q_CONFIG(q, wfdma, int, id) do {		\
		if (wfdma)				\
			dev->wfdma_mask |= (1 << (q));	\
		dev->q_int_mask[(q)] = int;		\
		dev->q_id[(q)] = id;			\
	} while (0)

#define MCUQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(q, (wfdma), (int), (id))
#define RXQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(__RXQ(q), (wfdma), (int), (id))
#define TXQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(__TXQ(q), (wfdma), (int), (id))
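	/* Example expansion (illustrative only):
	 *
	 *	RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0,
	 *		   MT7915_RXQ_BAND0);
	 *
	 * records MT_INT_RX_DONE_BAND0 as the interrupt mask and
	 * MT7915_RXQ_BAND0 as the hardware ring id for queue
	 * __RXQ(MT_RXQ_MAIN), and sets that queue's bit in dev->wfdma_mask
	 * if the wfdma argument is non-zero.
	 */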

	if (is_mt7915(&dev->mt76)) {
		RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0,
			   MT7915_RXQ_BAND0);
		RXQ_CONFIG(MT_RXQ_MCU, WFDMA1, MT_INT_RX_DONE_WM,
			   MT7915_RXQ_MCU_WM);
		RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA1, MT_INT_RX_DONE_WA,
			   MT7915_RXQ_MCU_WA);
		RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1,
			   MT7915_RXQ_BAND1);
		RXQ_CONFIG(MT_RXQ_BAND1_WA, WFDMA1, MT_INT_RX_DONE_WA_EXT,
			   MT7915_RXQ_MCU_WA_EXT);
		RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA1, MT_INT_RX_DONE_WA_MAIN,
			   MT7915_RXQ_MCU_WA);
		TXQ_CONFIG(0, WFDMA1, MT_INT_TX_DONE_BAND0, MT7915_TXQ_BAND0);
		TXQ_CONFIG(1, WFDMA1, MT_INT_TX_DONE_BAND1, MT7915_TXQ_BAND1);
		MCUQ_CONFIG(MT_MCUQ_WM, WFDMA1, MT_INT_TX_DONE_MCU_WM,
			    MT7915_TXQ_MCU_WM);
		MCUQ_CONFIG(MT_MCUQ_WA, WFDMA1, MT_INT_TX_DONE_MCU_WA,
			    MT7915_TXQ_MCU_WA);
		MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA1, MT_INT_TX_DONE_FWDL,
			    MT7915_TXQ_FWDL);
	} else {
		RXQ_CONFIG(MT_RXQ_MCU, WFDMA0, MT_INT_RX_DONE_WM,
			   MT7916_RXQ_MCU_WM);
		RXQ_CONFIG(MT_RXQ_BAND1_WA, WFDMA0, MT_INT_RX_DONE_WA_EXT_MT7916,
			   MT7916_RXQ_MCU_WA_EXT);
		MCUQ_CONFIG(MT_MCUQ_WM, WFDMA0, MT_INT_TX_DONE_MCU_WM,
			    MT7915_TXQ_MCU_WM);
		MCUQ_CONFIG(MT_MCUQ_WA, WFDMA0, MT_INT_TX_DONE_MCU_WA_MT7916,
			    MT7915_TXQ_MCU_WA);
		MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA0, MT_INT_TX_DONE_FWDL,
			    MT7915_TXQ_FWDL);

		if (is_mt7916(&dev->mt76) &&
		    mtk_wed_device_active(&dev->mt76.mmio.wed)) {
			RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_WED_RX_DONE_BAND0_MT7916,
				   MT7916_RXQ_BAND0);
			RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_WED_RX_DONE_WA_MT7916,
				   MT7916_RXQ_MCU_WA);
			if (dev->hif2)
				RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0,
					   MT_INT_RX_DONE_BAND1_MT7916,
					   MT7916_RXQ_BAND1);
			else
				RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0,
					   MT_INT_WED_RX_DONE_BAND1_MT7916,
					   MT7916_RXQ_BAND1);
			RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_WED_RX_DONE_WA_MAIN_MT7916,
				   MT7916_RXQ_MCU_WA_MAIN);
			TXQ_CONFIG(0, WFDMA0, MT_INT_WED_TX_DONE_BAND0,
				   MT7915_TXQ_BAND0);
			TXQ_CONFIG(1, WFDMA0, MT_INT_WED_TX_DONE_BAND1,
				   MT7915_TXQ_BAND1);
		} else {
			RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0_MT7916,
				   MT7916_RXQ_BAND0);
			RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_RX_DONE_WA,
				   MT7916_RXQ_MCU_WA);
			RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1_MT7916,
				   MT7916_RXQ_BAND1);
			RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_RX_DONE_WA_MAIN_MT7916,
				   MT7916_RXQ_MCU_WA_MAIN);
			TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0,
				   MT7915_TXQ_BAND0);
			TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1,
				   MT7915_TXQ_BAND1);
		}
	}
}

static void __mt7915_dma_prefetch(struct mt7915_dev *dev, u32 ofs)
{
#define PREFETCH(_base, _depth)	((_base) << 16 | (_depth))
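	/* PREFETCH() packs a ring's SRAM base into bits 31..16 and its
	 * prefetch depth into the low bits; e.g. PREFETCH(0x40, 0x4)
	 * yields 0x00400004.
	 */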
	u32 base = 0;

	/* prefetch SRAM wrapping boundary for tx/rx ring. */
	mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_FWDL) + ofs, PREFETCH(0x0, 0x4));
	mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WM) + ofs, PREFETCH(0x40, 0x4));
	mt76_wr(dev, MT_TXQ_EXT_CTRL(0) + ofs, PREFETCH(0x80, 0x4));
	mt76_wr(dev, MT_TXQ_EXT_CTRL(1) + ofs, PREFETCH(0xc0, 0x4));
	mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs, PREFETCH(0x100, 0x4));

	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU) + ofs,
		PREFETCH(0x140, 0x4));
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU_WA) + ofs,
		PREFETCH(0x180, 0x4));
	if (!is_mt7915(&dev->mt76)) {
		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN_WA) + ofs,
			PREFETCH(0x1c0, 0x4));
		base = 0x40;
	}
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND1_WA) + ofs,
		PREFETCH(0x1c0 + base, 0x4));
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN) + ofs,
		PREFETCH(0x200 + base, 0x4));
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND1) + ofs,
		PREFETCH(0x240 + base, 0x4));

	/* for mt7915, the ring next to the last used ring must also be
	 * initialized.
	 */
	if (is_mt7915(&dev->mt76)) {
		ofs += 0x4;
		mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs,
			PREFETCH(0x140, 0x0));
		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND1_WA) + ofs,
			PREFETCH(0x200 + base, 0x0));
		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND1) + ofs,
			PREFETCH(0x280 + base, 0x0));
	}
}

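/* Program prefetch for the primary WFDMA and, when a second PCIe HIF is
 * present, mirror the same layout at the PCIE1 register offset.
 */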
void mt7915_dma_prefetch(struct mt7915_dev *dev)
{
	__mt7915_dma_prefetch(dev, 0);
	if (dev->hif2)
		__mt7915_dma_prefetch(dev, MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0));
}

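/* Quiesce the DMA engines: optionally pulse the DMASHDL/logic reset bits,
 * then clear the TX/RX enable bits in the global config. Everything is
 * repeated at the hif1 offset when a second PCIe interface is present,
 * and on WFDMA1 for mt7915.
 */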
static void mt7915_dma_disable(struct mt7915_dev *dev, bool rst)
{
	struct mt76_dev *mdev = &dev->mt76;
	u32 hif1_ofs = 0;

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	/* reset */
	if (rst) {
		mt76_clear(dev, MT_WFDMA0_RST,
			   MT_WFDMA0_RST_DMASHDL_ALL_RST |
			   MT_WFDMA0_RST_LOGIC_RST);

		mt76_set(dev, MT_WFDMA0_RST,
			 MT_WFDMA0_RST_DMASHDL_ALL_RST |
			 MT_WFDMA0_RST_LOGIC_RST);

		if (is_mt7915(mdev)) {
			mt76_clear(dev, MT_WFDMA1_RST,
				   MT_WFDMA1_RST_DMASHDL_ALL_RST |
				   MT_WFDMA1_RST_LOGIC_RST);

			mt76_set(dev, MT_WFDMA1_RST,
				 MT_WFDMA1_RST_DMASHDL_ALL_RST |
				 MT_WFDMA1_RST_LOGIC_RST);
		}

		if (dev->hif2) {
			mt76_clear(dev, MT_WFDMA0_RST + hif1_ofs,
				   MT_WFDMA0_RST_DMASHDL_ALL_RST |
				   MT_WFDMA0_RST_LOGIC_RST);

			mt76_set(dev, MT_WFDMA0_RST + hif1_ofs,
				 MT_WFDMA0_RST_DMASHDL_ALL_RST |
				 MT_WFDMA0_RST_LOGIC_RST);

			if (is_mt7915(mdev)) {
				mt76_clear(dev, MT_WFDMA1_RST + hif1_ofs,
					   MT_WFDMA1_RST_DMASHDL_ALL_RST |
					   MT_WFDMA1_RST_LOGIC_RST);

				mt76_set(dev, MT_WFDMA1_RST + hif1_ofs,
					 MT_WFDMA1_RST_DMASHDL_ALL_RST |
					 MT_WFDMA1_RST_LOGIC_RST);
			}
		}
	}

	/* disable */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_RX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	if (is_mt7915(mdev))
		mt76_clear(dev, MT_WFDMA1_GLO_CFG,
			   MT_WFDMA1_GLO_CFG_TX_DMA_EN |
			   MT_WFDMA1_GLO_CFG_RX_DMA_EN |
			   MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
			   MT_WFDMA1_GLO_CFG_OMIT_RX_INFO |
			   MT_WFDMA1_GLO_CFG_OMIT_RX_INFO_PFET2);

	if (dev->hif2) {
		mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
			   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
			   MT_WFDMA0_GLO_CFG_RX_DMA_EN |
			   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
			   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
			   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

		if (is_mt7915(mdev))
			mt76_clear(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
				   MT_WFDMA1_GLO_CFG_TX_DMA_EN |
				   MT_WFDMA1_GLO_CFG_RX_DMA_EN |
				   MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
				   MT_WFDMA1_GLO_CFG_OMIT_RX_INFO |
				   MT_WFDMA1_GLO_CFG_OMIT_RX_INFO_PFET2);
	}
}

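/* (Re)start DMA and interrupts. The TX/RX DMA enable bits are only set on
 * a clean start (!reset). When WED is being (re)started, the band TX-done
 * bits are added to the mask handed to mtk_wed_device_start(), and the
 * host keeps only MT_INT_MCU_CMD unmasked until the reset completes.
 */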
int mt7915_dma_start(struct mt7915_dev *dev, bool reset, bool wed_reset)
{
	struct mt76_dev *mdev = &dev->mt76;
	u32 hif1_ofs = 0;
	u32 irq_mask;

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	/* enable wpdma tx/rx */
	if (!reset) {
		mt76_set(dev, MT_WFDMA0_GLO_CFG,
			 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
			 MT_WFDMA0_GLO_CFG_RX_DMA_EN |
			 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
			 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

		if (is_mt7915(mdev))
			mt76_set(dev, MT_WFDMA1_GLO_CFG,
				 MT_WFDMA1_GLO_CFG_TX_DMA_EN |
				 MT_WFDMA1_GLO_CFG_RX_DMA_EN |
				 MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
				 MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);

		if (dev->hif2) {
			mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
				 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_RX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
				 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

			if (is_mt7915(mdev))
				mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
					 MT_WFDMA1_GLO_CFG_TX_DMA_EN |
					 MT_WFDMA1_GLO_CFG_RX_DMA_EN |
					 MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
					 MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);

			mt76_set(dev, MT_WFDMA_HOST_CONFIG,
				 MT_WFDMA_HOST_CONFIG_PDMA_BAND);
		}
	}

	/* enable interrupts for TX/RX rings */
	irq_mask = MT_INT_RX_DONE_MCU |
		   MT_INT_TX_DONE_MCU |
		   MT_INT_MCU_CMD;

	if (!dev->phy.mt76->band_idx)
		irq_mask |= MT_INT_BAND0_RX_DONE;

	if (dev->dbdc_support || dev->phy.mt76->band_idx)
		irq_mask |= MT_INT_BAND1_RX_DONE;

	if (mtk_wed_device_active(&dev->mt76.mmio.wed) && wed_reset) {
		u32 wed_irq_mask = irq_mask;
		int ret;

		wed_irq_mask |= MT_INT_TX_DONE_BAND0 | MT_INT_TX_DONE_BAND1;
		if (!is_mt798x(&dev->mt76))
			mt76_wr(dev, MT_INT_WED_MASK_CSR, wed_irq_mask);
		else
			mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);

		ret = mt7915_mcu_wed_enable_rx_stats(dev);
		if (ret)
			return ret;

		mtk_wed_device_start(&dev->mt76.mmio.wed, wed_irq_mask);
	}

	irq_mask = reset ? MT_INT_MCU_CMD : irq_mask;

	mt7915_irq_enable(dev, irq_mask);
	mt7915_irq_disable(dev, 0);

	return 0;
}

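/* Full DMA bring-up: reset the descriptor indices, turn delay interrupts
 * off, program the prefetch layout, enable the WFDMA busy bits, poll for
 * the HIF to go idle, then hand over to mt7915_dma_start().
 */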
static int mt7915_dma_enable(struct mt7915_dev *dev, bool reset)
{
	struct mt76_dev *mdev = &dev->mt76;
	u32 hif1_ofs = 0;

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	/* reset dma idx */
	mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
	if (is_mt7915(mdev))
		mt76_wr(dev, MT_WFDMA1_RST_DTX_PTR, ~0);
	if (dev->hif2) {
		mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR + hif1_ofs, ~0);
		if (is_mt7915(mdev))
			mt76_wr(dev, MT_WFDMA1_RST_DTX_PTR + hif1_ofs, ~0);
	}

	/* configure delay interrupt off */
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);
	if (is_mt7915(mdev)) {
		mt76_wr(dev, MT_WFDMA1_PRI_DLY_INT_CFG0, 0);
	} else {
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1, 0);
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2, 0);
	}

	if (dev->hif2) {
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0 + hif1_ofs, 0);
		if (is_mt7915(mdev)) {
			mt76_wr(dev, MT_WFDMA1_PRI_DLY_INT_CFG0 +
				hif1_ofs, 0);
		} else {
			mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1 +
				hif1_ofs, 0);
			mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2 +
				hif1_ofs, 0);
		}
	}

	/* configure prefetch settings */
	mt7915_dma_prefetch(dev);

	/* hif: wait for WFDMA to become idle */
	mt76_set(dev, MT_WFDMA0_BUSY_ENA,
		 MT_WFDMA0_BUSY_ENA_TX_FIFO0 |
		 MT_WFDMA0_BUSY_ENA_TX_FIFO1 |
		 MT_WFDMA0_BUSY_ENA_RX_FIFO);

	if (is_mt7915(mdev))
		mt76_set(dev, MT_WFDMA1_BUSY_ENA,
			 MT_WFDMA1_BUSY_ENA_TX_FIFO0 |
			 MT_WFDMA1_BUSY_ENA_TX_FIFO1 |
			 MT_WFDMA1_BUSY_ENA_RX_FIFO);

	if (dev->hif2) {
		mt76_set(dev, MT_WFDMA0_BUSY_ENA + hif1_ofs,
			 MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 |
			 MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 |
			 MT_WFDMA0_PCIE1_BUSY_ENA_RX_FIFO);

		if (is_mt7915(mdev))
			mt76_set(dev, MT_WFDMA1_BUSY_ENA + hif1_ofs,
				 MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO0 |
				 MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO1 |
				 MT_WFDMA1_PCIE1_BUSY_ENA_RX_FIFO);
	}

	mt76_poll(dev, MT_WFDMA_EXT_CSR_HIF_MISC,
		  MT_WFDMA_EXT_CSR_HIF_MISC_BUSY, 0, 1000);

	return mt7915_dma_start(dev, reset, true);
}

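/* One-time DMA setup during device initialization: configure the queue
 * layout, allocate every TX/MCU/RX ring, wire up WED-owned rings where
 * applicable, then start NAPI and DMA.
 */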
int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
{
	struct mt76_dev *mdev = &dev->mt76;
	u32 wa_rx_base, wa_rx_idx;
	u32 hif1_ofs = 0;
	int ret;

	mt7915_dma_config(dev);

	mt76_dma_attach(&dev->mt76);

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	mt7915_dma_disable(dev, true);

	if (mtk_wed_device_active(&mdev->mmio.wed)) {
		if (!is_mt798x(mdev)) {
			u8 wed_control_rx1 = is_mt7915(mdev) ? 1 : 2;

			mt76_set(dev, MT_WFDMA_HOST_CONFIG,
				 MT_WFDMA_HOST_CONFIG_WED);
			mt76_wr(dev, MT_WFDMA_WED_RING_CONTROL,
				FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX0, 18) |
				FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX1, 19) |
				FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_RX1,
					   wed_control_rx1));
			if (is_mt7915(mdev))
				mt76_rmw(dev, MT_WFDMA0_EXT0_CFG, MT_WFDMA0_EXT0_RXWB_KEEP,
					 MT_WFDMA0_EXT0_RXWB_KEEP);
		}
	} else {
		mt76_clear(dev, MT_WFDMA_HOST_CONFIG, MT_WFDMA_HOST_CONFIG_WED);
	}

	/* init tx queue */
	ret = mt7915_init_tx_queues(&dev->phy,
				    MT_TXQ_ID(dev->phy.mt76->band_idx),
				    MT7915_TX_RING_SIZE,
				    MT_TXQ_RING_BASE(0));
	if (ret)
		return ret;

	if (phy2) {
		ret = mt7915_init_tx_queues(phy2,
					    MT_TXQ_ID(phy2->mt76->band_idx),
					    MT7915_TX_RING_SIZE,
					    MT_TXQ_RING_BASE(1));
		if (ret)
			return ret;
	}

	/* command to WM */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM,
				  MT_MCUQ_ID(MT_MCUQ_WM),
				  MT7915_TX_MCU_RING_SIZE,
				  MT_MCUQ_RING_BASE(MT_MCUQ_WM));
	if (ret)
		return ret;

	/* command to WA */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WA,
				  MT_MCUQ_ID(MT_MCUQ_WA),
				  MT7915_TX_MCU_RING_SIZE,
				  MT_MCUQ_RING_BASE(MT_MCUQ_WA));
	if (ret)
		return ret;

	/* firmware download */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL,
				  MT_MCUQ_ID(MT_MCUQ_FWDL),
				  MT7915_TX_FWDL_RING_SIZE,
				  MT_MCUQ_RING_BASE(MT_MCUQ_FWDL));
	if (ret)
		return ret;

	/* event from WM */
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
			       MT_RXQ_ID(MT_RXQ_MCU),
			       MT7915_RX_MCU_RING_SIZE,
			       MT_RX_BUF_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MCU));
	if (ret)
		return ret;

	/* event from WA */
	if (mtk_wed_device_active(&mdev->mmio.wed) && is_mt7915(mdev)) {
		wa_rx_base = MT_WED_RX_RING_BASE;
		wa_rx_idx = MT7915_RXQ_MCU_WA;
		mdev->q_rx[MT_RXQ_MCU_WA].flags = MT_WED_Q_TXFREE;
		mdev->q_rx[MT_RXQ_MCU_WA].wed = &mdev->mmio.wed;
	} else {
		wa_rx_base = MT_RXQ_RING_BASE(MT_RXQ_MCU_WA);
		wa_rx_idx = MT_RXQ_ID(MT_RXQ_MCU_WA);
	}
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
			       wa_rx_idx, MT7915_RX_MCU_RING_SIZE,
			       MT_RX_BUF_SIZE, wa_rx_base);
	if (ret)
		return ret;

	/* rx data queue for band0 */
	if (!dev->phy.mt76->band_idx) {
		if (mtk_wed_device_active(&mdev->mmio.wed) &&
		    mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
			mdev->q_rx[MT_RXQ_MAIN].flags =
				MT_WED_Q_RX(MT7915_RXQ_BAND0);
			dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
			mdev->q_rx[MT_RXQ_MAIN].wed = &mdev->mmio.wed;
		}

		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
				       MT_RXQ_ID(MT_RXQ_MAIN),
				       MT7915_RX_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_MAIN));
		if (ret)
			return ret;
	}

	/* tx free notify event from WA for band0 */
	if (!is_mt7915(mdev)) {
		wa_rx_base = MT_RXQ_RING_BASE(MT_RXQ_MAIN_WA);
		wa_rx_idx = MT_RXQ_ID(MT_RXQ_MAIN_WA);

		if (mtk_wed_device_active(&mdev->mmio.wed)) {
			mdev->q_rx[MT_RXQ_MAIN_WA].flags = MT_WED_Q_TXFREE;
			mdev->q_rx[MT_RXQ_MAIN_WA].wed = &mdev->mmio.wed;
			if (is_mt7916(mdev)) {
				wa_rx_base = MT_WED_RX_RING_BASE;
				wa_rx_idx = MT7915_RXQ_MCU_WA;
			}
		}

		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN_WA],
				       wa_rx_idx, MT7915_RX_MCU_RING_SIZE,
				       MT_RX_BUF_SIZE, wa_rx_base);
		if (ret)
			return ret;
	}

	if (dev->dbdc_support || dev->phy.mt76->band_idx) {
		if (mtk_wed_device_active(&mdev->mmio.wed) &&
		    mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
			mdev->q_rx[MT_RXQ_BAND1].flags =
				MT_WED_Q_RX(MT7915_RXQ_BAND1);
			dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
			mdev->q_rx[MT_RXQ_BAND1].wed = &mdev->mmio.wed;
		}

		/* rx data queue for band1 */
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1],
				       MT_RXQ_ID(MT_RXQ_BAND1),
				       MT7915_RX_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_BAND1) + hif1_ofs);
		if (ret)
			return ret;

		/* tx free notify event from WA for band1 */
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1_WA],
				       MT_RXQ_ID(MT_RXQ_BAND1_WA),
				       MT7915_RX_MCU_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_BAND1_WA) + hif1_ofs);
		if (ret)
			return ret;
	}

	ret = mt76_init_queues(dev, mt76_dma_rx_poll);
	if (ret < 0)
		return ret;

	netif_napi_add_tx(dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
			  mt7915_poll_tx);
	napi_enable(&dev->mt76.tx_napi);

	mt7915_dma_enable(dev, false);

	return 0;
}

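/* Recover the DMA engines after an error or firmware restart: drain all
 * hardware queues, optionally reset WFSYS, reset WED and every TX/MCU/RX
 * ring (WED TX-free rings are skipped), then re-enable DMA.
 */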
int mt7915_dma_reset(struct mt7915_dev *dev, bool force)
{
	struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1];
	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
	int i;

	/* clean up hw queues */
	for (i = 0; i < ARRAY_SIZE(dev->mt76.phy.q_tx); i++) {
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
		if (mphy_ext)
			mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[i], true);
	}

	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_mcu); i++)
		mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_cleanup(dev, &dev->mt76.q_rx[i]);

	/* reset wfsys */
	if (force)
		mt7915_wfsys_reset(dev);

	if (mtk_wed_device_active(wed))
		mtk_wed_device_dma_reset(wed);

	mt7915_dma_disable(dev, force);
	mt76_wed_dma_reset(&dev->mt76);

	/* reset hw queues */
	for (i = 0; i < __MT_TXQ_MAX; i++) {
		mt76_dma_reset_tx_queue(&dev->mt76, dev->mphy.q_tx[i]);
		if (mphy_ext)
			mt76_dma_reset_tx_queue(&dev->mt76, mphy_ext->q_tx[i]);
	}

	for (i = 0; i < __MT_MCUQ_MAX; i++)
		mt76_queue_reset(dev, dev->mt76.q_mcu[i]);

	mt76_for_each_q_rx(&dev->mt76, i) {
		if (mt76_queue_is_wed_tx_free(&dev->mt76.q_rx[i]))
			continue;

		mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
	}

	mt76_tx_status_check(&dev->mt76, true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_reset(dev, i);

	if (mtk_wed_device_active(wed) && is_mt7915(&dev->mt76))
		mt76_rmw(dev, MT_WFDMA0_EXT0_CFG, MT_WFDMA0_EXT0_RXWB_KEEP,
			 MT_WFDMA0_EXT0_RXWB_KEEP);

	mt7915_dma_enable(dev, !force);

	return 0;
}

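/* Teardown counterpart of mt7915_dma_init(): stop the engines with a full
 * reset and free all DMA resources.
 */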
void mt7915_dma_cleanup(struct mt7915_dev *dev)
{
	mt7915_dma_disable(dev, true);

	mt76_dma_cleanup(&dev->mt76);
}