// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
 * All rights reserved.
 *
 * Purpose: driver entry for initialization, open, close, tx and rx.
 *
 * Author: Lyndon Chen
 *
 * Date: Jan 8, 2003
 *
 * Functions:
 *
 * vt6655_probe - module initialization (insmod) driver entry
 * vt6655_remove - module removal entry
 * device_free_info - device structure resource free function
 * device_print_info - print out resource and device information
 * device_rx_srv - rx service function
 * device_alloc_rx_buf - rx buffer pre-allocation function
 * device_free_rx_buf - free rx buffer function
 * device_free_tx_buf - free tx buffer function
 * device_init_rd0_ring - initialize rd dma0 ring
 * device_init_rd1_ring - initialize rd dma1 ring
 * device_init_td0_ring - initialize tx dma0 ring buffer
 * device_init_td1_ring - initialize tx dma1 ring buffer
 * device_init_registers - initialize MAC & BBP & RF internal registers
 * device_init_rings - initialize tx/rx ring buffers
 * device_free_rings - free all allocated ring buffers
 * device_tx_srv - tx interrupt service function
 *
 * Revision History:
 */
33
34#include <linux/file.h>
35#include "device.h"
36#include "card.h"
37#include "channel.h"
38#include "baseband.h"
39#include "mac.h"
40#include "power.h"
41#include "rxtx.h"
42#include "dpc.h"
43#include "rf.h"
44#include <linux/delay.h>
45#include <linux/kthread.h>
46#include <linux/slab.h>
47
48/*--------------------- Static Definitions -------------------------*/
49/*
50 * Define module options
51 */
52MODULE_AUTHOR("VIA Networking Technologies, Inc., <lyndonchen@vntek.com.tw>");
53MODULE_LICENSE("GPL");
54MODULE_DESCRIPTION("VIA Networking Solomon-A/B/G Wireless LAN Adapter Driver");
55
56#define DEVICE_PARAM(N, D)
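/*
 * DEVICE_PARAM() expands to nothing, so the DEVICE_PARAM() entries below
 * are documentation only; the corresponding *_DEF defaults are applied
 * directly in device_get_options().
 */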
57
58#define RX_DESC_MIN0 16
59#define RX_DESC_MAX0 128
60#define RX_DESC_DEF0 32
61DEVICE_PARAM(RxDescriptors0, "Number of receive descriptors0");
62
63#define RX_DESC_MIN1 16
64#define RX_DESC_MAX1 128
65#define RX_DESC_DEF1 32
66DEVICE_PARAM(RxDescriptors1, "Number of receive descriptors1");
67
68#define TX_DESC_MIN0 16
69#define TX_DESC_MAX0 128
70#define TX_DESC_DEF0 32
71DEVICE_PARAM(TxDescriptors0, "Number of transmit descriptors0");
72
73#define TX_DESC_MIN1 16
74#define TX_DESC_MAX1 128
75#define TX_DESC_DEF1 64
76DEVICE_PARAM(TxDescriptors1, "Number of transmit descriptors1");
77
78#define INT_WORKS_DEF 20
79#define INT_WORKS_MIN 10
80#define INT_WORKS_MAX 64
81
82DEVICE_PARAM(int_works, "Number of packets per interrupt services");
83
84#define RTS_THRESH_DEF 2347
85
86#define FRAG_THRESH_DEF 2346
87
88#define SHORT_RETRY_MIN 0
89#define SHORT_RETRY_MAX 31
90#define SHORT_RETRY_DEF 8
91
92DEVICE_PARAM(ShortRetryLimit, "Short frame retry limits");
93
94#define LONG_RETRY_MIN 0
95#define LONG_RETRY_MAX 15
96#define LONG_RETRY_DEF 4
97
98DEVICE_PARAM(LongRetryLimit, "long frame retry limits");
99
/* BasebandType[] baseband type selected
 * 0: indicates 802.11a type
 * 1: indicates 802.11b type
 * 2: indicates 802.11g type
 */
105#define BBP_TYPE_MIN 0
106#define BBP_TYPE_MAX 2
107#define BBP_TYPE_DEF 2
108
109DEVICE_PARAM(BasebandType, "baseband type");
110
111/*
112 * Static vars definitions
113 */
114static const struct pci_device_id vt6655_pci_id_table[] = {
115 { PCI_VDEVICE(VIA, 0x3253) },
116 { 0, }
117};
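/*
 * Only the VIA VT6655 (vendor VIA, device 0x3253) is matched by this driver;
 * the table is exported through MODULE_DEVICE_TABLE() near the end of this
 * file so the module can be autoloaded when the device is present.
 */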
118
119/*--------------------- Static Functions --------------------------*/
120
121static int vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent);
122static void device_free_info(struct vnt_private *priv);
123static void device_print_info(struct vnt_private *priv);
124
125static void vt6655_mac_write_bssid_addr(void __iomem *iobase, const u8 *mac_addr);
126static void vt6655_mac_read_ether_addr(void __iomem *iobase, u8 *mac_addr);
127
128static int device_init_rd0_ring(struct vnt_private *priv);
129static int device_init_rd1_ring(struct vnt_private *priv);
130static int device_init_td0_ring(struct vnt_private *priv);
131static int device_init_td1_ring(struct vnt_private *priv);
132
133static int device_rx_srv(struct vnt_private *priv, unsigned int idx);
134static int device_tx_srv(struct vnt_private *priv, unsigned int idx);
135static bool device_alloc_rx_buf(struct vnt_private *, struct vnt_rx_desc *);
136static void device_free_rx_buf(struct vnt_private *priv,
137 struct vnt_rx_desc *rd);
138static void device_init_registers(struct vnt_private *priv);
139static void device_free_tx_buf(struct vnt_private *, struct vnt_tx_desc *);
140static void device_free_td0_ring(struct vnt_private *priv);
141static void device_free_td1_ring(struct vnt_private *priv);
142static void device_free_rd0_ring(struct vnt_private *priv);
143static void device_free_rd1_ring(struct vnt_private *priv);
144static void device_free_rings(struct vnt_private *priv);
145
146/*--------------------- Export Variables --------------------------*/
147
148/*--------------------- Export Functions --------------------------*/
149
static void vt6655_remove(struct pci_dev *pcid)
{
	struct vnt_private *priv = pci_get_drvdata(pcid);

	if (!priv)
		return;
	device_free_info(priv);
}
158
159static void device_get_options(struct vnt_private *priv)
160{
161 struct vnt_options *opts = &priv->opts;
162
163 opts->rx_descs0 = RX_DESC_DEF0;
164 opts->rx_descs1 = RX_DESC_DEF1;
165 opts->tx_descs[0] = TX_DESC_DEF0;
166 opts->tx_descs[1] = TX_DESC_DEF1;
167 opts->int_works = INT_WORKS_DEF;
168
169 opts->short_retry = SHORT_RETRY_DEF;
170 opts->long_retry = LONG_RETRY_DEF;
171 opts->bbp_type = BBP_TYPE_DEF;
172}
173
174static void
175device_set_options(struct vnt_private *priv)
176{
177 priv->byShortRetryLimit = priv->opts.short_retry;
178 priv->byLongRetryLimit = priv->opts.long_retry;
179 priv->byBBType = priv->opts.bbp_type;
180 priv->packet_type = priv->byBBType;
181 priv->byAutoFBCtrl = AUTO_FB_0;
182 priv->update_bbvga = true;
183 priv->preamble_type = 0;
184
185 pr_debug(" byShortRetryLimit= %d\n", (int)priv->byShortRetryLimit);
186 pr_debug(" byLongRetryLimit= %d\n", (int)priv->byLongRetryLimit);
187 pr_debug(" preamble_type= %d\n", (int)priv->preamble_type);
188 pr_debug(" byShortPreamble= %d\n", (int)priv->byShortPreamble);
189 pr_debug(" byBBType= %d\n", (int)priv->byBBType);
190}
191
192static void vt6655_mac_write_bssid_addr(void __iomem *iobase, const u8 *mac_addr)
193{
194 iowrite8(1, iobase + MAC_REG_PAGE1SEL);
195 for (int i = 0; i < 6; i++)
196 iowrite8(mac_addr[i], iobase + MAC_REG_BSSID0 + i);
197 iowrite8(0, iobase + MAC_REG_PAGE1SEL);
198}
199
200static void vt6655_mac_read_ether_addr(void __iomem *iobase, u8 *mac_addr)
201{
202 iowrite8(1, iobase + MAC_REG_PAGE1SEL);
203 for (int i = 0; i < 6; i++)
204 mac_addr[i] = ioread8(iobase + MAC_REG_PAR0 + i);
205 iowrite8(0, iobase + MAC_REG_PAGE1SEL);
206}
207
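/*
 * Small MMIO helpers used throughout this file: vt6655_mac_dma_ctl() starts
 * a DMA engine if it is idle or wakes it if it is already running, and the
 * set/clear-bit pairs below toggle ENCFG_PROTECTMD and ENCFG_BARKERPREAM in
 * MAC_REG_ENCFG for the ERP protection and Barker preamble settings driven
 * from vnt_bss_info_changed().
 */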
208static void vt6655_mac_dma_ctl(void __iomem *iobase, u8 reg_index)
209{
210 u32 reg_value;
211
212 reg_value = ioread32(iobase + reg_index);
213 if (reg_value & DMACTL_RUN)
214 iowrite32(DMACTL_WAKE, iobase + reg_index);
215 else
216 iowrite32(DMACTL_RUN, iobase + reg_index);
217}
218
219static void vt6655_mac_set_bits(void __iomem *iobase, u32 mask)
220{
221 u32 reg_value;
222
223 reg_value = ioread32(iobase + MAC_REG_ENCFG);
224 reg_value = reg_value | mask;
225 iowrite32(reg_value, iobase + MAC_REG_ENCFG);
226}
227
228static void vt6655_mac_clear_bits(void __iomem *iobase, u32 mask)
229{
230 u32 reg_value;
231
232 reg_value = ioread32(iobase + MAC_REG_ENCFG);
233 reg_value = reg_value & ~mask;
234 iowrite32(reg_value, iobase + MAC_REG_ENCFG);
235}
236
237static void vt6655_mac_en_protect_md(void __iomem *iobase)
238{
239 vt6655_mac_set_bits(iobase, ENCFG_PROTECTMD);
240}
241
242static void vt6655_mac_dis_protect_md(void __iomem *iobase)
243{
244 vt6655_mac_clear_bits(iobase, ENCFG_PROTECTMD);
245}
246
247static void vt6655_mac_en_barker_preamble_md(void __iomem *iobase)
248{
249 vt6655_mac_set_bits(iobase, ENCFG_BARKERPREAM);
250}
251
252static void vt6655_mac_dis_barker_preamble_md(void __iomem *iobase)
253{
254 vt6655_mac_clear_bits(iobase, ENCFG_BARKERPREAM);
255}
256
257/*
258 * Initialisation of MAC & BBP registers
259 */
260
261static void device_init_registers(struct vnt_private *priv)
262{
263 unsigned long flags;
264 unsigned int ii;
265 unsigned char byValue;
266 unsigned char byCCKPwrdBm = 0;
267 unsigned char byOFDMPwrdBm = 0;
268
269 MACbShutdown(priv);
270 bb_software_reset(priv);
271
272 /* Do MACbSoftwareReset in MACvInitialize */
273 MACbSoftwareReset(priv);
274
275 priv->bAES = false;
276
277 /* Only used in 11g type, sync with ERP IE */
278 priv->bProtectMode = false;
279
280 priv->bNonERPPresent = false;
281 priv->bBarkerPreambleMd = false;
282 priv->wCurrentRate = RATE_1M;
283 priv->byTopOFDMBasicRate = RATE_24M;
284 priv->byTopCCKBasicRate = RATE_1M;
285
286 /* init MAC */
287 MACvInitialize(priv);
288
289 /* Get Local ID */
290 priv->local_id = ioread8(priv->port_offset + MAC_REG_LOCALID);
291
	spin_lock_irqsave(&priv->lock, flags);

	SROMvReadAllContents(priv->port_offset, priv->abyEEPROM);

	spin_unlock_irqrestore(&priv->lock, flags);
297
298 /* Get Channel range */
299 priv->byMinChannel = 1;
300 priv->byMaxChannel = CB_MAX_CHANNEL;
301
	/* Get Antenna */
	byValue = SROMbyReadEmbedded(priv->port_offset, EEP_OFS_ANTENNA);
304 if (byValue & EEP_ANTINV)
305 priv->bTxRxAntInv = true;
306 else
307 priv->bTxRxAntInv = false;
308
309 byValue &= (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN);
310 /* if not set default is All */
311 if (byValue == 0)
312 byValue = (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN);
313
314 if (byValue == (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN)) {
315 priv->byAntennaCount = 2;
316 priv->byTxAntennaMode = ANT_B;
317 priv->dwTxAntennaSel = 1;
318 priv->dwRxAntennaSel = 1;
319
320 if (priv->bTxRxAntInv)
321 priv->byRxAntennaMode = ANT_A;
322 else
323 priv->byRxAntennaMode = ANT_B;
324 } else {
325 priv->byAntennaCount = 1;
326 priv->dwTxAntennaSel = 0;
327 priv->dwRxAntennaSel = 0;
328
329 if (byValue & EEP_ANTENNA_AUX) {
330 priv->byTxAntennaMode = ANT_A;
331
332 if (priv->bTxRxAntInv)
333 priv->byRxAntennaMode = ANT_B;
334 else
335 priv->byRxAntennaMode = ANT_A;
336 } else {
337 priv->byTxAntennaMode = ANT_B;
338
339 if (priv->bTxRxAntInv)
340 priv->byRxAntennaMode = ANT_A;
341 else
342 priv->byRxAntennaMode = ANT_B;
343 }
344 }
345
346 /* Set initial antenna mode */
	bb_set_tx_antenna_mode(priv, priv->byTxAntennaMode);
	bb_set_rx_antenna_mode(priv, priv->byRxAntennaMode);
349
350 /* zonetype initial */
351 priv->byOriginalZonetype = priv->abyEEPROM[EEP_OFS_ZONETYPE];
352
353 if (!priv->bZoneRegExist)
354 priv->byZoneType = priv->abyEEPROM[EEP_OFS_ZONETYPE];
355
356 pr_debug("priv->byZoneType = %x\n", priv->byZoneType);
357
358 /* Init RF module */
359 RFbInit(priv);
360
	/* Get desired power value */
	priv->cur_pwr = 0xFF;
	priv->byCCKPwr = SROMbyReadEmbedded(priv->port_offset, EEP_OFS_PWR_CCK);
	priv->byOFDMPwrG = SROMbyReadEmbedded(priv->port_offset,
					      EEP_OFS_PWR_OFDMG);
366
367 /* Load power Table */
368 for (ii = 0; ii < CB_MAX_CHANNEL_24G; ii++) {
		priv->abyCCKPwrTbl[ii + 1] =
			SROMbyReadEmbedded(priv->port_offset,
					   (unsigned char)(ii + EEP_OFS_CCK_PWR_TBL));
		if (priv->abyCCKPwrTbl[ii + 1] == 0)
			priv->abyCCKPwrTbl[ii + 1] = priv->byCCKPwr;

		priv->abyOFDMPwrTbl[ii + 1] =
			SROMbyReadEmbedded(priv->port_offset,
					   (unsigned char)(ii + EEP_OFS_OFDM_PWR_TBL));
378 if (priv->abyOFDMPwrTbl[ii + 1] == 0)
379 priv->abyOFDMPwrTbl[ii + 1] = priv->byOFDMPwrG;
380
381 priv->abyCCKDefaultPwr[ii + 1] = byCCKPwrdBm;
382 priv->abyOFDMDefaultPwr[ii + 1] = byOFDMPwrdBm;
383 }
384
	/* recover channels 12, 13, 14 for EUROPE from channel 11 */
386 for (ii = 11; ii < 14; ii++) {
387 priv->abyCCKPwrTbl[ii] = priv->abyCCKPwrTbl[10];
388 priv->abyOFDMPwrTbl[ii] = priv->abyOFDMPwrTbl[10];
389 }
390
391 /* Load OFDM A Power Table */
392 for (ii = 0; ii < CB_MAX_CHANNEL_5G; ii++) {
		priv->abyOFDMPwrTbl[ii + CB_MAX_CHANNEL_24G + 1] =
			SROMbyReadEmbedded(priv->port_offset,
					   (unsigned char)(ii + EEP_OFS_OFDMA_PWR_TBL));

		priv->abyOFDMDefaultPwr[ii + CB_MAX_CHANNEL_24G + 1] =
			SROMbyReadEmbedded(priv->port_offset,
					   (unsigned char)(ii + EEP_OFS_OFDMA_PWR_dBm));
400 }
401
402 if (priv->local_id > REV_ID_VT3253_B1) {
403 VT6655_MAC_SELECT_PAGE1(priv->port_offset);
404
405 iowrite8(MSRCTL1_TXPWR | MSRCTL1_CSAPAREN, priv->port_offset + MAC_REG_MSRCTL + 1);
406
407 VT6655_MAC_SELECT_PAGE0(priv->port_offset);
408 }
409
410 /* use relative tx timeout and 802.11i D4 */
	vt6655_mac_word_reg_bits_on(priv->port_offset, MAC_REG_CFG,
				    (CFG_TKIPOPT | CFG_NOTXTIMEOUT));
413
414 /* set performance parameter by registry */
	vt6655_mac_set_short_retry_limit(priv, priv->byShortRetryLimit);
	MACvSetLongRetryLimit(priv, priv->byLongRetryLimit);
417
418 /* reset TSF counter */
419 iowrite8(TFTCTL_TSFCNTRST, priv->port_offset + MAC_REG_TFTCTL);
420 /* enable TSF counter */
421 iowrite8(TFTCTL_TSFCNTREN, priv->port_offset + MAC_REG_TFTCTL);
422
423 /* initialize BBP registers */
424 bb_vt3253_init(priv);
425
	if (priv->update_bbvga) {
		priv->bbvga_current = priv->bbvga[0];
		priv->bbvga_new = priv->bbvga_current;
		bb_set_vga_gain_offset(priv, priv->bbvga[0]);
	}

	bb_set_rx_antenna_mode(priv, priv->byRxAntennaMode);
	bb_set_tx_antenna_mode(priv, priv->byTxAntennaMode);
434
435 /* Set BB and packet type at the same time. */
436 /* Set Short Slot Time, xIFS, and RSPINF. */
437 priv->wCurrentRate = RATE_54M;
438
439 priv->radio_off = false;
440
	priv->byRadioCtl = SROMbyReadEmbedded(priv->port_offset,
					      EEP_OFS_RADIOCTL);
443 priv->hw_radio_off = false;
444
445 if (priv->byRadioCtl & EEP_RADIOCTL_ENABLE) {
446 /* Get GPIO */
447 priv->byGPIO = ioread8(priv->port_offset + MAC_REG_GPIOCTL1);
448
449 if (((priv->byGPIO & GPIO0_DATA) &&
450 !(priv->byRadioCtl & EEP_RADIOCTL_INV)) ||
451 (!(priv->byGPIO & GPIO0_DATA) &&
452 (priv->byRadioCtl & EEP_RADIOCTL_INV)))
453 priv->hw_radio_off = true;
454 }
455
456 if (priv->hw_radio_off || priv->bRadioControlOff)
457 card_radio_power_off(priv);
458
	/* get permanent network address */
	SROMvReadEtherAddress(priv->port_offset, priv->abyCurrentNetAddr);
	pr_debug("Network address = %pM\n", priv->abyCurrentNetAddr);

	/* reset Rx pointer */
	CARDvSafeResetRx(priv);
	/* reset Tx pointer */
	card_safe_reset_tx(priv);

	if (priv->local_id <= REV_ID_VT3253_A1)
		vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_RCR, RCR_WPAERR);

	/* Turn On Rx DMA */
	vt6655_mac_dma_ctl(priv->port_offset, MAC_REG_RXDMACTL0);
	vt6655_mac_dma_ctl(priv->port_offset, MAC_REG_RXDMACTL1);
474
475 /* start the adapter */
476 iowrite8(HOSTCR_MACEN | HOSTCR_RXON | HOSTCR_TXON, priv->port_offset + MAC_REG_HOSTCR);
477}
478
479static void device_print_info(struct vnt_private *priv)
480{
481 dev_info(&priv->pcid->dev, "MAC=%pM IO=0x%lx Mem=0x%lx IRQ=%d\n",
482 priv->abyCurrentNetAddr, (unsigned long)priv->ioaddr,
483 (unsigned long)priv->port_offset, priv->pcid->irq);
484}
485
486static void device_free_info(struct vnt_private *priv)
487{
488 if (!priv)
489 return;
490
	if (priv->mac_hw)
		ieee80211_unregister_hw(priv->hw);

	if (priv->port_offset)
		iounmap(priv->port_offset);

	if (priv->pcid)
		pci_release_regions(priv->pcid);

	if (priv->hw)
		ieee80211_free_hw(priv->hw);
502}
503
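/*
 * All four descriptor rings (RD0, RD1, TD0, TD1) live in one coherent DMA
 * allocation; device_init_rings() carves that pool into the per-ring
 * virtual and DMA base addresses, and a second allocation provides the
 * per-TD packet buffers, the beacon buffer and a temporary buffer.
 */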
504static bool device_init_rings(struct vnt_private *priv)
505{
506 void *vir_pool;
507
	/* allocate all RD/TD rings from a single pool */
	vir_pool = dma_alloc_coherent(&priv->pcid->dev,
				      priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc) +
				      priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc) +
				      priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc) +
				      priv->opts.tx_descs[1] * sizeof(struct vnt_tx_desc),
				      &priv->pool_dma, GFP_ATOMIC);
515 if (!vir_pool) {
516 dev_err(&priv->pcid->dev, "allocate desc dma memory failed\n");
517 return false;
518 }
519
520 priv->aRD0Ring = vir_pool;
521 priv->aRD1Ring = vir_pool +
522 priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc);
523
524 priv->rd0_pool_dma = priv->pool_dma;
525 priv->rd1_pool_dma = priv->rd0_pool_dma +
526 priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc);
527
	priv->tx0_bufs = dma_alloc_coherent(&priv->pcid->dev,
					    priv->opts.tx_descs[0] * PKT_BUF_SZ +
					    priv->opts.tx_descs[1] * PKT_BUF_SZ +
					    CB_BEACON_BUF_SIZE +
					    CB_MAX_BUF_SIZE,
					    &priv->tx_bufs_dma0, GFP_ATOMIC);
	if (!priv->tx0_bufs) {
		dev_err(&priv->pcid->dev, "allocate buf dma memory failed\n");

		dma_free_coherent(&priv->pcid->dev,
				  priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc) +
				  priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc) +
				  priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc) +
				  priv->opts.tx_descs[1] * sizeof(struct vnt_tx_desc),
				  vir_pool, priv->pool_dma);
543 return false;
544 }
545
546 priv->td0_pool_dma = priv->rd1_pool_dma +
547 priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc);
548
549 priv->td1_pool_dma = priv->td0_pool_dma +
550 priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc);
551
552 /* vir_pool: pvoid type */
553 priv->apTD0Rings = vir_pool
554 + priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc)
555 + priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc);
556
557 priv->apTD1Rings = vir_pool
558 + priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc)
559 + priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc)
560 + priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc);
561
562 priv->tx1_bufs = priv->tx0_bufs +
563 priv->opts.tx_descs[0] * PKT_BUF_SZ;
564
565 priv->tx_beacon_bufs = priv->tx1_bufs +
566 priv->opts.tx_descs[1] * PKT_BUF_SZ;
567
568 priv->pbyTmpBuff = priv->tx_beacon_bufs +
569 CB_BEACON_BUF_SIZE;
570
571 priv->tx_bufs_dma1 = priv->tx_bufs_dma0 +
572 priv->opts.tx_descs[0] * PKT_BUF_SZ;
573
574 priv->tx_beacon_dma = priv->tx_bufs_dma1 +
575 priv->opts.tx_descs[1] * PKT_BUF_SZ;
576
577 return true;
578}
579
580static void device_free_rings(struct vnt_private *priv)
581{
	dma_free_coherent(&priv->pcid->dev,
			  priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc) +
			  priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc) +
			  priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc) +
			  priv->opts.tx_descs[1] * sizeof(struct vnt_tx_desc),
			  priv->aRD0Ring, priv->pool_dma);

	dma_free_coherent(&priv->pcid->dev,
			  priv->opts.tx_descs[0] * PKT_BUF_SZ +
			  priv->opts.tx_descs[1] * PKT_BUF_SZ +
			  CB_BEACON_BUF_SIZE +
			  CB_MAX_BUF_SIZE,
			  priv->tx0_bufs, priv->tx_bufs_dma0);
595}
596
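/*
 * The receive rings are circular: every descriptor gets an rd_info with a
 * freshly mapped skb, next/next_desc link the virtual and DMA addresses of
 * the following entry, and the last entry wraps back to the pool base so
 * the NIC can keep walking the ring.
 */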
597static int device_init_rd0_ring(struct vnt_private *priv)
598{
599 int i;
600 dma_addr_t curr = priv->rd0_pool_dma;
601 struct vnt_rx_desc *desc;
602 int ret;
603
604 /* Init the RD0 ring entries */
	for (i = 0; i < priv->opts.rx_descs0;
	     i++, curr += sizeof(struct vnt_rx_desc)) {
		desc = &priv->aRD0Ring[i];
		desc->rd_info = kzalloc(sizeof(*desc->rd_info), GFP_KERNEL);
609 if (!desc->rd_info) {
610 ret = -ENOMEM;
611 goto err_free_desc;
612 }
613
614 if (!device_alloc_rx_buf(priv, desc)) {
615 dev_err(&priv->pcid->dev, "can not alloc rx bufs\n");
616 ret = -ENOMEM;
617 goto err_free_rd;
618 }
619
620 desc->next = &priv->aRD0Ring[(i + 1) % priv->opts.rx_descs0];
621 desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_rx_desc));
622 }
623
624 if (i > 0)
625 priv->aRD0Ring[i - 1].next_desc = cpu_to_le32(priv->rd0_pool_dma);
626 priv->pCurrRD[0] = &priv->aRD0Ring[0];
627
628 return 0;
629
err_free_rd:
	kfree(desc->rd_info);

err_free_desc:
	while (i--) {
		desc = &priv->aRD0Ring[i];
		device_free_rx_buf(priv, desc);
		kfree(desc->rd_info);
638 }
639
640 return ret;
641}
642
643static int device_init_rd1_ring(struct vnt_private *priv)
644{
645 int i;
646 dma_addr_t curr = priv->rd1_pool_dma;
647 struct vnt_rx_desc *desc;
648 int ret;
649
650 /* Init the RD1 ring entries */
	for (i = 0; i < priv->opts.rx_descs1;
	     i++, curr += sizeof(struct vnt_rx_desc)) {
		desc = &priv->aRD1Ring[i];
		desc->rd_info = kzalloc(sizeof(*desc->rd_info), GFP_KERNEL);
655 if (!desc->rd_info) {
656 ret = -ENOMEM;
657 goto err_free_desc;
658 }
659
660 if (!device_alloc_rx_buf(priv, desc)) {
661 dev_err(&priv->pcid->dev, "can not alloc rx bufs\n");
662 ret = -ENOMEM;
663 goto err_free_rd;
664 }
665
666 desc->next = &priv->aRD1Ring[(i + 1) % priv->opts.rx_descs1];
667 desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_rx_desc));
668 }
669
670 if (i > 0)
671 priv->aRD1Ring[i - 1].next_desc = cpu_to_le32(priv->rd1_pool_dma);
672 priv->pCurrRD[1] = &priv->aRD1Ring[0];
673
674 return 0;
675
err_free_rd:
	kfree(desc->rd_info);

err_free_desc:
	while (i--) {
		desc = &priv->aRD1Ring[i];
		device_free_rx_buf(priv, desc);
		kfree(desc->rd_info);
684 }
685
686 return ret;
687}
688
689static void device_free_rd0_ring(struct vnt_private *priv)
690{
691 int i;
692
693 for (i = 0; i < priv->opts.rx_descs0; i++) {
694 struct vnt_rx_desc *desc = &priv->aRD0Ring[i];
695
		device_free_rx_buf(priv, desc);
		kfree(desc->rd_info);
698 }
699}
700
701static void device_free_rd1_ring(struct vnt_private *priv)
702{
703 int i;
704
705 for (i = 0; i < priv->opts.rx_descs1; i++) {
706 struct vnt_rx_desc *desc = &priv->aRD1Ring[i];
707
		device_free_rx_buf(priv, desc);
		kfree(desc->rd_info);
710 }
711}
712
713static int device_init_td0_ring(struct vnt_private *priv)
714{
715 int i;
716 dma_addr_t curr;
717 struct vnt_tx_desc *desc;
718 int ret;
719
720 curr = priv->td0_pool_dma;
721 for (i = 0; i < priv->opts.tx_descs[0];
722 i++, curr += sizeof(struct vnt_tx_desc)) {
723 desc = &priv->apTD0Rings[i];
		desc->td_info = kzalloc(sizeof(*desc->td_info), GFP_KERNEL);
725 if (!desc->td_info) {
726 ret = -ENOMEM;
727 goto err_free_desc;
728 }
729
730 desc->td_info->buf = priv->tx0_bufs + i * PKT_BUF_SZ;
731 desc->td_info->buf_dma = priv->tx_bufs_dma0 + i * PKT_BUF_SZ;
732
733 desc->next = &(priv->apTD0Rings[(i + 1) % priv->opts.tx_descs[0]]);
734 desc->next_desc = cpu_to_le32(curr +
735 sizeof(struct vnt_tx_desc));
736 }
737
738 if (i > 0)
739 priv->apTD0Rings[i - 1].next_desc = cpu_to_le32(priv->td0_pool_dma);
740 priv->tail_td[0] = priv->apCurrTD[0] = &priv->apTD0Rings[0];
741
742 return 0;
743
744err_free_desc:
745 while (i--) {
746 desc = &priv->apTD0Rings[i];
		kfree(desc->td_info);
748 }
749
750 return ret;
751}
752
753static int device_init_td1_ring(struct vnt_private *priv)
754{
755 int i;
756 dma_addr_t curr;
757 struct vnt_tx_desc *desc;
758 int ret;
759
760 /* Init the TD ring entries */
761 curr = priv->td1_pool_dma;
762 for (i = 0; i < priv->opts.tx_descs[1];
763 i++, curr += sizeof(struct vnt_tx_desc)) {
764 desc = &priv->apTD1Rings[i];
		desc->td_info = kzalloc(sizeof(*desc->td_info), GFP_KERNEL);
766 if (!desc->td_info) {
767 ret = -ENOMEM;
768 goto err_free_desc;
769 }
770
771 desc->td_info->buf = priv->tx1_bufs + i * PKT_BUF_SZ;
772 desc->td_info->buf_dma = priv->tx_bufs_dma1 + i * PKT_BUF_SZ;
773
774 desc->next = &(priv->apTD1Rings[(i + 1) % priv->opts.tx_descs[1]]);
775 desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_tx_desc));
776 }
777
778 if (i > 0)
779 priv->apTD1Rings[i - 1].next_desc = cpu_to_le32(priv->td1_pool_dma);
780 priv->tail_td[1] = priv->apCurrTD[1] = &priv->apTD1Rings[0];
781
782 return 0;
783
784err_free_desc:
785 while (i--) {
786 desc = &priv->apTD1Rings[i];
		kfree(desc->td_info);
788 }
789
790 return ret;
791}
792
793static void device_free_td0_ring(struct vnt_private *priv)
794{
795 int i;
796
797 for (i = 0; i < priv->opts.tx_descs[0]; i++) {
798 struct vnt_tx_desc *desc = &priv->apTD0Rings[i];
799 struct vnt_td_info *td_info = desc->td_info;
800
801 dev_kfree_skb(td_info->skb);
		kfree(desc->td_info);
803 }
804}
805
806static void device_free_td1_ring(struct vnt_private *priv)
807{
808 int i;
809
810 for (i = 0; i < priv->opts.tx_descs[1]; i++) {
811 struct vnt_tx_desc *desc = &priv->apTD1Rings[i];
812 struct vnt_td_info *td_info = desc->td_info;
813
814 dev_kfree_skb(td_info->skb);
		kfree(desc->td_info);
816 }
817}
818
819/*-----------------------------------------------------------------*/
820
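/*
 * device_rx_srv() walks the RX ring starting at pCurrRD[idx], handing each
 * host-owned descriptor to vnt_receive_frame(), re-arming it with a fresh
 * skb and returning it to the NIC; at most 16 descriptors are serviced per
 * call so one interrupt cannot monopolise the CPU.
 */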
821static int device_rx_srv(struct vnt_private *priv, unsigned int idx)
822{
823 struct vnt_rx_desc *rd;
824 int works = 0;
825
826 for (rd = priv->pCurrRD[idx];
827 rd->rd0.owner == OWNED_BY_HOST;
828 rd = rd->next) {
829 if (works++ > 15)
830 break;
831
832 if (!rd->rd_info->skb)
833 break;
834
		if (vnt_receive_frame(priv, rd)) {
836 if (!device_alloc_rx_buf(priv, rd)) {
837 dev_err(&priv->pcid->dev,
838 "can not allocate rx buf\n");
839 break;
840 }
841 }
842 rd->rd0.owner = OWNED_BY_NIC;
843 }
844
845 priv->pCurrRD[idx] = rd;
846
847 return works;
848}
849
850static bool device_alloc_rx_buf(struct vnt_private *priv,
851 struct vnt_rx_desc *rd)
852{
853 struct vnt_rd_info *rd_info = rd->rd_info;
854
	rd_info->skb = dev_alloc_skb((int)priv->rx_buf_sz);
856 if (!rd_info->skb)
857 return false;
858
859 rd_info->skb_dma =
860 dma_map_single(&priv->pcid->dev,
861 skb_put(rd_info->skb, skb_tailroom(rd_info->skb)),
862 priv->rx_buf_sz, DMA_FROM_DEVICE);
	if (dma_mapping_error(&priv->pcid->dev, rd_info->skb_dma)) {
864 dev_kfree_skb(rd_info->skb);
865 rd_info->skb = NULL;
866 return false;
867 }
868
869 *((unsigned int *)&rd->rd0) = 0; /* FIX cast */
870
871 rd->rd0.res_count = cpu_to_le16(priv->rx_buf_sz);
872 rd->rd0.owner = OWNED_BY_NIC;
873 rd->rd1.req_count = cpu_to_le16(priv->rx_buf_sz);
874 rd->buff_addr = cpu_to_le32(rd_info->skb_dma);
875
876 return true;
877}
878
879static void device_free_rx_buf(struct vnt_private *priv,
880 struct vnt_rx_desc *rd)
881{
882 struct vnt_rd_info *rd_info = rd->rd_info;
883
884 dma_unmap_single(&priv->pcid->dev, rd_info->skb_dma,
885 priv->rx_buf_sz, DMA_FROM_DEVICE);
886 dev_kfree_skb(rd_info->skb);
887}
888
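/*
 * Auto fallback rate tables: indexed by the original OFDM rate (relative to
 * RATE_18M) and the retry count (capped at 4), they give the rate the frame
 * is assumed to have gone out at after hardware fallback, which
 * vnt_int_report_rate() reports back to mac80211 in the tx status.
 */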
889static const u8 fallback_rate0[5][5] = {
890 {RATE_18M, RATE_18M, RATE_12M, RATE_12M, RATE_12M},
891 {RATE_24M, RATE_24M, RATE_18M, RATE_12M, RATE_12M},
892 {RATE_36M, RATE_36M, RATE_24M, RATE_18M, RATE_18M},
893 {RATE_48M, RATE_48M, RATE_36M, RATE_24M, RATE_24M},
894 {RATE_54M, RATE_54M, RATE_48M, RATE_36M, RATE_36M}
895};
896
897static const u8 fallback_rate1[5][5] = {
898 {RATE_18M, RATE_18M, RATE_12M, RATE_6M, RATE_6M},
899 {RATE_24M, RATE_24M, RATE_18M, RATE_6M, RATE_6M},
900 {RATE_36M, RATE_36M, RATE_24M, RATE_12M, RATE_12M},
901 {RATE_48M, RATE_48M, RATE_24M, RATE_12M, RATE_12M},
902 {RATE_54M, RATE_54M, RATE_36M, RATE_18M, RATE_18M}
903};
904
905static int vnt_int_report_rate(struct vnt_private *priv,
906 struct vnt_td_info *context, u8 tsr0, u8 tsr1)
907{
908 struct vnt_tx_fifo_head *fifo_head;
909 struct ieee80211_tx_info *info;
910 struct ieee80211_rate *rate;
911 u16 fb_option;
912 u8 tx_retry = (tsr0 & TSR0_NCR);
913 s8 idx;
914
915 if (!context)
916 return -ENOMEM;
917
918 if (!context->skb)
919 return -EINVAL;
920
921 fifo_head = (struct vnt_tx_fifo_head *)context->buf;
922 fb_option = (le16_to_cpu(fifo_head->fifo_ctl) &
923 (FIFOCTL_AUTO_FB_0 | FIFOCTL_AUTO_FB_1));
924
	info = IEEE80211_SKB_CB(context->skb);
926 idx = info->control.rates[0].idx;
927
928 if (fb_option && !(tsr1 & TSR1_TERR)) {
929 u8 tx_rate;
930 u8 retry = tx_retry;
931
		rate = ieee80211_get_tx_rate(priv->hw, info);
933 tx_rate = rate->hw_value - RATE_18M;
934
935 if (retry > 4)
936 retry = 4;
937
938 if (fb_option & FIFOCTL_AUTO_FB_0)
939 tx_rate = fallback_rate0[tx_rate][retry];
940 else if (fb_option & FIFOCTL_AUTO_FB_1)
941 tx_rate = fallback_rate1[tx_rate][retry];
942
943 if (info->band == NL80211_BAND_5GHZ)
944 idx = tx_rate - RATE_6M;
945 else
946 idx = tx_rate;
947 }
948
949 ieee80211_tx_info_clear_status(info);
950
951 info->status.rates[0].count = tx_retry;
952
953 if (!(tsr1 & TSR1_TERR)) {
954 info->status.rates[0].idx = idx;
955
956 if (info->flags & IEEE80211_TX_CTL_NO_ACK)
957 info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
958 else
959 info->flags |= IEEE80211_TX_STAT_ACK;
960 }
961
962 return 0;
963}
964
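/*
 * device_tx_srv() reclaims transmit descriptors the hardware has finished
 * with: for each completed chain head (TCR_STP) it reports the tx status
 * and fallback rate to mac80211, releases the skb and decrements
 * iTDUsed[idx], again bounded to 16 descriptors per call.
 */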
965static int device_tx_srv(struct vnt_private *priv, unsigned int idx)
966{
967 struct vnt_tx_desc *desc;
968 int works = 0;
969 unsigned char byTsr0;
970 unsigned char byTsr1;
971
972 for (desc = priv->tail_td[idx]; priv->iTDUsed[idx] > 0; desc = desc->next) {
973 if (desc->td0.owner == OWNED_BY_NIC)
974 break;
975 if (works++ > 15)
976 break;
977
978 byTsr0 = desc->td0.tsr0;
979 byTsr1 = desc->td0.tsr1;
980
981 /* Only the status of first TD in the chain is correct */
982 if (desc->td1.tcr & TCR_STP) {
983 if ((desc->td_info->flags & TD_FLAGS_NETIF_SKB) != 0) {
984 if (!(byTsr1 & TSR1_TERR)) {
985 if (byTsr0 != 0) {
986 pr_debug(" Tx[%d] OK but has error. tsr1[%02X] tsr0[%02X]\n",
987 (int)idx, byTsr1,
988 byTsr0);
989 }
990 } else {
991 pr_debug(" Tx[%d] dropped & tsr1[%02X] tsr0[%02X]\n",
992 (int)idx, byTsr1, byTsr0);
993 }
994 }
995
996 if (byTsr1 & TSR1_TERR) {
997 if ((desc->td_info->flags & TD_FLAGS_PRIV_SKB) != 0) {
998 pr_debug(" Tx[%d] fail has error. tsr1[%02X] tsr0[%02X]\n",
999 (int)idx, byTsr1, byTsr0);
1000 }
1001 }
1002
			vnt_int_report_rate(priv, desc->td_info, byTsr0, byTsr1);
1004
1005 device_free_tx_buf(priv, desc);
1006 priv->iTDUsed[idx]--;
1007 }
1008 }
1009
1010 priv->tail_td[idx] = desc;
1011
1012 return works;
1013}
1014
1015static void device_error(struct vnt_private *priv, unsigned short status)
1016{
1017 if (status & ISR_FETALERR) {
1018 dev_err(&priv->pcid->dev, "Hardware fatal error\n");
1019
1020 MACbShutdown(priv);
1021 return;
1022 }
1023}
1024
1025static void device_free_tx_buf(struct vnt_private *priv,
1026 struct vnt_tx_desc *desc)
1027{
1028 struct vnt_td_info *td_info = desc->td_info;
1029 struct sk_buff *skb = td_info->skb;
1030
1031 if (skb)
		ieee80211_tx_status_irqsafe(priv->hw, skb);
1033
1034 td_info->skb = NULL;
1035 td_info->flags = 0;
1036}
1037
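/*
 * vnt_check_bb_vga() maps the current RSSI to a VGA gain via the
 * dbm_threshold[] table; the new gain is written when it first differs from
 * the current one and again once the difference has persisted for
 * BB_VGA_CHANGE_THRESHOLD consecutive checks, which keeps the gain from
 * flapping on marginal signals.
 */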
1038static void vnt_check_bb_vga(struct vnt_private *priv)
1039{
1040 long dbm;
1041 int i;
1042
1043 if (!priv->update_bbvga)
1044 return;
1045
1046 if (priv->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
1047 return;
1048
1049 if (!(priv->vif->cfg.assoc && priv->current_rssi))
1050 return;
1051
	RFvRSSITodBm(priv, (u8)priv->current_rssi, &dbm);
1053
1054 for (i = 0; i < BB_VGA_LEVEL; i++) {
1055 if (dbm < priv->dbm_threshold[i]) {
1056 priv->bbvga_new = priv->bbvga[i];
1057 break;
1058 }
1059 }
1060
1061 if (priv->bbvga_new == priv->bbvga_current) {
1062 priv->uBBVGADiffCount = 1;
1063 return;
1064 }
1065
1066 priv->uBBVGADiffCount++;
1067
1068 if (priv->uBBVGADiffCount == 1) {
1069 /* first VGA diff gain */
		bb_set_vga_gain_offset(priv, priv->bbvga_new);
1071
1072 dev_dbg(&priv->pcid->dev,
1073 "First RSSI[%d] NewGain[%d] OldGain[%d] Count[%d]\n",
1074 (int)dbm, priv->bbvga_new,
1075 priv->bbvga_current,
1076 (int)priv->uBBVGADiffCount);
1077 }
1078
1079 if (priv->uBBVGADiffCount >= BB_VGA_CHANGE_THRESHOLD) {
1080 dev_dbg(&priv->pcid->dev,
1081 "RSSI[%d] NewGain[%d] OldGain[%d] Count[%d]\n",
1082 (int)dbm, priv->bbvga_new,
1083 priv->bbvga_current,
1084 (int)priv->uBBVGADiffCount);
1085
		bb_set_vga_gain_offset(priv, priv->bbvga_new);
1087 }
1088}
1089
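/*
 * Interrupt handling is split in two: vnt_interrupt() masks the IMR and
 * schedules interrupt_work, and vnt_interrupt_process() then reads and
 * acknowledges the ISR, folds the MIB counters into the low level stats,
 * services the RX/TX rings and loops until the ISR reads back clear or more
 * than opts.int_works descriptors have been serviced.
 */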
1090static void vnt_interrupt_process(struct vnt_private *priv)
1091{
1092 struct ieee80211_low_level_stats *low_stats = &priv->low_stats;
1093 int max_count = 0;
1094 u32 mib_counter;
1095 u32 isr;
1096 unsigned long flags;
1097
1098 isr = ioread32(priv->port_offset + MAC_REG_ISR);
1099
1100 if (isr == 0)
1101 return;
1102
1103 if (isr == 0xffffffff) {
		pr_debug("isr = 0xffffffff\n");
1105 return;
1106 }
1107
1108 spin_lock_irqsave(&priv->lock, flags);
1109
1110 /* Read low level stats */
1111 mib_counter = ioread32(priv->port_offset + MAC_REG_MIBCNTR);
1112
1113 low_stats->dot11RTSSuccessCount += mib_counter & 0xff;
1114 low_stats->dot11RTSFailureCount += (mib_counter >> 8) & 0xff;
1115 low_stats->dot11ACKFailureCount += (mib_counter >> 16) & 0xff;
1116 low_stats->dot11FCSErrorCount += (mib_counter >> 24) & 0xff;
1117
	/*
	 * TBD....
	 * Must do this after doing rx/tx, because the ISR bit updates
	 * more slowly than the RD/TD write back.
	 * Update the ISR counter.
	 */
1124 while (isr && priv->vif) {
1125 iowrite32(isr, priv->port_offset + MAC_REG_ISR);
1126
1127 if (isr & ISR_FETALERR) {
1128 pr_debug(" ISR_FETALERR\n");
1129 iowrite8(0, priv->port_offset + MAC_REG_SOFTPWRCTL);
1130 iowrite16(SOFTPWRCTL_SWPECTI, priv->port_offset + MAC_REG_SOFTPWRCTL);
			device_error(priv, isr);
1132 }
1133
1134 if (isr & ISR_TBTT) {
1135 if (priv->op_mode != NL80211_IFTYPE_ADHOC)
1136 vnt_check_bb_vga(priv);
1137
1138 priv->bBeaconSent = false;
1139 if (priv->bEnablePSMode)
				PSbIsNextTBTTWakeUp((void *)priv);
1141
1142 if ((priv->op_mode == NL80211_IFTYPE_AP ||
1143 priv->op_mode == NL80211_IFTYPE_ADHOC) &&
1144 priv->vif->bss_conf.enable_beacon)
				MACvOneShotTimer1MicroSec(priv,
							  (priv->vif->bss_conf.beacon_int -
							   MAKE_BEACON_RESERVED) << 10);
1148
1149 /* TODO: adhoc PS mode */
1150 }
1151
1152 if (isr & ISR_BNTX) {
1153 if (priv->op_mode == NL80211_IFTYPE_ADHOC) {
1154 priv->bIsBeaconBufReadySet = false;
1155 priv->cbBeaconBufReadySetCnt = 0;
1156 }
1157
1158 priv->bBeaconSent = true;
1159 }
1160
1161 if (isr & ISR_RXDMA0)
1162 max_count += device_rx_srv(priv, TYPE_RXDMA0);
1163
1164 if (isr & ISR_RXDMA1)
1165 max_count += device_rx_srv(priv, TYPE_RXDMA1);
1166
1167 if (isr & ISR_TXDMA0)
1168 max_count += device_tx_srv(priv, TYPE_TXDMA0);
1169
1170 if (isr & ISR_AC0DMA)
1171 max_count += device_tx_srv(priv, TYPE_AC0DMA);
1172
1173 if (isr & ISR_SOFTTIMER1) {
1174 if (priv->vif->bss_conf.enable_beacon)
				vnt_beacon_make(priv, priv->vif);
1176 }
1177
1178 /* If both buffers available wake the queue */
		if (AVAIL_TD(priv, TYPE_TXDMA0) &&
		    AVAIL_TD(priv, TYPE_AC0DMA) &&
		    ieee80211_queue_stopped(priv->hw, 0))
			ieee80211_wake_queues(priv->hw);

		isr = ioread32(priv->port_offset + MAC_REG_ISR);

		vt6655_mac_dma_ctl(priv->port_offset, MAC_REG_RXDMACTL0);
		vt6655_mac_dma_ctl(priv->port_offset, MAC_REG_RXDMACTL1);
1188
1189 if (max_count > priv->opts.int_works)
1190 break;
1191 }
1192
	spin_unlock_irqrestore(&priv->lock, flags);
1194}
1195
1196static void vnt_interrupt_work(struct work_struct *work)
1197{
1198 struct vnt_private *priv =
1199 container_of(work, struct vnt_private, interrupt_work);
1200
1201 if (priv->vif)
1202 vnt_interrupt_process(priv);
1203
1204 iowrite32(IMR_MASK_VALUE, priv->port_offset + MAC_REG_IMR);
1205}
1206
1207static irqreturn_t vnt_interrupt(int irq, void *arg)
1208{
1209 struct vnt_private *priv = arg;
1210
	schedule_work(&priv->interrupt_work);
1212
1213 iowrite32(0, priv->port_offset + MAC_REG_IMR);
1214
1215 return IRQ_HANDLED;
1216}
1217
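/*
 * vnt_tx_packet() picks the DMA ring from the frame type (data frames go to
 * AC0DMA, everything else to TXDMA0), generates the tx fifo header via
 * vnt_generate_fifo_header() with the lock dropped, then hands the
 * descriptor to the NIC; the wmb() before flipping the owner bit makes sure
 * the descriptor contents are visible to the device before DMA starts.
 */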
1218static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
1219{
1220 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1221 struct vnt_tx_desc *head_td;
1222 u32 dma_idx;
1223 unsigned long flags;
1224
1225 spin_lock_irqsave(&priv->lock, flags);
1226
	if (ieee80211_is_data(hdr->frame_control))
1228 dma_idx = TYPE_AC0DMA;
1229 else
1230 dma_idx = TYPE_TXDMA0;
1231
	if (AVAIL_TD(priv, dma_idx) < 1) {
		spin_unlock_irqrestore(&priv->lock, flags);
		ieee80211_stop_queues(priv->hw);
1235 return -ENOMEM;
1236 }
1237
1238 head_td = priv->apCurrTD[dma_idx];
1239
1240 head_td->td1.tcr = 0;
1241
1242 head_td->td_info->skb = skb;
1243
1244 if (dma_idx == TYPE_AC0DMA)
1245 head_td->td_info->flags = TD_FLAGS_NETIF_SKB;
1246
1247 priv->apCurrTD[dma_idx] = head_td->next;
1248
	spin_unlock_irqrestore(&priv->lock, flags);
1250
1251 vnt_generate_fifo_header(priv, dma_idx, head_td, skb);
1252
1253 spin_lock_irqsave(&priv->lock, flags);
1254
1255 priv->bPWBitOn = false;
1256
1257 /* Set TSR1 & ReqCount in TxDescHead */
1258 head_td->td1.tcr |= (TCR_STP | TCR_EDP | EDMSDU);
1259 head_td->td1.req_count = cpu_to_le16(head_td->td_info->req_count);
1260
1261 head_td->buff_addr = cpu_to_le32(head_td->td_info->buf_dma);
1262
1263 /* Poll Transmit the adapter */
1264 wmb();
1265 head_td->td0.owner = OWNED_BY_NIC;
1266 wmb(); /* second memory barrier */
1267
	if (head_td->td_info->flags & TD_FLAGS_NETIF_SKB)
		vt6655_mac_dma_ctl(priv->port_offset, MAC_REG_AC0DMACTL);
	else
		vt6655_mac_dma_ctl(priv->port_offset, MAC_REG_TXDMACTL0);
1272
1273 priv->iTDUsed[dma_idx]++;
1274
	spin_unlock_irqrestore(&priv->lock, flags);
1276
1277 return 0;
1278}
1279
1280static void vnt_tx_80211(struct ieee80211_hw *hw,
1281 struct ieee80211_tx_control *control,
1282 struct sk_buff *skb)
1283{
1284 struct vnt_private *priv = hw->priv;
1285
1286 if (vnt_tx_packet(priv, skb))
1287 ieee80211_free_txskb(hw, skb);
1288}
1289
1290static int vnt_start(struct ieee80211_hw *hw)
1291{
1292 struct vnt_private *priv = hw->priv;
1293 int ret;
1294
1295 priv->rx_buf_sz = PKT_BUF_SZ;
1296 if (!device_init_rings(priv))
1297 return -ENOMEM;
1298
	ret = request_irq(priv->pcid->irq, vnt_interrupt,
			  IRQF_SHARED, "vt6655", priv);
1301 if (ret) {
1302 dev_dbg(&priv->pcid->dev, "failed to start irq\n");
1303 goto err_free_rings;
1304 }
1305
1306 dev_dbg(&priv->pcid->dev, "call device init rd0 ring\n");
1307 ret = device_init_rd0_ring(priv);
1308 if (ret)
1309 goto err_free_irq;
1310 ret = device_init_rd1_ring(priv);
1311 if (ret)
1312 goto err_free_rd0_ring;
1313 ret = device_init_td0_ring(priv);
1314 if (ret)
1315 goto err_free_rd1_ring;
1316 ret = device_init_td1_ring(priv);
1317 if (ret)
1318 goto err_free_td0_ring;
1319
1320 device_init_registers(priv);
1321
1322 dev_dbg(&priv->pcid->dev, "enable MAC interrupt\n");
1323 iowrite32(IMR_MASK_VALUE, priv->port_offset + MAC_REG_IMR);
1324
1325 ieee80211_wake_queues(hw);
1326
1327 return 0;
1328
1329err_free_td0_ring:
1330 device_free_td0_ring(priv);
1331err_free_rd1_ring:
1332 device_free_rd1_ring(priv);
1333err_free_rd0_ring:
1334 device_free_rd0_ring(priv);
1335err_free_irq:
1336 free_irq(priv->pcid->irq, priv);
1337err_free_rings:
1338 device_free_rings(priv);
1339 return ret;
1340}
1341
1342static void vnt_stop(struct ieee80211_hw *hw)
1343{
1344 struct vnt_private *priv = hw->priv;
1345
1346 ieee80211_stop_queues(hw);
1347
	cancel_work_sync(&priv->interrupt_work);
1349
1350 MACbShutdown(priv);
1351 MACbSoftwareReset(priv);
1352 card_radio_power_off(priv);
1353
1354 device_free_td0_ring(priv);
1355 device_free_td1_ring(priv);
1356 device_free_rd0_ring(priv);
1357 device_free_rd1_ring(priv);
1358 device_free_rings(priv);
1359
1360 free_irq(priv->pcid->irq, priv);
1361}
1362
1363static int vnt_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1364{
1365 struct vnt_private *priv = hw->priv;
1366
1367 priv->vif = vif;
1368
1369 switch (vif->type) {
1370 case NL80211_IFTYPE_STATION:
1371 break;
	case NL80211_IFTYPE_ADHOC:
		vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_RCR, RCR_UNICAST);

		vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_ADHOC);

		break;
	case NL80211_IFTYPE_AP:
		vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_RCR, RCR_UNICAST);

		vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_AP);
1382
1383 break;
1384 default:
1385 return -EOPNOTSUPP;
1386 }
1387
1388 priv->op_mode = vif->type;
1389
1390 return 0;
1391}
1392
1393static void vnt_remove_interface(struct ieee80211_hw *hw,
1394 struct ieee80211_vif *vif)
1395{
1396 struct vnt_private *priv = hw->priv;
1397
1398 switch (vif->type) {
1399 case NL80211_IFTYPE_STATION:
1400 break;
1401 case NL80211_IFTYPE_ADHOC:
		vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_TCR, TCR_AUTOBCNTX);
		vt6655_mac_reg_bits_off(priv->port_offset,
					MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
		vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_ADHOC);
		break;
	case NL80211_IFTYPE_AP:
		vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_TCR, TCR_AUTOBCNTX);
		vt6655_mac_reg_bits_off(priv->port_offset,
					MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
		vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_AP);
1412 break;
1413 default:
1414 break;
1415 }
1416
1417 priv->op_mode = NL80211_IFTYPE_UNSPECIFIED;
1418}
1419
1420static int vnt_config(struct ieee80211_hw *hw, u32 changed)
1421{
1422 struct vnt_private *priv = hw->priv;
1423 struct ieee80211_conf *conf = &hw->conf;
1424 u8 bb_type;
1425
1426 if (changed & IEEE80211_CONF_CHANGE_PS) {
1427 if (conf->flags & IEEE80211_CONF_PS)
			PSvEnablePowerSaving(priv, conf->listen_interval);
1429 else
1430 PSvDisablePowerSaving(priv);
1431 }
1432
1433 if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) ||
1434 (conf->flags & IEEE80211_CONF_OFFCHANNEL)) {
		set_channel(priv, conf->chandef.chan);
1436
1437 if (conf->chandef.chan->band == NL80211_BAND_5GHZ)
1438 bb_type = BB_TYPE_11A;
1439 else
1440 bb_type = BB_TYPE_11G;
1441
1442 if (priv->byBBType != bb_type) {
1443 priv->byBBType = bb_type;
1444
			card_set_phy_parameter(priv, priv->byBBType);
1446 }
1447 }
1448
1449 if (changed & IEEE80211_CONF_CHANGE_POWER) {
1450 if (priv->byBBType == BB_TYPE_11B)
1451 priv->wCurrentRate = RATE_1M;
1452 else
1453 priv->wCurrentRate = RATE_54M;
1454
		RFbSetPower(priv, priv->wCurrentRate,
			    conf->chandef.chan->hw_value);
1457 }
1458
1459 return 0;
1460}
1461
1462static void vnt_bss_info_changed(struct ieee80211_hw *hw,
1463 struct ieee80211_vif *vif,
1464 struct ieee80211_bss_conf *conf, u64 changed)
1465{
1466 struct vnt_private *priv = hw->priv;
1467
1468 priv->current_aid = vif->cfg.aid;
1469
1470 if (changed & BSS_CHANGED_BSSID && conf->bssid) {
1471 unsigned long flags;
1472
1473 spin_lock_irqsave(&priv->lock, flags);
1474
		vt6655_mac_write_bssid_addr(priv->port_offset, conf->bssid);

		spin_unlock_irqrestore(&priv->lock, flags);
1478 }
1479
1480 if (changed & BSS_CHANGED_BASIC_RATES) {
1481 priv->basic_rates = conf->basic_rates;
1482
1483 CARDvUpdateBasicTopRate(priv);
1484
1485 dev_dbg(&priv->pcid->dev,
1486 "basic rates %x\n", conf->basic_rates);
1487 }
1488
1489 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
1490 if (conf->use_short_preamble) {
			vt6655_mac_en_barker_preamble_md(priv->port_offset);
			priv->preamble_type = true;
		} else {
			vt6655_mac_dis_barker_preamble_md(priv->port_offset);
1495 priv->preamble_type = false;
1496 }
1497 }
1498
1499 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
1500 if (conf->use_cts_prot)
			vt6655_mac_en_protect_md(priv->port_offset);
		else
			vt6655_mac_dis_protect_md(priv->port_offset);
1504 }
1505
1506 if (changed & BSS_CHANGED_ERP_SLOT) {
1507 if (conf->use_short_slot)
1508 priv->short_slot_time = true;
1509 else
1510 priv->short_slot_time = false;
1511
		card_set_phy_parameter(priv, priv->byBBType);
		bb_set_vga_gain_offset(priv, priv->bbvga[0]);
1514 }
1515
1516 if (changed & BSS_CHANGED_TXPOWER)
		RFbSetPower(priv, priv->wCurrentRate,
			    conf->chanreq.oper.chan->hw_value);
1519
1520 if (changed & BSS_CHANGED_BEACON_ENABLED) {
1521 dev_dbg(&priv->pcid->dev,
1522 "Beacon enable %d\n", conf->enable_beacon);
1523
1524 if (conf->enable_beacon) {
1525 vnt_beacon_enable(priv, vif, conf);
1526
			vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_TCR, TCR_AUTOBCNTX);
		} else {
			vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_TCR,
						TCR_AUTOBCNTX);
1531 }
1532 }
1533
1534 if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INFO) &&
1535 priv->op_mode != NL80211_IFTYPE_AP) {
1536 if (vif->cfg.assoc && conf->beacon_rate) {
			card_update_tsf(priv, conf->beacon_rate->hw_value,
					conf->sync_tsf);

			card_set_beacon_period(priv, conf->beacon_int);

			CARDvSetFirstNextTBTT(priv, conf->beacon_int);
1543 } else {
1544 iowrite8(TFTCTL_TSFCNTRST, priv->port_offset + MAC_REG_TFTCTL);
1545 iowrite8(TFTCTL_TSFCNTREN, priv->port_offset + MAC_REG_TFTCTL);
1546 }
1547 }
1548}
1549
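/*
 * Multicast filtering uses the usual 64-bit hash: the top six bits of the
 * Ethernet CRC of each address select one bit of the MAR0/MAR1 registers,
 * and vnt_configure() programs either that hash or the all-ones pattern
 * (when more than two addresses are present) into page 1 of the MAC.
 */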
1550static u64 vnt_prepare_multicast(struct ieee80211_hw *hw,
1551 struct netdev_hw_addr_list *mc_list)
1552{
1553 struct vnt_private *priv = hw->priv;
1554 struct netdev_hw_addr *ha;
1555 u64 mc_filter = 0;
1556 u32 bit_nr = 0;
1557
1558 netdev_hw_addr_list_for_each(ha, mc_list) {
1559 bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
1560
1561 mc_filter |= 1ULL << (bit_nr & 0x3f);
1562 }
1563
1564 priv->mc_list_count = mc_list->count;
1565
1566 return mc_filter;
1567}
1568
1569static void vnt_configure(struct ieee80211_hw *hw,
1570 unsigned int changed_flags,
1571 unsigned int *total_flags, u64 multicast)
1572{
1573 struct vnt_private *priv = hw->priv;
1574 u8 rx_mode = 0;
1575
1576 *total_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC;
1577
1578 rx_mode = ioread8(priv->port_offset + MAC_REG_RCR);
1579
1580 dev_dbg(&priv->pcid->dev, "rx mode in = %x\n", rx_mode);
1581
1582 if (changed_flags & FIF_ALLMULTI) {
1583 if (*total_flags & FIF_ALLMULTI) {
1584 unsigned long flags;
1585
1586 spin_lock_irqsave(&priv->lock, flags);
1587
1588 if (priv->mc_list_count > 2) {
1589 VT6655_MAC_SELECT_PAGE1(priv->port_offset);
1590
1591 iowrite32(0xffffffff, priv->port_offset + MAC_REG_MAR0);
1592 iowrite32(0xffffffff, priv->port_offset + MAC_REG_MAR0 + 4);
1593
1594 VT6655_MAC_SELECT_PAGE0(priv->port_offset);
1595 } else {
1596 VT6655_MAC_SELECT_PAGE1(priv->port_offset);
1597
1598 multicast = le64_to_cpu(multicast);
1599 iowrite32((u32)multicast, priv->port_offset + MAC_REG_MAR0);
1600 iowrite32((u32)(multicast >> 32),
1601 priv->port_offset + MAC_REG_MAR0 + 4);
1602
1603 VT6655_MAC_SELECT_PAGE0(priv->port_offset);
1604 }
1605
			spin_unlock_irqrestore(&priv->lock, flags);
1607
1608 rx_mode |= RCR_MULTICAST | RCR_BROADCAST;
1609 } else {
1610 rx_mode &= ~(RCR_MULTICAST | RCR_BROADCAST);
1611 }
1612 }
1613
1614 if (changed_flags & (FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC)) {
1615 rx_mode |= RCR_MULTICAST | RCR_BROADCAST;
1616
1617 if (*total_flags & (FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC))
1618 rx_mode &= ~RCR_BSSID;
1619 else
1620 rx_mode |= RCR_BSSID;
1621 }
1622
1623 iowrite8(rx_mode, priv->port_offset + MAC_REG_RCR);
1624
1625 dev_dbg(&priv->pcid->dev, "rx mode out= %x\n", rx_mode);
1626}
1627
1628static int vnt_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1629 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
1630 struct ieee80211_key_conf *key)
1631{
1632 struct vnt_private *priv = hw->priv;
1633
1634 switch (cmd) {
1635 case SET_KEY:
1636 if (vnt_set_keys(hw, sta, vif, key))
1637 return -EOPNOTSUPP;
1638 break;
1639 case DISABLE_KEY:
1640 if (test_bit(key->hw_key_idx, &priv->key_entry_inuse))
			clear_bit(key->hw_key_idx, &priv->key_entry_inuse);
1642 break;
1643 default:
1644 break;
1645 }
1646
1647 return 0;
1648}
1649
1650static int vnt_get_stats(struct ieee80211_hw *hw,
1651 struct ieee80211_low_level_stats *stats)
1652{
1653 struct vnt_private *priv = hw->priv;
1654
1655 memcpy(stats, &priv->low_stats, sizeof(*stats));
1656
1657 return 0;
1658}
1659
1660static u64 vnt_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1661{
1662 struct vnt_private *priv = hw->priv;
1663 u64 tsf;
1664
1665 tsf = vt6655_get_current_tsf(priv);
1666
1667 return tsf;
1668}
1669
1670static void vnt_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1671 u64 tsf)
1672{
1673 struct vnt_private *priv = hw->priv;
1674
	CARDvUpdateNextTBTT(priv, tsf, vif->bss_conf.beacon_int);
1676}
1677
1678static void vnt_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1679{
1680 struct vnt_private *priv = hw->priv;
1681
1682 /* reset TSF counter */
1683 iowrite8(TFTCTL_TSFCNTRST, priv->port_offset + MAC_REG_TFTCTL);
1684}
1685
1686static const struct ieee80211_ops vnt_mac_ops = {
1687 .add_chanctx = ieee80211_emulate_add_chanctx,
1688 .remove_chanctx = ieee80211_emulate_remove_chanctx,
1689 .change_chanctx = ieee80211_emulate_change_chanctx,
1690 .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
1691 .tx = vnt_tx_80211,
1692 .wake_tx_queue = ieee80211_handle_wake_tx_queue,
1693 .start = vnt_start,
1694 .stop = vnt_stop,
1695 .add_interface = vnt_add_interface,
1696 .remove_interface = vnt_remove_interface,
1697 .config = vnt_config,
1698 .bss_info_changed = vnt_bss_info_changed,
1699 .prepare_multicast = vnt_prepare_multicast,
1700 .configure_filter = vnt_configure,
1701 .set_key = vnt_set_key,
1702 .get_stats = vnt_get_stats,
1703 .get_tsf = vnt_get_tsf,
1704 .set_tsf = vnt_set_tsf,
1705 .reset_tsf = vnt_reset_tsf,
1706};
1707
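/*
 * vnt_init() finishes the mac80211 side of probe: it advertises the
 * permanent MAC address recovered earlier in probe, registers the supported
 * bands and the hw itself, and leaves the radio powered off until
 * vnt_start() brings the interface up.
 */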
1708static int vnt_init(struct vnt_private *priv)
1709{
	SET_IEEE80211_PERM_ADDR(priv->hw, priv->abyCurrentNetAddr);

	vnt_init_bands(priv);

	if (ieee80211_register_hw(priv->hw))
1715 return -ENODEV;
1716
1717 priv->mac_hw = true;
1718
1719 card_radio_power_off(priv);
1720
1721 return 0;
1722}
1723
1724static int
1725vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent)
1726{
1727 struct vnt_private *priv;
1728 struct ieee80211_hw *hw;
1729 struct wiphy *wiphy;
1730 int rc;
1731
1732 dev_notice(&pcid->dev,
1733 "%s Ver. %s\n", DEVICE_FULL_DRV_NAM, DEVICE_VERSION);
1734
1735 dev_notice(&pcid->dev,
1736 "Copyright (c) 2003 VIA Networking Technologies, Inc.\n");
1737
	hw = ieee80211_alloc_hw(sizeof(*priv), &vnt_mac_ops);
1739 if (!hw) {
1740 dev_err(&pcid->dev, "could not register ieee80211_hw\n");
1741 return -ENOMEM;
1742 }
1743
1744 priv = hw->priv;
1745 priv->pcid = pcid;
1746
1747 spin_lock_init(&priv->lock);
1748
1749 priv->hw = hw;
1750
	SET_IEEE80211_DEV(priv->hw, &pcid->dev);

	if (pci_enable_device(pcid)) {
1754 device_free_info(priv);
1755 return -ENODEV;
1756 }
1757
1758 dev_dbg(&pcid->dev,
1759 "Before get pci_info memaddr is %x\n", priv->memaddr);
1760
	pci_set_master(pcid);

	priv->memaddr = pci_resource_start(pcid, 0);
	priv->ioaddr = pci_resource_start(pcid, 1);
	priv->port_offset = ioremap(priv->memaddr & PCI_BASE_ADDRESS_MEM_MASK,
				    256);
1767 if (!priv->port_offset) {
1768 dev_err(&pcid->dev, ": Failed to IO remapping ..\n");
1769 device_free_info(priv);
1770 return -ENODEV;
1771 }
1772
1773 rc = pci_request_regions(pcid, DEVICE_NAME);
1774 if (rc) {
1775 dev_err(&pcid->dev, ": Failed to find PCI device\n");
1776 device_free_info(priv);
1777 return -ENODEV;
1778 }
1779
	if (dma_set_mask(&pcid->dev, DMA_BIT_MASK(32))) {
1781 dev_err(&pcid->dev, ": Failed to set dma 32 bit mask\n");
1782 device_free_info(priv);
1783 return -ENODEV;
1784 }
1785
1786 INIT_WORK(&priv->interrupt_work, vnt_interrupt_work);
1787
1788 /* do reset */
1789 if (!MACbSoftwareReset(priv)) {
1790 dev_err(&pcid->dev, ": Failed to access MAC hardware..\n");
1791 device_free_info(priv);
1792 return -ENODEV;
1793 }
1794 /* initial to reload eeprom */
1795 MACvInitialize(priv);
	vt6655_mac_read_ether_addr(priv->port_offset, priv->abyCurrentNetAddr);
1797
1798 /* Get RFType */
	priv->rf_type = SROMbyReadEmbedded(priv->port_offset, EEP_OFS_RFTYPE);
1800 priv->rf_type &= RF_MASK;
1801
1802 dev_dbg(&pcid->dev, "RF Type = %x\n", priv->rf_type);
1803
1804 device_get_options(priv);
1805 device_set_options(priv);
1806
1807 wiphy = priv->hw->wiphy;
1808
1809 wiphy->frag_threshold = FRAG_THRESH_DEF;
1810 wiphy->rts_threshold = RTS_THRESH_DEF;
1811 wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
1812 BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP);
1813
1814 ieee80211_hw_set(priv->hw, TIMING_BEACON_ONLY);
1815 ieee80211_hw_set(priv->hw, SIGNAL_DBM);
1816 ieee80211_hw_set(priv->hw, RX_INCLUDES_FCS);
1817 ieee80211_hw_set(priv->hw, REPORTS_TX_ACK_STATUS);
1818 ieee80211_hw_set(priv->hw, SUPPORTS_PS);
1819
1820 priv->hw->max_signal = 100;
1821
1822 if (vnt_init(priv)) {
1823 device_free_info(priv);
1824 return -ENODEV;
1825 }
1826
1827 device_print_info(priv);
	pci_set_drvdata(pcid, priv);
1829
1830 return 0;
1831}
1832
1833/*------------------------------------------------------------------*/
1834
1835static int __maybe_unused vt6655_suspend(struct device *dev_d)
1836{
	struct vnt_private *priv = dev_get_drvdata(dev_d);
1838 unsigned long flags;
1839
1840 spin_lock_irqsave(&priv->lock, flags);
1841
1842 MACbShutdown(priv);
1843
	spin_unlock_irqrestore(&priv->lock, flags);
1845
1846 return 0;
1847}
1848
1849static int __maybe_unused vt6655_resume(struct device *dev_d)
1850{
	device_wakeup_disable(dev_d);
1852
1853 return 0;
1854}
1855
1856MODULE_DEVICE_TABLE(pci, vt6655_pci_id_table);
1857
1858static SIMPLE_DEV_PM_OPS(vt6655_pm_ops, vt6655_suspend, vt6655_resume);
1859
1860static struct pci_driver device_driver = {
1861 .name = DEVICE_NAME,
1862 .id_table = vt6655_pci_id_table,
1863 .probe = vt6655_probe,
1864 .remove = vt6655_remove,
1865 .driver.pm = &vt6655_pm_ops,
1866};
1867
1868module_pci_driver(device_driver);