// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * sata_via.c - VIA Serial ATA controllers
 *
 * Maintained by: Tejun Heo <tj@kernel.org>
 * Please ALWAYS copy linux-ide@vger.kernel.org
 * on emails.
 *
 * Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 * Copyright 2003-2004 Jeff Garzik
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/driver-api/libata.rst
 *
 * Hardware documentation available under NDA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_via"
#define DRV_VERSION	"2.6"

/*
 * vt8251 is different from other VIA SATA controllers.  It has two
 * channels, and each channel has both a Master and a Slave slot.
 */
enum board_ids_enum {
	vt6420,
	vt6421,
	vt8251,
};

enum {
	SATA_CHAN_ENAB		= 0x40, /* SATA channel enable */
	SATA_INT_GATE		= 0x41, /* SATA interrupt gating */
	SATA_NATIVE_MODE	= 0x42, /* Native mode enable */
	SVIA_MISC_3		= 0x46, /* Miscellaneous Control III */
	PATA_UDMA_TIMING	= 0xB3, /* PATA timing for DMA/cable detect */
	PATA_PIO_TIMING		= 0xAB, /* PATA timing register */

	PORT0			= (1 << 1),
	PORT1			= (1 << 0),
	ALL_PORTS		= PORT0 | PORT1,

	NATIVE_MODE_ALL		= (1 << 7) | (1 << 6) | (1 << 5) | (1 << 4),

	SATA_EXT_PHY		= (1 << 6), /* 0==use PATA, 1==ext phy */

	SATA_HOTPLUG		= (1 << 5), /* enable IRQ on hotplug */
};

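/*
 * Per-host private data: wd_workaround records whether the FIFO
 * watermark fix (svia_wd_fix()) has already been applied, so that it
 * is re-applied on resume and not re-triggered by the error handler.
 */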
struct svia_priv {
	bool			wd_workaround;
};

static int vt6420_hotplug;
module_param_named(vt6420_hotplug, vt6420_hotplug, int, 0644);
MODULE_PARM_DESC(vt6420_hotplug, "Enable hot-plug support for VT6420 (0=Don't support, 1=support)");

static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM_SLEEP
static int svia_pci_device_resume(struct pci_dev *pdev);
#endif
static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int vt8251_scr_read(struct ata_link *link, unsigned int scr, u32 *val);
static int vt8251_scr_write(struct ata_link *link, unsigned int scr, u32 val);
static void svia_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
static void svia_noop_freeze(struct ata_port *ap);
static int vt6420_prereset(struct ata_link *link, unsigned long deadline);
static void vt6420_bmdma_start(struct ata_queued_cmd *qc);
static int vt6421_pata_cable_detect(struct ata_port *ap);
static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev);
static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev);
static void vt6421_error_handler(struct ata_port *ap);

static const struct pci_device_id svia_pci_tbl[] = {
	{ PCI_VDEVICE(VIA, 0x5337), vt6420 },
	{ PCI_VDEVICE(VIA, 0x0591), vt6420 },	/* 2 sata chnls (Master) */
	{ PCI_VDEVICE(VIA, 0x3149), vt6420 },	/* 2 sata chnls (Master) */
	{ PCI_VDEVICE(VIA, 0x3249), vt6421 },	/* 2 sata chnls, 1 pata chnl */
	{ PCI_VDEVICE(VIA, 0x5372), vt6420 },
	{ PCI_VDEVICE(VIA, 0x7372), vt6420 },
	{ PCI_VDEVICE(VIA, 0x5287), vt8251 },	/* 2 sata chnls (Master/Slave) */
	{ PCI_VDEVICE(VIA, 0x9000), vt8251 },

	{ }	/* terminate list */
};

static struct pci_driver svia_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= svia_pci_tbl,
	.probe			= svia_init_one,
#ifdef CONFIG_PM_SLEEP
	.suspend		= ata_pci_device_suspend,
	.resume			= svia_pci_device_resume,
#endif
	.remove			= ata_pci_remove_one,
};

static const struct scsi_host_template svia_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

static struct ata_port_operations svia_base_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.sff_tf_load		= svia_tf_load,
};

static struct ata_port_operations vt6420_sata_ops = {
	.inherits		= &svia_base_ops,
	.freeze			= svia_noop_freeze,
	.prereset		= vt6420_prereset,
	.bmdma_start		= vt6420_bmdma_start,
};

static struct ata_port_operations vt6421_pata_ops = {
	.inherits		= &svia_base_ops,
	.cable_detect		= vt6421_pata_cable_detect,
	.set_piomode		= vt6421_set_pio_mode,
	.set_dmamode		= vt6421_set_dma_mode,
};

static struct ata_port_operations vt6421_sata_ops = {
	.inherits		= &svia_base_ops,
	.scr_read		= svia_scr_read,
	.scr_write		= svia_scr_write,
	.error_handler		= vt6421_error_handler,
};

static struct ata_port_operations vt8251_ops = {
	.inherits		= &svia_base_ops,
	.hardreset		= sata_std_hardreset,
	.scr_read		= vt8251_scr_read,
	.scr_write		= vt8251_scr_write,
};

static const struct ata_port_info vt6420_port_info = {
	.flags		= ATA_FLAG_SATA,
	.pio_mask	= ATA_PIO4,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &vt6420_sata_ops,
};

static const struct ata_port_info vt6421_sport_info = {
	.flags		= ATA_FLAG_SATA,
	.pio_mask	= ATA_PIO4,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &vt6421_sata_ops,
};

static const struct ata_port_info vt6421_pport_info = {
	.flags		= ATA_FLAG_SLAVE_POSS,
	.pio_mask	= ATA_PIO4,
	/* No MWDMA */
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &vt6421_pata_ops,
};

static const struct ata_port_info vt8251_port_info = {
	.flags		= ATA_FLAG_SATA | ATA_FLAG_SLAVE_POSS,
	.pio_mask	= ATA_PIO4,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &vt8251_ops,
};

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("SCSI low-level driver for VIA SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, svia_pci_tbl);
MODULE_VERSION(DRV_VERSION);

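/*
 * On vt6420 (with hotplug enabled) and vt6421 the SCR registers are
 * memory-mapped through BAR 5: each port's SStatus, SError and
 * SControl are consecutive 32-bit registers starting at
 * ioaddr.scr_addr.
 */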
static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;
	*val = ioread32(link->ap->ioaddr.scr_addr + (4 * sc_reg));
	return 0;
}

static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;
	iowrite32(val, link->ap->ioaddr.scr_addr + (4 * sc_reg));
	return 0;
}

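/*
 * vt8251 has no memory-mapped SCR block; link status and control are
 * synthesized from PCI configuration space: one byte per slot at
 * 0xA0 (status) and 0xA4 (control), plus one dword per slot at 0xB0
 * for SError on device 0x5287.
 */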
static int vt8251_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
{
	static const u8 ipm_tbl[] = { 1, 2, 6, 0 };
	struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
	int slot = 2 * link->ap->port_no + link->pmp;
	u32 v = 0;
	u8 raw;

	switch (scr) {
	case SCR_STATUS:
		pci_read_config_byte(pdev, 0xA0 + slot, &raw);

		/* read the DET field, bit0 and 1 of the config byte */
		v |= raw & 0x03;

		/* read the SPD field, bit4 of the config byte */
		if (raw & (1 << 4))
			v |= 0x02 << 4;
		else
			v |= 0x01 << 4;

		/* read the IPM field, bit2 and 3 of the config byte */
		v |= ipm_tbl[(raw >> 2) & 0x3];
		break;

	case SCR_ERROR:
		/* devices other than 5287 use 0xA8 as base */
		WARN_ON(pdev->device != 0x5287);
		pci_read_config_dword(pdev, 0xB0 + slot * 4, &v);
		break;

	case SCR_CONTROL:
		pci_read_config_byte(pdev, 0xA4 + slot, &raw);

		/* read the DET field, bit0 and bit1 */
		v |= ((raw & 0x02) << 1) | (raw & 0x01);

		/* read the IPM field, bit2 and bit3 */
		v |= ((raw >> 2) & 0x03) << 8;
		break;

	default:
		return -EINVAL;
	}

	*val = v;
	return 0;
}

static int vt8251_scr_write(struct ata_link *link, unsigned int scr, u32 val)
{
	struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
	int slot = 2 * link->ap->port_no + link->pmp;
	u32 v = 0;

	switch (scr) {
	case SCR_ERROR:
		/* devices other than 5287 use 0xA8 as base */
		WARN_ON(pdev->device != 0x5287);
		pci_write_config_dword(pdev, 0xB0 + slot * 4, val);
		return 0;

	case SCR_CONTROL:
		/* set the DET field */
		v |= ((val & 0x4) >> 1) | (val & 0x1);

		/* set the IPM field */
		v |= ((val >> 8) & 0x3) << 2;

		pci_write_config_byte(pdev, 0xA4 + slot, v);
		return 0;

	default:
		return -EINVAL;
	}
}

/**
 * svia_tf_load - send taskfile registers to host controller
 * @ap: Port to which output is sent
 * @tf: ATA taskfile register set
 *
 * Outputs ATA taskfile to standard ATA host controller.
 *
 * This works around an internal bug of VIA chipsets, which reset
 * the device register after the IEN bit in the ctl register is
 * changed: whenever ctl changes, the device register is rewritten
 * as well.
 */
static void svia_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_taskfile ttf;

	if (tf->ctl != ap->last_ctl) {
		ttf = *tf;
		ttf.flags |= ATA_TFLAG_DEVICE;
		tf = &ttf;
	}
	ata_sff_tf_load(ap, tf);
}

static void svia_noop_freeze(struct ata_port *ap)
{
	/* Some VIA controllers choke if ATA_NIEN is manipulated in
	 * certain ways.  Leave it alone and just clear pending IRQ.
	 */
	ap->ops->sff_check_status(ap);
	ata_bmdma_irq_clear(ap);
}

/**
 * vt6420_prereset - prereset for vt6420
 * @link: target ATA link
 * @deadline: deadline jiffies for the operation
 *
 * SCR registers on vt6420 are unreliable and may hang the whole
 * machine if accessed with the wrong timing.  To avoid such a
 * catastrophe, vt6420 doesn't provide generic SCR access
 * operations, but touches SStatus and SControl only during boot
 * probing, in a controlled way.
 *
 * As the old (pre EH update) probing code is proven to work, we
 * strictly follow the access pattern.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
static int vt6420_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &ap->link.eh_context;
	unsigned long timeout = jiffies + (HZ * 5);
	u32 sstatus, scontrol;
	int online;

	/* don't do any SCR stuff if we're not loading */
	if (!(ap->pflags & ATA_PFLAG_LOADING))
		goto skip_scr;

	/* Resume phy.  This is the old SATA resume sequence */
	svia_scr_write(link, SCR_CONTROL, 0x300);
	svia_scr_read(link, SCR_CONTROL, &scontrol); /* flush */

	/* wait for phy to become ready, if necessary */
	do {
		ata_msleep(link->ap, 200);
		svia_scr_read(link, SCR_STATUS, &sstatus);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* open code sata_print_link_status() */
	svia_scr_read(link, SCR_STATUS, &sstatus);
	svia_scr_read(link, SCR_CONTROL, &scontrol);

	online = (sstatus & 0xf) == 0x3;

	ata_port_info(ap,
		      "SATA link %s 1.5 Gbps (SStatus %X SControl %X)\n",
		      online ? "up" : "down", sstatus, scontrol);

	/* SStatus is read one more time */
	svia_scr_read(link, SCR_STATUS, &sstatus);

	if (!online) {
		/* tell EH to bail */
		ehc->i.action &= ~ATA_EH_RESET;
		return 0;
	}

 skip_scr:
	/* wait for !BSY */
	ata_sff_wait_ready(link, deadline);

	return 0;
}

static void vt6420_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	if ((qc->tf.command == ATA_CMD_PACKET) &&
	    (qc->scsicmd->sc_data_direction == DMA_TO_DEVICE)) {
		/* Prevents corruption on some ATAPI burners */
		ata_sff_pause(ap);
	}
	ata_bmdma_start(qc);
}

static int vt6421_pata_cable_detect(struct ata_port *ap)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u8 tmp;

	pci_read_config_byte(pdev, PATA_UDMA_TIMING, &tmp);
	if (tmp & 0x10)
		return ATA_CBL_PATA40;
	return ATA_CBL_PATA80;
}

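/*
 * PATA timings are programmed through one config byte per device:
 * PATA_PIO_TIMING (0xAB) for device 0 and 0xAA for device 1, and
 * likewise PATA_UDMA_TIMING (0xB3/0xB2).  The tables below hold the
 * raw timing value for each transfer mode.
 */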
static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	static const u8 pio_bits[] = { 0xA8, 0x65, 0x65, 0x31, 0x20 };

	pci_write_config_byte(pdev, PATA_PIO_TIMING - adev->devno,
			      pio_bits[adev->pio_mode - XFER_PIO_0]);
}

static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	static const u8 udma_bits[] = { 0xEE, 0xE8, 0xE6, 0xE4, 0xE2, 0xE1, 0xE0, 0xE0 };

	pci_write_config_byte(pdev, PATA_UDMA_TIMING - adev->devno,
			      udma_bits[adev->dma_mode - XFER_UDMA_0]);
}

static const unsigned int svia_bar_sizes[] = {
	8, 4, 8, 4, 16, 256
};

static const unsigned int vt6421_bar_sizes[] = {
	16, 16, 16, 16, 32, 128
};

static void __iomem *svia_scr_addr(void __iomem *addr, unsigned int port)
{
	return addr + (port * 128);
}

static void __iomem *vt6421_scr_addr(void __iomem *addr, unsigned int port)
{
	return addr + (port * 64);
}

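/*
 * On vt6421 each port's taskfile registers live in their own BAR
 * (BAR 0-2), BAR 4 holds the shared bmdma block (8 bytes per port)
 * and BAR 5 holds the SCR registers.
 */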
static void vt6421_init_addrs(struct ata_port *ap)
{
	void __iomem * const *iomap = ap->host->iomap;
	void __iomem *reg_addr = iomap[ap->port_no];
	void __iomem *bmdma_addr = iomap[4] + (ap->port_no * 8);
	struct ata_ioports *ioaddr = &ap->ioaddr;

	ioaddr->cmd_addr = reg_addr;
	ioaddr->altstatus_addr =
	ioaddr->ctl_addr = (void __iomem *)
		((unsigned long)(reg_addr + 8) | ATA_PCI_CTL_OFS);
	ioaddr->bmdma_addr = bmdma_addr;
	ioaddr->scr_addr = vt6421_scr_addr(iomap[5], ap->port_no);

	ata_sff_std_ports(ioaddr);

	ata_port_pbar_desc(ap, ap->port_no, -1, "port");
	ata_port_pbar_desc(ap, 4, ap->port_no * 8, "bmdma");
}

static int vt6420_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
{
	const struct ata_port_info *ppi[] = { &vt6420_port_info, NULL };
	struct ata_host *host;
	int rc;

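	/*
	 * With hotplug enabled, expose SCR access so hotplug events can
	 * be detected and handled in vt642x_interrupt().
	 */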
	if (vt6420_hotplug) {
		ppi[0]->port_ops->scr_read = svia_scr_read;
		ppi[0]->port_ops->scr_write = svia_scr_write;
	}

	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;
	*r_host = host;

	rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME);
	if (rc) {
		dev_err(&pdev->dev, "failed to iomap PCI BAR 5\n");
		return rc;
	}

	host->ports[0]->ioaddr.scr_addr = svia_scr_addr(host->iomap[5], 0);
	host->ports[1]->ioaddr.scr_addr = svia_scr_addr(host->iomap[5], 1);

	return 0;
}

static int vt6421_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
{
	const struct ata_port_info *ppi[] =
		{ &vt6421_sport_info, &vt6421_sport_info, &vt6421_pport_info };
	struct ata_host *host;
	int i, rc;

	*r_host = host = ata_host_alloc_pinfo(&pdev->dev, ppi, ARRAY_SIZE(ppi));
	if (!host) {
		dev_err(&pdev->dev, "failed to allocate host\n");
		return -ENOMEM;
	}

	rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME);
	if (rc) {
		dev_err(&pdev->dev, "failed to request/iomap PCI BARs (errno=%d)\n",
			rc);
		return rc;
	}
	host->iomap = pcim_iomap_table(pdev);

	for (i = 0; i < host->n_ports; i++)
		vt6421_init_addrs(host->ports[i]);

	return dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
}

static int vt8251_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
{
	const struct ata_port_info *ppi[] = { &vt8251_port_info, NULL };
	struct ata_host *host;
	int i, rc;

	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;
	*r_host = host;

	rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME);
	if (rc) {
		dev_err(&pdev->dev, "failed to iomap PCI BAR 5\n");
		return rc;
	}

	/* 8251 hosts four sata ports as M/S of the two channels */
	for (i = 0; i < host->n_ports; i++)
		ata_slave_link_init(host->ports[i]);

	return 0;
}

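/*
 * Set Rx52[2] to raise the internal FIFO flow-control watermark; see
 * the comment in svia_configure() for the background on the WD drive
 * incompatibility this works around.
 */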
static void svia_wd_fix(struct pci_dev *pdev)
{
	u8 tmp8;

	pci_read_config_byte(pdev, 0x52, &tmp8);
	pci_write_config_byte(pdev, 0x52, tmp8 | BIT(2));
}

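/*
 * Interrupt handler used on vt6421 and on vt6420 with hotplug
 * enabled: if the standard bmdma handler did not claim the IRQ, check
 * SError on both ports for a PHY-ready change, mark the link
 * hotplugged and freeze the port so EH picks it up.
 */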
static irqreturn_t vt642x_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	irqreturn_t rc = ata_bmdma_interrupt(irq, dev_instance);

	/* if the IRQ was not handled, it might be a hotplug IRQ */
	if (rc != IRQ_HANDLED) {
		u32 serror;
		unsigned long flags;

		spin_lock_irqsave(&host->lock, flags);
		/* check for hotplug on port 0 */
		svia_scr_read(&host->ports[0]->link, SCR_ERROR, &serror);
		if (serror & SERR_PHYRDY_CHG) {
			ata_ehi_hotplugged(&host->ports[0]->link.eh_info);
			ata_port_freeze(host->ports[0]);
			rc = IRQ_HANDLED;
		}
		/* check for hotplug on port 1 */
		svia_scr_read(&host->ports[1]->link, SCR_ERROR, &serror);
		if (serror & SERR_PHYRDY_CHG) {
			ata_ehi_hotplugged(&host->ports[1]->link.eh_info);
			ata_port_freeze(host->ports[1]);
			rc = IRQ_HANDLED;
		}
		spin_unlock_irqrestore(&host->lock, flags);
	}

	return rc;
}

static void vt6421_error_handler(struct ata_port *ap)
{
	struct svia_priv *hpriv = ap->host->private_data;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u32 serror;

	/* see svia_configure() for description */
	if (!hpriv->wd_workaround) {
		svia_scr_read(&ap->link, SCR_ERROR, &serror);
		if (serror == 0x1000500) {
			ata_port_warn(ap, "Incompatible drive: enabling workaround. This slows down transfer rate to ~60 MB/s");
			svia_wd_fix(pdev);
			hpriv->wd_workaround = true;
			ap->link.eh_context.i.flags |= ATA_EHI_QUIET;
		}
	}

	ata_sff_error_handler(ap);
}

static void svia_configure(struct pci_dev *pdev, int board_id,
			   struct svia_priv *hpriv)
{
	u8 tmp8;

	pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &tmp8);
	dev_info(&pdev->dev, "routed to hard irq line %d\n",
		 (int) (tmp8 & 0xf0) == 0xf0 ? 0 : tmp8 & 0x0f);

	/* make sure SATA channels are enabled */
	pci_read_config_byte(pdev, SATA_CHAN_ENAB, &tmp8);
	if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
		dev_dbg(&pdev->dev, "enabling SATA channels (0x%x)\n",
			(int)tmp8);
		tmp8 |= ALL_PORTS;
		pci_write_config_byte(pdev, SATA_CHAN_ENAB, tmp8);
	}

	/* make sure interrupts for each channel are sent to us */
	pci_read_config_byte(pdev, SATA_INT_GATE, &tmp8);
	if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
		dev_dbg(&pdev->dev, "enabling SATA channel interrupts (0x%x)\n",
			(int) tmp8);
		tmp8 |= ALL_PORTS;
		pci_write_config_byte(pdev, SATA_INT_GATE, tmp8);
	}

	/* make sure native mode is enabled */
	pci_read_config_byte(pdev, SATA_NATIVE_MODE, &tmp8);
	if ((tmp8 & NATIVE_MODE_ALL) != NATIVE_MODE_ALL) {
		dev_dbg(&pdev->dev,
			"enabling SATA channel native mode (0x%x)\n",
			(int) tmp8);
		tmp8 |= NATIVE_MODE_ALL;
		pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
	}

	if ((board_id == vt6420 && vt6420_hotplug) || board_id == vt6421) {
		/* enable IRQ on hotplug */
		pci_read_config_byte(pdev, SVIA_MISC_3, &tmp8);
		if ((tmp8 & SATA_HOTPLUG) != SATA_HOTPLUG) {
			dev_dbg(&pdev->dev,
				"enabling SATA hotplug (0x%x)\n",
				(int) tmp8);
			tmp8 |= SATA_HOTPLUG;
			pci_write_config_byte(pdev, SVIA_MISC_3, tmp8);
		}
	}

	/*
	 * vt6420/1 has problems talking to some drives.  The following
	 * is the fix from Joseph Chan <JosephChan@via.com.tw>.
	 *
	 * When the host issues HOLD, the device may send up to 20DW of
	 * data before acknowledging it with HOLDA, and the host should
	 * be able to buffer them in FIFO.  Unfortunately, some WD
	 * drives send up to 40DW before acknowledging HOLD and, in the
	 * default configuration, this ends up overflowing vt6421's
	 * FIFO, making the controller abort the transaction with
	 * R_ERR.
	 *
	 * Rx52[2] is the internal 128DW FIFO flow-control watermark
	 * adjusting mechanism enable bit and the default value 0
	 * means the host will issue HOLD to the device when the
	 * remaining FIFO size goes below 32DW.  Setting it to 1 makes
	 * the watermark 64DW.
	 *
	 * https://bugzilla.kernel.org/show_bug.cgi?id=15173
	 * http://article.gmane.org/gmane.linux.ide/46352
	 * http://thread.gmane.org/gmane.linux.kernel/1062139
	 *
	 * As the fix slows down data transfer, apply it only if the
	 * error actually appears - see vt6421_error_handler().
	 * Apply the fix always on vt6420 as we don't know if SCR_ERROR
	 * can be read safely.
	 */
	if (board_id == vt6420) {
		svia_wd_fix(pdev);
		hpriv->wd_workaround = true;
	}
}

static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	unsigned int i;
	int rc;
	struct ata_host *host = NULL;
	int board_id = (int) ent->driver_data;
	const unsigned *bar_sizes;
	struct svia_priv *hpriv;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	if (board_id == vt6421)
		bar_sizes = &vt6421_bar_sizes[0];
	else
		bar_sizes = &svia_bar_sizes[0];

	for (i = 0; i < ARRAY_SIZE(svia_bar_sizes); i++)
		if ((pci_resource_start(pdev, i) == 0) ||
		    (pci_resource_len(pdev, i) < bar_sizes[i])) {
			dev_err(&pdev->dev,
				"invalid PCI BAR %u (sz 0x%llx, val 0x%llx)\n",
				i,
				(unsigned long long)pci_resource_start(pdev, i),
				(unsigned long long)pci_resource_len(pdev, i));
			return -ENODEV;
		}

	switch (board_id) {
	case vt6420:
		rc = vt6420_prepare_host(pdev, &host);
		break;
	case vt6421:
		rc = vt6421_prepare_host(pdev, &host);
		break;
	case vt8251:
		rc = vt8251_prepare_host(pdev, &host);
		break;
	default:
		rc = -EINVAL;
	}
	if (rc)
		return rc;

	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	svia_configure(pdev, board_id, hpriv);

	pci_set_master(pdev);
	if ((board_id == vt6420 && vt6420_hotplug) || board_id == vt6421)
		return ata_host_activate(host, pdev->irq, vt642x_interrupt,
					 IRQF_SHARED, &svia_sht);
	else
		return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
					 IRQF_SHARED, &svia_sht);
}

#ifdef CONFIG_PM_SLEEP
static int svia_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	struct svia_priv *hpriv = host->private_data;
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (hpriv->wd_workaround)
		svia_wd_fix(pdev);
	ata_host_resume(host);

	return 0;
}
#endif

module_pci_driver(svia_pci_driver);
759
