Lines matching refs: ap
295 static void nv_nf2_freeze(struct ata_port *ap);
296 static void nv_nf2_thaw(struct ata_port *ap);
297 static void nv_ck804_freeze(struct ata_port *ap);
298 static void nv_ck804_thaw(struct ata_port *ap);
304 static void nv_adma_irq_clear(struct ata_port *ap);
305 static int nv_adma_port_start(struct ata_port *ap);
306 static void nv_adma_port_stop(struct ata_port *ap);
308 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
309 static int nv_adma_port_resume(struct ata_port *ap);
311 static void nv_adma_freeze(struct ata_port *ap);
312 static void nv_adma_thaw(struct ata_port *ap);
313 static void nv_adma_error_handler(struct ata_port *ap);
316 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
318 static void nv_mcp55_thaw(struct ata_port *ap);
319 static void nv_mcp55_freeze(struct ata_port *ap);
320 static void nv_swncq_error_handler(struct ata_port *ap);
322 static int nv_swncq_port_start(struct ata_port *ap);
326 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
329 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
330 static int nv_swncq_port_resume(struct ata_port *ap);
596 static void nv_adma_register_mode(struct ata_port *ap) in nv_adma_register_mode() argument
598 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_register_mode()
613 ata_port_warn(ap, "timeout waiting for ADMA IDLE, stat=0x%hx\n", in nv_adma_register_mode()
627 ata_port_warn(ap, in nv_adma_register_mode()
634 static void nv_adma_mode(struct ata_port *ap) in nv_adma_mode() argument
636 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_mode()
657 ata_port_warn(ap, in nv_adma_mode()
666 struct ata_port *ap = ata_shost_to_port(sdev->host); in nv_adma_slave_config() local
667 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_slave_config()
669 struct pci_dev *pdev = to_pci_dev(ap->host->dev); in nv_adma_slave_config()
682 spin_lock_irqsave(ap->lock, flags); in nv_adma_slave_config()
684 if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) { in nv_adma_slave_config()
700 nv_adma_register_mode(ap); in nv_adma_slave_config()
709 if (ap->port_no == 1) in nv_adma_slave_config()
727 port0 = ap->host->ports[0]->private_data; in nv_adma_slave_config()
728 port1 = ap->host->ports[1]->private_data; in nv_adma_slave_config()
745 ata_port_info(ap, in nv_adma_slave_config()
747 (unsigned long long)*ap->host->dev->dma_mask, in nv_adma_slave_config()
750 spin_unlock_irqrestore(ap->lock, flags); in nv_adma_slave_config()
757 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_check_atapi_dma()
761 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf) in nv_adma_tf_read() argument
770 nv_adma_register_mode(ap); in nv_adma_tf_read()
772 ata_sff_tf_read(ap, tf); in nv_adma_tf_read()
807 static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err) in nv_adma_check_cpb() argument
809 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_check_cpb()
812 ata_port_dbg(ap, "CPB %d, flags=0x%x\n", cpb_num, flags); in nv_adma_check_cpb()
818 struct ata_eh_info *ehi = &ap->link.eh_info; in nv_adma_check_cpb()
841 ata_port_freeze(ap); in nv_adma_check_cpb()
843 ata_port_abort(ap); in nv_adma_check_cpb()
852 static int nv_host_intr(struct ata_port *ap, u8 irq_stat) in nv_host_intr() argument
854 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); in nv_host_intr()
858 ata_port_freeze(ap); in nv_host_intr()
868 ata_sff_check_status(ap); in nv_host_intr()
873 return ata_bmdma_port_intr(ap, qc); in nv_host_intr()
885 struct ata_port *ap = host->ports[i]; in nv_adma_interrupt() local
886 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_interrupt()
898 handled += nv_host_intr(ap, irq_stat); in nv_adma_interrupt()
906 if (ata_tag_valid(ap->link.active_tag)) in nv_adma_interrupt()
912 handled += nv_host_intr(ap, irq_stat); in nv_adma_interrupt()
921 if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier && in nv_adma_interrupt()
945 struct ata_eh_info *ehi = &ap->link.eh_info; in nv_adma_interrupt()
963 ata_port_freeze(ap); in nv_adma_interrupt()
976 if (ata_tag_valid(ap->link.active_tag)) in nv_adma_interrupt()
978 ap->link.active_tag; in nv_adma_interrupt()
980 check_commands = ap->link.sactive; in nv_adma_interrupt()
986 rc = nv_adma_check_cpb(ap, pos, in nv_adma_interrupt()
994 ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask); in nv_adma_interrupt()
1012 static void nv_adma_freeze(struct ata_port *ap) in nv_adma_freeze() argument
1014 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_freeze()
1018 nv_ck804_freeze(ap); in nv_adma_freeze()
1024 writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT), in nv_adma_freeze()
1025 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804); in nv_adma_freeze()
1034 static void nv_adma_thaw(struct ata_port *ap) in nv_adma_thaw() argument
1036 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_thaw()
1040 nv_ck804_thaw(ap); in nv_adma_thaw()
1052 static void nv_adma_irq_clear(struct ata_port *ap) in nv_adma_irq_clear() argument
1054 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_irq_clear()
1059 ata_bmdma_irq_clear(ap); in nv_adma_irq_clear()
1064 writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT), in nv_adma_irq_clear()
1065 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804); in nv_adma_irq_clear()
1072 if (ap->port_no == 0) { in nv_adma_irq_clear()
1079 pp = ap->host->ports[0]->private_data; in nv_adma_irq_clear()
1081 pp = ap->host->ports[1]->private_data; in nv_adma_irq_clear()
1087 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_post_internal_cmd()
1093 static int nv_adma_port_start(struct ata_port *ap) in nv_adma_port_start() argument
1095 struct device *dev = ap->host->dev; in nv_adma_port_start()
1113 rc = ata_bmdma_port_start(ap); in nv_adma_port_start()
1121 mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT + in nv_adma_port_start()
1122 ap->port_no * NV_ADMA_PORT_SIZE; in nv_adma_port_start()
1124 pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN; in nv_adma_port_start()
1126 NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no); in nv_adma_port_start()
1161 ap->private_data = pp; in nv_adma_port_start()
1187 static void nv_adma_port_stop(struct ata_port *ap) in nv_adma_port_stop() argument
1189 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_port_stop()
1196 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg) in nv_adma_port_suspend() argument
1198 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_port_suspend()
1202 nv_adma_register_mode(ap); in nv_adma_port_suspend()
1213 static int nv_adma_port_resume(struct ata_port *ap) in nv_adma_port_resume() argument
1215 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_port_resume()
1248 static void nv_adma_setup_port(struct ata_port *ap) in nv_adma_setup_port() argument
1250 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR]; in nv_adma_setup_port()
1251 struct ata_ioports *ioport = &ap->ioaddr; in nv_adma_setup_port()
1253 mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE; in nv_adma_setup_port()
1312 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_fill_sg()
1330 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_use_reg_mode()
1347 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_qc_prep()
1355 nv_adma_register_mode(qc->ap); in nv_adma_qc_prep()
1393 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_qc_issue()
1410 nv_adma_register_mode(qc->ap); in nv_adma_qc_issue()
1413 nv_adma_mode(qc->ap); in nv_adma_qc_issue()
1441 struct ata_port *ap = host->ports[i]; in nv_generic_interrupt() local
1444 qc = ata_qc_from_tag(ap, ap->link.active_tag); in nv_generic_interrupt()
1446 handled += ata_bmdma_port_intr(ap, qc); in nv_generic_interrupt()
1452 ap->ops->sff_check_status(ap); in nv_generic_interrupt()
1506 *val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4)); in nv_scr_read()
1515 iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4)); in nv_scr_write()
1527 if (!(link->ap->pflags & ATA_PFLAG_LOADING) && in nv_hardreset()
1551 static void nv_nf2_freeze(struct ata_port *ap) in nv_nf2_freeze() argument
1553 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr; in nv_nf2_freeze()
1554 int shift = ap->port_no * NV_INT_PORT_SHIFT; in nv_nf2_freeze()
1562 static void nv_nf2_thaw(struct ata_port *ap) in nv_nf2_thaw() argument
1564 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr; in nv_nf2_thaw()
1565 int shift = ap->port_no * NV_INT_PORT_SHIFT; in nv_nf2_thaw()
1575 static void nv_ck804_freeze(struct ata_port *ap) in nv_ck804_freeze() argument
1577 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR]; in nv_ck804_freeze()
1578 int shift = ap->port_no * NV_INT_PORT_SHIFT; in nv_ck804_freeze()
1586 static void nv_ck804_thaw(struct ata_port *ap) in nv_ck804_thaw() argument
1588 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR]; in nv_ck804_thaw()
1589 int shift = ap->port_no * NV_INT_PORT_SHIFT; in nv_ck804_thaw()
1599 static void nv_mcp55_freeze(struct ata_port *ap) in nv_mcp55_freeze() argument
1601 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR]; in nv_mcp55_freeze()
1602 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55; in nv_mcp55_freeze()
1612 static void nv_mcp55_thaw(struct ata_port *ap) in nv_mcp55_thaw() argument
1614 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR]; in nv_mcp55_thaw()
1615 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55; in nv_mcp55_thaw()
1625 static void nv_adma_error_handler(struct ata_port *ap) in nv_adma_error_handler() argument
1627 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_error_handler()
1633 if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) { in nv_adma_error_handler()
1641 ata_port_err(ap, in nv_adma_error_handler()
1650 if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) || in nv_adma_error_handler()
1651 ap->link.sactive & (1 << i)) in nv_adma_error_handler()
1652 ata_port_err(ap, in nv_adma_error_handler()
1659 nv_adma_register_mode(ap); in nv_adma_error_handler()
1678 ata_bmdma_error_handler(ap); in nv_adma_error_handler()
1681 static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc) in nv_swncq_qc_to_dq() argument
1683 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_qc_to_dq()
1692 static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap) in nv_swncq_qc_from_dq() argument
1694 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_qc_from_dq()
1706 return ata_qc_from_tag(ap, tag); in nv_swncq_qc_from_dq()
1709 static void nv_swncq_fis_reinit(struct ata_port *ap) in nv_swncq_fis_reinit() argument
1711 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_fis_reinit()
1719 static void nv_swncq_pp_reinit(struct ata_port *ap) in nv_swncq_pp_reinit() argument
1721 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_pp_reinit()
1729 nv_swncq_fis_reinit(ap); in nv_swncq_pp_reinit()
1732 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis) in nv_swncq_irq_clear() argument
1734 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_irq_clear()
1739 static void __ata_bmdma_stop(struct ata_port *ap) in __ata_bmdma_stop() argument
1743 qc.ap = ap; in __ata_bmdma_stop()
1747 static void nv_swncq_ncq_stop(struct ata_port *ap) in nv_swncq_ncq_stop() argument
1749 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_ncq_stop()
1754 ata_port_err(ap, "EH in SWNCQ mode,QC:qc_active 0x%llX sactive 0x%X\n", in nv_swncq_ncq_stop()
1755 ap->qc_active, ap->link.sactive); in nv_swncq_ncq_stop()
1756 ata_port_err(ap, in nv_swncq_ncq_stop()
1762 ata_port_err(ap, "ATA_REG 0x%X ERR_REG 0x%X\n", in nv_swncq_ncq_stop()
1763 ap->ops->sff_check_status(ap), in nv_swncq_ncq_stop()
1764 ioread8(ap->ioaddr.error_addr)); in nv_swncq_ncq_stop()
1769 ata_port_err(ap, "tag : dhfis dmafis sdbfis sactive\n"); in nv_swncq_ncq_stop()
1779 ata_port_err(ap, in nv_swncq_ncq_stop()
1788 nv_swncq_pp_reinit(ap); in nv_swncq_ncq_stop()
1789 ap->ops->sff_irq_clear(ap); in nv_swncq_ncq_stop()
1790 __ata_bmdma_stop(ap); in nv_swncq_ncq_stop()
1791 nv_swncq_irq_clear(ap, 0xffff); in nv_swncq_ncq_stop()
1794 static void nv_swncq_error_handler(struct ata_port *ap) in nv_swncq_error_handler() argument
1796 struct ata_eh_context *ehc = &ap->link.eh_context; in nv_swncq_error_handler()
1798 if (ap->link.sactive) { in nv_swncq_error_handler()
1799 nv_swncq_ncq_stop(ap); in nv_swncq_error_handler()
1803 ata_bmdma_error_handler(ap); in nv_swncq_error_handler()
1807 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg) in nv_swncq_port_suspend() argument
1809 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR]; in nv_swncq_port_suspend()
1826 static int nv_swncq_port_resume(struct ata_port *ap) in nv_swncq_port_resume() argument
1828 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR]; in nv_swncq_port_resume()
1873 struct ata_port *ap = ata_shost_to_port(sdev->host); in nv_swncq_slave_config() local
1874 struct pci_dev *pdev = to_pci_dev(ap->host->dev); in nv_swncq_slave_config()
1886 dev = &ap->link.device[sdev->id]; in nv_swncq_slave_config()
1887 if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI) in nv_swncq_slave_config()
1917 static int nv_swncq_port_start(struct ata_port *ap) in nv_swncq_port_start() argument
1919 struct device *dev = ap->host->dev; in nv_swncq_port_start()
1920 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR]; in nv_swncq_port_start()
1925 rc = ata_bmdma_port_start(ap); in nv_swncq_port_start()
1938 ap->private_data = pp; in nv_swncq_port_start()
1939 pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE; in nv_swncq_port_start()
1940 pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2; in nv_swncq_port_start()
1941 pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2; in nv_swncq_port_start()
1963 struct ata_port *ap = qc->ap; in nv_swncq_fill_sg() local
1965 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_fill_sg()
1997 static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap, in nv_swncq_issue_atacmd() argument
2000 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_issue_atacmd()
2011 trace_ata_tf_load(ap, &qc->tf); in nv_swncq_issue_atacmd()
2012 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ in nv_swncq_issue_atacmd()
2013 trace_ata_exec_command(ap, &qc->tf, qc->hw_tag); in nv_swncq_issue_atacmd()
2014 ap->ops->sff_exec_command(ap, &qc->tf); in nv_swncq_issue_atacmd()
2021 struct ata_port *ap = qc->ap; in nv_swncq_qc_issue() local
2022 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_qc_issue()
2028 nv_swncq_issue_atacmd(ap, qc); in nv_swncq_qc_issue()
2030 nv_swncq_qc_to_dq(ap, qc); /* add qc to defer queue */ in nv_swncq_qc_issue()
2035 static void nv_swncq_hotplug(struct ata_port *ap, u32 fis) in nv_swncq_hotplug() argument
2038 struct ata_eh_info *ehi = &ap->link.eh_info; in nv_swncq_hotplug()
2043 sata_scr_read(&ap->link, SCR_ERROR, &serror); in nv_swncq_hotplug()
2044 sata_scr_write(&ap->link, SCR_ERROR, serror); in nv_swncq_hotplug()
2057 ata_port_freeze(ap); in nv_swncq_hotplug()
2060 static int nv_swncq_sdbfis(struct ata_port *ap) in nv_swncq_sdbfis() argument
2063 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_sdbfis()
2064 struct ata_eh_info *ehi = &ap->link.eh_info; in nv_swncq_sdbfis()
2070 host_stat = ap->ops->bmdma_status(ap); in nv_swncq_sdbfis()
2071 trace_ata_bmdma_status(ap, host_stat); in nv_swncq_sdbfis()
2081 ap->ops->sff_irq_clear(ap); in nv_swncq_sdbfis()
2082 __ata_bmdma_stop(ap); in nv_swncq_sdbfis()
2091 ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask); in nv_swncq_sdbfis()
2093 if (!ap->qc_active) { in nv_swncq_sdbfis()
2094 ata_port_dbg(ap, "over\n"); in nv_swncq_sdbfis()
2095 nv_swncq_pp_reinit(ap); in nv_swncq_sdbfis()
2109 ata_port_dbg(ap, "QC: qc_active 0x%llx," in nv_swncq_sdbfis()
2112 ap->qc_active, pp->qc_active, in nv_swncq_sdbfis()
2116 nv_swncq_fis_reinit(ap); in nv_swncq_sdbfis()
2119 qc = ata_qc_from_tag(ap, pp->last_issue_tag); in nv_swncq_sdbfis()
2120 nv_swncq_issue_atacmd(ap, qc); in nv_swncq_sdbfis()
2126 qc = nv_swncq_qc_from_dq(ap); in nv_swncq_sdbfis()
2128 nv_swncq_issue_atacmd(ap, qc); in nv_swncq_sdbfis()
2134 static inline u32 nv_swncq_tag(struct ata_port *ap) in nv_swncq_tag() argument
2136 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_tag()
2143 static void nv_swncq_dmafis(struct ata_port *ap) in nv_swncq_dmafis() argument
2149 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_dmafis()
2151 __ata_bmdma_stop(ap); in nv_swncq_dmafis()
2152 tag = nv_swncq_tag(ap); in nv_swncq_dmafis()
2154 ata_port_dbg(ap, "dma setup tag 0x%x\n", tag); in nv_swncq_dmafis()
2155 qc = ata_qc_from_tag(ap, tag); in nv_swncq_dmafis()
2164 ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS); in nv_swncq_dmafis()
2167 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); in nv_swncq_dmafis()
2172 iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); in nv_swncq_dmafis()
2175 static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis) in nv_swncq_host_interrupt() argument
2177 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_host_interrupt()
2179 struct ata_eh_info *ehi = &ap->link.eh_info; in nv_swncq_host_interrupt()
2183 ata_stat = ap->ops->sff_check_status(ap); in nv_swncq_host_interrupt()
2184 nv_swncq_irq_clear(ap, fis); in nv_swncq_host_interrupt()
2188 if (ata_port_is_frozen(ap)) in nv_swncq_host_interrupt()
2192 nv_swncq_hotplug(ap, fis); in nv_swncq_host_interrupt()
2199 if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror)) in nv_swncq_host_interrupt()
2201 ap->ops->scr_write(&ap->link, SCR_ERROR, serror); in nv_swncq_host_interrupt()
2209 ata_port_freeze(ap); in nv_swncq_host_interrupt()
2222 ata_port_dbg(ap, "SWNCQ: qc_active 0x%X " in nv_swncq_host_interrupt()
2226 if (nv_swncq_sdbfis(ap) < 0) in nv_swncq_host_interrupt()
2245 ata_stat = ap->ops->sff_check_status(ap); in nv_swncq_host_interrupt()
2250 ata_port_dbg(ap, "send next command\n"); in nv_swncq_host_interrupt()
2251 qc = nv_swncq_qc_from_dq(ap); in nv_swncq_host_interrupt()
2252 nv_swncq_issue_atacmd(ap, qc); in nv_swncq_host_interrupt()
2261 pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap)); in nv_swncq_host_interrupt()
2263 nv_swncq_dmafis(ap); in nv_swncq_host_interrupt()
2270 ata_port_freeze(ap); in nv_swncq_host_interrupt()
2287 struct ata_port *ap = host->ports[i]; in nv_swncq_interrupt() local
2289 if (ap->link.sactive) { in nv_swncq_interrupt()
2290 nv_swncq_host_interrupt(ap, (u16)irq_stat); in nv_swncq_interrupt()
2294 nv_swncq_irq_clear(ap, 0xfff0); in nv_swncq_interrupt()
2296 handled += nv_host_intr(ap, (u8)irq_stat); in nv_swncq_interrupt()
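
The freeze/thaw handlers referenced above (nv_nf2_freeze/thaw, nv_ck804_freeze/thaw, nv_mcp55_freeze/thaw) all use ap the same way: derive a per-port bit shift from ap->port_no (NV_INT_PORT_SHIFT, or NV_INT_PORT_SHIFT_MCP55 on MCP55) and then mask or unmask that port's NV_INT_ALL interrupt bits. A minimal sketch of the freeze side of that pattern follows; only the signature, the scr_addr lookup and the shift computation are taken from the listing, while the NV_INT_ENABLE offset and the read-modify-write of the enable register are assumptions added for illustration.

	/*
	 * Sketch only: the per-port interrupt-mask pattern shared by the
	 * nv_*_freeze() handlers listed above.  The scr_addr lookup and the
	 * shift computation are from the listing; NV_INT_ENABLE and the
	 * read-modify-write sequence are illustrative assumptions.
	 */
	static void nv_port_freeze_sketch(struct ata_port *ap)
	{
		void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
		int shift = ap->port_no * NV_INT_PORT_SHIFT;
		u8 mask;

		mask = ioread8(scr_addr + NV_INT_ENABLE);	/* current enable bits */
		mask &= ~(NV_INT_ALL << shift);			/* drop this port's bits */
		iowrite8(mask, scr_addr + NV_INT_ENABLE);
	}

The thaw counterparts reverse this: they clear the port's pending bits in the interrupt status register and then set its enable bits again, as the writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT), ...) lines in nv_adma_freeze() and nv_adma_irq_clear() above suggest.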