/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
#include <asm/io.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"0.7"
enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_USE_Q_DEPTH		= ATA_DEF_QUEUE,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary.  Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 176,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),

	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	MV_COMMON_FLAGS		= (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
				   ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING),
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0,			/* queueing disabled */
	EDMA_CFG_NCQ		= (1 << 5),
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),		/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),		/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),		/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),
	EDMA_ERR_PRD_PAR	= (1 << 1),
	EDMA_ERR_DEV		= (1 << 2),
	EDMA_ERR_DEV_DCON	= (1 << 3),
	EDMA_ERR_DEV_CON	= (1 << 4),
	EDMA_ERR_SERR		= (1 << 5),
	EDMA_ERR_SELF_DIS	= (1 << 7),
	EDMA_ERR_BIST_ASYNC	= (1 << 8),
	EDMA_ERR_CRBQ_PAR	= (1 << 9),
	EDMA_ERR_CRPB_PAR	= (1 << 10),
	EDMA_ERR_INTRL_PAR	= (1 << 11),
	EDMA_ERR_IORDY		= (1 << 12),
	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),
	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),
	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),
	EDMA_ERR_TRANS_PROTO	= (1 << 31),
	EDMA_ERR_FATAL		= (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
				   EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
				   EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
				   EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
				   EDMA_ERR_LNK_DATA_RX |
				   EDMA_ERR_LNK_DATA_TX |
				   EDMA_ERR_TRANS_PROTO),

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */
	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,
	EDMA_EN			= (1 << 0),
	EDMA_DS			= (1 << 1),
	ATA_RST			= (1 << 2),

	EDMA_IORDY_TMOUT	= 0x34,
	EDMA_ARB_CFG		= 0x38,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_50XX		= (1 << 6),
	MV_HP_GEN_IIE		= (1 << 7),

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),
	MV_PP_FLAG_EDMA_DS_ACT	= (1 << 1),
};
#define IS_50XX(hpriv)	((hpriv)->hp_flags & MV_HP_50XX)
#define IS_60XX(hpriv)	(((hpriv)->hp_flags & MV_HP_50XX) == 0)
#define IS_GEN_I(hpriv)		IS_50XX(hpriv)
#define IS_GEN_II(hpriv)	IS_60XX(hpriv)
#define IS_GEN_IIE(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_IIE)

enum {
	/* Our DMA boundary is determined by an ePRD being unable to handle
	 * anything larger than 64KB
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
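
/*
 * Note (informational): MV_DMA_BOUNDARY doubles as
 * scsi_host_template.dma_boundary below, so the block layer should not
 * hand us a merged S/G element spanning a 64KB boundary; mv_fill_sg()
 * nevertheless splits any segment that would cross one, as a
 * belt-and-suspenders measure.
 */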
enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
};
/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};
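/*
 * Size sanity (informational): mv_crqb is 4+4+2+22 = 32 bytes, mv_crpb is
 * 2+2+4 = 8 bytes, and mv_sg is 4*4 = 16 bytes, matching the CRQB/CRPB/ePRD
 * size and alignment comments in the enum block above.
 */
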
struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl;
	dma_addr_t		sg_tbl_dma;
	u32			pp_flags;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv;
struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};

struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
};
static void mv_irq_clear(struct ata_port *ap);
static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static void mv_phy_reset(struct ata_port *ap);
static void __mv_phy_reset(struct ata_port *ap, int can_sleep);
static void mv_host_stop(struct ata_host_set *host_set);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t mv_interrupt(int irq, void *dev_instance,
				struct pt_regs *regs);
static void mv_eng_timeout(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static void mv_stop_and_reset(struct ata_port *ap);
static struct scsi_host_template mv_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= MV_USE_Q_DEPTH,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.bios_param		= ata_std_bios_param,
};
static const struct ata_port_operations mv5_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_mmio_data_xfer,

	.eng_timeout		= mv_eng_timeout,

	.irq_handler		= mv_interrupt,
	.irq_clear		= mv_irq_clear,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
	.host_stop		= mv_host_stop,
};

static const struct ata_port_operations mv6_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_mmio_data_xfer,

	.eng_timeout		= mv_eng_timeout,

	.irq_handler		= mv_interrupt,
	.irq_clear		= mv_irq_clear,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
	.host_stop		= mv_host_stop,
};

static const struct ata_port_operations mv_iie_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,

	.eng_timeout		= mv_eng_timeout,

	.irq_handler		= mv_interrupt,
	.irq_clear		= mv_irq_clear,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
	.host_stop		= mv_host_stop,
};
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.sht		= &mv_sht,
		.host_flags	= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				   MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				   MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv_iie_ops,
	},
};
static const struct pci_device_id mv_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5040), 0, 0, chip_504x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5041), 0, 0, chip_504x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5080), 0, 0, chip_5080},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5081), 0, 0, chip_508x},

	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6040), 0, 0, chip_604x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6041), 0, 0, chip_604x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6042), 0, 0, chip_6042},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6080), 0, 0, chip_608x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6081), 0, 0, chip_608x},

	{PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x0241), 0, 0, chip_604x},

	{}			/* terminate list */
};

static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
/*
 * module options
 */
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */


/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
	       MV_SATAHC_ARBTR_REG_SZ +
	       (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
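
/*
 * Worked example of the address math above: for port 5, hc = 5 >> 2 = 1
 * and hardport = 5 & 3 = 1, so mv_port_base() yields
 * base + 0x20000 + (1 * 0x10000) + 0x2000 + (1 * 0x2000) = base + 0x34000.
 */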
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host_set->mmio_base, ap->port_no);
}

static inline int mv_get_hc_count(unsigned long host_flags)
{
	return ((host_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_irq_clear(struct ata_port *ap)
{
}
/**
 *      mv_start_dma - Enable eDMA engine
 *      @base: port base address
 *      @pp: port private data
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
{
	if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) {
		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
}
/**
 *      mv_stop_dma - Disable eDMA engine
 *      @ap: ATA channel to manipulate
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 reg;
	int i;

	if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
		/* Disable EDMA if active.  The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(EDMA_EN & reg)) {
			break;
		}
		udelay(100);
	}

	if (EDMA_EN & reg) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		/* FIXME: Consider doing a reset here to recover */
	}
}
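
/*
 * Note on the poll loop above: 1000 iterations at 100us each gives the
 * engine up to ~100ms to drop EDMA_EN before we give up and complain.
 */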
#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base + 0xc00, 0x3c);
	mv_dump_mem(mmio_base + 0xd00, 0x34);
	mv_dump_mem(mmio_base + 0xf00, 0x4);
	mv_dump_mem(mmio_base + 0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base + 0x300, 0x60);
	}
#endif
}
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs) {
		return readl(mv_ap_base(ap) + ofs);
	} else {
		return (u32) ofs;
	}
}

static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs) {
		writelfl(val, mv_ap_base(ap) + ofs);
	}
}
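
/*
 * Example (assuming libata's SCR_* values follow the SATA SCR ordering,
 * status = 0, error = 1, control = 2): SCR_ERROR maps to port offset
 * 0x300 + 1 * 4 = 0x304, consistent with the "ctrl, err regs follow
 * status" note on SATA_STATUS_OFS above.
 */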
/**
 *      mv_host_stop - Host specific cleanup/stop routine.
 *      @host_set: host data structure
 *
 *      Disable ints, cleanup host memory, call general purpose
 *      host_stop.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_host_stop(struct ata_host_set *host_set)
{
	struct mv_host_priv *hpriv = host_set->private_data;
	struct pci_dev *pdev = to_pci_dev(host_set->dev);

	if (hpriv->hp_flags & MV_HP_FLAG_MSI) {
		pci_disable_msi(pdev);
	} else {
		pci_intx(pdev, 0);
	}
	kfree(hpriv);
	ata_host_stop(host_set);
}

static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev)
{
	dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma);
}
static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
{
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~0x1f;		/* clear queue depth */
	cfg &= ~EDMA_CFG_NCQ;	/* clear NCQ mode */
	cfg &= ~(1 << 9);	/* disable equeue */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* dis RX PM port mask */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab host q cache */
		cfg |= (1 << 22);	/* enab cutthrough */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host_set->dev;
	struct mv_host_priv *hpriv = ap->host_set->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	int rc = -ENOMEM;

	pp = kmalloc(sizeof(*pp), GFP_KERNEL);
	if (!pp)
		goto err_out;
	memset(pp, 0, sizeof(*pp));

	mem = dma_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				 GFP_KERNEL);
	if (!mem)
		goto err_out_pp;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		goto err_out_priv;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	mv_edma_cfg(hpriv, port_mmio);

	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl(pp->crqb_dma & 0xffffffff,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl(pp->crpb_dma & 0xffffffff,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;

err_out_priv:
	mv_priv_free(pp, dev);
err_out_pp:
	kfree(pp);
err_out:
	return rc;
}
/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host_set lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	struct device *dev = ap->host_set->dev;
	struct mv_port_priv *pp = ap->private_data;
	unsigned long flags;

	spin_lock_irqsave(&ap->host_set->lock, flags);
	mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	ap->private_data = NULL;
	ata_pad_free(ap, dev);
	mv_priv_free(pp, dev);
	kfree(pp);
}
/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	unsigned int i = 0;
	struct scatterlist *sg;

	ata_for_each_sg(sg, qc) {
		dma_addr_t addr;
		u32 sg_len, len, offset;

		addr = sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & MV_DMA_BOUNDARY;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff);
			pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16);
			pp->sg_tbl[i].flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			if (!sg_len && ata_sg_is_last(sg, qc))
				pp->sg_tbl[i].flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);

			i++;
		}
	}
}
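
/*
 * Split example for the loop above: a 6KB (0x1800) segment whose bus
 * address ends in 0xfc00 has offset 0xfc00 within its 64KB window, so the
 * first ePRD covers 0x10000 - 0xfc00 = 0x400 bytes (1KB) and a second
 * ePRD covers the remaining 0x1400 bytes (5KB).
 */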
static inline unsigned mv_inc_q_index(unsigned index)
{
	return (index + 1) & MV_MAX_Q_DEPTH_MASK;
}

static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
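
/*
 * Packing example (assuming libata's ATA_REG_CMD == 7): packing
 * ATA_CMD_WRITE (0xca) as the last word gives
 * 0xca | (7 << 8) | (0x2 << 11) | (1 << 15) = 0x97ca, stored little-endian.
 */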
/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if (ATA_PROT_DMA != qc->tf.protocol)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from hardware */
	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 *      mv_qc_prep_iie - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if (ATA_PROT_DMA != qc->tf.protocol)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from hardware */
	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(qc->ap);
	struct mv_port_priv *pp = qc->ap->private_data;
	unsigned in_index;
	u32 in_ptr;

	if (ATA_PROT_DMA != qc->tf.protocol) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_dma(qc->ap);
		return ata_qc_issue_prot(qc);
	}

	in_ptr   = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
	in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	in_index = mv_inc_q_index(in_index);	/* now incr producer index */

	mv_start_dma(port_mmio, pp);

	/* and write the request in pointer to kick the EDMA to life */
	in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
	in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT;
	writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
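
/*
 * Register layout note for the in-pointer write above: bits 31:10 of the
 * register hold the request queue base (EDMA_REQ_Q_BASE_LO_MASK is
 * 0xfffffc00), while the 5-bit producer index occupies bits 9:5
 * (EDMA_REQ_Q_PTR_SHIFT == 5, depth mask 0x1f), which is why the base is
 * masked and OR'd back in with the bumped index.
 */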
/**
 *      mv_get_crpb_status - get status from most recently completed cmd
 *      @ap: ATA channel to manipulate
 *
 *      This routine is for use when the port is in DMA mode, when it
 *      will be using the CRPB (command response block) method of
 *      returning command completion information.  We check indices
 *      are good, grab status, and bump the response consumer index to
 *      prove that we're up to date.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static u8 mv_get_crpb_status(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	unsigned out_index;
	u32 out_ptr;
	u8 ata_status;

	out_ptr   = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
	out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	ata_status = le16_to_cpu(pp->crpb[out_index].flags)
					>> CRPB_FLAG_STATUS_SHIFT;

	/* increment our consumer index... */
	out_index = mv_inc_q_index(out_index);

	/* and, until we do NCQ, there should only be 1 CRPB waiting */
	WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
		>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	/* write out our inc'd consumer index so EDMA knows we're caught up */
	out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
	out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT;
	writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	/* Return ATA status register for completed CRPB */
	return ata_status;
}
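
/*
 * The response queue out-pointer mirrors the request side: the queue base
 * lives in bits 31:8 (EDMA_RSP_Q_BASE_LO_MASK is 0xffffff00) and the 5-bit
 * consumer index sits at bits 7:3 (EDMA_RSP_Q_PTR_SHIFT == 3), so the same
 * mask-and-OR pattern is used when acknowledging a completion.
 */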
/**
 *      mv_err_intr - Handle error interrupts on the port
 *      @ap: ATA channel to manipulate
 *      @reset_allowed: bool: 0 == don't trigger from reset here
 *
 *      In most cases, just clear the interrupt and move on.  However,
 *      some cases require an eDMA reset, which is done right before
 *      the COMRESET in mv_phy_reset().  The SERR case requires a
 *      clear of pending errors in the SATA SERROR register.  Finally,
 *      if the port disabled DMA, update our cached copy to match.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, int reset_allowed)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, serr = 0;

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (EDMA_ERR_SERR & edma_err_cause) {
		sata_scr_read(ap, SCR_ERROR, &serr);
		sata_scr_write_flush(ap, SCR_ERROR, serr);
	}
	if (EDMA_ERR_SELF_DIS & edma_err_cause) {
		struct mv_port_priv *pp = ap->private_data;
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	}
	DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
		"SERR: 0x%08x\n", ap->id, edma_err_cause, serr);

	/* Clear EDMA now that SERR cleanup done */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* check for fatal here and recover if needed */
	if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause))
		mv_stop_and_reset(ap);
}
/**
 *      mv_host_intr - Handle all interrupts on the given host controller
 *      @host_set: host specific structure
 *      @relevant: port error bits relevant to this host controller
 *      @hc: which host controller we're to look at
 *
 *      Read then write clear the HC interrupt status then walk each
 *      port connected to the HC and see if it needs servicing.  Port
 *      success ints are reported in the HC interrupt status reg, the
 *      port error ints are reported in the higher level main
 *      interrupt status register and thus are passed in via the
 *      'relevant' argument.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
			 unsigned int hc)
{
	void __iomem *mmio = host_set->mmio_base;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	struct ata_queued_cmd *qc;
	u32 hc_irq_cause;
	int shift, port, port0, hard_port, handled;
	unsigned int err_mask;

	if (hc == 0) {
		port0 = 0;
	} else {
		port0 = MV_PORTS_PER_HC;
	}

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (hc_irq_cause) {
		writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		u8 ata_status = 0;
		struct ata_port *ap = host_set->ports[port];
		struct mv_port_priv *pp = ap->private_data;

		hard_port = mv_hardport_from_port(port); /* range 0..3 */
		handled = 0;	/* ensure ata_status is set if handled++ */

		/* Note that DEV_IRQ might happen spuriously during EDMA,
		 * and should be ignored in such cases.
		 * The cause of this is still under investigation.
		 */
		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			/* EDMA: check for response queue interrupt */
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
				ata_status = mv_get_crpb_status(ap);
				handled = 1;
			}
		} else {
			/* PIO: check for device (drive) interrupt */
			if ((DEV_IRQ << hard_port) & hc_irq_cause) {
				ata_status = readb((void __iomem *)
					   ap->ioaddr.status_addr);
				handled = 1;
				/* ignore spurious intr if drive still BUSY */
				if (ata_status & ATA_BUSY) {
					ata_status = 0;
					handled = 0;
				}
			}
		}

		if (ap && (ap->flags & ATA_FLAG_DISABLED))
			continue;

		err_mask = ac_err_mask(ata_status);

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		if ((PORT0_ERR << shift) & relevant) {
			mv_err_intr(ap, 1);
			err_mask |= AC_ERR_OTHER;
			handled = 1;
		}

		if (handled) {
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (qc->flags & ATA_QCFLAG_ACTIVE)) {
				VPRINTK("port %u IRQ found for qc, "
					"ata_status 0x%x\n", port, ata_status);
				/* mark qc status appropriately */
				if (!(qc->tf.flags & ATA_TFLAG_POLLING)) {
					qc->err_mask |= err_mask;
					ata_qc_complete(qc);
				}
			}
		}
	}
	VPRINTK("EXIT\n");
}
/**
 *      mv_interrupt -
 *      @irq: unused
 *      @dev_instance: private data; in this case the host structure
 *      @regs: unused
 *
 *      Read the read only register to determine if any host
 *      controllers have pending interrupts.  If so, call lower level
 *      routine to handle.  Also check for PCI errors which are only
 *      reported here.
 *
 *      LOCKING:
 *      This routine holds the host_set lock while processing pending
 *      interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance,
				struct pt_regs *regs)
{
	struct ata_host_set *host_set = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host_set->mmio_base;
	struct mv_host_priv *hpriv;
	u32 irq_stat;

	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!irq_stat || (0xffffffffU == irq_stat)) {
		return IRQ_NONE;
	}

	n_hcs = mv_get_hc_count(host_set->ports[0]->flags);
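	/* irq_stat layout (see the main enum): bits 0-8 carry HC0's per-port
	 * err/done bits, bits 9-17 carry HC1's, and bit 18 is PCI_ERR; hence
	 * the (HC0_IRQ_PEND << (hc * HC_SHIFT)) masking in the loop below.
	 */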
	spin_lock(&host_set->lock);

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host_set, relevant, hc);
			handled++;
		}
	}

	hpriv = host_set->private_data;
	if (IS_60XX(hpriv)) {
		/* deal with the interrupt coalescing bits */
		if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) {
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE_LO);
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE_HI);
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE);
		}
	}

	if (PCI_ERR & irq_stat) {
		printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
		       readl(mmio + PCI_IRQ_CAUSE_OFS));

		DPRINTK("All regs @ PCI error\n");
		mv_dump_all_regs(mmio, -1, to_pci_dev(host_set->dev));

		writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
		handled++;
	}
	spin_unlock(&host_set->lock);

	return IRQ_RETVAL(handled);
}
static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
{
	void __iomem *mmio = mv5_phy_base(ap->host_set->mmio_base, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU)
		return readl(mmio + ofs);
	else
		return (u32) ofs;
}

static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	void __iomem *mmio = mv5_phy_base(ap->host_set->mmio_base, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU)
		writelfl(val, mmio + ofs);
}
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u8 rev_id;
	int early_5080;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	early_5080 = (pdev->device == 0x5080) && (rev_id == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(pdev, mmio);
}

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
                           unsigned int port)
{
        void __iomem *phy_mmio = mv5_phy_base(mmio, port);
        const u32 mask = (1 << 12) | (1 << 11) | (1 << 7) | (1 << 6) | (1 << 5);
        u32 tmp;
        int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

        if (fix_apm_sq) {
                tmp = readl(phy_mmio + MV5_LT_MODE);
                tmp |= (1 << 19);
                writel(tmp, phy_mmio + MV5_LT_MODE);

                tmp = readl(phy_mmio + MV5_PHY_CTL);
                tmp &= ~0x3;
                tmp |= 0x1;
                writel(tmp, phy_mmio + MV5_PHY_CTL);
        }

        tmp = readl(phy_mmio + MV5_PHY_MODE);
        tmp &= ~mask;
        tmp |= hpriv->signal[port].pre;
        tmp |= hpriv->signal[port].amps;
        writel(tmp, phy_mmio + MV5_PHY_MODE);
}
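/* ZERO() is deliberately #undef'd and redefined before each of the
 * reset helpers below, so the same shorthand clears registers relative
 * to whichever MMIO base (port_mmio, hc_mmio, or plain mmio) is in
 * scope at the time.
 */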
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
                              unsigned int port)
{
        void __iomem *port_mmio = mv_port_base(mmio, port);

        writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

        mv_channel_reset(hpriv, mmio, port);

        ZERO(0x028);    /* command */
        writel(0x11f, port_mmio + EDMA_CFG_OFS);
        ZERO(0x004);    /* timer */
        ZERO(0x008);    /* irq err cause */
        ZERO(0x00c);    /* irq err mask */
        ZERO(0x010);    /* rq bah */
        ZERO(0x014);    /* rq inp */
        ZERO(0x018);    /* rq outp */
        ZERO(0x01c);    /* respq bah */
        ZERO(0x024);    /* respq outp */
        ZERO(0x020);    /* respq inp */
        ZERO(0x02c);    /* test control */
        writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
                             unsigned int hc)
{
        void __iomem *hc_mmio = mv_hc_base(mmio, hc);
        u32 tmp;

        ZERO(0x00c);
        ZERO(0x010);
        ZERO(0x014);
        ZERO(0x018);

        tmp = readl(hc_mmio + 0x20);
        tmp &= 0x1c1c1c1c;
        tmp |= 0x03030303;
        writel(tmp, hc_mmio + 0x20);
}
#undef ZERO

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
                        unsigned int n_hc)
{
        unsigned int hc, port;

        for (hc = 0; hc < n_hc; hc++) {
                for (port = 0; port < MV_PORTS_PER_HC; port++)
                        mv5_reset_hc_port(hpriv, mmio,
                                          (hc * MV_PORTS_PER_HC) + port);

                mv5_reset_one_hc(hpriv, mmio, hc);
        }

        return 0;
}
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
        u32 tmp;

        tmp = readl(mmio + MV_PCI_MODE);
        tmp &= 0xff00ffff;
        writel(tmp, mmio + MV_PCI_MODE);

        ZERO(MV_PCI_DISC_TIMER);
        ZERO(MV_PCI_MSI_TRIGGER);
        writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
        ZERO(HC_MAIN_IRQ_MASK_OFS);
        ZERO(MV_PCI_SERR_MASK);
        ZERO(PCI_IRQ_CAUSE_OFS);
        ZERO(PCI_IRQ_MASK_OFS);
        ZERO(MV_PCI_ERR_LOW_ADDRESS);
        ZERO(MV_PCI_ERR_HIGH_ADDRESS);
        ZERO(MV_PCI_ERR_ATTRIBUTE);
        ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
        u32 tmp;

        mv5_reset_flash(hpriv, mmio);

        tmp = readl(mmio + MV_GPIO_PORT_CTL);
        tmp &= 0x3;
        tmp |= (1 << 5) | (1 << 6);
        writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
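/* The global soft reset below follows the three-step procedure from the
 * chip's "main command and status register" table: stop the PCI master
 * and wait for it to drain, assert GLOB_SFT_RST, then clear it again
 * (also re-enabling the PCI master, which the spec fails to mention).
 */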
/**
 * mv6_reset_hc - Perform the 6xxx global soft reset
 * @mmio: base address of the HBA
 *
 * This routine only applies to 6xxx parts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
                        unsigned int n_hc)
{
        void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
        int i, rc = 0;
        u32 t;

        /* Following procedure defined in PCI "main command and status
         * register" table.
         */
        t = readl(reg);
        writel(t | STOP_PCI_MASTER, reg);

        for (i = 0; i < 1000; i++) {
                udelay(1);
                t = readl(reg);
                if (PCI_MASTER_EMPTY & t) {
                        break;
                }
        }
        if (!(PCI_MASTER_EMPTY & t)) {
                printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
                rc = 1;
                goto done;
        }

        /* set reset */
        i = 5;
        do {
                writel(t | GLOB_SFT_RST, reg);
                t = readl(reg);
                udelay(1);
        } while (!(GLOB_SFT_RST & t) && (i-- > 0));

        if (!(GLOB_SFT_RST & t)) {
                printk(KERN_ERR DRV_NAME ": can't set global reset\n");
                rc = 1;
                goto done;
        }

        /* clear reset and *reenable the PCI master* (not mentioned in spec) */
        i = 5;
        do {
                writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
                t = readl(reg);
                udelay(1);
        } while ((GLOB_SFT_RST & t) && (i-- > 0));

        if (GLOB_SFT_RST & t) {
                printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
                rc = 1;
        }
done:
        return rc;
}
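/* On 6xxx parts, bit 0 of MV_RESET_CFG tells us whether usable
 * pre-emphasis/amplitude settings were latched at reset: if it is
 * clear we fall back to fixed defaults, otherwise we sample each
 * port's PHY_MODE2 register.
 */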
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
                            void __iomem *mmio)
{
        void __iomem *port_mmio;
        u32 tmp;

        tmp = readl(mmio + MV_RESET_CFG);
        if ((tmp & (1 << 0)) == 0) {
                hpriv->signal[idx].amps = 0x7 << 8;
                hpriv->signal[idx].pre = 0x1 << 5;
                return;
        }

        port_mmio = mv_port_base(mmio, idx);
        tmp = readl(port_mmio + PHY_MODE2);

        hpriv->signal[idx].amps = tmp & 0x700;  /* bits 10:8 */
        hpriv->signal[idx].pre = tmp & 0xe0;    /* bits 7:5 */
}
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
        writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
                           unsigned int port)
{
        void __iomem *port_mmio = mv_port_base(mmio, port);

        u32 hp_flags = hpriv->hp_flags;
        int fix_phy_mode2 =
                hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
        int fix_phy_mode4 =
                hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
        u32 m2, tmp;

        if (fix_phy_mode2) {
                m2 = readl(port_mmio + PHY_MODE2);
                m2 &= ~(1 << 16);
                m2 |= (1 << 31);
                writel(m2, port_mmio + PHY_MODE2);

                udelay(200);

                m2 = readl(port_mmio + PHY_MODE2);
                m2 &= ~((1 << 16) | (1 << 31));
                writel(m2, port_mmio + PHY_MODE2);

                udelay(200);
        }

        /* who knows what this magic does */
        tmp = readl(port_mmio + PHY_MODE3);
        tmp &= ~0x7F800000;
        tmp |= 0x2A800000;
        writel(tmp, port_mmio + PHY_MODE3);

        if (fix_phy_mode4) {
                u32 m4;

                m4 = readl(port_mmio + PHY_MODE4);

                if (hp_flags & MV_HP_ERRATA_60X1B2)
                        tmp = readl(port_mmio + 0x310);

                m4 = (m4 & ~(1 << 1)) | (1 << 0);

                writel(m4, port_mmio + PHY_MODE4);

                if (hp_flags & MV_HP_ERRATA_60X1B2)
                        writel(tmp, port_mmio + 0x310);
        }

        /* Revert values of pre-emphasis and signal amps to the saved ones */
        m2 = readl(port_mmio + PHY_MODE2);

        m2 &= ~MV_M2_PREAMP_MASK;
        m2 |= hpriv->signal[port].amps;
        m2 |= hpriv->signal[port].pre;
        m2 &= ~(1 << 16);

        /* according to mvSata 3.6.1, some IIE values are fixed */
        if (IS_GEN_IIE(hpriv)) {
                m2 &= ~0xC30FF01F;
                m2 |= 0x0000900F;
        }

        writel(m2, port_mmio + PHY_MODE2);
}
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
                             unsigned int port_no)
{
        void __iomem *port_mmio = mv_port_base(mmio, port_no);

        writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

        if (IS_60XX(hpriv)) {
                u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
                ifctl |= (1 << 7);              /* enable gen2i speed */
                ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
                writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
        }

        udelay(25);             /* allow reset propagation */

        /* Spec never mentions clearing the bit.  Marvell's driver does
         * clear the bit, however.
         */
        writelfl(0, port_mmio + EDMA_CMD_OFS);

        hpriv->ops->phy_errata(hpriv, mmio, port_no);

        if (IS_50XX(hpriv))
                mdelay(1);
}
static void mv_stop_and_reset(struct ata_port *ap)
{
        struct mv_host_priv *hpriv = ap->host_set->private_data;
        void __iomem *mmio = ap->host_set->mmio_base;

        mv_stop_dma(ap);

        mv_channel_reset(hpriv, mmio, ap->port_no);

        __mv_phy_reset(ap, 0);
}
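/* Helper so the shared reset path can either sleep or busy-wait:
 * mv_stop_and_reset() runs at interrupt level and passes can_sleep=0,
 * while mv_phy_reset() runs in process context and passes 1.
 */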
static inline void __msleep(unsigned int msec, int can_sleep)
{
        if (can_sleep)
                msleep(msec);
        else
                mdelay(msec);
}
/**
 * __mv_phy_reset - Perform eDMA reset followed by COMRESET
 * @ap: ATA channel to manipulate
 *
 * Part of this is taken from __sata_phy_reset and modified to
 * not sleep since this routine gets called from interrupt level.
 *
 * LOCKING:
 * Inherited from caller.  This is coded to be safe to call at
 * interrupt level, i.e. it does not sleep.
 */
static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
{
        struct mv_port_priv *pp = ap->private_data;
        struct mv_host_priv *hpriv = ap->host_set->private_data;
        void __iomem *port_mmio = mv_ap_base(ap);
        struct ata_taskfile tf;
        struct ata_device *dev = &ap->device[0];
        unsigned long timeout;
        int retry = 5;
        u32 sstatus;

        VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

        DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
                "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
                mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
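        /* Per the SATA spec, writing DET=1 in SControl (the 0x301 write
         * below) drives a COMRESET on the wire and DET=0 (0x300) releases
         * it; SStatus DET then reads back 3 once device presence and phy
         * communication are established, or 0 with nothing attached.
         */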
        /* Issue COMRESET via SControl */
comreset_retry:
        sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
        __msleep(1, can_sleep);

        sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
        __msleep(20, can_sleep);

        timeout = jiffies + msecs_to_jiffies(200);
        do {
                sata_scr_read(ap, SCR_STATUS, &sstatus);
                sstatus &= 0x3;
                if ((sstatus == 3) || (sstatus == 0))
                        break;

                __msleep(1, can_sleep);
        } while (time_before(jiffies, timeout));
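        /* 0x113 and 0x123 are the full SStatus values for a healthy Gen1
         * or Gen2 link (DET=3, SPD=1 or 2, IPM=1); anything else on 60xx
         * parts means the COMRESET has to be reissued.
         */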
        /* work around errata */
        if (IS_60XX(hpriv) &&
            (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
            (retry-- > 0))
                goto comreset_retry;

        DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
                "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
                mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

        if (ata_port_online(ap)) {
                ata_port_probe(ap);
        } else {
                sata_scr_read(ap, SCR_STATUS, &sstatus);
                ata_port_printk(ap, KERN_INFO,
                                "no device found (phy stat %08x)\n", sstatus);
                ata_port_disable(ap);
                return;
        }
        ap->cbl = ATA_CBL_SATA;
        /* even after SStatus reflects that device is ready,
         * it seems to take a while for link to be fully
         * established (and thus Status no longer 0x80/0x7F),
         * so we poll a bit for that, here.
         */
        retry = 20;
        while (1) {
                u8 drv_stat = ata_check_status(ap);
                if ((drv_stat != 0x80) && (drv_stat != 0x7f))
                        break;
                __msleep(500, can_sleep);
                if (retry-- <= 0)
                        break;
        }

        tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr);
        tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr);
        tf.lbal = readb((void __iomem *) ap->ioaddr.lbal_addr);
        tf.nsect = readb((void __iomem *) ap->ioaddr.nsect_addr);

        dev->class = ata_dev_classify(&tf);
        if (!ata_dev_enabled(dev)) {
                VPRINTK("Port disabled post-sig: No device present.\n");
                ata_port_disable(ap);
        }

        writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

        pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;

        VPRINTK("EXIT\n");
}
static void mv_phy_reset(struct ata_port *ap)
{
        __mv_phy_reset(ap, 1);
}
/**
 * mv_eng_timeout - Routine called by libata when SCSI times out I/O
 * @ap: ATA channel to manipulate
 *
 * Intent is to clear all pending error conditions, reset the
 * chip/bus, fail the command, and move on.
 *
 * LOCKING:
 * This routine holds the host_set lock while failing the command.
 */
static void mv_eng_timeout(struct ata_port *ap)
{
        struct ata_queued_cmd *qc;

        ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
        DPRINTK("All regs @ start of eng_timeout\n");
        mv_dump_all_regs(ap->host_set->mmio_base, ap->port_no,
                         to_pci_dev(ap->host_set->dev));

        qc = ata_qc_from_tag(ap, ap->active_tag);
        printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
               ap->host_set->mmio_base, ap, qc, qc->scsicmd,
               &qc->scsicmd->cmnd);

        mv_err_intr(ap, 0);
        mv_stop_and_reset(ap);

        WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
        if (qc->flags & ATA_QCFLAG_ACTIVE) {
                qc->err_mask |= AC_ERR_TIMEOUT;
                ata_eh_qc_complete(qc);
        }
}
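/* Note on the shadow registers set up below: the Marvell parts expose
 * the ATA taskfile as 32-bit words in each port's MMIO block, so every
 * ata_ioports address is simply shd_base + sizeof(u32) * ATA_REG_xxx
 * rather than a legacy I/O-port address.
 */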
/**
 * mv_port_init - Perform some early initialization on a single port.
 * @port: libata data structure storing shadow register addresses
 * @port_mmio: base address of the port
 *
 * Initialize shadow register mmio addresses, clear outstanding
 * interrupts on the port, and unmask interrupts for the future
 * start of the port.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
        unsigned long shd_base = (unsigned long) port_mmio + SHD_BLK_OFS;
        unsigned serr_ofs;

        /* PIO related setup
         */
        port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
        port->error_addr =
                port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
        port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
        port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
        port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
        port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
        port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
        port->status_addr =
                port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
        /* special case: control/altstatus doesn't have ATA_REG_ address */
        port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

        /* unused: */
        port->cmd_addr = port->bmdma_addr = port->scr_addr = 0;

        /* Clear any currently outstanding port interrupt conditions */
        serr_ofs = mv_scr_offset(SCR_ERROR);
        writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
        writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

        /* unmask all EDMA error interrupts */
        writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

        VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
                readl(port_mmio + EDMA_CFG_OFS),
                readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
                readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
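/* Per-chip setup: pick the 50xx vs 60xx op vector and translate the PCI
 * revision ID into the errata flags that the rest of the driver keys
 * off of.  Unknown revisions get the newest known workaround set, with
 * a warning.
 */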
static int mv_chip_id(struct pci_dev *pdev, struct mv_host_priv *hpriv,
                      unsigned int board_idx)
{
        u8 rev_id;
        u32 hp_flags = hpriv->hp_flags;

        pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

        switch (board_idx) {
        case chip_5080:
                hpriv->ops = &mv5xxx_ops;
                hp_flags |= MV_HP_50XX;

                switch (rev_id) {
                case 0x1:
                        hp_flags |= MV_HP_ERRATA_50XXB0;
                        break;
                case 0x3:
                        hp_flags |= MV_HP_ERRATA_50XXB2;
                        break;
                default:
                        dev_printk(KERN_WARNING, &pdev->dev,
                                   "Applying 50XXB2 workarounds to unknown rev\n");
                        hp_flags |= MV_HP_ERRATA_50XXB2;
                        break;
                }
                break;
        case chip_504x:
        case chip_508x:
                hpriv->ops = &mv5xxx_ops;
                hp_flags |= MV_HP_50XX;

                switch (rev_id) {
                case 0x0:
                        hp_flags |= MV_HP_ERRATA_50XXB0;
                        break;
                case 0x3:
                        hp_flags |= MV_HP_ERRATA_50XXB2;
                        break;
                default:
                        dev_printk(KERN_WARNING, &pdev->dev,
                                   "Applying B2 workarounds to unknown rev\n");
                        hp_flags |= MV_HP_ERRATA_50XXB2;
                        break;
                }
                break;
        case chip_604x:
        case chip_608x:
                hpriv->ops = &mv6xxx_ops;

                switch (rev_id) {
                case 0x7:
                        hp_flags |= MV_HP_ERRATA_60X1B2;
                        break;
                case 0x9:
                        hp_flags |= MV_HP_ERRATA_60X1C0;
                        break;
                default:
                        dev_printk(KERN_WARNING, &pdev->dev,
                                   "Applying B2 workarounds to unknown rev\n");
                        hp_flags |= MV_HP_ERRATA_60X1B2;
                        break;
                }
                break;
        case chip_7042:
        case chip_6042:
                hpriv->ops = &mv6xxx_ops;

                hp_flags |= MV_HP_GEN_IIE;

                switch (rev_id) {
                case 0x0:
                        hp_flags |= MV_HP_ERRATA_XX42A0;
                        break;
                case 0x1:
                        hp_flags |= MV_HP_ERRATA_60X1C0;
                        break;
                default:
                        dev_printk(KERN_WARNING, &pdev->dev,
                                   "Applying 60X1C0 workarounds to unknown rev\n");
                        hp_flags |= MV_HP_ERRATA_60X1C0;
                        break;
                }
                break;

        default:
                printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
                return 1;
        }

        hpriv->hp_flags = hp_flags;

        return 0;
}
/**
 * mv_init_host - Perform some early initialization of the host.
 * @pdev: host PCI device
 * @probe_ent: early data struct representing the host
 *
 * If possible, do an early global reset of the host.  Then do
 * our port init and clear/unmask all/relevant host interrupts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_host(struct pci_dev *pdev, struct ata_probe_ent *probe_ent,
                        unsigned int board_idx)
{
        int rc = 0, n_hc, port, hc;
        void __iomem *mmio = probe_ent->mmio_base;
        struct mv_host_priv *hpriv = probe_ent->private_data;

        /* global interrupt mask */
        writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

        rc = mv_chip_id(pdev, hpriv, board_idx);
        if (rc)
                goto done;

        n_hc = mv_get_hc_count(probe_ent->host_flags);
        probe_ent->n_ports = MV_PORTS_PER_HC * n_hc;

        for (port = 0; port < probe_ent->n_ports; port++)
                hpriv->ops->read_preamp(hpriv, port, mmio);

        rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
        if (rc)
                goto done;

        hpriv->ops->reset_flash(hpriv, mmio);
        hpriv->ops->reset_bus(pdev, mmio);
        hpriv->ops->enable_leds(hpriv, mmio);
        for (port = 0; port < probe_ent->n_ports; port++) {
                if (IS_60XX(hpriv)) {
                        void __iomem *port_mmio = mv_port_base(mmio, port);

                        u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
                        ifctl |= (1 << 7);              /* enable gen2i speed */
                        ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
                        writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
                }

                hpriv->ops->phy_errata(hpriv, mmio, port);
        }

        for (port = 0; port < probe_ent->n_ports; port++) {
                void __iomem *port_mmio = mv_port_base(mmio, port);
                mv_port_init(&probe_ent->port[port], port_mmio);
        }

        for (hc = 0; hc < n_hc; hc++) {
                void __iomem *hc_mmio = mv_hc_base(mmio, hc);

                VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
                        "(before clear)=0x%08x\n", hc,
                        readl(hc_mmio + HC_CFG_OFS),
                        readl(hc_mmio + HC_IRQ_CAUSE_OFS));

                /* Clear any currently outstanding hc interrupt conditions */
                writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
        }

        /* Clear any currently outstanding host interrupt conditions */
        writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

        /* and unmask interrupt generation for host regs */
        writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
        writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

        VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
                "PCI int cause/mask=0x%08x/0x%08x\n",
                readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
                readl(mmio + HC_MAIN_IRQ_MASK_OFS),
                readl(mmio + PCI_IRQ_CAUSE_OFS),
                readl(mmio + PCI_IRQ_MASK_OFS));

done:
        return rc;
}
/**
 * mv_print_info - Dump key info to kernel log for perusal.
 * @probe_ent: early data struct representing the host
 *
 * FIXME: complete this.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_print_info(struct ata_probe_ent *probe_ent)
{
        struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
        struct mv_host_priv *hpriv = probe_ent->private_data;
        u8 rev_id, scc;
        const char *scc_s;

        /* Use this to determine the HW stepping of the chip so we know
         * what errata to workaround
         */
        pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

        pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
        if (scc == 0)
                scc_s = "SCSI";
        else if (scc == 0x01)
                scc_s = "RAID";
        else
                scc_s = "unknown";

        dev_printk(KERN_INFO, &pdev->dev,
                   "%u slots %u ports %s mode IRQ via %s\n",
                   (unsigned)MV_MAX_Q_DEPTH, probe_ent->n_ports,
                   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}
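/* Probe sequence sketch: enable and map the PCI device, allocate the
 * probe_ent and host-private data, run the chip-specific setup via
 * mv_init_host(), then hand the host to libata with ata_device_add();
 * each failure point unwinds through the err_out_* labels below.
 */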
/**
 * mv_init_one - handle a positive probe of a Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        static int printed_version = 0;
        struct ata_probe_ent *probe_ent = NULL;
        struct mv_host_priv *hpriv;
        unsigned int board_idx = (unsigned int)ent->driver_data;
        void __iomem *mmio_base;
        int pci_dev_busy = 0, rc;

        if (!printed_version++)
                dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

        rc = pci_enable_device(pdev);
        if (rc) {
                return rc;
        }
        pci_set_master(pdev);

        rc = pci_request_regions(pdev, DRV_NAME);
        if (rc) {
                pci_dev_busy = 1;
                goto err_out;
        }

        probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
        if (probe_ent == NULL) {
                rc = -ENOMEM;
                goto err_out_regions;
        }

        memset(probe_ent, 0, sizeof(*probe_ent));
        probe_ent->dev = pci_dev_to_dev(pdev);
        INIT_LIST_HEAD(&probe_ent->node);

        mmio_base = pci_iomap(pdev, MV_PRIMARY_BAR, 0);
        if (mmio_base == NULL) {
                rc = -ENOMEM;
                goto err_out_free_ent;
        }

        hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
        if (!hpriv) {
                rc = -ENOMEM;
                goto err_out_iounmap;
        }
        memset(hpriv, 0, sizeof(*hpriv));

        probe_ent->sht = mv_port_info[board_idx].sht;
        probe_ent->host_flags = mv_port_info[board_idx].host_flags;
        probe_ent->pio_mask = mv_port_info[board_idx].pio_mask;
        probe_ent->udma_mask = mv_port_info[board_idx].udma_mask;
        probe_ent->port_ops = mv_port_info[board_idx].port_ops;

        probe_ent->irq = pdev->irq;
        probe_ent->irq_flags = SA_SHIRQ;
        probe_ent->mmio_base = mmio_base;
        probe_ent->private_data = hpriv;

        /* initialize adapter */
        rc = mv_init_host(pdev, probe_ent, board_idx);
        if (rc) {
                goto err_out_hpriv;
        }

        /* Enable interrupts */
        if (msi && pci_enable_msi(pdev) == 0) {
                hpriv->hp_flags |= MV_HP_FLAG_MSI;
        } else {
                pci_intx(pdev, 1);
        }

        mv_dump_pci_cfg(pdev, 0x68);
        mv_print_info(probe_ent);

        if (ata_device_add(probe_ent) == 0) {
                rc = -ENODEV;           /* No devices discovered */
                goto err_out_dev_add;
        }

        kfree(probe_ent);
        return 0;
err_out_dev_add:
        if (MV_HP_FLAG_MSI & hpriv->hp_flags) {
                pci_disable_msi(pdev);
        } else {
                pci_intx(pdev, 0);
        }
err_out_hpriv:
        kfree(hpriv);
err_out_iounmap:
        pci_iounmap(pdev, mmio_base);
err_out_free_ent:
        kfree(probe_ent);
err_out_regions:
        pci_release_regions(pdev);
err_out:
        if (!pci_dev_busy) {
                pci_disable_device(pdev);
        }
        return rc;
}
static int __init mv_init(void)
{
        return pci_module_init(&mv_pci_driver);
}

static void __exit mv_exit(void)
{
        pci_unregister_driver(&mv_pci_driver);
}
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

module_init(mv_init);
module_exit(mv_exit);