// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/ata/sata_dwc_460ex.c
 *
 * Synopsys DesignWare Cores (DWC) SATA host driver
 *
 * Author: Mark Miesfeld <mmiesfeld@amcc.com>
 *
 * Ported from 2.6.19.2 to 2.6.25/26 by Stefan Roese <sr@denx.de>
 * Copyright 2008 DENX Software Engineering
 *
 * Based on versions provided by AMCC and Synopsys which are:
 * Copyright 2006 Applied Micro Circuits Corporation
 * COPYRIGHT (C) 2005 SYNOPSYS, INC. ALL RIGHTS RESERVED
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/libata.h>
#include <linux/slab.h>
#include <trace/events/libata.h>

#include "libata.h"

#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>

/* These two are defined in "libata.h" */
#undef DRV_NAME
#undef DRV_VERSION

#define DRV_NAME	"sata-dwc"
#define DRV_VERSION	"1.3"

#define sata_dwc_writel(a, v)	writel_relaxed(v, a)
#define sata_dwc_readl(a)	readl_relaxed(a)

#define AHB_DMA_BRST_DFLT	64	/* 16 data items burst length */

enum {
	SATA_DWC_MAX_PORTS = 1,

	SATA_DWC_SCR_OFFSET = 0x24,
	SATA_DWC_REG_OFFSET = 0x64,
};

/* DWC SATA Registers */
struct sata_dwc_regs {
	u32 fptagr;		/* 1st party DMA tag */
	u32 fpbor;		/* 1st party DMA buffer offset */
	u32 fptcr;		/* 1st party DMA Xfr count */
	u32 dmacr;		/* DMA Control */
	u32 dbtsr;		/* DMA Burst Transac size */
	u32 intpr;		/* Interrupt Pending */
	u32 intmr;		/* Interrupt Mask */
	u32 errmr;		/* Error Mask */
	u32 llcr;		/* Link Layer Control */
	u32 phycr;		/* PHY Control */
	u32 physr;		/* PHY Status */
	u32 rxbistpd;		/* Recvd BIST pattern def register */
	u32 rxbistpd1;		/* Recvd BIST data dword1 */
	u32 rxbistpd2;		/* Recvd BIST pattern data dword2 */
	u32 txbistpd;		/* Trans BIST pattern def register */
	u32 txbistpd1;		/* Trans BIST data dword1 */
	u32 txbistpd2;		/* Trans BIST data dword2 */
	u32 bistcr;		/* BIST Control Register */
	u32 bistfctr;		/* BIST FIS Count Register */
	u32 bistsr;		/* BIST Status Register */
	u32 bistdecr;		/* BIST Dword Error count register */
	u32 res[15];		/* Reserved locations */
	u32 testr;		/* Test Register */
	u32 versionr;		/* Version Register */
	u32 idr;		/* ID Register */
	u32 unimpl[192];	/* Unimplemented */
	u32 dmadr[256];		/* FIFO Locations in DMA Mode */
};

enum {
	SCR_SCONTROL_DET_ENABLE		= 0x00000001,
	SCR_SSTATUS_DET_PRESENT		= 0x00000001,
	SCR_SERROR_DIAG_X		= 0x04000000,
	/* DWC SATA Register Operations */
	SATA_DWC_TXFIFO_DEPTH		= 0x01FF,
	SATA_DWC_RXFIFO_DEPTH		= 0x01FF,
	SATA_DWC_DMACR_TMOD_TXCHEN	= 0x00000004,
	SATA_DWC_DMACR_TXCHEN		= (0x00000001 | SATA_DWC_DMACR_TMOD_TXCHEN),
	SATA_DWC_DMACR_RXCHEN		= (0x00000002 | SATA_DWC_DMACR_TMOD_TXCHEN),
	SATA_DWC_DMACR_TXRXCH_CLEAR	= SATA_DWC_DMACR_TMOD_TXCHEN,
	SATA_DWC_INTPR_DMAT		= 0x00000001,
	SATA_DWC_INTPR_NEWFP		= 0x00000002,
	SATA_DWC_INTPR_PMABRT		= 0x00000004,
	SATA_DWC_INTPR_ERR		= 0x00000008,
	SATA_DWC_INTPR_NEWBIST		= 0x00000010,
	SATA_DWC_INTPR_IPF		= 0x10000000,
	SATA_DWC_INTMR_DMATM		= 0x00000001,
	SATA_DWC_INTMR_NEWFPM		= 0x00000002,
	SATA_DWC_INTMR_PMABRTM		= 0x00000004,
	SATA_DWC_INTMR_ERRM		= 0x00000008,
	SATA_DWC_INTMR_NEWBISTM		= 0x00000010,
	SATA_DWC_LLCR_SCRAMEN		= 0x00000001,
	SATA_DWC_LLCR_DESCRAMEN		= 0x00000002,
	SATA_DWC_LLCR_RPDEN		= 0x00000004,
	/* These are all the error bits, zeros are reserved fields. */
	SATA_DWC_SERROR_ERR_BITS	= 0x0FFF0F03
};
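
/*
 * Register helpers: SATA_DWC_SCR0_SPD_GET() extracts the negotiated speed
 * field (bits 7:4) from SStatus; the DMACR_TX/RX_CLEAR() macros drop one
 * channel enable bit while keeping the transfer mode bit set; and
 * DBTSR_MWR()/MRD() convert a burst size given in bytes into the 32-bit
 * word counts programmed into the low (write) and high (read) halves of
 * the DMA burst transaction size register.
 */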
#define SATA_DWC_SCR0_SPD_GET(v)	(((v) >> 4) & 0x0000000F)
#define SATA_DWC_DMACR_TX_CLEAR(v)	(((v) & ~SATA_DWC_DMACR_TXCHEN) |\
						SATA_DWC_DMACR_TMOD_TXCHEN)
#define SATA_DWC_DMACR_RX_CLEAR(v)	(((v) & ~SATA_DWC_DMACR_RXCHEN) |\
						SATA_DWC_DMACR_TMOD_TXCHEN)
#define SATA_DWC_DBTSR_MWR(size)	(((size) / 4) & SATA_DWC_TXFIFO_DEPTH)
#define SATA_DWC_DBTSR_MRD(size)	((((size) / 4) & SATA_DWC_RXFIFO_DEPTH)\
						<< 16)

struct sata_dwc_device {
	struct device		*dev;		/* generic device struct */
	struct ata_probe_ent	*pe;		/* ptr to probe-ent */
	struct ata_host		*host;
	struct sata_dwc_regs __iomem *sata_dwc_regs;	/* DW SATA specific */
	u32			sactive_issued;
	u32			sactive_queued;
	struct phy		*phy;
	phys_addr_t		dmadr;
#ifdef CONFIG_SATA_DWC_OLD_DMA
	struct dw_dma_chip	*dma;
#endif
};

/*
 * Allow one extra special slot for commands and DMA management
 * to account for libata internal commands.
 */
#define SATA_DWC_QCMD_MAX	(ATA_MAX_QUEUE + 1)

struct sata_dwc_device_port {
	struct sata_dwc_device	*hsdev;
	int			cmd_issued[SATA_DWC_QCMD_MAX];
	int			dma_pending[SATA_DWC_QCMD_MAX];

	/* DMA info */
	struct dma_chan		*chan;
	struct dma_async_tx_descriptor *desc[SATA_DWC_QCMD_MAX];
	u32			dma_interrupt_count;
};

/*
 * Commonly used DWC SATA driver macros
 */
#define HSDEV_FROM_HOST(host)	((struct sata_dwc_device *)(host)->private_data)
#define HSDEV_FROM_AP(ap)	((struct sata_dwc_device *)(ap)->host->private_data)
#define HSDEVP_FROM_AP(ap)	((struct sata_dwc_device_port *)(ap)->private_data)
#define HSDEV_FROM_QC(qc)	((struct sata_dwc_device *)(qc)->ap->host->private_data)
#define HSDEV_FROM_HSDEVP(p)	((struct sata_dwc_device *)(p)->hsdev)

enum {
	SATA_DWC_CMD_ISSUED_NOT		= 0,
	SATA_DWC_CMD_ISSUED_PEND	= 1,
	SATA_DWC_CMD_ISSUED_EXEC	= 2,
	SATA_DWC_CMD_ISSUED_NODATA	= 3,

	SATA_DWC_DMA_PENDING_NONE	= 0,
	SATA_DWC_DMA_PENDING_TX		= 1,
	SATA_DWC_DMA_PENDING_RX		= 2,
};

/*
 * Prototypes
 */
static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag);
static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc);
static void sata_dwc_dma_xfer_complete(struct ata_port *ap);
static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag);

#ifdef CONFIG_SATA_DWC_OLD_DMA

#include <linux/platform_data/dma-dw.h>
#include <linux/dma/dw.h>

static struct dw_dma_slave sata_dwc_dma_dws = {
	.src_id = 0,
	.dst_id = 0,
	.m_master = 1,
	.p_master = 0,
};
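
/*
 * Legacy DMA channel filter: accept only channels provided by the DMA
 * controller registered by this driver and attach the slave parameters
 * above through chan->private.
 */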
static bool sata_dwc_dma_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_slave *dws = &sata_dwc_dma_dws;

	if (dws->dma_dev != chan->device->dev)
		return false;

	chan->private = dws;
	return true;
}

static int sata_dwc_dma_get_channel_old(struct sata_dwc_device_port *hsdevp)
{
	struct sata_dwc_device *hsdev = hsdevp->hsdev;
	struct dw_dma_slave *dws = &sata_dwc_dma_dws;
	struct device *dev = hsdev->dev;
	dma_cap_mask_t mask;

	dws->dma_dev = dev;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Acquire DMA channel */
	hsdevp->chan = dma_request_channel(mask, sata_dwc_dma_filter, hsdevp);
	if (!hsdevp->chan) {
		dev_err(dev, "%s: dma channel unavailable\n", __func__);
		return -EAGAIN;
	}

	return 0;
}

static int sata_dwc_dma_init_old(struct platform_device *pdev,
				 struct sata_dwc_device *hsdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;

	hsdev->dma = devm_kzalloc(dev, sizeof(*hsdev->dma), GFP_KERNEL);
	if (!hsdev->dma)
		return -ENOMEM;

	hsdev->dma->dev = dev;
	hsdev->dma->id = pdev->id;

	/* Get SATA DMA interrupt number */
	hsdev->dma->irq = irq_of_parse_and_map(np, 1);
	if (!hsdev->dma->irq) {
		dev_err(dev, "no SATA DMA irq\n");
		return -ENODEV;
	}

	/* Get physical SATA DMA register base address */
	hsdev->dma->regs = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(hsdev->dma->regs))
		return PTR_ERR(hsdev->dma->regs);

	/* Initialize AHB DMAC */
	return dw_dma_probe(hsdev->dma);
}

static void sata_dwc_dma_exit_old(struct sata_dwc_device *hsdev)
{
	if (!hsdev->dma)
		return;

	dw_dma_remove(hsdev->dma);
}

#endif

static const char *get_prot_descript(u8 protocol)
{
	switch (protocol) {
	case ATA_PROT_NODATA:
		return "ATA no data";
	case ATA_PROT_PIO:
		return "ATA PIO";
	case ATA_PROT_DMA:
		return "ATA DMA";
	case ATA_PROT_NCQ:
		return "ATA NCQ";
	case ATA_PROT_NCQ_NODATA:
		return "ATA NCQ no data";
	case ATAPI_PROT_NODATA:
		return "ATAPI no data";
	case ATAPI_PROT_PIO:
		return "ATAPI PIO";
	case ATAPI_PROT_DMA:
		return "ATAPI DMA";
	default:
		return "unknown";
	}
}

static void dma_dwc_xfer_done(void *hsdev_instance)
{
	unsigned long flags;
	struct sata_dwc_device *hsdev = hsdev_instance;
	struct ata_host *host = (struct ata_host *)hsdev->host;
	struct ata_port *ap;
	struct sata_dwc_device_port *hsdevp;
	u8 tag = 0;
	unsigned int port = 0;

	spin_lock_irqsave(&host->lock, flags);
	ap = host->ports[port];
	hsdevp = HSDEVP_FROM_AP(ap);
	tag = ap->link.active_tag;

	/*
	 * Each DMA command produces 2 interrupts. Only
	 * complete the command after both interrupts have been
	 * seen. (See sata_dwc_isr())
	 */
	hsdevp->dma_interrupt_count++;
	sata_dwc_clear_dmacr(hsdevp, tag);

	if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) {
		dev_err(ap->dev, "DMA not pending tag=0x%02x pending=%d\n",
			tag, hsdevp->dma_pending[tag]);
	}

	if ((hsdevp->dma_interrupt_count % 2) == 0)
		sata_dwc_dma_xfer_complete(ap);

	spin_unlock_irqrestore(&host->lock, flags);
}
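
/*
 * Configure the dmaengine slave channel for this queued command (FIFO
 * address, 32-bit bus width, default burst length) and prepare a
 * scatter/gather descriptor whose completion callback is dma_dwc_xfer_done().
 */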
static struct dma_async_tx_descriptor *dma_dwc_xfer_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
	struct dma_slave_config sconf;
	struct dma_async_tx_descriptor *desc;

	if (qc->dma_dir == DMA_DEV_TO_MEM) {
		sconf.src_addr = hsdev->dmadr;
		sconf.device_fc = false;
	} else {	/* DMA_MEM_TO_DEV */
		sconf.dst_addr = hsdev->dmadr;
		sconf.device_fc = false;
	}

	sconf.direction = qc->dma_dir;
	sconf.src_maxburst = AHB_DMA_BRST_DFLT / 4;	/* in items */
	sconf.dst_maxburst = AHB_DMA_BRST_DFLT / 4;	/* in items */
	sconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	sconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	dmaengine_slave_config(hsdevp->chan, &sconf);

	/* Convert SG list to linked list of items (LLIs) for AHB DMA */
	desc = dmaengine_prep_slave_sg(hsdevp->chan, qc->sg, qc->n_elem,
				       qc->dma_dir,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (!desc)
		return NULL;

	desc->callback = dma_dwc_xfer_done;
	desc->callback_param = hsdev;

	dev_dbg(hsdev->dev, "%s sg: 0x%p, count: %d addr: %pa\n", __func__,
		qc->sg, qc->n_elem, &hsdev->dmadr);

	return desc;
}

static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
{
	if (scr > SCR_NOTIFICATION) {
		dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
			__func__, scr);
		return -EINVAL;
	}

	*val = sata_dwc_readl(link->ap->ioaddr.scr_addr + (scr * 4));
	dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", __func__,
		link->ap->print_id, scr, *val);

	return 0;
}

static int sata_dwc_scr_write(struct ata_link *link, unsigned int scr, u32 val)
{
	dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", __func__,
		link->ap->print_id, scr, val);
	if (scr > SCR_NOTIFICATION) {
		dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
			__func__, scr);
		return -EINVAL;
	}
	sata_dwc_writel(link->ap->ioaddr.scr_addr + (scr * 4), val);

	return 0;
}

static void clear_serror(struct ata_port *ap)
{
	u32 val;

	sata_dwc_scr_read(&ap->link, SCR_ERROR, &val);
	sata_dwc_scr_write(&ap->link, SCR_ERROR, val);
}
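
/*
 * The interrupt pending register is write-1-to-clear: writing back the
 * currently pending bits acknowledges them. Note that the bit argument is
 * not used to mask the write, so all pending interrupts are cleared here.
 */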
static void clear_interrupt_bit(struct sata_dwc_device *hsdev, u32 bit)
{
	sata_dwc_writel(&hsdev->sata_dwc_regs->intpr,
			sata_dwc_readl(&hsdev->sata_dwc_regs->intpr));
}

static u32 qcmd_tag_to_mask(u8 tag)
{
	return 0x00000001 << (tag & 0x1f);
}

/* See ahci.c */
static void sata_dwc_error_intr(struct ata_port *ap,
				struct sata_dwc_device *hsdev, uint intpr)
{
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct ata_eh_info *ehi = &ap->link.eh_info;
	unsigned int err_mask = 0, action = 0;
	struct ata_queued_cmd *qc;
	u32 serror;
	u8 status, tag;

	ata_ehi_clear_desc(ehi);

	sata_dwc_scr_read(&ap->link, SCR_ERROR, &serror);
	status = ap->ops->sff_check_status(ap);

	tag = ap->link.active_tag;

	dev_err(ap->dev,
		"%s SCR_ERROR=0x%08x intpr=0x%08x status=0x%08x dma_intp=%d pending=%d issued=%d",
		__func__, serror, intpr, status, hsdevp->dma_interrupt_count,
		hsdevp->dma_pending[tag], hsdevp->cmd_issued[tag]);

	/* Clear error register and interrupt bit */
	clear_serror(ap);
	clear_interrupt_bit(hsdev, SATA_DWC_INTPR_ERR);

	/* This is the only error happening now. TODO check for exact error */
	err_mask |= AC_ERR_HOST_BUS;
	action |= ATA_EH_RESET;

	/* Pass this on to EH */
	ehi->serror |= serror;
	ehi->action |= action;

	qc = ata_qc_from_tag(ap, tag);
	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	ata_port_abort(ap);
}

/*
 * Function : sata_dwc_isr
 * arguments : irq, void *dev_instance
 * Return value : irqreturn_t - status of IRQ
 * This interrupt handler is registered via the port ops:
 * .irq_handler = sata_dwc_isr
 */
static irqreturn_t sata_dwc_isr(int irq, void *dev_instance)
{
	struct ata_host *host = (struct ata_host *)dev_instance;
	struct sata_dwc_device *hsdev = HSDEV_FROM_HOST(host);
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	unsigned long flags;
	u8 status, tag;
	int handled, port = 0;
	uint intpr, sactive, sactive2, tag_mask;
	struct sata_dwc_device_port *hsdevp;

	hsdev->sactive_issued = 0;

	spin_lock_irqsave(&host->lock, flags);

	/* Read the interrupt register */
	intpr = sata_dwc_readl(&hsdev->sata_dwc_regs->intpr);

	ap = host->ports[port];
	hsdevp = HSDEVP_FROM_AP(ap);

	dev_dbg(ap->dev, "%s intpr=0x%08x active_tag=%d\n", __func__, intpr,
		ap->link.active_tag);

	/* Check for error interrupt */
	if (intpr & SATA_DWC_INTPR_ERR) {
		sata_dwc_error_intr(ap, hsdev, intpr);
		handled = 1;
		goto DONE;
	}

	/* Check for DMA SETUP FIS (FP DMA) interrupt */
	if (intpr & SATA_DWC_INTPR_NEWFP) {
		clear_interrupt_bit(hsdev, SATA_DWC_INTPR_NEWFP);

		tag = (u8)(sata_dwc_readl(&hsdev->sata_dwc_regs->fptagr));
		dev_dbg(ap->dev, "%s: NEWFP tag=%d\n", __func__, tag);
		if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_PEND)
			dev_warn(ap->dev, "CMD tag=%d not pending?\n", tag);

		hsdev->sactive_issued |= qcmd_tag_to_mask(tag);

		qc = ata_qc_from_tag(ap, tag);
		if (unlikely(!qc)) {
			dev_err(ap->dev, "failed to get qc");
			handled = 1;
			goto DONE;
		}
		/*
		 * Start FP DMA for NCQ command. At this point the tag is the
		 * active tag. It is the tag that matches the command about to
		 * be completed.
		 */
		trace_ata_bmdma_start(ap, &qc->tf, tag);
		qc->ap->link.active_tag = tag;
		sata_dwc_bmdma_start_by_tag(qc, tag);

		handled = 1;
		goto DONE;
	}
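
	/*
	 * sactive_issued tracks the NCQ tags issued by this driver; tags that
	 * are still set there but no longer present in SActive have completed,
	 * which is what (sactive_issued | sactive) ^ sactive extracts below.
	 */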
	sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
	tag_mask = (hsdev->sactive_issued | sactive) ^ sactive;

	/* If no sactive issued and tag_mask is zero then this is not NCQ */
	if (hsdev->sactive_issued == 0 && tag_mask == 0) {
		if (ap->link.active_tag == ATA_TAG_POISON)
			tag = 0;
		else
			tag = ap->link.active_tag;
		qc = ata_qc_from_tag(ap, tag);

		/* DEV interrupt w/ no active qc? */
		if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
			dev_err(ap->dev,
				"%s interrupt with no active qc qc=%p\n",
				__func__, qc);
			ap->ops->sff_check_status(ap);
			handled = 1;
			goto DONE;
		}
		status = ap->ops->sff_check_status(ap);

		qc->ap->link.active_tag = tag;
		hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;

		if (status & ATA_ERR) {
			dev_dbg(ap->dev, "interrupt ATA_ERR (0x%x)\n", status);
			sata_dwc_qc_complete(ap, qc);
			handled = 1;
			goto DONE;
		}

		dev_dbg(ap->dev, "%s non-NCQ cmd interrupt, protocol: %s\n",
			__func__, get_prot_descript(qc->tf.protocol));
DRVSTILLBUSY:
		if (ata_is_dma(qc->tf.protocol)) {
			/*
			 * Each DMA transaction produces 2 interrupts. The DMAC
			 * transfer complete interrupt and the SATA controller
			 * operation done interrupt. The command should be
			 * completed only after both interrupts are seen.
			 */
			hsdevp->dma_interrupt_count++;
			if (hsdevp->dma_pending[tag] ==
					SATA_DWC_DMA_PENDING_NONE) {
				dev_err(ap->dev,
					"%s: DMA not pending intpr=0x%08x status=0x%08x pending=%d\n",
					__func__, intpr, status,
					hsdevp->dma_pending[tag]);
			}

			if ((hsdevp->dma_interrupt_count % 2) == 0)
				sata_dwc_dma_xfer_complete(ap);
		} else if (ata_is_pio(qc->tf.protocol)) {
			ata_sff_hsm_move(ap, qc, status, 0);
			handled = 1;
			goto DONE;
		} else {
			if (unlikely(sata_dwc_qc_complete(ap, qc)))
				goto DRVSTILLBUSY;
		}

		handled = 1;
		goto DONE;
	}

	/*
	 * This is a NCQ command. At this point we need to figure out for which
	 * tags we have gotten a completion interrupt. One interrupt may serve
	 * as completion for more than one operation when commands are queued
	 * (NCQ). We need to process each completed command.
	 */

	/* process completed commands */
	sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
	tag_mask = (hsdev->sactive_issued | sactive) ^ sactive;

	if (sactive != 0 || hsdev->sactive_issued > 1 || tag_mask > 1) {
		dev_dbg(ap->dev,
			"%s NCQ:sactive=0x%08x sactive_issued=0x%08x tag_mask=0x%08x\n",
			__func__, sactive, hsdev->sactive_issued, tag_mask);
	}

	if ((tag_mask | hsdev->sactive_issued) != hsdev->sactive_issued) {
		dev_warn(ap->dev,
			 "Bad tag mask? sactive=0x%08x sactive_issued=0x%08x tag_mask=0x%08x\n",
			 sactive, hsdev->sactive_issued, tag_mask);
	}

	/* read just to clear ... not bad if currently still busy */
	status = ap->ops->sff_check_status(ap);
	dev_dbg(ap->dev, "%s ATA status register=0x%x\n", __func__, status);

	tag = 0;
	while (tag_mask) {
		while (!(tag_mask & 0x00000001)) {
			tag++;
			tag_mask <<= 1;
		}

		tag_mask &= (~0x00000001);
		qc = ata_qc_from_tag(ap, tag);
		if (unlikely(!qc)) {
			dev_err(ap->dev, "failed to get qc");
			handled = 1;
			goto DONE;
		}

		/* To be picked up by completion functions */
		qc->ap->link.active_tag = tag;
		hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;

		/* Let libata/scsi layers handle error */
		if (status & ATA_ERR) {
			dev_dbg(ap->dev, "%s ATA_ERR (0x%x)\n", __func__,
				status);
			sata_dwc_qc_complete(ap, qc);
			handled = 1;
			goto DONE;
		}

		/* Process completed command */
		dev_dbg(ap->dev, "%s NCQ command, protocol: %s\n", __func__,
			get_prot_descript(qc->tf.protocol));
		if (ata_is_dma(qc->tf.protocol)) {
			hsdevp->dma_interrupt_count++;
			if (hsdevp->dma_pending[tag] ==
					SATA_DWC_DMA_PENDING_NONE)
				dev_warn(ap->dev, "%s: DMA not pending?\n",
					 __func__);
			if ((hsdevp->dma_interrupt_count % 2) == 0)
				sata_dwc_dma_xfer_complete(ap);
		} else {
			if (unlikely(sata_dwc_qc_complete(ap, qc)))
				goto STILLBUSY;
		}
		continue;

STILLBUSY:
		ap->stats.idle_irq++;
		dev_warn(ap->dev, "STILL BUSY IRQ ata%d: irq trap\n",
			 ap->print_id);
	} /* while tag_mask */

	/*
	 * Check to see if any commands completed while we were processing our
	 * initial set of completed commands (read status clears interrupts,
	 * so we might miss a completed command interrupt if one came in while
	 * we were processing -- we read status as part of processing a
	 * completed command).
	 */
	sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive2);
	if (sactive2 != sactive) {
		dev_dbg(ap->dev,
			"More completed - sactive=0x%x sactive2=0x%x\n",
			sactive, sactive2);
	}
	handled = 1;

DONE:
	spin_unlock_irqrestore(&host->lock, flags);
	return IRQ_RETVAL(handled);
}

static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag)
{
	struct sata_dwc_device *hsdev = HSDEV_FROM_HSDEVP(hsdevp);
	u32 dmacr = sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr);

	if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX) {
		dmacr = SATA_DWC_DMACR_RX_CLEAR(dmacr);
		sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, dmacr);
	} else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX) {
		dmacr = SATA_DWC_DMACR_TX_CLEAR(dmacr);
		sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, dmacr);
	} else {
		/*
		 * This should not happen, it indicates the driver is out of
		 * sync. If it does happen, clear dmacr anyway.
		 */
		dev_err(hsdev->dev,
			"%s DMA protocol RX and TX DMA not pending tag=0x%02x pending=%d dmacr: 0x%08x\n",
			__func__, tag, hsdevp->dma_pending[tag], dmacr);
		sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
				SATA_DWC_DMACR_TXRXCH_CLEAR);
	}
}

static void sata_dwc_dma_xfer_complete(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
	u8 tag = 0;

	tag = ap->link.active_tag;
	qc = ata_qc_from_tag(ap, tag);
	if (!qc) {
		dev_err(ap->dev, "failed to get qc");
		return;
	}

	if (ata_is_dma(qc->tf.protocol)) {
		if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) {
			dev_err(ap->dev,
				"%s DMA protocol RX and TX DMA not pending dmacr: 0x%08x\n",
				__func__,
				sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr));
		}

		hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE;
		sata_dwc_qc_complete(ap, qc);
		ap->link.active_tag = ATA_TAG_POISON;
	} else {
		sata_dwc_qc_complete(ap, qc);
	}
}

static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	u8 status = 0;
	u32 mask = 0x0;
	u8 tag = qc->hw_tag;
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);

	hsdev->sactive_queued = 0;

	if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX)
		dev_err(ap->dev, "TX DMA PENDING\n");
	else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX)
		dev_err(ap->dev, "RX DMA PENDING\n");
	dev_dbg(ap->dev,
		"QC complete cmd=0x%02x status=0x%02x ata%u: protocol=%d\n",
		qc->tf.command, status, ap->print_id, qc->tf.protocol);

	/* clear active bit */
	mask = (~(qcmd_tag_to_mask(tag)));
	hsdev->sactive_queued = hsdev->sactive_queued & mask;
	hsdev->sactive_issued = hsdev->sactive_issued & mask;
	ata_qc_complete(qc);
	return 0;
}
static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev)
{
	/* Enable selective interrupts by setting the interrupt mask register */
	sata_dwc_writel(&hsdev->sata_dwc_regs->intmr,
			SATA_DWC_INTMR_ERRM |
			SATA_DWC_INTMR_NEWFPM |
			SATA_DWC_INTMR_PMABRTM |
			SATA_DWC_INTMR_DMATM);

	/*
	 * Unmask the error bits that should trigger an error interrupt by
	 * setting the error mask register.
	 */
	sata_dwc_writel(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS);

	dev_dbg(hsdev->dev, "%s: INTMR = 0x%08x, ERRMR = 0x%08x\n",
		__func__, sata_dwc_readl(&hsdev->sata_dwc_regs->intmr),
		sata_dwc_readl(&hsdev->sata_dwc_regs->errmr));
}
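
/*
 * The controller's shadow taskfile registers are mapped at 4-byte strides
 * from the command block base, so every libata ioaddr entry below is just
 * base plus a fixed offset.
 */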
static void sata_dwc_setup_port(struct ata_ioports *port, void __iomem *base)
{
	port->cmd_addr		= base + 0x00;
	port->data_addr		= base + 0x00;

	port->error_addr	= base + 0x04;
	port->feature_addr	= base + 0x04;

	port->nsect_addr	= base + 0x08;

	port->lbal_addr		= base + 0x0c;
	port->lbam_addr		= base + 0x10;
	port->lbah_addr		= base + 0x14;

	port->device_addr	= base + 0x18;
	port->command_addr	= base + 0x1c;
	port->status_addr	= base + 0x1c;

	port->altstatus_addr	= base + 0x20;
	port->ctl_addr		= base + 0x20;
}
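
/*
 * Acquire the port's DMA channel. When the device tree carries no "dmas"
 * property the legacy embedded DW AHB DMA path above is used; otherwise a
 * dmaengine channel named "sata-dma" is requested from the core.
 */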
static int sata_dwc_dma_get_channel(struct sata_dwc_device_port *hsdevp)
{
	struct sata_dwc_device *hsdev = hsdevp->hsdev;
	struct device *dev = hsdev->dev;

#ifdef CONFIG_SATA_DWC_OLD_DMA
	if (!of_property_present(dev->of_node, "dmas"))
		return sata_dwc_dma_get_channel_old(hsdevp);
#endif

	hsdevp->chan = dma_request_chan(dev, "sata-dma");
	if (IS_ERR(hsdevp->chan)) {
		dev_err(dev, "failed to allocate dma channel: %ld\n",
			PTR_ERR(hsdevp->chan));
		return PTR_ERR(hsdevp->chan);
	}

	return 0;
}

/*
 * Function : sata_dwc_port_start
 * arguments : struct ata_port *ap
 * Return value : returns 0 if success, error code otherwise
 * This function allocates the scatter gather LLI table for AHB DMA
 */
static int sata_dwc_port_start(struct ata_port *ap)
{
	int err = 0;
	struct sata_dwc_device *hsdev;
	struct sata_dwc_device_port *hsdevp = NULL;
	struct device *pdev;
	int i;

	hsdev = HSDEV_FROM_AP(ap);

	dev_dbg(ap->dev, "%s: port_no=%d\n", __func__, ap->port_no);

	hsdev->host = ap->host;
	pdev = ap->host->dev;
	if (!pdev) {
		dev_err(ap->dev, "%s: no ap->host->dev\n", __func__);
		err = -ENODEV;
		goto CLEANUP;
	}

	/* Allocate Port Struct */
	hsdevp = kzalloc(sizeof(*hsdevp), GFP_KERNEL);
	if (!hsdevp) {
		err = -ENOMEM;
		goto CLEANUP;
	}
	hsdevp->hsdev = hsdev;

	err = sata_dwc_dma_get_channel(hsdevp);
	if (err)
		goto CLEANUP_ALLOC;

	err = phy_power_on(hsdev->phy);
	if (err)
		goto CLEANUP_ALLOC;

	for (i = 0; i < SATA_DWC_QCMD_MAX; i++)
		hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT;

	ap->bmdma_prd = NULL;	/* set these so libata doesn't use them */
	ap->bmdma_prd_dma = 0;

	if (ap->port_no == 0) {
		dev_dbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n",
			__func__);
		sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
				SATA_DWC_DMACR_TXRXCH_CLEAR);

		dev_dbg(ap->dev, "%s: setting burst size in DBTSR\n",
			__func__);
		sata_dwc_writel(&hsdev->sata_dwc_regs->dbtsr,
				(SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
				 SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT)));
	}

	/* Clear any error bits before libata starts issuing commands */
	clear_serror(ap);
	ap->private_data = hsdevp;
	dev_dbg(ap->dev, "%s: done\n", __func__);
	return 0;

CLEANUP_ALLOC:
	kfree(hsdevp);
CLEANUP:
	dev_dbg(ap->dev, "%s: fail. ap->id = %d\n", __func__, ap->print_id);
	return err;
}

static void sata_dwc_port_stop(struct ata_port *ap)
{
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);

	dev_dbg(ap->dev, "%s: ap->id = %d\n", __func__, ap->print_id);

	dmaengine_terminate_sync(hsdevp->chan);
	dma_release_channel(hsdevp->chan);
	phy_power_off(hsdev->phy);

	kfree(hsdevp);
	ap->private_data = NULL;
}

/*
 * Function : sata_dwc_exec_command_by_tag
 * arguments : ata_port *ap, ata_taskfile *tf, u8 tag, u32 cmd_issued
 * Return value : None
 * This function keeps track of individual command tag ids and calls
 * ata_exec_command in libata
 */
static void sata_dwc_exec_command_by_tag(struct ata_port *ap,
					 struct ata_taskfile *tf,
					 u8 tag, u32 cmd_issued)
{
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);

	hsdevp->cmd_issued[tag] = cmd_issued;

	/*
	 * Clear SError before executing a new command.
	 * sata_dwc_scr_write and read cannot be used here. Clearing the PM
	 * managed SError register for the disk needs to be done before the
	 * task file is loaded.
	 */
	clear_serror(ap);
	ata_sff_exec_command(ap, tf);
}

static void sata_dwc_bmdma_setup_by_tag(struct ata_queued_cmd *qc, u8 tag)
{
	sata_dwc_exec_command_by_tag(qc->ap, &qc->tf, tag,
				     SATA_DWC_CMD_ISSUED_PEND);
}

static void sata_dwc_bmdma_setup(struct ata_queued_cmd *qc)
{
	u8 tag = qc->hw_tag;

	if (!ata_is_ncq(qc->tf.protocol))
		tag = 0;

	sata_dwc_bmdma_setup_by_tag(qc, tag);
}

static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag)
{
	int start_dma;
	u32 reg;
	struct sata_dwc_device *hsdev = HSDEV_FROM_QC(qc);
	struct ata_port *ap = qc->ap;
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct dma_async_tx_descriptor *desc = hsdevp->desc[tag];
	int dir = qc->dma_dir;

	if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_NOT) {
		start_dma = 1;
		if (dir == DMA_TO_DEVICE)
			hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_TX;
		else
			hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_RX;
	} else {
		dev_err(ap->dev,
			"%s: Command not pending cmd_issued=%d (tag=%d) DMA NOT started\n",
			__func__, hsdevp->cmd_issued[tag], tag);
		start_dma = 0;
	}

	if (start_dma) {
		sata_dwc_scr_read(&ap->link, SCR_ERROR, &reg);
		if (reg & SATA_DWC_SERROR_ERR_BITS) {
			dev_err(ap->dev, "%s: ****** SError=0x%08x ******\n",
				__func__, reg);
		}

		if (dir == DMA_TO_DEVICE)
			sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
					SATA_DWC_DMACR_TXCHEN);
		else
			sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
					SATA_DWC_DMACR_RXCHEN);

		/* Enable AHB DMA transfer on the specified channel */
		dmaengine_submit(desc);
		dma_async_issue_pending(hsdevp->chan);
	}
}

static void sata_dwc_bmdma_start(struct ata_queued_cmd *qc)
{
	u8 tag = qc->hw_tag;

	if (!ata_is_ncq(qc->tf.protocol))
		tag = 0;

	sata_dwc_bmdma_start_by_tag(qc, tag);
}
static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc)
{
	u32 sactive;
	u8 tag = qc->hw_tag;
	struct ata_port *ap = qc->ap;
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);

	if (!ata_is_ncq(qc->tf.protocol))
		tag = 0;

	if (ata_is_dma(qc->tf.protocol)) {
		hsdevp->desc[tag] = dma_dwc_xfer_setup(qc);
		if (!hsdevp->desc[tag])
			return AC_ERR_SYSTEM;
	} else {
		hsdevp->desc[tag] = NULL;
	}

	if (ata_is_ncq(qc->tf.protocol)) {
		sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
		sactive |= (0x00000001 << tag);
		sata_dwc_scr_write(&ap->link, SCR_ACTIVE, sactive);

		trace_ata_tf_load(ap, &qc->tf);
		ap->ops->sff_tf_load(ap, &qc->tf);
		trace_ata_exec_command(ap, &qc->tf, tag);
		sata_dwc_exec_command_by_tag(ap, &qc->tf, tag,
					     SATA_DWC_CMD_ISSUED_PEND);
	} else {
		return ata_bmdma_qc_issue(qc);
	}
	return 0;
}

static void sata_dwc_error_handler(struct ata_port *ap)
{
	ata_sff_error_handler(ap);
}
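
/*
 * Hard reset through the generic SFF helper, then re-enable the controller
 * interrupts and reprogram the DMA control and burst transaction size
 * registers to their default values.
 */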
static int sata_dwc_hardreset(struct ata_link *link, unsigned int *class,
			      unsigned long deadline)
{
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(link->ap);
	int ret;

	ret = sata_sff_hardreset(link, class, deadline);

	sata_dwc_enable_interrupts(hsdev);

	/* Reconfigure the DMA control register */
	sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
			SATA_DWC_DMACR_TXRXCH_CLEAR);

	/* Reconfigure the DMA Burst Transaction Size register */
	sata_dwc_writel(&hsdev->sata_dwc_regs->dbtsr,
			SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
			SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT));

	return ret;
}

static void sata_dwc_dev_select(struct ata_port *ap, unsigned int device)
{
	/* SATA DWC is master only */
}
/*
 * scsi mid-layer and libata interface structures
 */
static const struct scsi_host_template sata_dwc_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	/*
	 * test-only: Currently this driver doesn't handle NCQ
	 * correctly. We enable NCQ but set the queue depth to a
	 * max of 1. This will get fixed in a future release.
	 */
	.sg_tablesize		= LIBATA_MAX_PRD,
	/* .can_queue		= ATA_MAX_QUEUE, */
	/*
	 * Make sure a LLI block is not created that will span the 8K max FIS
	 * boundary. If the block spans such a FIS boundary, there is a chance
	 * that a DMA burst will cross that boundary -- this results in an
	 * error in the host controller.
	 */
	.dma_boundary		= 0x1fff /* ATA_DMA_BOUNDARY */,
};
static struct ata_port_operations sata_dwc_ops = {
	.inherits		= &ata_sff_port_ops,

	.error_handler		= sata_dwc_error_handler,
	.hardreset		= sata_dwc_hardreset,

	.qc_issue		= sata_dwc_qc_issue,

	.scr_read		= sata_dwc_scr_read,
	.scr_write		= sata_dwc_scr_write,

	.port_start		= sata_dwc_port_start,
	.port_stop		= sata_dwc_port_stop,

	.sff_dev_select		= sata_dwc_dev_select,

	.bmdma_setup		= sata_dwc_bmdma_setup,
	.bmdma_start		= sata_dwc_bmdma_start,
};

static const struct ata_port_info sata_dwc_port_info[] = {
	{
		.flags = ATA_FLAG_SATA | ATA_FLAG_NCQ,
		.pio_mask = ATA_PIO4,
		.udma_mask = ATA_UDMA6,
		.port_ops = &sata_dwc_ops,
	},
};

static int sata_dwc_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = dev->of_node;
	struct sata_dwc_device *hsdev;
	u32 idr, versionr;
	char *ver = (char *)&versionr;
	void __iomem *base;
	int err = 0;
	int irq;
	struct ata_host *host;
	struct ata_port_info pi = sata_dwc_port_info[0];
	const struct ata_port_info *ppi[] = { &pi, NULL };
	struct resource *res;

	/* Allocate DWC SATA device */
	host = ata_host_alloc_pinfo(dev, ppi, SATA_DWC_MAX_PORTS);
	hsdev = devm_kzalloc(dev, sizeof(*hsdev), GFP_KERNEL);
	if (!host || !hsdev)
		return -ENOMEM;

	host->private_data = hsdev;

	/* Ioremap SATA registers */
	base = devm_platform_get_and_ioremap_resource(ofdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);
	dev_dbg(dev, "ioremap done for SATA register address\n");

	/* Synopsys DWC SATA specific Registers */
	hsdev->sata_dwc_regs = base + SATA_DWC_REG_OFFSET;
	hsdev->dmadr = res->start + SATA_DWC_REG_OFFSET + offsetof(struct sata_dwc_regs, dmadr);

	/* Setup port */
	host->ports[0]->ioaddr.cmd_addr = base;
	host->ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET;
	sata_dwc_setup_port(&host->ports[0]->ioaddr, base);

	/* Read the ID and Version Registers */
	idr = sata_dwc_readl(&hsdev->sata_dwc_regs->idr);
	versionr = sata_dwc_readl(&hsdev->sata_dwc_regs->versionr);
	dev_notice(dev, "id %d, controller version %c.%c%c\n", idr, ver[0], ver[1], ver[2]);

	/* Save dev for later use in dev_xxx() routines */
	hsdev->dev = dev;

	/* Enable SATA Interrupts */
	sata_dwc_enable_interrupts(hsdev);

	/* Get SATA interrupt number */
	irq = irq_of_parse_and_map(np, 0);
	if (!irq) {
		dev_err(dev, "no SATA DMA irq\n");
		return -ENODEV;
	}

#ifdef CONFIG_SATA_DWC_OLD_DMA
	if (!of_property_present(np, "dmas")) {
		err = sata_dwc_dma_init_old(ofdev, hsdev);
		if (err)
			return err;
	}
#endif

	hsdev->phy = devm_phy_optional_get(dev, "sata-phy");
	if (IS_ERR(hsdev->phy))
		return PTR_ERR(hsdev->phy);

	err = phy_init(hsdev->phy);
	if (err)
		goto error_out;

	/*
	 * Now, register with libATA core, this will also initiate the
	 * device discovery process, invoking our port_start() handler &
	 * error_handler() to execute a dummy Softreset EH session
	 */
	err = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
	if (err)
		dev_err(dev, "failed to activate host");

	return 0;

error_out:
	phy_exit(hsdev->phy);
	return err;
}

static int sata_dwc_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct ata_host *host = dev_get_drvdata(dev);
	struct sata_dwc_device *hsdev = host->private_data;

	ata_host_detach(host);

	phy_exit(hsdev->phy);

#ifdef CONFIG_SATA_DWC_OLD_DMA
	/* Free SATA DMA resources */
	sata_dwc_dma_exit_old(hsdev);
#endif

	dev_dbg(dev, "done\n");
	return 0;
}

static const struct of_device_id sata_dwc_match[] = {
	{ .compatible = "amcc,sata-460ex", },
	{}
};
MODULE_DEVICE_TABLE(of, sata_dwc_match);

static struct platform_driver sata_dwc_driver = {
	.driver = {
		.name = DRV_NAME,
		.of_match_table = sata_dwc_match,
	},
	.probe = sata_dwc_probe,
	.remove = sata_dwc_remove,
};

module_platform_driver(sata_dwc_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mark Miesfeld <mmiesfeld@amcc.com>");
MODULE_DESCRIPTION("DesignWare Cores SATA controller low level driver");
MODULE_VERSION(DRV_VERSION);