/*
 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include "scsi_priv.h"
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>

#include "libata.h"
static unsigned int ata_dev_init_params(struct ata_port *ap,
					struct ata_device *dev);
static void ata_set_mode(struct ata_port *ap);
static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
static unsigned int ata_dev_xfermask(struct ata_port *ap,
				     struct ata_device *dev);

static unsigned int ata_unique_id = 1;
static struct workqueue_struct *ata_wq;

int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 * @tf: Taskfile to convert
 * @fis: Buffer into which data will be output
 * @pmp: Port multiplier port
 *
 * Converts a standard ATA taskfile to a Serial ATA
 * FIS structure (Register - Host to Device).
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
					    bit 7 indicates Command FIS */
	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;

	fis[15] = tf->ctl;

	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}
/**
 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 * @fis: Buffer from which data will be input
 * @tf: Taskfile to output
 *
 * Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
/**
 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
 * @qc: command to examine and configure
 *
 * Examine the device configuration and tf->flags to calculate
 * the proper read/write commands and protocol to use.
 *
 * LOCKING:
 * caller.
 */
int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
{
	struct ata_taskfile *tf = &qc->tf;
	struct ata_device *dev = qc->dev;
	u8 cmd;

	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}
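
/*
 * Illustrative note (not from the original source): the ata_rw_cmds[]
 * table above is indexed as index + fua + lba48 + write.  For example,
 * an LBA48 FUA write in DMA protocol uses index 16, fua 4, lba48 2,
 * write 1, selecting ata_rw_cmds[23] == ATA_CMD_WRITE_FUA_EXT, while a
 * plain 28-bit DMA read stays at ata_rw_cmds[16] == ATA_CMD_READ.
 */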

/**
 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 * @pio_mask: pio_mask
 * @mwdma_mask: mwdma_mask
 * @udma_mask: udma_mask
 *
 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 * unsigned int xfer_mask.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Packed xfer_mask.
 */
static unsigned int ata_pack_xfermask(unsigned int pio_mask,
				      unsigned int mwdma_mask,
				      unsigned int udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}
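
/*
 * Illustrative note (not from the original source): a device supporting
 * PIO modes 0-4, MWDMA modes 0-2 and UDMA modes 0-5 would be described
 * by ata_pack_xfermask(0x1f, 0x07, 0x3f); the helpers below recover the
 * highest supported mode from such a mask (here XFER_UDMA_5, since the
 * UDMA bits occupy the highest field of the packed mask).
 */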

static const struct ata_xfer_ent {
	unsigned int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },
};

/**
 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 * @xfer_mask: xfer_mask of interest
 *
 * Return matching XFER_* value for @xfer_mask.  Only the highest
 * bit of @xfer_mask is considered.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching XFER_* value, 0 if no match found.
 */
static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0;
}

/**
 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_mask for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_mask, 0 if no match found.
 */
static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return 1 << (ent->shift + xfer_mode - ent->base);
	return 0;
}

/**
 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_shift for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_shift, -1 if no match found.
 */
static int ata_xfer_mode2shift(unsigned int xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}

/**
 * ata_mode_string - convert xfer_mask to string
 * @xfer_mask: mask of bits supported; only highest bit counts.
 *
 * Determine string which represents the highest speed
 * (highest bit in @xfer_mask).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Constant C string representing highest speed listed in
 * @xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}

/**
 * ata_pio_devchk - PATA device presence detection
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 *
 * This technique was originally described in
 * Hale Landis's ATADRVR (www.ata-atapi.com), and
 * later found its way into the ATA/ATAPI spec.
 *
 * Write a pattern to the ATA shadow registers,
 * and if a device is present, it will respond by
 * correctly storing and echoing back the
 * ATA shadow register contents.
 *
 * LOCKING:
 * caller.
 */
static unsigned int ata_pio_devchk(struct ata_port *ap,
				   unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	outb(0x55, ioaddr->nsect_addr);
	outb(0xaa, ioaddr->lbal_addr);

	outb(0xaa, ioaddr->nsect_addr);
	outb(0x55, ioaddr->lbal_addr);

	outb(0x55, ioaddr->nsect_addr);
	outb(0xaa, ioaddr->lbal_addr);

	nsect = inb(ioaddr->nsect_addr);
	lbal = inb(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}

/**
 * ata_mmio_devchk - PATA device presence detection
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 *
 * This technique was originally described in
 * Hale Landis's ATADRVR (www.ata-atapi.com), and
 * later found its way into the ATA/ATAPI spec.
 *
 * Write a pattern to the ATA shadow registers,
 * and if a device is present, it will respond by
 * correctly storing and echoing back the
 * ATA shadow register contents.
 *
 * LOCKING:
 * caller.
 */
static unsigned int ata_mmio_devchk(struct ata_port *ap,
				    unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

	writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
	writeb(0x55, (void __iomem *) ioaddr->lbal_addr);

	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

	nsect = readb((void __iomem *) ioaddr->nsect_addr);
	lbal = readb((void __iomem *) ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}

/**
 * ata_devchk - PATA device presence detection
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 *
 * Dispatch ATA device presence detection, depending
 * on whether we are using PIO or MMIO to talk to the
 * ATA shadow registers.
 *
 * LOCKING:
 * caller.
 */
static unsigned int ata_devchk(struct ata_port *ap,
			       unsigned int device)
{
	if (ap->flags & ATA_FLAG_MMIO)
		return ata_mmio_devchk(ap, device);
	return ata_pio_devchk(ap, device);
}

/**
 * ata_dev_classify - determine device type based on ATA-spec signature
 * @tf: ATA taskfile register set for device to be identified
 *
 * Determine from taskfile register contents whether a device is
 * ATA or ATAPI, as per "Signature and persistence" section
 * of ATA/PI spec (volume 1, sect 5.14).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
 * in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * so we only check those.  It's sufficient for uniqueness.
	 */

	if (((tf->lbam == 0) && (tf->lbah == 0)) ||
	    ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
	    ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}

/**
 * ata_dev_try_classify - Parse returned ATA device signature
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 * @r_err: Value of error register on completion
 *
 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 * an ATA/ATAPI-defined set of values is placed in the ATA
 * shadow registers, indicating the results of device detection
 * and diagnostics.
 *
 * Select the ATA device, and read the values from the ATA shadow
 * registers.  Then parse according to the Error register value,
 * and the spec-defined values examined by ata_dev_classify().
 *
 * LOCKING:
 * caller.
 *
 * RETURNS:
 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */
static unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, device);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags */
	if (err == 1)
		/* do nothing */ ;
	else if ((device == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN)
		return ATA_DEV_NONE;
	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		return ATA_DEV_NONE;
	return class;
}

/**
 * ata_id_string - Convert IDENTIFY DEVICE page into string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an even number.
 *
 * The strings in the IDENTIFY DEVICE page are broken up into
 * 16-bit chunks.  Run through the string, and output each
 * 8-bit chunk linearly, regardless of platform.
 *
 * LOCKING:
 * caller.
 */
void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}

/**
 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an odd number.
 *
 * This function is identical to ata_id_string except that it
 * trims trailing spaces and terminates the resulting string with
 * null.  @len must be actual maximum length (even number) + 1.
 *
 * LOCKING:
 * caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	WARN_ON(!(len & 1));

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}
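
/*
 * Illustrative note (not from the original source): callers typically use
 * this helper to pull fixed-width text fields out of IDENTIFY data, e.g.
 * the model string, which the ATA spec places at word 27 with a width of
 * 40 characters:
 *
 *	unsigned char model[41];
 *	ata_id_c_string(dev->id, model, 27, sizeof(model));
 */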

static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, 100);
		else
			return ata_id_u32(id, 60);
	} else {
		if (ata_id_current_chs_valid(id))
			return ata_id_u32(id, 57);
		else
			return id[1] * id[3] * id[6];
	}
}

/**
 * ata_noop_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * This function performs no actual function.
 *
 * May be used as the dev_select() entry in ata_port_operations.
 *
 * LOCKING:
 * caller.
 */
void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
{
}

/**
 * ata_std_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.  Works with both PIO and MMIO.
 *
 * May be used as the dev_select() entry in ata_port_operations.
 *
 * LOCKING:
 * caller.
 */
void ata_std_dev_select(struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	if (ap->flags & ATA_FLAG_MMIO) {
		writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
	} else {
		outb(tmp, ap->ioaddr.device_addr);
	}
	ata_pause(ap);	/* needed; also flushes, for mmio */
}

/**
 * ata_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 * @wait: non-zero to wait for Status register BSY bit to clear
 * @can_sleep: non-zero if context allows sleeping
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.
 *
 * This is a high-level version of ata_std_dev_select(),
 * which additionally provides the services of inserting
 * the proper pauses and status polling, where needed.
 *
 * LOCKING:
 * caller.
 */
void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
{
	VPRINTK("ENTER, ata%u: device %u, wait %u\n",
		ap->id, device, wait);

	if (wait)
		ata_wait_idle(ap);

	ap->ops->dev_select(ap, device);

	if (wait) {
		if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}

/**
 * ata_dump_id - IDENTIFY DEVICE info debugging output
 * @id: IDENTIFY DEVICE page to dump
 *
 * Dump selected 16-bit words from the given IDENTIFY DEVICE
 * page.
 *
 * LOCKING:
 * caller.
 */
static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x\n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x\n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}

/**
 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 * @id: IDENTIFY data to compute xfer mask from
 *
 * Compute the xfermask for this device.  This is not as trivial
 * as it seems if we must consider early devices correctly.
 *
 * FIXME: pre IDE drive timing (do we care ?).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Computed xfermask
 */
static unsigned int ata_id_xfermask(const u16 *id)
{
	unsigned int pio_mask, mwdma_mask, udma_mask;

	/* Usual case.  Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum.  Turn it into
		 * a mask.
		 */
		pio_mask = (2 << (id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1;

		/* But wait.. there's more.  Design your standards by
		 * committee and you too can get a free iordy field to
		 * process.  However it's the speeds not the modes that
		 * are supported...  Note drivers using the timing API
		 * will get this right anyway.
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
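
/*
 * Illustrative note (not from the original source): the old-style fallback
 * above turns a single "maximum PIO mode" number into a mask of all modes
 * up to and including it.  If word 51's high byte reports mode 2, then
 * (2 << 2) - 1 == 0x7, i.e. PIO modes 0, 1 and 2.
 */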

/**
 * ata_port_queue_task - Queue port_task
 * @ap: The ata_port to queue port_task for
 * @fn: workqueue function to be scheduled
 * @data: data value passed to @fn
 * @delay: delay, in jiffies, before running @fn
 *
 * Schedule @fn(@data) for execution after @delay jiffies using
 * port_task.  There is one port_task per port and it's the
 * user (low level driver)'s responsibility to make sure that only
 * one task is active at any given time.
 *
 * libata core layer takes care of synchronization between
 * port_task and EH.  ata_port_queue_task() may be ignored for EH
 * synchronization.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
			 unsigned long delay)
{
	int rc;

	if (ap->flags & ATA_FLAG_FLUSH_PORT_TASK)
		return;

	PREPARE_WORK(&ap->port_task, fn, data);

	if (!delay)
		rc = queue_work(ata_wq, &ap->port_task);
	else
		rc = queue_delayed_work(ata_wq, &ap->port_task, delay);

	/* rc == 0 means that another user is using port task */
	WARN_ON(rc == 0);
}

/**
 * ata_port_flush_task - Flush port_task
 * @ap: The ata_port to flush port_task for
 *
 * After this function completes, port_task is guaranteed not to
 * be running or scheduled.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	unsigned long flags;

	DPRINTK("ENTER\n");

	spin_lock_irqsave(&ap->host_set->lock, flags);
	ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	DPRINTK("flush #1\n");
	flush_workqueue(ata_wq);

	/*
	 * At this point, if a task is running, it's guaranteed to see
	 * the FLUSH flag; thus, it will never queue pio tasks again.
	 * Cancel and flush.
	 */
	if (!cancel_delayed_work(&ap->port_task)) {
		DPRINTK("flush #2\n");
		flush_workqueue(ata_wq);
	}

	spin_lock_irqsave(&ap->host_set->lock, flags);
	ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	DPRINTK("EXIT\n");
}

void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	qc->ap->ops->tf_read(qc->ap, &qc->tf);
	complete(waiting);
}

/**
 * ata_exec_internal - execute libata internal command
 * @ap: Port to which the command is sent
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @dma_dir: Data transfer direction of the command
 * @buf: Data buffer of the command
 * @buflen: Length of data buffer
 *
 * Executes libata internal command with timeout.  @tf contains
 * command on entry and result on return.  Timeout and error
 * conditions are reported via return value.  No recovery action
 * is taken after a command times out.  It's the caller's duty to
 * clean up after timeout.
 *
 * LOCKING:
 * None.  Should be called with kernel context, might sleep.
 */
static unsigned
ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
		  struct ata_taskfile *tf,
		  int dma_dir, void *buf, unsigned int buflen)
{
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	DECLARE_COMPLETION(wait);
	unsigned long flags;
	unsigned int err_mask;

	spin_lock_irqsave(&ap->host_set->lock, flags);

	qc = ata_qc_new_init(ap, dev);
	BUG_ON(qc == NULL);

	qc->tf = *tf;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		ata_sg_init_one(qc, buf, buflen);
		qc->nsect = buflen / ATA_SECT_SIZE;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	qc->err_mask = ata_qc_issue(qc);
	if (qc->err_mask)
		ata_qc_complete(qc);

	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	if (!wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL)) {
		ata_port_flush_task(ap);

		spin_lock_irqsave(&ap->host_set->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * again.  If completion irq occurs after here but
		 * before the caller cleans up, it will result in a
		 * spurious interrupt.  We can live with that.
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask = AC_ERR_TIMEOUT;
			ata_qc_complete(qc);
			printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n",
			       ap->id, command);
		}

		spin_unlock_irqrestore(&ap->host_set->lock, flags);
	}

	*tf = qc->tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);

	return err_mask;
}

/**
 * ata_pio_need_iordy - check if iordy needed
 * @adev: ATA device
 *
 * Check if the current speed of the device requires IORDY.  Used
 * by various controllers for chip configuration.
 */
unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	int pio;
	int speed = adev->pio_mode - XFER_PIO_0;

	if (speed < 2)
		return 0;
	if (speed > 2)
		return 1;

	/* If we have no drive specific rule, then PIO 2 is non IORDY */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 1;
			return 0;
		}
	}
	return 0;
}
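
/*
 * Illustrative note (not from the original source): the net effect is that
 * PIO modes 3 and above always report IORDY, modes 0-1 never do, and PIO 2
 * reports IORDY only when the drive's advertised EIDE PIO cycle time
 * (adev->id[ATA_ID_EIDE_PIO]) is greater than the 240 ns PIO2 cycle.
 */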

/**
 * ata_dev_read_id - Read ID data from the specified device
 * @ap: port on which target device resides
 * @dev: target device
 * @p_class: pointer to class of the target device (may be changed)
 * @post_reset: is this read ID post-reset?
 * @p_id: read IDENTIFY page (newly allocated)
 *
 * Read ID data from the specified device.  ATA_CMD_ID_ATA is
 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 * devices.  This function also takes care of EDD signature
 * misreporting (to be removed once EDD support is gone) and
 * issues ATA_CMD_INIT_DEV_PARAMS for pre-ATA4 drives.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
			   unsigned int *p_class, int post_reset, u16 **p_id)
{
	unsigned int class = *p_class;
	unsigned int using_edd;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	u16 *id;
	const char *reason;
	int rc;

	DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);

	if (ap->ops->probe_reset ||
	    ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))
		using_edd = 0;
	else
		using_edd = 1;

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */

	id = kmalloc(sizeof(id[0]) * ATA_ID_WORDS, GFP_KERNEL);
	if (id == NULL) {
		rc = -ENOMEM;
		reason = "out of memory";
		goto err_out;
	}

 retry:
	ata_tf_init(ap, &tf, dev->devno);

	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS);

	if (err_mask) {
		rc = -EIO;
		reason = "I/O error";

		if (err_mask & ~AC_ERR_DEV)
			goto err_out;

		/*
		 * arg!  EDD works for all test cases, but seems to return
		 * the ATA signature for some ATAPI devices.  Until the
		 * reason for this is found and fixed, we fix up the mess
		 * here.  If IDENTIFY DEVICE returns command aborted
		 * (as ATAPI devices do), then we issue an
		 * IDENTIFY PACKET DEVICE.
		 *
		 * ATA software reset (SRST, the default) does not appear
		 * to have this problem.
		 */
		if ((using_edd) && (class == ATA_DEV_ATA)) {
			u8 err = tf.feature;
			if (err & ATA_ABORTED) {
				class = ATA_DEV_ATAPI;
				goto retry;
			}
		}
		goto err_out;
	}

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	if ((class == ATA_DEV_ATA) != ata_id_is_ata(id)) {
		rc = -EINVAL;
		reason = "device reports illegal type";
		goto err_out;
	}

	if (post_reset && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY
		 * INITIALIZE DEVICE PARAMETERS
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(ap, dev);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed.  reread the identify device info.
			 */
			post_reset = 0;
			goto retry;
		}
	}

	*p_class = class;
	*p_id = id;
	return 0;

 err_out:
	printk(KERN_WARNING "ata%u: dev %u failed to IDENTIFY (%s)\n",
	       ap->id, dev->devno, reason);
	kfree(id);
	return rc;
}

static inline u8 ata_dev_knobble(const struct ata_port *ap,
				 struct ata_device *dev)
{
	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}

/**
 * ata_dev_configure - Configure the specified ATA/ATAPI device
 * @ap: Port on which target device resides
 * @dev: Target device to configure
 * @print_info: Enable device info printout
 *
 * Configure @dev according to @dev->id.  Generic and low-level
 * driver specific fixups are also applied.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise
 */
static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
			     int print_info)
{
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	int i, rc;

	if (!ata_dev_present(dev)) {
		DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
			ap->id, dev->devno);
		return 0;
	}

	DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);

	/* print device capabilities */
	if (print_info)
		printk(KERN_DEBUG "ata%u: dev %u cfg 49:%04x 82:%04x 83:%04x "
		       "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
		       ap->id, dev->devno, id[49], id[82], id[83],
		       id[84], id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags = 0;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* we require DMA support (bit 8 of word 49) */
	if (!ata_id_has_dma(id)) {
		printk(KERN_DEBUG "ata%u: no dma\n", ap->id);
		rc = -EINVAL;
		goto err_out_nosup;
	}

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	ata_dump_id(id);

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		dev->n_sectors = ata_id_n_sectors(id);

		if (ata_id_has_lba(id)) {
			const char *lba_desc;

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";
			}

			/* print device info to dmesg */
			if (print_info)
				printk(KERN_INFO "ata%u: dev %u ATA-%d, "
				       "max %s, %Lu sectors: %s\n",
				       ap->id, dev->devno,
				       ata_id_major_version(id),
				       ata_mode_string(xfer_mask),
				       (unsigned long long)dev->n_sectors,
				       lba_desc);
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			if (print_info)
				printk(KERN_INFO "ata%u: dev %u ATA-%d, "
				       "max %s, %Lu sectors: CHS %u/%u/%u\n",
				       ap->id, dev->devno,
				       ata_id_major_version(id),
				       ata_mode_string(xfer_mask),
				       (unsigned long long)dev->n_sectors,
				       dev->cylinders, dev->heads, dev->sectors);
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* print device info to dmesg */
		if (print_info)
			printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
			       ap->id, dev->devno, ata_mode_string(xfer_mask));
	}

	ap->host->max_cmd_len = 0;
	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ap->host->max_cmd_len = max_t(unsigned int,
					      ap->host->max_cmd_len,
					      ap->device[i].cdb_len);

	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(ap, dev)) {
		if (print_info)
			printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
			       ap->id, dev->devno);
		ap->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if (ap->ops->dev_config)
		ap->ops->dev_config(ap, dev);

	DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
	return 0;

err_out_nosup:
	printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n",
	       ap->id, dev->devno);
	DPRINTK("EXIT, err\n");
	return rc;
}

/**
 * ata_bus_probe - Reset and probe ATA bus
 * @ap: Bus to probe
 *
 * Master ATA bus probing function.  Initiates a hardware-dependent
 * bus reset, then attempts to identify any devices found on
 * the bus.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 * RETURNS:
 * Zero on success, non-zero on error.
 */
static int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	unsigned int i, rc, found = 0;

	ata_port_probe(ap);

	/* reset and determine device classes */
	for (i = 0; i < ATA_MAX_DEVICES; i++)
		classes[i] = ATA_DEV_UNKNOWN;

	if (ap->ops->probe_reset) {
		rc = ap->ops->probe_reset(ap, classes);
		if (rc) {
			printk("ata%u: reset failed (errno=%d)\n", ap->id, rc);
			return rc;
		}
	} else {
		ap->ops->phy_reset(ap);

		if (!(ap->flags & ATA_FLAG_PORT_DISABLED))
			for (i = 0; i < ATA_MAX_DEVICES; i++)
				classes[i] = ap->device[i].class;

		ata_port_probe(ap);
	}

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		if (classes[i] == ATA_DEV_UNKNOWN)
			classes[i] = ATA_DEV_NONE;

	/* read IDENTIFY page and configure devices */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];

		dev->class = classes[i];

		if (!ata_dev_present(dev))
			continue;

		WARN_ON(dev->id != NULL);
		if (ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id)) {
			dev->class = ATA_DEV_NONE;
			continue;
		}

		if (ata_dev_configure(ap, dev, 1)) {
			dev->class++;	/* disable device */
			continue;
		}

		found = 1;
	}

	if (!found)
		goto err_out_disable;

	ata_set_mode(ap);
	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		goto err_out_disable;

	return 0;

err_out_disable:
	ap->ops->port_disable(ap);
	return -1;
}

/**
 * ata_port_probe - Mark port as enabled
 * @ap: Port for which we indicate enablement
 *
 * Modify @ap data structure such that the system
 * thinks that the entire port is enabled.
 *
 * LOCKING: host_set lock, or some other form of
 * serialization.
 */
void ata_port_probe(struct ata_port *ap)
{
	ap->flags &= ~ATA_FLAG_PORT_DISABLED;
}

/**
 * sata_print_link_status - Print SATA link status
 * @ap: SATA port to printk link status about
 *
 * This function prints link speed and status of a SATA link.
 *
 * LOCKING:
 * None.
 */
static void sata_print_link_status(struct ata_port *ap)
{
	u32 sstatus, tmp;
	const char *speed;

	if (!ap->ops->scr_read)
		return;

	sstatus = scr_read(ap, SCR_STATUS);

	if (sata_dev_present(ap)) {
		tmp = (sstatus >> 4) & 0xf;
		if (tmp & (1 << 0))
			speed = "1.5";
		else if (tmp & (1 << 1))
			speed = "3.0";
		else
			speed = "<unknown>";
		printk(KERN_INFO "ata%u: SATA link up %s Gbps (SStatus %X)\n",
		       ap->id, speed, sstatus);
	} else {
		printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n",
		       ap->id, sstatus);
	}
}

/**
 * __sata_phy_reset - Wake/reset a low-level SATA PHY
 * @ap: SATA port associated with target SATA PHY.
 *
 * This function issues commands to standard SATA Sxxx
 * PHY registers, to wake up the phy (and device), and
 * clear any reset condition.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 */
void __sata_phy_reset(struct ata_port *ap)
{
	u32 sstatus;
	unsigned long timeout = jiffies + (HZ * 5);

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset */
		scr_write_flush(ap, SCR_CONTROL, 0x301);
		/* Couldn't find anything in SATA I/II specs, but
		 * AHCI-1.1 10.4.2 says at least 1 ms. */
		mdelay(1);
	}
	scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */

	/* wait for phy to become ready, if necessary */
	do {
		msleep(200);
		sstatus = scr_read(ap, SCR_STATUS);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* print link status */
	sata_print_link_status(ap);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (sata_dev_present(ap))
		ata_port_probe(ap);
	else
		ata_port_disable(ap);

	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		return;

	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}

/**
 * sata_phy_reset - Reset SATA bus.
 * @ap: SATA port associated with target SATA PHY.
 *
 * This function resets the SATA bus, and then probes
 * the bus for devices.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 */
void sata_phy_reset(struct ata_port *ap)
{
	__sata_phy_reset(ap);
	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		return;
	ata_bus_reset(ap);
}

/**
 * ata_port_disable - Disable port.
 * @ap: Port to be disabled.
 *
 * Modify @ap data structure such that the system
 * thinks that the entire port is disabled, and should
 * never attempt to probe or communicate with devices
 * on this port.
 *
 * LOCKING: host_set lock, or some other form of
 * serialization.
 */
void ata_port_disable(struct ata_port *ap)
{
	ap->device[0].class = ATA_DEV_NONE;
	ap->device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_PORT_DISABLED;
}

/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for PIO 5, which is a nonstandard extension and UDMA6, which
 * is currently supported only by Maxtor drives.
 */

static const struct ata_timing ata_timing[] = {

	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },

	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */

	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },

	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },

/*	{ XFER_PIO_5,     20,  50,  30, 100,  50,  30, 100,   0 }, */

	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },

	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */

	{ 0xFF }
};

#define ENOUGH(v,unit)	(((v)-1)/(unit)+1)
#define EZ(v,unit)	((v)?ENOUGH(v,unit):0)

static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup   = EZ(t->setup   * 1000,  T);
	q->act8b   = EZ(t->act8b   * 1000,  T);
	q->rec8b   = EZ(t->rec8b   * 1000,  T);
	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
	q->active  = EZ(t->active  * 1000,  T);
	q->recover = EZ(t->recover * 1000,  T);
	q->cycle   = EZ(t->cycle   * 1000,  T);
	q->udma    = EZ(t->udma    * 1000, UT);
}

void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
}

static const struct ata_timing *ata_timing_find_mode(unsigned short speed)
{
	const struct ata_timing *t;

	for (t = ata_timing; t->mode != speed; t++)
		if (t->mode == 0xFF)
			return NULL;
	return t;
}

int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */
	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
			else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */
	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T and some other commands.  We have to ensure that the
	 * DMA cycle timing is no faster than the fastest PIO timing.
	 */
	if (speed > XFER_PIO_4) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */
	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	return 0;
}
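
/*
 * Illustrative note (not from the original source): the final "lengthen"
 * step above keeps the programmed cycle time honest.  If quantization left
 * active=3 and recover=2 clocks but cycle=7 clocks, the 2-clock deficit is
 * split, giving active=4 and recover=3 so that active + recover == cycle.
 */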

static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
{
	if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED))
		return;

	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	ata_dev_set_xfermode(ap, dev);

	if (ata_dev_revalidate(ap, dev, 0)) {
		printk(KERN_ERR "ata%u: failed to revalidate after set "
		       "xfermode, disabled\n", ap->id);
		ata_port_disable(ap);
	}

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	printk(KERN_INFO "ata%u: dev %u configured for %s\n",
	       ap->id, dev->devno,
	       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
}

static int ata_host_set_pio(struct ata_port *ap)
{
	int i;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];

		if (!ata_dev_present(dev))
			continue;

		if (!dev->pio_mode) {
			printk(KERN_WARNING "ata%u: no PIO support\n", ap->id);
			return -1;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	return 0;
}

static void ata_host_set_dma(struct ata_port *ap)
{
	int i;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];

		if (!ata_dev_present(dev) || !dev->dma_mode)
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}
}

/**
 * ata_set_mode - Program timings and issue SET FEATURES - XFER
 * @ap: port on which timings will be programmed
 *
 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.).
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 */
static void ata_set_mode(struct ata_port *ap)
{
	int i, rc;

	/* step 1: calculate xfer_mask */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		unsigned int xfer_mask;

		if (!ata_dev_present(dev))
			continue;

		xfer_mask = ata_dev_xfermask(ap, dev);

		dev->pio_mode = ata_xfer_mask2mode(xfer_mask & ATA_MASK_PIO);
		dev->dma_mode = ata_xfer_mask2mode(xfer_mask & (ATA_MASK_MWDMA |
								ATA_MASK_UDMA));
	}

	/* step 2: always set host PIO timings */
	rc = ata_host_set_pio(ap);
	if (rc)
		goto err_out;

	/* step 3: set host DMA timings */
	ata_host_set_dma(ap);

	/* step 4: update devices' xfer mode */
	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ata_dev_set_mode(ap, &ap->device[i]);

	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		return;

	if (ap->ops->post_set_mode)
		ap->ops->post_set_mode(ap);

	return;

err_out:
	ata_port_disable(ap);
}
2006-02-09 13:15:27 +03:00
/**
* ata_tf_to_host - issue ATA taskfile to host controller
* @ ap : port to which command is being issued
* @ tf : ATA taskfile register set
*
* Issues ATA taskfile register set to ATA host controller ,
* with proper synchronization with interrupt handler and
* other threads .
*
* LOCKING :
* spin_lock_irqsave ( host_set lock )
*/
static inline void ata_tf_to_host ( struct ata_port * ap ,
const struct ata_taskfile * tf )
{
ap - > ops - > tf_load ( ap , tf ) ;
ap - > ops - > exec_command ( ap , tf ) ;
}
2005-04-17 02:20:36 +04:00
/**
* ata_busy_sleep - sleep until BSY clears , or timeout
* @ ap : port containing status register to be polled
* @ tmout_pat : impatience timeout
* @ tmout : overall timeout
*
2005-05-30 23:41:05 +04:00
* Sleep until ATA Status register bit BSY clears ,
* or a timeout occurs .
*
* LOCKING : None .
2005-04-17 02:20:36 +04:00
*/
2006-01-24 11:05:21 +03:00
unsigned int ata_busy_sleep ( struct ata_port * ap ,
unsigned long tmout_pat , unsigned long tmout )
2005-04-17 02:20:36 +04:00
{
unsigned long timer_start , timeout ;
u8 status ;
status = ata_busy_wait ( ap , ATA_BUSY , 300 ) ;
timer_start = jiffies ;
timeout = timer_start + tmout_pat ;
while ( ( status & ATA_BUSY ) & & ( time_before ( jiffies , timeout ) ) ) {
msleep ( 50 ) ;
status = ata_busy_wait ( ap , ATA_BUSY , 3 ) ;
}
if ( status & ATA_BUSY )
printk ( KERN_WARNING " ata%u is slow to respond, "
" please be patient \n " , ap - > id ) ;
timeout = timer_start + tmout ;
while ( ( status & ATA_BUSY ) & & ( time_before ( jiffies , timeout ) ) ) {
msleep ( 50 ) ;
status = ata_chk_status ( ap ) ;
}
if ( status & ATA_BUSY ) {
printk ( KERN_ERR " ata%u failed to respond (%lu secs) \n " ,
ap - > id , tmout / HZ ) ;
return 1 ;
}
return 0 ;
}
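/*
 * Usage sketch (illustrative, not part of this file): callers that have
 * just issued a reset typically wait for BSY with the boot timeouts,
 * e.g.
 *
 *	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT))
 *		printk(KERN_ERR "ata%u: device not ready\n", ap->id);
 *
 * The first timeout only controls when the "slow to respond" warning is
 * printed; the hard failure (return value 1) is governed by the second.
 */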
static void ata_bus_post_reset ( struct ata_port * ap , unsigned int devmask )
{
struct ata_ioports * ioaddr = & ap - > ioaddr ;
unsigned int dev0 = devmask & ( 1 < < 0 ) ;
unsigned int dev1 = devmask & ( 1 < < 1 ) ;
unsigned long timeout ;
/* if device 0 was found in ata_devchk, wait for its
* BSY bit to clear
*/
if ( dev0 )
ata_busy_sleep ( ap , ATA_TMOUT_BOOT_QUICK , ATA_TMOUT_BOOT ) ;
/* if device 1 was found in ata_devchk, wait for
* register access , then wait for BSY to clear
*/
timeout = jiffies + ATA_TMOUT_BOOT ;
while ( dev1 ) {
u8 nsect , lbal ;
ap - > ops - > dev_select ( ap , 1 ) ;
if ( ap - > flags & ATA_FLAG_MMIO ) {
nsect = readb ( ( void __iomem * ) ioaddr - > nsect_addr ) ;
lbal = readb ( ( void __iomem * ) ioaddr - > lbal_addr ) ;
} else {
nsect = inb ( ioaddr - > nsect_addr ) ;
lbal = inb ( ioaddr - > lbal_addr ) ;
}
if ( ( nsect = = 1 ) & & ( lbal = = 1 ) )
break ;
if ( time_after ( jiffies , timeout ) ) {
dev1 = 0 ;
break ;
}
msleep ( 50 ) ; /* give drive a breather */
}
if ( dev1 )
ata_busy_sleep ( ap , ATA_TMOUT_BOOT_QUICK , ATA_TMOUT_BOOT ) ;
/* is all this really necessary? */
ap - > ops - > dev_select ( ap , 0 ) ;
if ( dev1 )
ap - > ops - > dev_select ( ap , 1 ) ;
if ( dev0 )
ap - > ops - > dev_select ( ap , 0 ) ;
}
/**
2005-05-31 03:49:12 +04:00
* ata_bus_edd - Issue EXECUTE DEVICE DIAGNOSTIC command .
* @ ap : Port to reset and probe
*
* Use the EXECUTE DEVICE DIAGNOSTIC command to reset and
* probe the bus . Not often used these days .
2005-04-17 02:20:36 +04:00
*
* LOCKING :
2005-05-31 03:49:12 +04:00
* PCI / etc . bus probe sem .
2005-10-31 05:37:17 +03:00
* Obtains host_set lock .
2005-04-17 02:20:36 +04:00
*
*/
static unsigned int ata_bus_edd ( struct ata_port * ap )
{
struct ata_taskfile tf ;
2005-10-31 05:37:17 +03:00
unsigned long flags ;
2005-04-17 02:20:36 +04:00
/* set up execute-device-diag (bus reset) taskfile */
/* also, take interrupts to a known state (disabled) */
DPRINTK ( " execute-device-diag \n " ) ;
ata_tf_init ( ap , & tf , 0 ) ;
tf . ctl | = ATA_NIEN ;
tf . command = ATA_CMD_EDD ;
tf . protocol = ATA_PROT_NODATA ;
/* do bus reset */
2005-10-31 05:37:17 +03:00
spin_lock_irqsave ( & ap - > host_set - > lock , flags ) ;
2005-04-17 02:20:36 +04:00
ata_tf_to_host ( ap , & tf ) ;
2005-10-31 05:37:17 +03:00
spin_unlock_irqrestore ( & ap - > host_set - > lock , flags ) ;
2005-04-17 02:20:36 +04:00
/* spec says at least 2ms. but who knows with those
* crazy ATAPI devices . . .
*/
msleep ( 150 ) ;
return ata_busy_sleep ( ap , ATA_TMOUT_BOOT_QUICK , ATA_TMOUT_BOOT ) ;
}
static unsigned int ata_bus_softreset ( struct ata_port * ap ,
unsigned int devmask )
{
struct ata_ioports * ioaddr = & ap - > ioaddr ;
DPRINTK ( " ata%u: bus reset via SRST \n " , ap - > id ) ;
/* software reset. causes dev0 to be selected */
if ( ap - > flags & ATA_FLAG_MMIO ) {
writeb ( ap - > ctl , ( void __iomem * ) ioaddr - > ctl_addr ) ;
udelay ( 20 ) ; /* FIXME: flush */
writeb ( ap - > ctl | ATA_SRST , ( void __iomem * ) ioaddr - > ctl_addr ) ;
udelay ( 20 ) ; /* FIXME: flush */
writeb ( ap - > ctl , ( void __iomem * ) ioaddr - > ctl_addr ) ;
} else {
outb ( ap - > ctl , ioaddr - > ctl_addr ) ;
udelay ( 10 ) ;
outb ( ap - > ctl | ATA_SRST , ioaddr - > ctl_addr ) ;
udelay ( 10 ) ;
outb ( ap - > ctl , ioaddr - > ctl_addr ) ;
}
/* spec mandates ">= 2ms" before checking status.
* We wait 150 ms , because that was the magic delay used for
* ATAPI devices in Hale Landis ' s ATADRVR , for the period of time
* between when the ATA command register is written , and then
* status is checked . Because waiting for " a while " before
* checking status is fine , post SRST , we perform this magic
* delay here as well .
*/
msleep ( 150 ) ;
ata_bus_post_reset ( ap , devmask ) ;
return 0 ;
}
/**
* ata_bus_reset - reset host port and associated ATA channel
* @ ap : port to reset
*
* This is typically the first time we actually start issuing
* commands to the ATA channel . We wait for BSY to clear , then
* issue EXECUTE DEVICE DIAGNOSTIC command , polling for its
* result . Determine what devices , if any , are on the channel
* by looking at the device 0 / 1 error register . Look at the signature
* stored in each device ' s taskfile registers , to determine if
* the device is ATA or ATAPI .
*
* LOCKING :
2005-05-31 03:49:12 +04:00
* PCI / etc . bus probe sem .
* Obtains host_set lock .
2005-04-17 02:20:36 +04:00
*
* SIDE EFFECTS :
* Sets ATA_FLAG_PORT_DISABLED if bus reset fails .
*/
void ata_bus_reset ( struct ata_port * ap )
{
struct ata_ioports * ioaddr = & ap - > ioaddr ;
unsigned int slave_possible = ap - > flags & ATA_FLAG_SLAVE_POSS ;
u8 err ;
unsigned int dev0 , dev1 = 0 , rc = 0 , devmask = 0 ;
DPRINTK ( " ENTER, host %u, port %u \n " , ap - > id , ap - > port_no ) ;
/* determine if device 0/1 are present */
if ( ap - > flags & ATA_FLAG_SATA_RESET )
dev0 = 1 ;
else {
dev0 = ata_devchk ( ap , 0 ) ;
if ( slave_possible )
dev1 = ata_devchk ( ap , 1 ) ;
}
if ( dev0 )
devmask | = ( 1 < < 0 ) ;
if ( dev1 )
devmask | = ( 1 < < 1 ) ;
/* select device 0 again */
ap - > ops - > dev_select ( ap , 0 ) ;
/* issue bus reset */
if ( ap - > flags & ATA_FLAG_SRST )
rc = ata_bus_softreset ( ap , devmask ) ;
else if ( ( ap - > flags & ATA_FLAG_SATA_RESET ) = = 0 ) {
/* set up device control */
if ( ap - > flags & ATA_FLAG_MMIO )
writeb ( ap - > ctl , ( void __iomem * ) ioaddr - > ctl_addr ) ;
else
outb ( ap - > ctl , ioaddr - > ctl_addr ) ;
rc = ata_bus_edd ( ap ) ;
}
if ( rc )
goto err_out ;
/*
* determine by signature whether we have ATA or ATAPI devices
*/
2006-01-24 11:05:22 +03:00
ap - > device [ 0 ] . class = ata_dev_try_classify ( ap , 0 , & err ) ;
2005-04-17 02:20:36 +04:00
if ( ( slave_possible ) & & ( err ! = 0x81 ) )
2006-01-24 11:05:22 +03:00
ap - > device [ 1 ] . class = ata_dev_try_classify ( ap , 1 , & err ) ;
2005-04-17 02:20:36 +04:00
/* re-enable interrupts */
if ( ap - > ioaddr . ctl_addr ) /* FIXME: hack. create a hook instead */
ata_irq_on ( ap ) ;
/* is double-select really necessary? */
if ( ap - > device [ 1 ] . class ! = ATA_DEV_NONE )
ap - > ops - > dev_select ( ap , 1 ) ;
if ( ap - > device [ 0 ] . class ! = ATA_DEV_NONE )
ap - > ops - > dev_select ( ap , 0 ) ;
/* if no devices were detected, disable this port */
if ( ( ap - > device [ 0 ] . class = = ATA_DEV_NONE ) & &
( ap - > device [ 1 ] . class = = ATA_DEV_NONE ) )
goto err_out ;
if ( ap - > flags & ( ATA_FLAG_SATA_RESET | ATA_FLAG_SRST ) ) {
/* set up device control for ATA_FLAG_SATA_RESET */
if ( ap - > flags & ATA_FLAG_MMIO )
writeb ( ap - > ctl , ( void __iomem * ) ioaddr - > ctl_addr ) ;
else
outb ( ap - > ctl , ioaddr - > ctl_addr ) ;
}
DPRINTK ( " EXIT \n " ) ;
return ;
err_out :
printk ( KERN_ERR " ata%u: disabling port \n " , ap - > id ) ;
ap - > ops - > port_disable ( ap ) ;
DPRINTK ( " EXIT \n " ) ;
}
2006-02-02 12:20:00 +03:00
static int sata_phy_resume ( struct ata_port * ap )
{
unsigned long timeout = jiffies + ( HZ * 5 ) ;
u32 sstatus ;
scr_write_flush ( ap , SCR_CONTROL , 0x300 ) ;
/* Wait for phy to become ready, if necessary. */
do {
msleep ( 200 ) ;
sstatus = scr_read ( ap , SCR_STATUS ) ;
if ( ( sstatus & 0xf ) ! = 1 )
return 0 ;
} while ( time_before ( jiffies , timeout ) ) ;
return - 1 ;
}
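/*
 * Note on the SControl values used above and in sata_std_hardreset()
 * below (per the SATA spec, stated here for reference): the low nibble
 * is the DET field (0x1 = issue COMRESET, 0x0 = no reset action), and
 * 0x300 sets the IPM field to disable the partial and slumber power
 * management states.
 */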
2006-02-02 12:20:00 +03:00
/**
* ata_std_probeinit - initialize probing
* @ ap : port to be probed
*
* @ ap is about to be probed . Initialize it . This function is
* to be used as standard callback for ata_drive_probe_reset ( ) .
2006-02-10 17:58:48 +03:00
*
* NOTE!!! Do not use this function as probeinit if a low level
* driver implements only hardreset.  Just pass NULL as probeinit
* in that case.  Using this function is probably okay but doing
* so makes reset sequence different from the original
* ->phy_reset implementation and Jeff nervous.  :-P
2006-02-02 12:20:00 +03:00
*/
void ata_std_probeinit(struct ata_port *ap)
{
2006-02-10 17:58:48 +03:00
if ((ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read) {
2006-02-02 12:20:00 +03:00
sata_phy_resume ( ap ) ;
2006-02-10 17:58:48 +03:00
if ( sata_dev_present ( ap ) )
ata_busy_sleep ( ap , ATA_TMOUT_BOOT_QUICK , ATA_TMOUT_BOOT ) ;
}
2006-02-02 12:20:00 +03:00
}
2006-01-24 11:05:22 +03:00
/**
* ata_std_softreset - reset host port via ATA SRST
* @ ap : port to reset
* @ verbose : fail verbosely
* @ classes : resulting classes of attached devices
*
* Reset host port using ATA SRST . This function is to be used
* as standard callback for ata_drive_ * _reset ( ) functions .
*
* LOCKING :
* Kernel thread context ( may sleep )
*
* RETURNS :
* 0 on success , - errno otherwise .
*/
int ata_std_softreset ( struct ata_port * ap , int verbose , unsigned int * classes )
{
unsigned int slave_possible = ap - > flags & ATA_FLAG_SLAVE_POSS ;
unsigned int devmask = 0 , err_mask ;
u8 err ;
DPRINTK ( " ENTER \n " ) ;
2006-02-10 17:58:48 +03:00
if ( ap - > ops - > scr_read & & ! sata_dev_present ( ap ) ) {
classes [ 0 ] = ATA_DEV_NONE ;
goto out ;
}
2006-01-24 11:05:22 +03:00
/* determine if device 0/1 are present */
if ( ata_devchk ( ap , 0 ) )
devmask | = ( 1 < < 0 ) ;
if ( slave_possible & & ata_devchk ( ap , 1 ) )
devmask | = ( 1 < < 1 ) ;
/* select device 0 again */
ap - > ops - > dev_select ( ap , 0 ) ;
/* issue bus reset */
DPRINTK ( " about to softreset, devmask=%x \n " , devmask ) ;
err_mask = ata_bus_softreset ( ap , devmask ) ;
if ( err_mask ) {
if ( verbose )
printk ( KERN_ERR " ata%u: SRST failed (err_mask=0x%x) \n " ,
ap - > id , err_mask ) ;
else
DPRINTK ( " EXIT, softreset failed (err_mask=0x%x) \n " ,
err_mask ) ;
return - EIO ;
}
/* determine by signature whether we have ATA or ATAPI devices */
classes [ 0 ] = ata_dev_try_classify ( ap , 0 , & err ) ;
if ( slave_possible & & err ! = 0x81 )
classes [ 1 ] = ata_dev_try_classify ( ap , 1 , & err ) ;
2006-02-10 17:58:48 +03:00
out :
2006-01-24 11:05:22 +03:00
DPRINTK ( " EXIT, classes[0]=%u [1]=%u \n " , classes [ 0 ] , classes [ 1 ] ) ;
return 0 ;
}
/**
* sata_std_hardreset - reset host port via SATA phy reset
* @ ap : port to reset
* @ verbose : fail verbosely
* @ class : resulting class of attached device
*
* SATA phy - reset host port using DET bits of SControl register .
* This function is to be used as standard callback for
* ata_drive_ * _reset ( ) .
*
* LOCKING :
* Kernel thread context ( may sleep )
*
* RETURNS :
* 0 on success , - errno otherwise .
*/
int sata_std_hardreset ( struct ata_port * ap , int verbose , unsigned int * class )
{
DPRINTK ( " ENTER \n " ) ;
/* Issue phy wake/reset */
scr_write_flush ( ap , SCR_CONTROL , 0x301 ) ;
/*
* Couldn't find anything in SATA I/II specs, but AHCI-1.1
* 10.4.2 says at least 1 ms.
*/
msleep ( 1 ) ;
2006-02-02 12:20:00 +03:00
/* Bring phy back */
sata_phy_resume ( ap ) ;
2006-01-24 11:05:22 +03:00
/* TODO: phy layer with polling, timeouts, etc. */
if ( ! sata_dev_present ( ap ) ) {
* class = ATA_DEV_NONE ;
DPRINTK ( " EXIT, link offline \n " ) ;
return 0 ;
}
if ( ata_busy_sleep ( ap , ATA_TMOUT_BOOT_QUICK , ATA_TMOUT_BOOT ) ) {
if ( verbose )
printk ( KERN_ERR " ata%u: COMRESET failed "
" (device not ready) \n " , ap - > id ) ;
else
DPRINTK ( " EXIT, device not ready \n " ) ;
return - EIO ;
}
2006-02-10 17:58:48 +03:00
ap - > ops - > dev_select ( ap , 0 ) ; /* probably unnecessary */
2006-01-24 11:05:22 +03:00
* class = ata_dev_try_classify ( ap , 0 , NULL ) ;
DPRINTK ( " EXIT, class=%u \n " , * class ) ;
return 0 ;
}
/**
* ata_std_postreset - standard postreset callback
* @ ap : the target ata_port
* @ classes : classes of attached devices
*
* This function is invoked after a successful reset . Note that
* the device might have been reset more than once using
* different reset methods before postreset is invoked .
*
* This function is to be used as standard callback for
* ata_drive_ * _reset ( ) .
*
* LOCKING :
* Kernel thread context ( may sleep )
*/
void ata_std_postreset ( struct ata_port * ap , unsigned int * classes )
{
DPRINTK ( " ENTER \n " ) ;
2006-02-15 09:01:42 +03:00
/* set cable type if it isn't already set */
2006-01-24 11:05:22 +03:00
if ( ap - > cbl = = ATA_CBL_NONE & & ap - > flags & ATA_FLAG_SATA )
ap - > cbl = ATA_CBL_SATA ;
/* print link status */
if ( ap - > cbl = = ATA_CBL_SATA )
sata_print_link_status ( ap ) ;
2006-02-10 17:58:48 +03:00
/* re-enable interrupts */
if ( ap - > ioaddr . ctl_addr ) /* FIXME: hack. create a hook instead */
ata_irq_on ( ap ) ;
2006-01-24 11:05:22 +03:00
/* is double-select really necessary? */
if ( classes [ 0 ] ! = ATA_DEV_NONE )
ap - > ops - > dev_select ( ap , 1 ) ;
if ( classes [ 1 ] ! = ATA_DEV_NONE )
ap - > ops - > dev_select ( ap , 0 ) ;
2006-02-10 17:58:48 +03:00
/* bail out if no device is present */
if ( classes [ 0 ] = = ATA_DEV_NONE & & classes [ 1 ] = = ATA_DEV_NONE ) {
DPRINTK ( " EXIT, no device \n " ) ;
return ;
}
/* set up device control */
if ( ap - > ioaddr . ctl_addr ) {
if ( ap - > flags & ATA_FLAG_MMIO )
writeb ( ap - > ctl , ( void __iomem * ) ap - > ioaddr . ctl_addr ) ;
else
outb ( ap - > ctl , ap - > ioaddr . ctl_addr ) ;
}
2006-01-24 11:05:22 +03:00
DPRINTK ( " EXIT \n " ) ;
}
/**
* ata_std_probe_reset - standard probe reset method
* @ap: port to perform probe-reset
* @classes: resulting classes of attached devices
*
* The stock off-the-shelf ->probe_reset method.
*
* LOCKING :
* Kernel thread context ( may sleep )
*
* RETURNS :
* 0 on success , - errno otherwise .
*/
int ata_std_probe_reset ( struct ata_port * ap , unsigned int * classes )
{
ata_reset_fn_t hardreset ;
hardreset = NULL ;
2006-02-02 12:20:00 +03:00
if ((ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read)
2006-01-24 11:05:22 +03:00
hardreset = sata_std_hardreset ;
2006-02-02 12:20:00 +03:00
return ata_drive_probe_reset ( ap , ata_std_probeinit ,
2006-02-02 12:20:00 +03:00
ata_std_softreset , hardreset ,
2006-01-24 11:05:22 +03:00
ata_std_postreset , classes ) ;
}
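/*
 * Illustrative sketch (hypothetical driver, not from this file): a low
 * level driver that is satisfied with the stock behaviour can point its
 * ->probe_reset hook straight at ata_std_probe_reset; "my_ops" is a
 * made-up name and only the relevant member is shown.
 *
 *	static const struct ata_port_operations my_ops = {
 *		.probe_reset	= ata_std_probe_reset,
 *	};
 */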
2006-01-24 11:05:22 +03:00
static int do_probe_reset ( struct ata_port * ap , ata_reset_fn_t reset ,
ata_postreset_fn_t postreset ,
unsigned int * classes )
{
int i , rc ;
for ( i = 0 ; i < ATA_MAX_DEVICES ; i + + )
classes [ i ] = ATA_DEV_UNKNOWN ;
rc = reset ( ap , 0 , classes ) ;
if ( rc )
return rc ;
/* If any class isn't ATA_DEV_UNKNOWN, classification is
 * considered complete and all remaining ATA_DEV_UNKNOWN
 * entries are converted to ATA_DEV_NONE.
*/
for ( i = 0 ; i < ATA_MAX_DEVICES ; i + + )
if ( classes [ i ] ! = ATA_DEV_UNKNOWN )
break ;
if ( i < ATA_MAX_DEVICES )
for ( i = 0 ; i < ATA_MAX_DEVICES ; i + + )
if ( classes [ i ] = = ATA_DEV_UNKNOWN )
classes [ i ] = ATA_DEV_NONE ;
if ( postreset )
postreset ( ap , classes ) ;
return classes [ 0 ] ! = ATA_DEV_UNKNOWN ? 0 : - ENODEV ;
}
/**
* ata_drive_probe_reset - Perform probe reset with given methods
* @ ap : port to reset
2006-02-02 12:20:00 +03:00
* @ probeinit : probeinit method ( can be NULL )
2006-01-24 11:05:22 +03:00
* @ softreset : softreset method ( can be NULL )
* @ hardreset : hardreset method ( can be NULL )
* @ postreset : postreset method ( can be NULL )
* @ classes : resulting classes of attached devices
*
* Reset the specified port and classify attached devices using
* given methods . This function prefers softreset but tries all
* possible reset sequences to reset and classify devices . This
* function is intended to be used for constructing ->probe_reset
* callback by low level drivers .
*
* Reset methods should follow the following rules .
*
* - Return 0 on success, -errno on failure.
* - If classification is supported , fill classes [ ] with
* recognized class codes .
* - If classification is not supported , leave classes [ ] alone .
* - If verbose is non - zero , print error message on failure ;
* otherwise , shut up .
*
* LOCKING :
* Kernel thread context ( may sleep )
*
* RETURNS :
* 0 on success, -EINVAL if no reset method is available, -ENODEV
* if classification fails , and any error code from reset
* methods .
*/
2006-02-02 12:20:00 +03:00
int ata_drive_probe_reset ( struct ata_port * ap , ata_probeinit_fn_t probeinit ,
2006-01-24 11:05:22 +03:00
ata_reset_fn_t softreset , ata_reset_fn_t hardreset ,
ata_postreset_fn_t postreset , unsigned int * classes )
{
int rc = - EINVAL ;
2006-02-02 12:20:00 +03:00
if ( probeinit )
probeinit ( ap ) ;
2006-01-24 11:05:22 +03:00
if ( softreset ) {
rc = do_probe_reset ( ap , softreset , postreset , classes ) ;
if ( rc = = 0 )
return 0 ;
}
if ( ! hardreset )
return rc ;
rc = do_probe_reset ( ap , hardreset , postreset , classes ) ;
if ( rc = = 0 | | rc ! = - ENODEV )
return rc ;
if ( softreset )
rc = do_probe_reset ( ap , softreset , postreset , classes ) ;
return rc ;
}
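/*
 * Illustrative sketch (hypothetical driver): a controller with its own
 * hardreset routine but otherwise standard behaviour might build its
 * ->probe_reset callback on top of this helper; "my_hardreset" and
 * "my_probe_reset" are made-up names.
 *
 *	static int my_probe_reset(struct ata_port *ap, unsigned int *classes)
 *	{
 *		return ata_drive_probe_reset(ap, ata_std_probeinit,
 *					     ata_std_softreset, my_hardreset,
 *					     ata_std_postreset, classes);
 *	}
 */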
2006-03-05 11:55:58 +03:00
/**
* ata_dev_same_device - Determine whether new ID matches configured device
* @ ap : port on which the device to compare against resides
* @ dev : device to compare against
* @ new_class : class of the new device
* @ new_id : IDENTIFY page of the new device
*
* Compare @ new_class and @ new_id against @ dev and determine
* whether @ dev is the device indicated by @ new_class and
* @ new_id .
*
* LOCKING :
* None .
*
* RETURNS :
* 1 if @ dev matches @ new_class and @ new_id , 0 otherwise .
*/
static int ata_dev_same_device ( struct ata_port * ap , struct ata_device * dev ,
unsigned int new_class , const u16 * new_id )
{
const u16 * old_id = dev - > id ;
unsigned char model [ 2 ] [ 41 ] , serial [ 2 ] [ 21 ] ;
u64 new_n_sectors ;
if ( dev - > class ! = new_class ) {
printk ( KERN_INFO
" ata%u: dev %u class mismatch %d != %d \n " ,
ap - > id , dev - > devno , dev - > class , new_class ) ;
return 0 ;
}
ata_id_c_string ( old_id , model [ 0 ] , ATA_ID_PROD_OFS , sizeof ( model [ 0 ] ) ) ;
ata_id_c_string ( new_id , model [ 1 ] , ATA_ID_PROD_OFS , sizeof ( model [ 1 ] ) ) ;
ata_id_c_string ( old_id , serial [ 0 ] , ATA_ID_SERNO_OFS , sizeof ( serial [ 0 ] ) ) ;
ata_id_c_string ( new_id , serial [ 1 ] , ATA_ID_SERNO_OFS , sizeof ( serial [ 1 ] ) ) ;
new_n_sectors = ata_id_n_sectors ( new_id ) ;
if ( strcmp ( model [ 0 ] , model [ 1 ] ) ) {
printk ( KERN_INFO
" ata%u: dev %u model number mismatch '%s' != '%s' \n " ,
ap - > id , dev - > devno , model [ 0 ] , model [ 1 ] ) ;
return 0 ;
}
if ( strcmp ( serial [ 0 ] , serial [ 1 ] ) ) {
printk ( KERN_INFO
" ata%u: dev %u serial number mismatch '%s' != '%s' \n " ,
ap - > id , dev - > devno , serial [ 0 ] , serial [ 1 ] ) ;
return 0 ;
}
if ( dev - > class = = ATA_DEV_ATA & & dev - > n_sectors ! = new_n_sectors ) {
printk ( KERN_INFO
" ata%u: dev %u n_sectors mismatch %llu != %llu \n " ,
ap - > id , dev - > devno , ( unsigned long long ) dev - > n_sectors ,
( unsigned long long ) new_n_sectors ) ;
return 0 ;
}
return 1 ;
}
/**
* ata_dev_revalidate - Revalidate ATA device
* @ ap : port on which the device to revalidate resides
* @ dev : device to revalidate
* @ post_reset : is this revalidation after reset ?
*
* Re - read IDENTIFY page and make sure @ dev is still attached to
* the port .
*
* LOCKING :
* Kernel thread context ( may sleep )
*
* RETURNS :
* 0 on success , negative errno otherwise
*/
int ata_dev_revalidate ( struct ata_port * ap , struct ata_device * dev ,
int post_reset )
{
unsigned int class ;
u16 * id ;
int rc ;
if ( ! ata_dev_present ( dev ) )
return - ENODEV ;
class = dev - > class ;
id = NULL ;
/* allocate & read ID data */
rc = ata_dev_read_id ( ap , dev , & class , post_reset , & id ) ;
if ( rc )
goto fail ;
/* is the device still there? */
if ( ! ata_dev_same_device ( ap , dev , class , id ) ) {
rc = - ENODEV ;
goto fail ;
}
kfree ( dev - > id ) ;
dev - > id = id ;
/* configure device according to the new ID */
return ata_dev_configure ( ap , dev , 0 ) ;
fail :
printk ( KERN_ERR " ata%u: dev %u revalidation failed (errno=%d) \n " ,
ap - > id , dev - > devno , rc ) ;
kfree ( id ) ;
return rc ;
}
2005-11-28 12:06:23 +03:00
static const char * const ata_dma_blacklist [ ] = {
2005-04-17 02:20:36 +04:00
" WDC AC11000H " ,
" WDC AC22100H " ,
" WDC AC32500H " ,
" WDC AC33100H " ,
" WDC AC31600H " ,
" WDC AC32100H " ,
" WDC AC23200L " ,
" Compaq CRD-8241B " ,
" CRD-8400B " ,
" CRD-8480B " ,
" CRD-8482B " ,
" CRD-84 " ,
" SanDisk SDP3B " ,
" SanDisk SDP3B-64 " ,
" SANYO CD-ROM CRD " ,
" HITACHI CDR-8 " ,
" HITACHI CDR-8335 " ,
" HITACHI CDR-8435 " ,
" Toshiba CD-ROM XM-6202B " ,
2005-06-28 08:03:37 +04:00
" TOSHIBA CD-ROM XM-1702BC " ,
2005-04-17 02:20:36 +04:00
" CD-532E-A " ,
" E-IDE CD-ROM CR-840 " ,
" CD-ROM Drive/F5A " ,
" WPI CDD-820 " ,
" SAMSUNG CD-ROM SC-148C " ,
" SAMSUNG CD-ROM SC " ,
" SanDisk SDP3B-64 " ,
" ATAPI CD-ROM DRIVE 40X MAXIMUM " ,
" _NEC DV5800A " ,
} ;
2005-10-22 22:27:05 +04:00
static int ata_dma_blacklisted ( const struct ata_device * dev )
2005-04-17 02:20:36 +04:00
{
2006-02-12 16:47:04 +03:00
unsigned char model_num [ 41 ] ;
2005-04-17 02:20:36 +04:00
int i ;
2006-02-13 04:02:46 +03:00
ata_id_c_string ( dev - > id , model_num , ATA_ID_PROD_OFS , sizeof ( model_num ) ) ;
2005-04-17 02:20:36 +04:00
for ( i = 0 ; i < ARRAY_SIZE ( ata_dma_blacklist ) ; i + + )
2006-02-12 16:47:04 +03:00
if ( ! strcmp ( ata_dma_blacklist [ i ] , model_num ) )
2005-04-17 02:20:36 +04:00
return 1 ;
return 0 ;
}
2006-03-05 22:31:57 +03:00
/**
* ata_dev_xfermask - Compute supported xfermask of the given device
* @ ap : Port on which the device to compute xfermask for resides
* @ dev : Device to compute xfermask for
*
* Compute supported xfermask of @ dev . This function is
* responsible for applying all known limits including host
* controller limits , device blacklist , etc . . .
*
* LOCKING :
* None .
*
* RETURNS :
* Computed xfermask .
*/
static unsigned int ata_dev_xfermask ( struct ata_port * ap ,
struct ata_device * dev )
2005-04-17 02:20:36 +04:00
{
2006-03-05 22:31:57 +03:00
unsigned long xfer_mask ;
int i ;
2005-04-17 02:20:36 +04:00
2006-03-05 22:31:57 +03:00
xfer_mask = ata_pack_xfermask ( ap - > pio_mask , ap - > mwdma_mask ,
ap - > udma_mask ) ;
2005-04-17 02:20:36 +04:00
2006-03-05 22:31:57 +03:00
/* use port-wide xfermask for now */
for ( i = 0 ; i < ATA_MAX_DEVICES ; i + + ) {
struct ata_device * d = & ap - > device [ i ] ;
if ( ! ata_dev_present ( d ) )
continue ;
xfer_mask & = ata_id_xfermask ( d - > id ) ;
if ( ata_dma_blacklisted ( d ) )
xfer_mask & = ~ ( ATA_MASK_MWDMA | ATA_MASK_UDMA ) ;
2005-04-17 02:20:36 +04:00
}
2006-03-05 22:31:57 +03:00
if ( ata_dma_blacklisted ( dev ) )
printk ( KERN_WARNING " ata%u: dev %u is on DMA blacklist, "
" disabling DMA \n " , ap - > id , dev - > devno ) ;
return xfer_mask ;
2005-04-17 02:20:36 +04:00
}
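/*
 * For reference only, mirroring ata_set_mode() earlier in this file: the
 * computed mask is split into the best PIO and DMA modes per device:
 *
 *	xfer_mask = ata_dev_xfermask(ap, dev);
 *	dev->pio_mode = ata_xfer_mask2mode(xfer_mask & ATA_MASK_PIO);
 *	dev->dma_mode = ata_xfer_mask2mode(xfer_mask &
 *					   (ATA_MASK_MWDMA | ATA_MASK_UDMA));
 */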
/**
* ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
* @ ap : Port associated with device @ dev
* @ dev : Device to which command will be sent
*
2005-05-30 23:41:05 +04:00
* Issue SET FEATURES - XFER MODE command to device @ dev
* on port @ ap .
*
2005-04-17 02:20:36 +04:00
* LOCKING :
2005-05-31 03:49:12 +04:00
* PCI / etc . bus probe sem .
2005-04-17 02:20:36 +04:00
*/
static void ata_dev_set_xfermode ( struct ata_port * ap , struct ata_device * dev )
{
2005-12-13 08:49:31 +03:00
struct ata_taskfile tf ;
2005-04-17 02:20:36 +04:00
/* set up set-features taskfile */
DPRINTK ( " set features - xfer mode \n " ) ;
2005-12-13 08:49:31 +03:00
ata_tf_init ( ap , & tf , dev - > devno ) ;
tf . command = ATA_CMD_SET_FEATURES ;
tf . feature = SETFEATURES_XFER ;
tf . flags | = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE ;
tf . protocol = ATA_PROT_NODATA ;
tf . nsect = dev - > xfer_mode ;
2005-04-17 02:20:36 +04:00
2005-12-13 08:49:31 +03:00
if ( ata_exec_internal ( ap , dev , & tf , DMA_NONE , NULL , 0 ) ) {
printk ( KERN_ERR " ata%u: failed to set xfermode, disabled \n " ,
ap - > id ) ;
2005-04-17 02:20:36 +04:00
ata_port_disable ( ap ) ;
2005-12-13 08:49:31 +03:00
}
2005-04-17 02:20:36 +04:00
DPRINTK ( " EXIT \n " ) ;
}
2005-05-12 23:29:42 +04:00
/**
* ata_dev_init_params - Issue INIT DEV PARAMS command
* @ ap : Port associated with device @ dev
* @ dev : Device to which command will be sent
*
* LOCKING :
2006-02-15 12:24:09 +03:00
* Kernel thread context ( may sleep )
*
* RETURNS :
* 0 on success , AC_ERR_ * mask otherwise .
2005-05-12 23:29:42 +04:00
*/
2006-02-15 12:24:09 +03:00
static unsigned int ata_dev_init_params ( struct ata_port * ap ,
struct ata_device * dev )
2005-05-12 23:29:42 +04:00
{
2005-12-13 08:49:31 +03:00
struct ata_taskfile tf ;
2006-02-15 12:24:09 +03:00
unsigned int err_mask ;
2005-05-12 23:29:42 +04:00
u16 sectors = dev - > id [ 6 ] ;
u16 heads = dev - > id [ 3 ] ;
/* Number of sectors per track 1-255. Number of heads 1-16 */
if ( sectors < 1 | | sectors > 255 | | heads < 1 | | heads > 16 )
2006-02-15 12:24:09 +03:00
return 0 ;
2005-05-12 23:29:42 +04:00
/* set up init dev params taskfile */
DPRINTK ( " init dev params \n " ) ;
2005-12-13 08:49:31 +03:00
ata_tf_init ( ap , & tf , dev - > devno ) ;
tf . command = ATA_CMD_INIT_DEV_PARAMS ;
tf . flags | = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE ;
tf . protocol = ATA_PROT_NODATA ;
tf . nsect = sectors ;
tf . device | = ( heads - 1 ) & 0x0f ; /* max head = num. of heads - 1 */
2005-05-12 23:29:42 +04:00
2006-02-15 12:24:09 +03:00
err_mask = ata_exec_internal ( ap , dev , & tf , DMA_NONE , NULL , 0 ) ;
2005-05-12 23:29:42 +04:00
2006-02-15 12:24:09 +03:00
DPRINTK ( " EXIT, err_mask=%x \n " , err_mask ) ;
return err_mask ;
2005-05-12 23:29:42 +04:00
}
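/*
 * Worked example (illustration only): a drive reporting 16 heads and 63
 * sectors/track in IDENTIFY words 3 and 6 ends up with
 *
 *	tf.nsect   = 63;		   sectors per track
 *	tf.device |= (16 - 1) & 0x0f;	   max head number = heads - 1
 *
 * before INIT DEVICE PARAMETERS is issued.
 */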
2005-04-17 02:20:36 +04:00
/**
2005-05-31 03:49:12 +04:00
* ata_sg_clean - Unmap DMA memory associated with command
* @ qc : Command containing DMA memory to be released
*
* Unmap all mapped DMA memory associated with this command .
2005-04-17 02:20:36 +04:00
*
* LOCKING :
2005-05-31 03:49:12 +04:00
* spin_lock_irqsave ( host_set lock )
2005-04-17 02:20:36 +04:00
*/
static void ata_sg_clean ( struct ata_queued_cmd * qc )
{
struct ata_port * ap = qc - > ap ;
2005-10-05 15:13:30 +04:00
struct scatterlist * sg = qc - > __sg ;
2005-04-17 02:20:36 +04:00
int dir = qc - > dma_dir ;
2005-10-05 15:13:30 +04:00
void * pad_buf = NULL ;
2005-04-17 02:20:36 +04:00
2006-02-11 13:11:13 +03:00
WARN_ON ( ! ( qc - > flags & ATA_QCFLAG_DMAMAP ) ) ;
WARN_ON ( sg = = NULL ) ;
2005-04-17 02:20:36 +04:00
if ( qc - > flags & ATA_QCFLAG_SINGLE )
2006-02-21 00:55:56 +03:00
WARN_ON ( qc - > n_elem > 1 ) ;
2005-04-17 02:20:36 +04:00
2005-11-14 22:14:16 +03:00
VPRINTK ( " unmapping %u sg elements \n " , qc - > n_elem ) ;
2005-04-17 02:20:36 +04:00
2005-10-05 15:13:30 +04:00
/* if we padded the buffer out to 32-bit bound, and data
* xfer direction is from - device , we must copy from the
* pad buffer back into the supplied buffer
*/
if ( qc - > pad_len & & ! ( qc - > tf . flags & ATA_TFLAG_WRITE ) )
pad_buf = ap - > pad + ( qc - > tag * ATA_DMA_PAD_SZ ) ;
if ( qc - > flags & ATA_QCFLAG_SG ) {
2005-11-14 22:06:26 +03:00
if ( qc - > n_elem )
dma_unmap_sg ( ap - > host_set - > dev , sg , qc - > n_elem , dir ) ;
2005-10-05 15:13:30 +04:00
/* restore last sg */
sg [ qc - > orig_n_elem - 1 ] . length + = qc - > pad_len ;
if ( pad_buf ) {
struct scatterlist * psg = & qc - > pad_sgent ;
void * addr = kmap_atomic ( psg - > page , KM_IRQ0 ) ;
memcpy ( addr + psg - > offset , pad_buf , qc - > pad_len ) ;
2005-12-13 07:19:28 +03:00
kunmap_atomic ( addr , KM_IRQ0 ) ;
2005-10-05 15:13:30 +04:00
}
} else {
2006-02-20 17:48:38 +03:00
if ( qc - > n_elem )
2005-11-14 22:06:26 +03:00
dma_unmap_single ( ap - > host_set - > dev ,
sg_dma_address ( & sg [ 0 ] ) , sg_dma_len ( & sg [ 0 ] ) ,
dir ) ;
2005-10-05 15:13:30 +04:00
/* restore sg */
sg - > length + = qc - > pad_len ;
if ( pad_buf )
memcpy ( qc - > buf_virt + sg - > length - qc - > pad_len ,
pad_buf , qc - > pad_len ) ;
}
2005-04-17 02:20:36 +04:00
qc - > flags & = ~ ATA_QCFLAG_DMAMAP ;
2005-10-05 15:13:30 +04:00
qc - > __sg = NULL ;
2005-04-17 02:20:36 +04:00
}
/**
* ata_fill_sg - Fill PCI IDE PRD table
* @ qc : Metadata associated with taskfile to be transferred
*
2005-05-30 23:41:05 +04:00
* Fill PCI IDE PRD ( scatter - gather ) table with segments
* associated with the current disk command .
*
2005-04-17 02:20:36 +04:00
* LOCKING :
2005-05-30 23:41:05 +04:00
* spin_lock_irqsave ( host_set lock )
2005-04-17 02:20:36 +04:00
*
*/
static void ata_fill_sg ( struct ata_queued_cmd * qc )
{
struct ata_port * ap = qc - > ap ;
2005-10-05 15:13:30 +04:00
struct scatterlist * sg ;
unsigned int idx ;
2005-04-17 02:20:36 +04:00
2006-02-11 13:11:13 +03:00
WARN_ON ( qc - > __sg = = NULL ) ;
2006-02-21 00:55:56 +03:00
WARN_ON ( qc - > n_elem = = 0 & & qc - > pad_len = = 0 ) ;
2005-04-17 02:20:36 +04:00
idx = 0 ;
2005-10-05 15:13:30 +04:00
ata_for_each_sg ( sg , qc ) {
2005-04-17 02:20:36 +04:00
u32 addr , offset ;
u32 sg_len , len ;
/* determine if physical DMA addr spans 64K boundary.
* Note h / w doesn ' t support 64 - bit , so we unconditionally
* truncate dma_addr_t to u32 .
*/
addr = ( u32 ) sg_dma_address ( sg ) ;
sg_len = sg_dma_len ( sg ) ;
while ( sg_len ) {
offset = addr & 0xffff ;
len = sg_len ;
if ( ( offset + sg_len ) > 0x10000 )
len = 0x10000 - offset ;
ap - > prd [ idx ] . addr = cpu_to_le32 ( addr ) ;
ap - > prd [ idx ] . flags_len = cpu_to_le32 ( len & 0xffff ) ;
VPRINTK ( " PRD[%u] = (0x%X, 0x%X) \n " , idx , addr , len ) ;
idx + + ;
sg_len - = len ;
addr + = len ;
}
}
if ( idx )
ap - > prd [ idx - 1 ] . flags_len | = cpu_to_le32 ( ATA_PRD_EOT ) ;
}
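/*
 * Worked example (illustration only): an S/G element with DMA address
 * 0x0000f000 and length 0x3000 crosses a 64K boundary and is emitted as
 * two PRD entries:
 *
 *	PRD[n]   = (0x0000f000, 0x1000)	  up to the 64K boundary
 *	PRD[n+1] = (0x00010000, 0x2000)	  the remainder
 *
 * The final entry later gets ATA_PRD_EOT set.
 */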
/**
* ata_check_atapi_dma - Check whether ATAPI DMA can be supported
* @ qc : Metadata associated with taskfile to check
*
2005-05-30 23:41:05 +04:00
* Allow low - level driver to filter ATA PACKET commands , returning
* a status indicating whether or not it is OK to use DMA for the
* supplied PACKET command .
*
2005-04-17 02:20:36 +04:00
* LOCKING :
2005-05-31 03:49:12 +04:00
* spin_lock_irqsave ( host_set lock )
*
2005-04-17 02:20:36 +04:00
* RETURNS : 0 when ATAPI DMA can be used
* nonzero otherwise
*/
int ata_check_atapi_dma ( struct ata_queued_cmd * qc )
{
struct ata_port * ap = qc - > ap ;
int rc = 0 ; /* Assume ATAPI DMA is OK by default */
if ( ap - > ops - > check_atapi_dma )
rc = ap - > ops - > check_atapi_dma ( qc ) ;
return rc ;
}
/**
* ata_qc_prep - Prepare taskfile for submission
* @ qc : Metadata associated with taskfile to be prepared
*
2005-05-30 23:41:05 +04:00
* Prepare ATA taskfile for submission .
*
2005-04-17 02:20:36 +04:00
* LOCKING :
* spin_lock_irqsave ( host_set lock )
*/
void ata_qc_prep ( struct ata_queued_cmd * qc )
{
if ( ! ( qc - > flags & ATA_QCFLAG_DMAMAP ) )
return ;
ata_fill_sg ( qc ) ;
}
2005-05-31 03:49:12 +04:00
/**
* ata_sg_init_one - Associate command with memory buffer
* @ qc : Command to be associated
* @ buf : Memory buffer
* @ buflen : Length of memory buffer , in bytes .
*
* Initialize the data - related elements of queued_cmd @ qc
* to point to a single memory buffer , @ buf of byte length @ buflen .
*
* LOCKING :
* spin_lock_irqsave ( host_set lock )
*/
2005-04-17 02:20:36 +04:00
void ata_sg_init_one ( struct ata_queued_cmd * qc , void * buf , unsigned int buflen )
{
struct scatterlist * sg ;
qc - > flags | = ATA_QCFLAG_SINGLE ;
memset ( & qc - > sgent , 0 , sizeof ( qc - > sgent ) ) ;
2005-10-05 15:13:30 +04:00
qc - > __sg = & qc - > sgent ;
2005-04-17 02:20:36 +04:00
qc - > n_elem = 1 ;
2005-10-05 15:13:30 +04:00
qc - > orig_n_elem = 1 ;
2005-04-17 02:20:36 +04:00
qc - > buf_virt = buf ;
2005-10-05 15:13:30 +04:00
sg = qc - > __sg ;
2005-10-30 09:58:18 +03:00
sg_init_one ( sg , buf , buflen ) ;
2005-04-17 02:20:36 +04:00
}
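/*
 * Usage sketch (illustrative): an internal command with a single kernel
 * buffer pairs this helper with a DMA direction, much as
 * ata_exec_internal() does elsewhere in libata:
 *
 *	ata_sg_init_one(qc, buf, buflen);
 *	qc->dma_dir = DMA_FROM_DEVICE;
 *	qc->nsect = buflen / ATA_SECT_SIZE;
 */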
2005-05-31 03:49:12 +04:00
/**
* ata_sg_init - Associate command with scatter - gather table .
* @ qc : Command to be associated
* @ sg : Scatter - gather table .
* @ n_elem : Number of elements in s / g table .
*
* Initialize the data - related elements of queued_cmd @ qc
* to point to a scatter - gather table @ sg , containing @ n_elem
* elements .
*
* LOCKING :
* spin_lock_irqsave ( host_set lock )
*/
2005-04-17 02:20:36 +04:00
void ata_sg_init ( struct ata_queued_cmd * qc , struct scatterlist * sg ,
unsigned int n_elem )
{
qc - > flags | = ATA_QCFLAG_SG ;
2005-10-05 15:13:30 +04:00
qc - > __sg = sg ;
2005-04-17 02:20:36 +04:00
qc - > n_elem = n_elem ;
2005-10-05 15:13:30 +04:00
qc - > orig_n_elem = n_elem ;
2005-04-17 02:20:36 +04:00
}
/**
2005-05-31 03:49:12 +04:00
* ata_sg_setup_one - DMA - map the memory buffer associated with a command .
* @ qc : Command with memory buffer to be mapped .
*
* DMA - map the memory buffer associated with queued_cmd @ qc .
2005-04-17 02:20:36 +04:00
*
* LOCKING :
* spin_lock_irqsave ( host_set lock )
*
* RETURNS :
2005-05-31 03:49:12 +04:00
* Zero on success , negative on error .
2005-04-17 02:20:36 +04:00
*/
static int ata_sg_setup_one ( struct ata_queued_cmd * qc )
{
struct ata_port * ap = qc - > ap ;
int dir = qc - > dma_dir ;
2005-10-05 15:13:30 +04:00
struct scatterlist * sg = qc - > __sg ;
2005-04-17 02:20:36 +04:00
dma_addr_t dma_address ;
2006-02-20 17:48:38 +03:00
int trim_sg = 0 ;
2005-04-17 02:20:36 +04:00
2005-10-05 15:13:30 +04:00
/* we must lengthen transfers to end on a 32-bit boundary */
qc - > pad_len = sg - > length & 3 ;
if ( qc - > pad_len ) {
void * pad_buf = ap - > pad + ( qc - > tag * ATA_DMA_PAD_SZ ) ;
struct scatterlist * psg = & qc - > pad_sgent ;
2006-02-11 13:11:13 +03:00
WARN_ON ( qc - > dev - > class ! = ATA_DEV_ATAPI ) ;
2005-10-05 15:13:30 +04:00
memset ( pad_buf , 0 , ATA_DMA_PAD_SZ ) ;
if ( qc - > tf . flags & ATA_TFLAG_WRITE )
memcpy ( pad_buf , qc - > buf_virt + sg - > length - qc - > pad_len ,
qc - > pad_len ) ;
sg_dma_address ( psg ) = ap - > pad_dma + ( qc - > tag * ATA_DMA_PAD_SZ ) ;
sg_dma_len ( psg ) = ATA_DMA_PAD_SZ ;
/* trim sg */
sg - > length - = qc - > pad_len ;
2006-02-20 17:48:38 +03:00
if ( sg - > length = = 0 )
trim_sg = 1 ;
2005-10-05 15:13:30 +04:00
DPRINTK ( " padding done, sg->length=%u pad_len=%u \n " ,
sg - > length , qc - > pad_len ) ;
}
2006-02-20 17:48:38 +03:00
if ( trim_sg ) {
qc - > n_elem - - ;
2005-11-14 22:06:26 +03:00
goto skip_map ;
}
2005-04-17 02:20:36 +04:00
dma_address = dma_map_single ( ap - > host_set - > dev , qc - > buf_virt ,
2005-05-26 11:49:42 +04:00
sg - > length , dir ) ;
2005-11-05 22:29:01 +03:00
if ( dma_mapping_error ( dma_address ) ) {
/* restore sg */
sg - > length + = qc - > pad_len ;
2005-04-17 02:20:36 +04:00
return - 1 ;
2005-11-05 22:29:01 +03:00
}
2005-04-17 02:20:36 +04:00
sg_dma_address ( sg ) = dma_address ;
2005-05-26 11:49:42 +04:00
sg_dma_len ( sg ) = sg - > length ;
2005-04-17 02:20:36 +04:00
2006-02-20 17:48:38 +03:00
skip_map :
2005-04-17 02:20:36 +04:00
DPRINTK ( " mapped buffer of %d bytes for %s \n " , sg_dma_len ( sg ) ,
qc - > tf . flags & ATA_TFLAG_WRITE ? " write " : " read " ) ;
return 0 ;
}
/**
2005-05-31 03:49:12 +04:00
* ata_sg_setup - DMA - map the scatter - gather table associated with a command .
* @ qc : Command with scatter - gather table to be mapped .
*
* DMA - map the scatter - gather table associated with queued_cmd @ qc .
2005-04-17 02:20:36 +04:00
*
* LOCKING :
* spin_lock_irqsave ( host_set lock )
*
* RETURNS :
2005-05-31 03:49:12 +04:00
* Zero on success , negative on error .
2005-04-17 02:20:36 +04:00
*
*/
static int ata_sg_setup ( struct ata_queued_cmd * qc )
{
struct ata_port * ap = qc - > ap ;
2005-10-05 15:13:30 +04:00
struct scatterlist * sg = qc - > __sg ;
struct scatterlist * lsg = & sg [ qc - > n_elem - 1 ] ;
2005-11-14 22:06:26 +03:00
int n_elem , pre_n_elem , dir , trim_sg = 0 ;
2005-04-17 02:20:36 +04:00
VPRINTK ( " ENTER, ata%u \n " , ap - > id ) ;
2006-02-11 13:11:13 +03:00
WARN_ON ( ! ( qc - > flags & ATA_QCFLAG_SG ) ) ;
2005-04-17 02:20:36 +04:00
2005-10-05 15:13:30 +04:00
/* we must lengthen transfers to end on a 32-bit boundary */
qc - > pad_len = lsg - > length & 3 ;
if ( qc - > pad_len ) {
void * pad_buf = ap - > pad + ( qc - > tag * ATA_DMA_PAD_SZ ) ;
struct scatterlist * psg = & qc - > pad_sgent ;
unsigned int offset ;
2006-02-11 13:11:13 +03:00
WARN_ON ( qc - > dev - > class ! = ATA_DEV_ATAPI ) ;
2005-10-05 15:13:30 +04:00
memset ( pad_buf , 0 , ATA_DMA_PAD_SZ ) ;
/*
* psg - > page / offset are used to copy to - be - written
* data in this function or read data in ata_sg_clean .
*/
offset = lsg - > offset + lsg - > length - qc - > pad_len ;
psg - > page = nth_page ( lsg - > page , offset > > PAGE_SHIFT ) ;
psg - > offset = offset_in_page ( offset ) ;
if ( qc - > tf . flags & ATA_TFLAG_WRITE ) {
void * addr = kmap_atomic ( psg - > page , KM_IRQ0 ) ;
memcpy ( pad_buf , addr + psg - > offset , qc - > pad_len ) ;
2005-12-13 07:19:28 +03:00
kunmap_atomic ( addr , KM_IRQ0 ) ;
2005-10-05 15:13:30 +04:00
}
sg_dma_address ( psg ) = ap - > pad_dma + ( qc - > tag * ATA_DMA_PAD_SZ ) ;
sg_dma_len ( psg ) = ATA_DMA_PAD_SZ ;
/* trim last sg */
lsg - > length - = qc - > pad_len ;
2005-11-14 22:06:26 +03:00
if ( lsg - > length = = 0 )
trim_sg = 1 ;
2005-10-05 15:13:30 +04:00
DPRINTK ( " padding done, sg[%d].length=%u pad_len=%u \n " ,
qc - > n_elem - 1 , lsg - > length , qc - > pad_len ) ;
}
2005-11-14 22:06:26 +03:00
pre_n_elem = qc - > n_elem ;
if ( trim_sg & & pre_n_elem )
pre_n_elem - - ;
if ( ! pre_n_elem ) {
n_elem = 0 ;
goto skip_map ;
}
2005-04-17 02:20:36 +04:00
dir = qc - > dma_dir ;
2005-11-14 22:06:26 +03:00
n_elem = dma_map_sg ( ap - > host_set - > dev , sg , pre_n_elem , dir ) ;
2005-11-05 22:29:01 +03:00
if ( n_elem < 1 ) {
/* restore last sg */
lsg - > length + = qc - > pad_len ;
2005-04-17 02:20:36 +04:00
return - 1 ;
2005-11-05 22:29:01 +03:00
}
2005-04-17 02:20:36 +04:00
DPRINTK ( " %d sg elements mapped \n " , n_elem ) ;
2005-11-14 22:06:26 +03:00
skip_map :
2005-04-17 02:20:36 +04:00
qc - > n_elem = n_elem ;
return 0 ;
}
2005-08-22 12:12:45 +04:00
/**
* ata_poll_qc_complete - turn irq back on and finish qc
* @ qc : Command to complete
2005-08-22 12:12:45 +04:00
*
* LOCKING :
* None . ( grabs host lock )
*/
2005-12-05 10:38:02 +03:00
void ata_poll_qc_complete ( struct ata_queued_cmd * qc )
2005-08-22 12:12:45 +04:00
{
struct ata_port * ap = qc - > ap ;
2005-08-26 06:01:20 +04:00
unsigned long flags ;
2005-08-22 12:12:45 +04:00
2005-08-26 06:01:20 +04:00
spin_lock_irqsave ( & ap - > host_set - > lock , flags ) ;
2005-08-22 12:12:45 +04:00
ap - > flags & = ~ ATA_FLAG_NOINTR ;
ata_irq_on ( ap ) ;
2005-12-05 10:38:02 +03:00
ata_qc_complete ( qc ) ;
2005-08-26 06:01:20 +04:00
spin_unlock_irqrestore ( & ap - > host_set - > lock , flags ) ;
2005-08-22 12:12:45 +04:00
}
2005-04-17 02:20:36 +04:00
/**
2006-01-28 21:15:32 +03:00
* ata_pio_poll - poll using PIO , depending on current state
2005-10-25 09:44:30 +04:00
* @ ap : the target ata_port
2005-04-17 02:20:36 +04:00
*
* LOCKING :
2005-05-31 03:49:12 +04:00
* None . ( executing in kernel thread context )
2005-04-17 02:20:36 +04:00
*
* RETURNS :
2005-10-25 09:44:30 +04:00
* timeout value to use
2005-04-17 02:20:36 +04:00
*/
static unsigned long ata_pio_poll ( struct ata_port * ap )
{
2005-12-05 10:36:08 +03:00
struct ata_queued_cmd * qc ;
2005-04-17 02:20:36 +04:00
u8 status ;
2005-09-27 13:36:35 +04:00
unsigned int poll_state = HSM_ST_UNKNOWN ;
unsigned int reg_state = HSM_ST_UNKNOWN ;
2005-12-05 10:36:08 +03:00
qc = ata_qc_from_tag ( ap , ap - > active_tag ) ;
2006-02-11 13:11:13 +03:00
WARN_ON ( qc = = NULL ) ;
2005-12-05 10:36:08 +03:00
2005-09-27 13:36:35 +04:00
switch ( ap - > hsm_task_state ) {
case HSM_ST :
case HSM_ST_POLL :
poll_state = HSM_ST_POLL ;
reg_state = HSM_ST ;
2005-04-17 02:20:36 +04:00
break ;
2005-09-27 13:36:35 +04:00
case HSM_ST_LAST :
case HSM_ST_LAST_POLL :
poll_state = HSM_ST_LAST_POLL ;
reg_state = HSM_ST_LAST ;
2005-04-17 02:20:36 +04:00
break ;
default :
BUG ( ) ;
break ;
}
status = ata_chk_status ( ap ) ;
if ( status & ATA_BUSY ) {
if ( time_after ( jiffies , ap - > pio_task_timeout ) ) {
2006-01-23 07:09:36 +03:00
qc - > err_mask | = AC_ERR_TIMEOUT ;
2005-11-09 08:03:30 +03:00
ap - > hsm_task_state = HSM_ST_TMOUT ;
2005-04-17 02:20:36 +04:00
return 0 ;
}
2005-09-27 13:36:35 +04:00
ap - > hsm_task_state = poll_state ;
2005-04-17 02:20:36 +04:00
return ATA_SHORT_PAUSE ;
}
2005-09-27 13:36:35 +04:00
ap - > hsm_task_state = reg_state ;
2005-04-17 02:20:36 +04:00
return 0 ;
}
/**
2005-10-25 09:44:30 +04:00
* ata_pio_complete - check if drive is busy or idle
* @ ap : the target ata_port
2005-04-17 02:20:36 +04:00
*
* LOCKING :
2005-05-31 03:49:12 +04:00
* None . ( executing in kernel thread context )
2005-09-16 14:01:48 +04:00
*
* RETURNS :
* Non - zero if qc completed , zero otherwise .
2005-04-17 02:20:36 +04:00
*/
2005-09-16 14:01:48 +04:00
static int ata_pio_complete ( struct ata_port * ap )
2005-04-17 02:20:36 +04:00
{
struct ata_queued_cmd * qc ;
u8 drv_stat ;
/*
2005-08-26 18:56:47 +04:00
* This is purely heuristic . This is a fast path . Sometimes when
* we enter , BSY will be cleared in a chk - status or two . If not ,
* the drive is probably seeking or something . Snooze for a couple
* msecs , then chk - status again . If still busy , fall back to
2005-09-27 13:36:35 +04:00
* HSM_ST_POLL state .
2005-04-17 02:20:36 +04:00
*/
2005-12-06 06:34:59 +03:00
drv_stat = ata_busy_wait ( ap , ATA_BUSY , 10 ) ;
if ( drv_stat & ATA_BUSY ) {
2005-04-17 02:20:36 +04:00
msleep ( 2 ) ;
2005-12-06 06:34:59 +03:00
drv_stat = ata_busy_wait ( ap , ATA_BUSY , 10 ) ;
if ( drv_stat & ATA_BUSY ) {
2005-09-27 13:36:35 +04:00
ap - > hsm_task_state = HSM_ST_LAST_POLL ;
2005-04-17 02:20:36 +04:00
ap - > pio_task_timeout = jiffies + ATA_TMOUT_PIO ;
2005-09-16 14:01:48 +04:00
return 0 ;
2005-04-17 02:20:36 +04:00
}
}
2005-12-05 10:36:08 +03:00
qc = ata_qc_from_tag ( ap , ap - > active_tag ) ;
2006-02-11 13:11:13 +03:00
WARN_ON ( qc = = NULL ) ;
2005-12-05 10:36:08 +03:00
2005-04-17 02:20:36 +04:00
drv_stat = ata_wait_idle ( ap ) ;
if ( ! ata_ok ( drv_stat ) ) {
2005-12-05 10:40:15 +03:00
qc - > err_mask | = __ac_err_mask ( drv_stat ) ;
2005-09-27 13:36:35 +04:00
ap - > hsm_task_state = HSM_ST_ERR ;
2005-09-16 14:01:48 +04:00
return 0 ;
2005-04-17 02:20:36 +04:00
}
2005-09-27 13:36:35 +04:00
ap - > hsm_task_state = HSM_ST_IDLE ;
2005-04-17 02:20:36 +04:00
2006-02-11 13:11:13 +03:00
WARN_ON ( qc - > err_mask ) ;
2005-12-05 10:38:02 +03:00
ata_poll_qc_complete ( qc ) ;
2005-09-16 14:01:48 +04:00
/* another command may start at this point */
return 1 ;
2005-04-17 02:20:36 +04:00
}
2005-06-03 02:17:13 +04:00
/**
2006-01-28 21:15:32 +03:00
* swap_buf_le16 - swap halves of 16 - bit words in place
2005-06-03 02:17:13 +04:00
* @ buf : Buffer to swap
* @ buf_words : Number of 16 - bit words in buffer .
*
* Swap halves of 16 - bit words if needed to convert from
* little - endian byte order to native cpu byte order , or
* vice - versa .
*
* LOCKING :
2005-10-25 09:44:30 +04:00
* Inherited from caller .
2005-06-03 02:17:13 +04:00
*/
2005-04-17 02:20:36 +04:00
void swap_buf_le16 ( u16 * buf , unsigned int buf_words )
{
# ifdef __BIG_ENDIAN
unsigned int i ;
for ( i = 0 ; i < buf_words ; i + + )
buf [ i ] = le16_to_cpu ( buf [ i ] ) ;
# endif /* __BIG_ENDIAN */
}
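/*
 * Usage sketch (illustrative): IDENTIFY DEVICE data arrives as
 * little-endian 16-bit words, so callers such as ata_dev_read_id()
 * convert it in place before parsing:
 *
 *	swap_buf_le16(id, ATA_ID_WORDS);
 *
 * On little-endian machines this compiles away to nothing.
 */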
2005-08-12 10:15:34 +04:00
/**
* ata_mmio_data_xfer - Transfer data by MMIO
* @ ap : port to read / write
* @ buf : data buffer
* @ buflen : buffer length
2005-09-07 09:15:17 +04:00
* @ write_data : read / write
2005-08-12 10:15:34 +04:00
*
* Transfer data from / to the device data register by MMIO .
*
* LOCKING :
* Inherited from caller .
*/
2005-04-17 02:20:36 +04:00
static void ata_mmio_data_xfer ( struct ata_port * ap , unsigned char * buf ,
unsigned int buflen , int write_data )
{
unsigned int i ;
unsigned int words = buflen > > 1 ;
u16 * buf16 = ( u16 * ) buf ;
void __iomem * mmio = ( void __iomem * ) ap - > ioaddr . data_addr ;
2005-08-12 10:15:34 +04:00
/* Transfer multiple of 2 bytes */
2005-04-17 02:20:36 +04:00
if ( write_data ) {
for ( i = 0 ; i < words ; i + + )
writew ( le16_to_cpu ( buf16 [ i ] ) , mmio ) ;
} else {
for ( i = 0 ; i < words ; i + + )
buf16 [ i ] = cpu_to_le16 ( readw ( mmio ) ) ;
}
2005-08-12 10:15:34 +04:00
/* Transfer trailing 1 byte, if any. */
if ( unlikely ( buflen & 0x01 ) ) {
u16 align_buf [ 1 ] = { 0 } ;
unsigned char * trailing_buf = buf + buflen - 1 ;
if ( write_data ) {
memcpy ( align_buf , trailing_buf , 1 ) ;
writew ( le16_to_cpu ( align_buf [ 0 ] ) , mmio ) ;
} else {
align_buf [ 0 ] = cpu_to_le16 ( readw ( mmio ) ) ;
memcpy ( trailing_buf , align_buf , 1 ) ;
}
}
2005-04-17 02:20:36 +04:00
}
2005-08-12 10:15:34 +04:00
/**
* ata_pio_data_xfer - Transfer data by PIO
* @ ap : port to read / write
* @ buf : data buffer
* @ buflen : buffer length
2005-09-07 09:15:17 +04:00
* @ write_data : read / write
2005-08-12 10:15:34 +04:00
*
* Transfer data from / to the device data register by PIO .
*
* LOCKING :
* Inherited from caller .
*/
2005-04-17 02:20:36 +04:00
static void ata_pio_data_xfer ( struct ata_port * ap , unsigned char * buf ,
unsigned int buflen , int write_data )
{
2005-08-12 10:15:34 +04:00
unsigned int words = buflen > > 1 ;
2005-04-17 02:20:36 +04:00
2005-08-12 10:15:34 +04:00
/* Transfer multiple of 2 bytes */
2005-04-17 02:20:36 +04:00
if ( write_data )
2005-08-12 10:15:34 +04:00
outsw ( ap - > ioaddr . data_addr , buf , words ) ;
2005-04-17 02:20:36 +04:00
else
2005-08-12 10:15:34 +04:00
insw ( ap - > ioaddr . data_addr , buf , words ) ;
/* Transfer trailing 1 byte, if any. */
if ( unlikely ( buflen & 0x01 ) ) {
u16 align_buf [ 1 ] = { 0 } ;
unsigned char * trailing_buf = buf + buflen - 1 ;
if ( write_data ) {
memcpy ( align_buf , trailing_buf , 1 ) ;
outw ( le16_to_cpu ( align_buf [ 0 ] ) , ap - > ioaddr . data_addr ) ;
} else {
align_buf [ 0 ] = cpu_to_le16 ( inw ( ap - > ioaddr . data_addr ) ) ;
memcpy ( trailing_buf , align_buf , 1 ) ;
}
}
2005-04-17 02:20:36 +04:00
}
2005-08-12 10:15:34 +04:00
/**
* ata_data_xfer - Transfer data from / to the data register .
* @ ap : port to read / write
* @ buf : data buffer
* @ buflen : buffer length
* @ do_write : read / write
*
* Transfer data from / to the device data register .
*
* LOCKING :
* Inherited from caller .
*/
2005-04-17 02:20:36 +04:00
static void ata_data_xfer ( struct ata_port * ap , unsigned char * buf ,
unsigned int buflen , int do_write )
{
2006-01-17 23:53:50 +03:00
/* Make the crap hardware pay the costs not the good stuff */
if ( unlikely ( ap - > flags & ATA_FLAG_IRQ_MASK ) ) {
unsigned long flags ;
local_irq_save ( flags ) ;
if ( ap - > flags & ATA_FLAG_MMIO )
ata_mmio_data_xfer ( ap , buf , buflen , do_write ) ;
else
ata_pio_data_xfer ( ap , buf , buflen , do_write ) ;
local_irq_restore ( flags ) ;
} else {
if ( ap - > flags & ATA_FLAG_MMIO )
ata_mmio_data_xfer ( ap , buf , buflen , do_write ) ;
else
ata_pio_data_xfer ( ap , buf , buflen , do_write ) ;
}
2005-04-17 02:20:36 +04:00
}
2005-08-12 10:15:34 +04:00
/**
* ata_pio_sector - Transfer ATA_SECT_SIZE ( 512 bytes ) of data .
* @qc: Command in progress
*
* Transfer ATA_SECT_SIZE of data from / to the ATA device .
*
* LOCKING :
* Inherited from caller .
*/
2005-04-17 02:20:36 +04:00
static void ata_pio_sector ( struct ata_queued_cmd * qc )
{
int do_write = ( qc - > tf . flags & ATA_TFLAG_WRITE ) ;
2005-10-05 15:13:30 +04:00
struct scatterlist * sg = qc - > __sg ;
2005-04-17 02:20:36 +04:00
struct ata_port * ap = qc - > ap ;
struct page * page ;
unsigned int offset ;
unsigned char * buf ;
if ( qc - > cursect = = ( qc - > nsect - 1 ) )
2005-09-27 13:36:35 +04:00
ap - > hsm_task_state = HSM_ST_LAST ;
2005-04-17 02:20:36 +04:00
page = sg [ qc - > cursg ] . page ;
offset = sg [ qc - > cursg ] . offset + qc - > cursg_ofs * ATA_SECT_SIZE ;
/* get the current page and offset */
page = nth_page ( page , ( offset > > PAGE_SHIFT ) ) ;
offset % = PAGE_SIZE ;
buf = kmap ( page ) + offset ;
qc - > cursect + + ;
qc - > cursg_ofs + + ;
2005-05-26 11:49:42 +04:00
if ( ( qc - > cursg_ofs * ATA_SECT_SIZE ) = = ( & sg [ qc - > cursg ] ) - > length ) {
2005-04-17 02:20:36 +04:00
qc - > cursg + + ;
qc - > cursg_ofs = 0 ;
}
DPRINTK ( " data %s \n " , qc - > tf . flags & ATA_TFLAG_WRITE ? " write " : " read " ) ;
/* do the actual data transfer */
do_write = ( qc - > tf . flags & ATA_TFLAG_WRITE ) ;
ata_data_xfer ( ap , buf , ATA_SECT_SIZE , do_write ) ;
kunmap ( page ) ;
}
2005-08-12 10:15:34 +04:00
/**
* __atapi_pio_bytes - Transfer data from / to the ATAPI device .
* @qc: Command in progress
* @bytes: number of bytes
*
* Transfer data from/to the ATAPI device.
*
* LOCKING :
* Inherited from caller .
*
*/
2005-04-17 02:20:36 +04:00
static void __atapi_pio_bytes ( struct ata_queued_cmd * qc , unsigned int bytes )
{
int do_write = ( qc - > tf . flags & ATA_TFLAG_WRITE ) ;
2005-10-05 15:13:30 +04:00
struct scatterlist * sg = qc - > __sg ;
2005-04-17 02:20:36 +04:00
struct ata_port * ap = qc - > ap ;
struct page * page ;
unsigned char * buf ;
unsigned int offset , count ;
2005-08-12 10:17:50 +04:00
if ( qc - > curbytes + bytes > = qc - > nbytes )
2005-09-27 13:36:35 +04:00
ap - > hsm_task_state = HSM_ST_LAST ;
2005-04-17 02:20:36 +04:00
next_sg :
2005-08-12 10:17:50 +04:00
if ( unlikely ( qc - > cursg > = qc - > n_elem ) ) {
2005-09-16 14:01:48 +04:00
/*
2005-08-12 10:17:50 +04:00
* The end of qc->sg is reached and the device expects
* more data to transfer.  In order not to overrun qc->sg
* and to fulfill the length specified in the byte count register:
* - for the read case, discard trailing data from the device
* - for the write case, pad with zero data to the device
*/
u16 pad_buf [ 1 ] = { 0 } ;
unsigned int words = bytes > > 1 ;
unsigned int i ;
if ( words ) /* warning if bytes > 1 */
2005-09-16 14:01:48 +04:00
printk ( KERN_WARNING " ata%u: %u bytes trailing data \n " ,
2005-08-12 10:17:50 +04:00
ap - > id , bytes ) ;
for ( i = 0 ; i < words ; i + + )
ata_data_xfer ( ap , ( unsigned char * ) pad_buf , 2 , do_write ) ;
2005-09-27 13:36:35 +04:00
ap - > hsm_task_state = HSM_ST_LAST ;
2005-08-12 10:17:50 +04:00
return ;
}
2005-10-05 15:13:30 +04:00
sg = & qc - > __sg [ qc - > cursg ] ;
2005-04-17 02:20:36 +04:00
page = sg - > page ;
offset = sg - > offset + qc - > cursg_ofs ;
/* get the current page and offset */
page = nth_page ( page , ( offset > > PAGE_SHIFT ) ) ;
offset % = PAGE_SIZE ;
2005-06-06 11:56:03 +04:00
/* don't overrun current sg */
2005-05-26 11:49:42 +04:00
count = min ( sg - > length - qc - > cursg_ofs , bytes ) ;
2005-04-17 02:20:36 +04:00
/* don't cross page boundaries */
count = min ( count , ( unsigned int ) PAGE_SIZE - offset ) ;
buf = kmap ( page ) + offset ;
bytes - = count ;
qc - > curbytes + = count ;
qc - > cursg_ofs + = count ;
2005-05-26 11:49:42 +04:00
if ( qc - > cursg_ofs = = sg - > length ) {
2005-04-17 02:20:36 +04:00
qc - > cursg + + ;
qc - > cursg_ofs = 0 ;
}
DPRINTK ( " data %s \n " , qc - > tf . flags & ATA_TFLAG_WRITE ? " write " : " read " ) ;
/* do the actual data transfer */
ata_data_xfer ( ap , buf , count , do_write ) ;
kunmap ( page ) ;
2005-08-12 10:17:50 +04:00
if ( bytes )
2005-04-17 02:20:36 +04:00
goto next_sg ;
}
2005-08-12 10:15:34 +04:00
/**
* atapi_pio_bytes - Transfer data from / to the ATAPI device .
* @qc: Command in progress
*
* Transfer data from/to the ATAPI device.
*
* LOCKING :
* Inherited from caller .
*/
2005-04-17 02:20:36 +04:00
static void atapi_pio_bytes ( struct ata_queued_cmd * qc )
{
struct ata_port * ap = qc - > ap ;
struct ata_device * dev = qc - > dev ;
unsigned int ireason , bc_lo , bc_hi , bytes ;
int i_write , do_write = ( qc - > tf . flags & ATA_TFLAG_WRITE ) ? 1 : 0 ;
ap - > ops - > tf_read ( ap , & qc - > tf ) ;
ireason = qc - > tf . nsect ;
bc_lo = qc - > tf . lbam ;
bc_hi = qc - > tf . lbah ;
bytes = ( bc_hi < < 8 ) | bc_lo ;
/* shall be cleared to zero, indicating xfer of data */
if ( ireason & ( 1 < < 0 ) )
goto err_out ;
/* make sure transfer direction matches expected */
i_write = ( ( ireason & ( 1 < < 1 ) ) = = 0 ) ? 1 : 0 ;
if ( do_write ! = i_write )
goto err_out ;
__atapi_pio_bytes ( qc , bytes ) ;
return ;
err_out :
printk ( KERN_INFO " ata%u: dev %u: ATAPI check failed \n " ,
ap - > id , dev - > devno ) ;
2006-01-23 07:09:36 +03:00
qc - > err_mask | = AC_ERR_HSM ;
2005-09-27 13:36:35 +04:00
ap - > hsm_task_state = HSM_ST_ERR ;
2005-04-17 02:20:36 +04:00
}
/**
2005-10-25 09:44:30 +04:00
* ata_pio_block - start PIO on a block
* @ ap : the target ata_port
2005-04-17 02:20:36 +04:00
*
* LOCKING :
2005-05-31 03:49:12 +04:00
* None . ( executing in kernel thread context )
2005-04-17 02:20:36 +04:00
*/
static void ata_pio_block ( struct ata_port * ap )
{
struct ata_queued_cmd * qc ;
u8 status ;
/*
2005-10-25 09:44:30 +04:00
* This is purely heuristic . This is a fast path .
2005-04-17 02:20:36 +04:00
* Sometimes when we enter , BSY will be cleared in
* a chk - status or two . If not , the drive is probably seeking
* or something . Snooze for a couple msecs , then
* chk - status again . If still busy , fall back to
2005-09-27 13:36:35 +04:00
* HSM_ST_POLL state .
2005-04-17 02:20:36 +04:00
*/
status = ata_busy_wait ( ap , ATA_BUSY , 5 ) ;
if ( status & ATA_BUSY ) {
msleep ( 2 ) ;
status = ata_busy_wait ( ap , ATA_BUSY , 10 ) ;
if ( status & ATA_BUSY ) {
2005-09-27 13:36:35 +04:00
ap - > hsm_task_state = HSM_ST_POLL ;
2005-04-17 02:20:36 +04:00
ap - > pio_task_timeout = jiffies + ATA_TMOUT_PIO ;
return ;
}
}
qc = ata_qc_from_tag ( ap , ap - > active_tag ) ;
2006-02-11 13:11:13 +03:00
WARN_ON ( qc = = NULL ) ;
2005-04-17 02:20:36 +04:00
2005-12-06 06:34:59 +03:00
/* check error */
if ( status & ( ATA_ERR | ATA_DF ) ) {
qc - > err_mask | = AC_ERR_DEV ;
ap - > hsm_task_state = HSM_ST_ERR ;
return ;
}
/* transfer data if any */
2005-04-17 02:20:36 +04:00
if ( is_atapi_taskfile ( & qc - > tf ) ) {
2005-12-06 06:34:59 +03:00
/* DRQ=0 means no more data to transfer */
2005-04-17 02:20:36 +04:00
if ( ( status & ATA_DRQ ) = = 0 ) {
2005-09-27 13:36:35 +04:00
ap - > hsm_task_state = HSM_ST_LAST ;
2005-04-17 02:20:36 +04:00
return ;
}
atapi_pio_bytes ( qc ) ;
} else {
/* handle BSY=0, DRQ=0 as error */
if ( ( status & ATA_DRQ ) = = 0 ) {
2006-01-23 07:09:36 +03:00
qc - > err_mask | = AC_ERR_HSM ;
2005-09-27 13:36:35 +04:00
ap - > hsm_task_state = HSM_ST_ERR ;
2005-04-17 02:20:36 +04:00
return ;
}
ata_pio_sector ( qc ) ;
}
}
static void ata_pio_error ( struct ata_port * ap )
{
struct ata_queued_cmd * qc ;
2005-10-30 12:44:42 +03:00
2005-04-17 02:20:36 +04:00
qc = ata_qc_from_tag ( ap , ap - > active_tag ) ;
2006-02-11 13:11:13 +03:00
WARN_ON ( qc = = NULL ) ;
2005-04-17 02:20:36 +04:00
2006-02-13 13:55:25 +03:00
if ( qc - > tf . command ! = ATA_CMD_PACKET )
printk ( KERN_WARNING " ata%u: PIO error \n " , ap - > id ) ;
2005-12-05 10:40:15 +03:00
/* make sure qc->err_mask is available to
* know what ' s wrong and recover
*/
2006-02-11 13:11:13 +03:00
WARN_ON ( qc - > err_mask = = 0 ) ;
2005-12-05 10:40:15 +03:00
2005-09-27 13:36:35 +04:00
ap - > hsm_task_state = HSM_ST_IDLE ;
2005-04-17 02:20:36 +04:00
2005-12-05 10:38:02 +03:00
ata_poll_qc_complete ( qc ) ;
2005-04-17 02:20:36 +04:00
}
static void ata_pio_task(void *_data)
{
	struct ata_port *ap = _data;
	unsigned long timeout;
	int qc_completed;

fsm_start:
	timeout = 0;
	qc_completed = 0;

	switch (ap->hsm_task_state) {
	case HSM_ST_IDLE:
		return;

	case HSM_ST:
		ata_pio_block(ap);
		break;

	case HSM_ST_LAST:
		qc_completed = ata_pio_complete(ap);
		break;

	case HSM_ST_POLL:
	case HSM_ST_LAST_POLL:
		timeout = ata_pio_poll(ap);
		break;

	case HSM_ST_TMOUT:
	case HSM_ST_ERR:
		ata_pio_error(ap);
		return;
	}

	if (timeout)
		ata_port_queue_task(ap, ata_pio_task, ap, timeout);
	else if (!qc_completed)
		goto fsm_start;
}
/**
 * atapi_packet_task - Write CDB bytes to hardware
 * @_data: Port to which ATAPI device is attached.
 *
 * When the device has indicated its readiness to accept
 * a CDB, this function is called.  Send the CDB.
 * If DMA is to be performed, exit immediately.
 * Otherwise, we are in polling mode, so poll
 * status until the operation succeeds or fails.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
static void atapi_packet_task(void *_data)
{
	struct ata_port *ap = _data;
	struct ata_queued_cmd *qc;
	u8 status;

	qc = ata_qc_from_tag(ap, ap->active_tag);
	WARN_ON(qc == NULL);
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	/* sleep-wait for BSY to clear */
	DPRINTK("busy wait\n");
	if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) {
		qc->err_mask |= AC_ERR_TIMEOUT;
		goto err_out;
	}

	/* make sure DRQ is set */
	status = ata_chk_status(ap);
	if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
		qc->err_mask |= AC_ERR_HSM;
		goto err_out;
	}

	/* send SCSI cdb */
	DPRINTK("send cdb\n");
	WARN_ON(qc->dev->cdb_len < 12);

	if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
	    qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
		unsigned long flags;

		/* Once we're done issuing command and kicking bmdma,
		 * irq handler takes over.  To not lose irq, we need
		 * to clear NOINTR flag before sending cdb, but
		 * interrupt handler shouldn't be invoked before we're
		 * finished.  Hence, the following locking.
		 */
		spin_lock_irqsave(&ap->host_set->lock, flags);
		ap->flags &= ~ATA_FLAG_NOINTR;
		ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
		if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
			ap->ops->bmdma_start(qc);	/* initiate bmdma */
		spin_unlock_irqrestore(&ap->host_set->lock, flags);
	} else {
		ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);

		/* PIO commands are handled by polling */
		ap->hsm_task_state = HSM_ST;
		ata_port_queue_task(ap, ata_pio_task, ap, 0);
	}

	return;

err_out:
	ata_poll_qc_complete(qc);
}
/**
 * ata_qc_timeout - Handle timeout of queued command
 * @qc: Command that timed out
 *
 * Some part of the kernel (currently, only the SCSI layer)
 * has noticed that the active command on port @ap has not
 * completed after a specified length of time.  Handle this
 * condition by disabling DMA (if necessary) and completing
 * transactions, with error if necessary.
 *
 * This also handles the case of the "lost interrupt", where
 * for some reason (possibly hardware bug, possibly driver bug)
 * an interrupt was not delivered to the driver, even though the
 * transaction completed successfully.
 *
 * LOCKING:
 * Inherited from SCSI layer (none, can sleep)
 */
static void ata_qc_timeout(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_host_set *host_set = ap->host_set;
	u8 host_stat = 0, drv_stat;
	unsigned long flags;

	DPRINTK("ENTER\n");

	ap->hsm_task_state = HSM_ST_IDLE;

	spin_lock_irqsave(&host_set->lock, flags);

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
	case ATA_PROT_ATAPI_DMA:
		host_stat = ap->ops->bmdma_status(ap);

		/* before we do anything else, clear DMA-Start bit */
		ap->ops->bmdma_stop(qc);

		/* fall through */

	default:
		ata_altstatus(ap);
		drv_stat = ata_chk_status(ap);

		/* ack bmdma irq events */
		ap->ops->irq_clear(ap);

		printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
		       ap->id, qc->tf.command, drv_stat, host_stat);

		/* complete taskfile transaction */
		qc->err_mask |= ac_err_mask(drv_stat);
		break;
	}

	spin_unlock_irqrestore(&host_set->lock, flags);

	ata_eh_qc_complete(qc);

	DPRINTK("EXIT\n");
}
/**
 * ata_eng_timeout - Handle timeout of queued command
 * @ap: Port on which timed-out command is active
 *
 * Some part of the kernel (currently, only the SCSI layer)
 * has noticed that the active command on port @ap has not
 * completed after a specified length of time.  Handle this
 * condition by disabling DMA (if necessary) and completing
 * transactions, with error if necessary.
 *
 * This also handles the case of the "lost interrupt", where
 * for some reason (possibly hardware bug, possibly driver bug)
 * an interrupt was not delivered to the driver, even though the
 * transaction completed successfully.
 *
 * LOCKING:
 * Inherited from SCSI layer (none, can sleep)
 */
void ata_eng_timeout(struct ata_port *ap)
{
	DPRINTK("ENTER\n");

	ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));

	DPRINTK("EXIT\n");
}
/**
 * ata_qc_new - Request an available ATA command, for queueing
 * @ap: Port with which the command will be associated
 *
 * LOCKING:
 * None.
 */
static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
{
	struct ata_queued_cmd *qc = NULL;
	unsigned int i;

	for (i = 0; i < ATA_MAX_QUEUE; i++)
		if (!test_and_set_bit(i, &ap->qactive)) {
			qc = ata_qc_from_tag(ap, i);
			break;
		}

	if (qc)
		qc->tag = i;

	return qc;
}
/**
 * ata_qc_new_init - Request an available ATA command, and initialize it
 * @ap: Port associated with device @dev
 * @dev: Device from whom we request an available command structure
 *
 * LOCKING:
 * None.
 */
struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
				       struct ata_device *dev)
{
	struct ata_queued_cmd *qc;

	qc = ata_qc_new(ap);
	if (qc) {
		qc->scsicmd = NULL;
		qc->ap = ap;
		qc->dev = dev;

		ata_qc_reinit(qc);
	}

	return qc;
}
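
/*
 * Illustrative sketch (not part of this file): the usual life cycle of a
 * queued command is ata_qc_new_init() -> fill in qc->tf -> ata_qc_issue(),
 * with ata_qc_free() returning the tag if issue fails.  The foo_* name is
 * hypothetical and completion handling is omitted for brevity.
 */
#if 0
static unsigned int foo_issue_idle_immediate(struct ata_port *ap,
					     struct ata_device *dev)
{
	struct ata_queued_cmd *qc;
	unsigned int err_mask;

	qc = ata_qc_new_init(ap, dev);
	if (qc == NULL)
		return AC_ERR_SYSTEM;		/* no free command slot */

	qc->tf.command = ATA_CMD_IDLEIMMEDIATE;
	qc->tf.flags |= ATA_TFLAG_DEVICE;
	qc->tf.protocol = ATA_PROT_NODATA;

	err_mask = ata_qc_issue(qc);		/* zero on success, AC_ERR_* mask on failure */
	if (err_mask)
		ata_qc_free(qc);		/* give the tag back on failure */

	return err_mask;
}
#endif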
/**
 * ata_qc_free - free unused ata_queued_cmd
 * @qc: Command to complete
 *
 * Designed to free unused ata_queued_cmd object
 * in case something prevents using it.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */
void ata_qc_free(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int tag;

	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */

	qc->flags = 0;
	tag = qc->tag;
	if (likely(ata_tag_valid(tag))) {
		if (tag == ap->active_tag)
			ap->active_tag = ATA_TAG_POISON;
		qc->tag = ATA_TAG_POISON;
		clear_bit(tag, &ap->qactive);
	}
}
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;

	/* call completion callback */
	qc->complete_fn(qc);
}
static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
	case ATA_PROT_ATAPI_DMA:
		return 1;

	case ATA_PROT_ATAPI:
	case ATA_PROT_PIO:
		if (ap->flags & ATA_FLAG_PIO_DMA)
			return 1;

		/* fall through */

	default:
		return 0;
	}

	/* never reached */
}
/**
 * ata_qc_issue - issue taskfile to device
 * @qc: command to issue to device
 *
 * Prepare an ATA command for submission to the device.
 * This includes mapping the data into a DMA-able
 * area, filling in the S/G table, and finally
 * writing the taskfile to hardware, starting the command.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	if (ata_should_dma_map(qc)) {
		if (qc->flags & ATA_QCFLAG_SG) {
			if (ata_sg_setup(qc))
				goto sg_err;
		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
			if (ata_sg_setup_one(qc))
				goto sg_err;
		}
	} else {
		qc->flags &= ~ATA_QCFLAG_DMAMAP;
	}

	ap->ops->qc_prep(qc);

	qc->ap->active_tag = qc->tag;
	qc->flags |= ATA_QCFLAG_ACTIVE;

	return ap->ops->qc_issue(qc);

sg_err:
	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	return AC_ERR_SYSTEM;
}
/**
 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
 * @qc: command to issue to device
 *
 * Using various libata functions and hooks, this function
 * starts an ATA command.  ATA commands are grouped into
 * classes called "protocols", and issuing each type of protocol
 * is slightly different.
 *
 * May be used as the qc_issue() entry in ata_port_operations.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	ata_dev_select(ap, qc->dev->devno, 1, 0);

	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		ata_tf_to_host(ap, &qc->tf);
		break;

	case ATA_PROT_DMA:
		ap->ops->tf_load(ap, &qc->tf);	/* load tf registers */
		ap->ops->bmdma_setup(qc);	/* set up bmdma */
		ap->ops->bmdma_start(qc);	/* initiate bmdma */
		break;

	case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
		ata_qc_set_polling(qc);
		ata_tf_to_host(ap, &qc->tf);
		ap->hsm_task_state = HSM_ST;
		ata_port_queue_task(ap, ata_pio_task, ap, 0);
		break;

	case ATA_PROT_ATAPI:
		ata_qc_set_polling(qc);
		ata_tf_to_host(ap, &qc->tf);
		ata_port_queue_task(ap, atapi_packet_task, ap, 0);
		break;

	case ATA_PROT_ATAPI_NODATA:
		ap->flags |= ATA_FLAG_NOINTR;
		ata_tf_to_host(ap, &qc->tf);
		ata_port_queue_task(ap, atapi_packet_task, ap, 0);
		break;

	case ATA_PROT_ATAPI_DMA:
		ap->flags |= ATA_FLAG_NOINTR;
		ap->ops->tf_load(ap, &qc->tf);	/* load tf registers */
		ap->ops->bmdma_setup(qc);	/* set up bmdma */
		ata_port_queue_task(ap, atapi_packet_task, ap, 0);
		break;

	default:
		WARN_ON(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}
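
/*
 * Illustrative sketch (assumption, not taken from this driver): a low-level
 * driver with its own DMA engine can still reuse ata_qc_issue_prot() for the
 * taskfile protocols it does not handle itself.  foo_qc_issue() and
 * foo_start_dma() are hypothetical names.
 */
#if 0
static unsigned int foo_qc_issue(struct ata_queued_cmd *qc)
{
	/* hand ATA DMA commands to the controller-specific engine... */
	if (qc->tf.protocol == ATA_PROT_DMA)
		return foo_start_dma(qc);

	/* ...and fall back to the generic helper for everything else */
	return ata_qc_issue_prot(qc);
}
#endif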
/**
 * ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction
 * @qc: Info associated with this ATA transaction.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */
static void ata_bmdma_setup_mmio(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u8 dmactl;
	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;

	/* load PRD table addr. */
	mb();	/* make sure PRD table writes are visible to controller */
	writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = readb(mmio + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	if (!rw)
		dmactl |= ATA_DMA_WR;
	writeb(dmactl, mmio + ATA_DMA_CMD);

	/* issue r/w command */
	ap->ops->exec_command(ap, &qc->tf);
}
/**
 * ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction
 * @qc: Info associated with this ATA transaction.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */
static void ata_bmdma_start_mmio(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
	u8 dmactl;

	/* start host DMA transaction */
	dmactl = readb(mmio + ATA_DMA_CMD);
	writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);

	/* Strictly, one may wish to issue a readb() here, to
	 * flush the mmio write.  However, control also passes
	 * to the hardware at this point, and it will interrupt
	 * us when we are to resume control.  So, in effect,
	 * we don't care when the mmio write flushes.
	 * Further, a read of the DMA status register _immediately_
	 * following the write may not be what certain flaky hardware
	 * expects, so I think it is best to not add a readb()
	 * without first testing all the MMIO ATA cards/mobos.
	 * Or maybe I'm just being paranoid.
	 */
}
/**
 * ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
 * @qc: Info associated with this ATA transaction.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */
static void ata_bmdma_setup_pio(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u8 dmactl;

	/* load PRD table addr. */
	outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	if (!rw)
		dmactl |= ATA_DMA_WR;
	outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* issue r/w command */
	ap->ops->exec_command(ap, &qc->tf);
}

/**
 * ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
 * @qc: Info associated with this ATA transaction.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */
static void ata_bmdma_start_pio(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	u8 dmactl;

	/* start host DMA transaction */
	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	outb(dmactl | ATA_DMA_START,
	     ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}
/**
 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
 * @qc: Info associated with this ATA transaction.
 *
 * Writes the ATA_DMA_START flag to the DMA command register.
 *
 * May be used as the bmdma_start() entry in ata_port_operations.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */
void ata_bmdma_start(struct ata_queued_cmd *qc)
{
	if (qc->ap->flags & ATA_FLAG_MMIO)
		ata_bmdma_start_mmio(qc);
	else
		ata_bmdma_start_pio(qc);
}

/**
 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
 * @qc: Info associated with this ATA transaction.
 *
 * Writes address of PRD table to device's PRD Table Address
 * register, sets the DMA control register, and calls
 * ops->exec_command() to start the transfer.
 *
 * May be used as the bmdma_setup() entry in ata_port_operations.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */
void ata_bmdma_setup(struct ata_queued_cmd *qc)
{
	if (qc->ap->flags & ATA_FLAG_MMIO)
		ata_bmdma_setup_mmio(qc);
	else
		ata_bmdma_setup_pio(qc);
}
/**
 * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
 * @ap: Port associated with this ATA transaction.
 *
 * Clear interrupt and error flags in DMA status register.
 *
 * May be used as the irq_clear() entry in ata_port_operations.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */
void ata_bmdma_irq_clear(struct ata_port *ap)
{
	if (ap->flags & ATA_FLAG_MMIO) {
		void __iomem *mmio = ((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS;
		writeb(readb(mmio), mmio);
	} else {
		unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
		outb(inb(addr), addr);
	}
}
/**
 * ata_bmdma_status - Read PCI IDE BMDMA status
 * @ap: Port associated with this ATA transaction.
 *
 * Read and return BMDMA status register.
 *
 * May be used as the bmdma_status() entry in ata_port_operations.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */
u8 ata_bmdma_status(struct ata_port *ap)
{
	u8 host_stat;

	if (ap->flags & ATA_FLAG_MMIO) {
		void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
		host_stat = readb(mmio + ATA_DMA_STATUS);
	} else
		host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);

	return host_stat;
}
/**
 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
 * @qc: Command we are ending DMA for
 *
 * Clears the ATA_DMA_START flag in the dma control register
 *
 * May be used as the bmdma_stop() entry in ata_port_operations.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */
void ata_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	if (ap->flags & ATA_FLAG_MMIO) {
		void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;

		/* clear start/stop bit */
		writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
		       mmio + ATA_DMA_CMD);
	} else {
		/* clear start/stop bit */
		outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
		     ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	}

	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
	ata_altstatus(ap);	/* dummy read */
}
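
/*
 * Illustrative sketch (hypothetical "foo" driver, not part of libata): a
 * conventional PCI IDE style controller can be driven almost entirely by the
 * helpers above, wired into ata_port_operations like this.
 */
#if 0
static const struct ata_port_operations foo_bmdma_ops = {
	.port_disable	= ata_port_disable,

	.tf_load	= ata_tf_load,
	.tf_read	= ata_tf_read,
	.check_status	= ata_check_status,
	.exec_command	= ata_exec_command,
	.dev_select	= ata_std_dev_select,

	.bmdma_setup	= ata_bmdma_setup,
	.bmdma_start	= ata_bmdma_start,
	.bmdma_stop	= ata_bmdma_stop,
	.bmdma_status	= ata_bmdma_status,

	.qc_prep	= ata_qc_prep,
	.qc_issue	= ata_qc_issue_prot,
	.eng_timeout	= ata_eng_timeout,

	.irq_handler	= ata_interrupt,
	.irq_clear	= ata_bmdma_irq_clear,

	.port_start	= ata_port_start,
	.port_stop	= ata_port_stop,
	.host_stop	= ata_host_stop,
};
#endif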
/**
 * ata_host_intr - Handle host interrupt for given (port, task)
 * @ap: Port on which interrupt arrived (possibly...)
 * @qc: Taskfile currently active in engine
 *
 * Handle host interrupt for given queued command.  Currently,
 * only DMA interrupts are handled.  All other commands are
 * handled via polling with interrupts disabled (nIEN bit).
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 *
 * RETURNS:
 * One if interrupt was handled, zero if not (shared irq).
 */
inline unsigned int ata_host_intr(struct ata_port *ap,
				  struct ata_queued_cmd *qc)
{
	u8 status, host_stat;

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
	case ATA_PROT_ATAPI_DMA:
	case ATA_PROT_ATAPI:
		/* check status of DMA engine */
		host_stat = ap->ops->bmdma_status(ap);
		VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);

		/* if it's not our irq... */
		if (!(host_stat & ATA_DMA_INTR))
			goto idle_irq;

		/* before we do anything else, clear DMA-Start bit */
		ap->ops->bmdma_stop(qc);

		/* fall through */

	case ATA_PROT_ATAPI_NODATA:
	case ATA_PROT_NODATA:
		/* check altstatus */
		status = ata_altstatus(ap);
		if (status & ATA_BUSY)
			goto idle_irq;

		/* check main status, clearing INTRQ */
		status = ata_chk_status(ap);
		if (unlikely(status & ATA_BUSY))
			goto idle_irq;
		DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
			ap->id, qc->tf.protocol, status);

		/* ack bmdma irq events */
		ap->ops->irq_clear(ap);

		/* complete taskfile transaction */
		qc->err_mask |= ac_err_mask(status);
		ata_qc_complete(qc);
		break;

	default:
		goto idle_irq;
	}

	return 1;	/* irq handled */

idle_irq:
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	if ((ap->stats.idle_irq % 1000) == 0) {
		handled = 1;
		ata_irq_ack(ap, 0); /* debug trap */
		printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
	}
#endif
	return 0;	/* irq not handled */
}
/**
 * ata_interrupt - Default ATA host interrupt handler
 * @irq: irq line (unused)
 * @dev_instance: pointer to our ata_host_set information structure
 * @regs: unused
 *
 * Default interrupt handler for PCI IDE devices.  Calls
 * ata_host_intr() for each port that is not disabled.
 *
 * LOCKING:
 * Obtains host_set lock during operation.
 *
 * RETURNS:
 * IRQ_NONE or IRQ_HANDLED.
 */
irqreturn_t ata_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct ata_host_set *host_set = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
	spin_lock_irqsave(&host_set->lock, flags);

	for (i = 0; i < host_set->n_ports; i++) {
		struct ata_port *ap;

		ap = host_set->ports[i];
		if (ap &&
		    !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.ctl & ATA_NIEN)) &&
			    (qc->flags & ATA_QCFLAG_ACTIVE))
				handled |= ata_host_intr(ap, qc);
		}
	}

	spin_unlock_irqrestore(&host_set->lock, flags);

	return IRQ_RETVAL(handled);
}
/*
 * Execute a 'simple' command, that only consists of the opcode 'cmd' itself,
 * without filling any other registers
 */
static int ata_do_simple_cmd(struct ata_port *ap, struct ata_device *dev,
			     u8 cmd)
{
	struct ata_taskfile tf;
	int err;

	ata_tf_init(ap, &tf, dev->devno);

	tf.command = cmd;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	err = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
	if (err)
		printk(KERN_ERR "%s: ata command failed: %d\n",
		       __FUNCTION__, err);

	return err;
}

static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev)
{
	u8 cmd;

	if (!ata_try_flush_cache(dev))
		return 0;

	if (ata_id_has_flush_ext(dev->id))
		cmd = ATA_CMD_FLUSH_EXT;
	else
		cmd = ATA_CMD_FLUSH;

	return ata_do_simple_cmd(ap, dev, cmd);
}

static int ata_standby_drive(struct ata_port *ap, struct ata_device *dev)
{
	return ata_do_simple_cmd(ap, dev, ATA_CMD_STANDBYNOW1);
}

static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
{
	return ata_do_simple_cmd(ap, dev, ATA_CMD_IDLEIMMEDIATE);
}
/**
 * ata_device_resume - wakeup a previously suspended device
 * @ap: port the device is connected to
 * @dev: the device to resume
 *
 * Kick the drive back into action, by sending it an idle immediate
 * command and making sure its transfer mode matches between drive
 * and host.
 *
 */
int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
{
	if (ap->flags & ATA_FLAG_SUSPENDED) {
		ap->flags &= ~ATA_FLAG_SUSPENDED;
		ata_set_mode(ap);
	}
	if (!ata_dev_present(dev))
		return 0;
	if (dev->class == ATA_DEV_ATA)
		ata_start_drive(ap, dev);

	return 0;
}

/**
 * ata_device_suspend - prepare a device for suspend
 * @ap: port the device is connected to
 * @dev: the device to suspend
 *
 * Flush the cache on the drive, if appropriate, then issue a
 * standbynow command.
 */
int ata_device_suspend(struct ata_port *ap, struct ata_device *dev)
{
	if (!ata_dev_present(dev))
		return 0;
	if (dev->class == ATA_DEV_ATA)
		ata_flush_cache(ap, dev);

	ata_standby_drive(ap, dev);
	ap->flags |= ATA_FLAG_SUSPENDED;

	return 0;
}
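
/*
 * Illustrative sketch (hypothetical names, simplified): a controller driver's
 * PCI suspend hook would typically quiesce each attached drive with
 * ata_device_suspend() before powering the controller down via
 * ata_pci_device_suspend() (defined further below under CONFIG_PCI).
 */
#if 0
static int foo_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);
	unsigned int i, j;

	for (i = 0; i < host_set->n_ports; i++) {
		struct ata_port *ap = host_set->ports[i];

		/* ata_device_suspend() ignores absent devices */
		for (j = 0; j < ATA_MAX_DEVICES; j++)
			ata_device_suspend(ap, &ap->device[j]);
	}

	return ata_pci_device_suspend(pdev, state);
}
#endif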
/**
 * ata_port_start - Set port up for dma.
 * @ap: Port to initialize
 *
 * Called just after data structures for each port are
 * initialized.  Allocates space for PRD table.
 *
 * May be used as the port_start() entry in ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */
int ata_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host_set->dev;
	int rc;

	ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
	if (!ap->prd)
		return -ENOMEM;

	rc = ata_pad_alloc(ap, dev);
	if (rc) {
		dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
		return rc;
	}

	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);

	return 0;
}
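
/*
 * Illustrative sketch (hypothetical foo driver): a low-level driver that needs
 * per-port state of its own usually calls ata_port_start() first and undoes it
 * with ata_port_stop() on failure.  struct foo_port_priv is an assumption.
 */
#if 0
static int foo_port_start(struct ata_port *ap)
{
	struct foo_port_priv *pp;
	int rc;

	rc = ata_port_start(ap);	/* allocates the PRD table and pad buffer */
	if (rc)
		return rc;

	pp = kzalloc(sizeof(*pp), GFP_KERNEL);
	if (!pp) {
		ata_port_stop(ap);
		return -ENOMEM;
	}
	ap->private_data = pp;

	return 0;
}
#endif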
/**
 * ata_port_stop - Undo ata_port_start()
 * @ap: Port to shut down
 *
 * Frees the PRD table.
 *
 * May be used as the port_stop() entry in ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_port_stop(struct ata_port *ap)
{
	struct device *dev = ap->host_set->dev;

	dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
	ata_pad_free(ap, dev);
}

void ata_host_stop(struct ata_host_set *host_set)
{
	if (host_set->mmio_base)
		iounmap(host_set->mmio_base);
}
/**
 * ata_host_remove - Unregister SCSI host structure with upper layers
 * @ap: Port to unregister
 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
 *
 * LOCKING:
 * Inherited from caller.
 */
static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
{
	struct Scsi_Host *sh = ap->host;

	DPRINTK("ENTER\n");

	if (do_unregister)
		scsi_remove_host(sh);

	ap->ops->port_stop(ap);
}
/**
 * ata_host_init - Initialize an ata_port structure
 * @ap: Structure to initialize
 * @host: associated SCSI mid-layer structure
 * @host_set: Collection of hosts to which @ap belongs
 * @ent: Probe information provided by low-level driver
 * @port_no: Port number associated with this ata_port
 *
 * Initialize a new ata_port structure, and its associated
 * scsi_host.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
			  struct ata_host_set *host_set,
			  const struct ata_probe_ent *ent, unsigned int port_no)
{
	unsigned int i;

	host->max_id = 16;
	host->max_lun = 1;
	host->max_channel = 1;
	host->unique_id = ata_unique_id++;
	host->max_cmd_len = 12;

	ap->flags = ATA_FLAG_PORT_DISABLED;
	ap->id = host->unique_id;
	ap->host = host;
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host_set = host_set;
	ap->port_no = port_no;
	ap->hard_port_no =
		ent->legacy_mode ? ent->hard_port_no : port_no;
	ap->pio_mask = ent->pio_mask;
	ap->mwdma_mask = ent->mwdma_mask;
	ap->udma_mask = ent->udma_mask;
	ap->flags |= ent->host_flags;
	ap->ops = ent->port_ops;
	ap->cbl = ATA_CBL_NONE;
	ap->active_tag = ATA_TAG_POISON;
	ap->last_ctl = 0xFF;

	INIT_WORK(&ap->port_task, NULL, NULL);
	INIT_LIST_HEAD(&ap->eh_done_q);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ap->device[i].devno = i;

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif

	memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
}
/**
 * ata_host_add - Attach low-level ATA driver to system
 * @ent: Information provided by low-level driver
 * @host_set: Collections of ports to which we add
 * @port_no: Port number associated with this host
 *
 * Attach low-level ATA driver to system.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 * RETURNS:
 * New ata_port on success, NULL on error.
 */
static struct ata_port *ata_host_add(const struct ata_probe_ent *ent,
				     struct ata_host_set *host_set,
				     unsigned int port_no)
{
	struct Scsi_Host *host;
	struct ata_port *ap;
	int rc;

	DPRINTK("ENTER\n");
	host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
	if (!host)
		return NULL;

	ap = (struct ata_port *) &host->hostdata[0];

	ata_host_init(ap, host, host_set, ent, port_no);

	rc = ap->ops->port_start(ap);
	if (rc)
		goto err_out;

	return ap;

err_out:
	scsi_host_put(host);
	return NULL;
}
/**
 * ata_device_add - Register hardware device with ATA and SCSI layers
 * @ent: Probe information describing hardware device to be registered
 *
 * This function processes the information provided in the probe
 * information struct @ent, allocates the necessary ATA and SCSI
 * host information structures, initializes them, and registers
 * everything with requisite kernel subsystems.
 *
 * This function requests irqs, probes the ATA bus, and probes
 * the SCSI bus.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 * RETURNS:
 * Number of ports registered.  Zero on error (no ports registered).
 */
int ata_device_add(const struct ata_probe_ent *ent)
{
	unsigned int count = 0, i;
	struct device *dev = ent->dev;
	struct ata_host_set *host_set;

	DPRINTK("ENTER\n");
	/* alloc a container for our list of ATA ports (buses) */
	host_set = kzalloc(sizeof(struct ata_host_set) +
			   (ent->n_ports * sizeof(void *)), GFP_KERNEL);
	if (!host_set)
		return 0;
	spin_lock_init(&host_set->lock);

	host_set->dev = dev;
	host_set->n_ports = ent->n_ports;
	host_set->irq = ent->irq;
	host_set->mmio_base = ent->mmio_base;
	host_set->private_data = ent->private_data;
	host_set->ops = ent->port_ops;

	/* register each port bound to this device */
	for (i = 0; i < ent->n_ports; i++) {
		struct ata_port *ap;
		unsigned long xfer_mode_mask;

		ap = ata_host_add(ent, host_set, i);
		if (!ap)
			goto err_out;

		host_set->ports[i] = ap;
		xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
				 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
				 (ap->pio_mask << ATA_SHIFT_PIO);

		/* print per-port info to dmesg */
		printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
				 "bmdma 0x%lX irq %lu\n",
			ap->id,
			ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
			ata_mode_string(xfer_mode_mask),
			ap->ioaddr.cmd_addr,
			ap->ioaddr.ctl_addr,
			ap->ioaddr.bmdma_addr,
			ent->irq);

		ata_chk_status(ap);
		host_set->ops->irq_clear(ap);
		count++;
	}

	if (!count)
		goto err_free_ret;

	/* obtain irq, that is shared between channels */
	if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
			DRV_NAME, host_set))
		goto err_out;

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < count; i++) {
		struct ata_port *ap;
		int rc;

		ap = host_set->ports[i];

		DPRINTK("ata%u: bus probe begin\n", ap->id);
		rc = ata_bus_probe(ap);
		DPRINTK("ata%u: bus probe end\n", ap->id);

		if (rc) {
			/* FIXME: do something useful here?
			 * Current libata behavior will
			 * tear down everything when
			 * the module is removed
			 * or the h/w is unplugged.
			 */
		}

		rc = scsi_add_host(ap->host, dev);
		if (rc) {
			printk(KERN_ERR "ata%u: scsi_add_host failed\n",
			       ap->id);
			/* FIXME: do something useful here */
			/* FIXME: handle unconditional calls to
			 * scsi_scan_host and ata_host_remove, below,
			 * at the very least
			 */
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("host probe begin\n");
	for (i = 0; i < count; i++) {
		struct ata_port *ap = host_set->ports[i];

		ata_scsi_scan_host(ap);
	}

	dev_set_drvdata(dev, host_set);

	VPRINTK("EXIT, returning %u\n", ent->n_ports);
	return ent->n_ports; /* success */

err_out:
	for (i = 0; i < count; i++) {
		ata_host_remove(host_set->ports[i], 1);
		scsi_host_put(host_set->ports[i]->host);
	}
err_free_ret:
	kfree(host_set);
	VPRINTK("EXIT, returning 0\n");
	return 0;
}
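
/*
 * Illustrative sketch (heavily simplified, hypothetical foo driver): a PCI
 * probe routine fills a struct ata_probe_ent and hands it to ata_device_add().
 * Error handling, resource reservation and the second port are omitted;
 * SA_SHIRQ is the shared-irq flag of this kernel generation, and foo_sht /
 * foo_bmdma_ops refer to the hypothetical structures sketched earlier.
 */
#if 0
static int foo_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ata_probe_ent *probe_ent;

	probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
	if (!probe_ent)
		return -ENOMEM;

	probe_ent->dev = pci_dev_to_dev(pdev);
	probe_ent->sht = &foo_sht;		/* hypothetical scsi_host_template */
	probe_ent->port_ops = &foo_bmdma_ops;
	probe_ent->n_ports = 1;
	probe_ent->irq = pdev->irq;
	probe_ent->irq_flags = SA_SHIRQ;
	probe_ent->pio_mask = 0x1f;		/* PIO modes 0-4 */
	probe_ent->mwdma_mask = 0x07;		/* MWDMA modes 0-2 */
	probe_ent->udma_mask = 0x3f;		/* UDMA modes 0-5 */

	probe_ent->port[0].cmd_addr = pci_resource_start(pdev, 0);
	probe_ent->port[0].ctl_addr = pci_resource_start(pdev, 1) + 2;	/* device control reg */
	probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4);
	ata_std_ports(&probe_ent->port[0]);

	if (!ata_device_add(probe_ent)) {	/* returns ports registered, 0 on error */
		kfree(probe_ent);
		return -ENODEV;
	}

	kfree(probe_ent);
	return 0;
}
#endif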
/**
 * ata_host_set_remove - PCI layer callback for device removal
 * @host_set: ATA host set that was removed
 *
 * Unregister all objects associated with this host set.  Free those
 * objects.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 */
void ata_host_set_remove(struct ata_host_set *host_set)
{
	struct ata_port *ap;
	unsigned int i;

	for (i = 0; i < host_set->n_ports; i++) {
		ap = host_set->ports[i];
		scsi_remove_host(ap->host);
	}

	free_irq(host_set->irq, host_set);

	for (i = 0; i < host_set->n_ports; i++) {
		ap = host_set->ports[i];

		ata_scsi_release(ap->host);

		if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
			struct ata_ioports *ioaddr = &ap->ioaddr;

			if (ioaddr->cmd_addr == 0x1f0)
				release_region(0x1f0, 8);
			else if (ioaddr->cmd_addr == 0x170)
				release_region(0x170, 8);
		}

		scsi_host_put(ap->host);
	}

	if (host_set->ops->host_stop)
		host_set->ops->host_stop(host_set);

	kfree(host_set);
}
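
/*
 * Illustrative sketch: a non-PCI low-level driver's removal path can use
 * ata_host_set_remove() directly; the surrounding bus glue (here a generic
 * struct device, names hypothetical) only has to drop its drvdata pointer.
 */
#if 0
static int foo_bus_remove(struct device *dev)
{
	struct ata_host_set *host_set = dev_get_drvdata(dev);

	ata_host_set_remove(host_set);
	dev_set_drvdata(dev, NULL);
	return 0;
}
#endif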
/**
 * ata_scsi_release - SCSI layer callback hook for host unload
 * @host: libata host to be unloaded
 *
 * Performs all duties necessary to shut down a libata port...
 * Kill port kthread, disable port, and release resources.
 *
 * LOCKING:
 * Inherited from SCSI layer.
 *
 * RETURNS:
 * One.
 */
int ata_scsi_release(struct Scsi_Host *host)
{
	struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
	int i;

	DPRINTK("ENTER\n");

	ap->ops->port_disable(ap);
	ata_host_remove(ap, 0);
	for (i = 0; i < ATA_MAX_DEVICES; i++)
		kfree(ap->device[i].id);

	DPRINTK("EXIT\n");
	return 1;
}
/**
 * ata_std_ports - initialize ioaddr with standard port offsets.
 * @ioaddr: IO address structure to be initialized
 *
 * Utility function which initializes data_addr, error_addr,
 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
 * device_addr, status_addr, and command_addr to standard offsets
 * relative to cmd_addr.
 *
 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
 */
void ata_std_ports(struct ata_ioports *ioaddr)
{
	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}
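
/*
 * Illustrative sketch: given just the command block base and control register
 * address (here the traditional primary-channel legacy values, as an example),
 * ata_std_ports() derives the remaining taskfile register addresses.
 */
#if 0
	struct ata_ioports *ioaddr = &probe_ent->port[0];

	ioaddr->cmd_addr = 0x1f0;		/* legacy primary command block */
	ioaddr->altstatus_addr =
	ioaddr->ctl_addr = 0x3f6;		/* legacy primary control register */
	ata_std_ports(ioaddr);			/* fills data/error/.../command_addr */
#endif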
#ifdef CONFIG_PCI

void ata_pci_host_stop(struct ata_host_set *host_set)
{
	struct pci_dev *pdev = to_pci_dev(host_set->dev);

	pci_iounmap(pdev, host_set->mmio_base);
}
/**
 * ata_pci_remove_one - PCI layer callback for device removal
 * @pdev: PCI device that was removed
 *
 * PCI layer indicates to libata via this hook that
 * hot-unplug or module unload event has occurred.
 * Handle this by unregistering all objects associated
 * with this PCI device.  Free those objects.  Then finally
 * release PCI resources and disable device.
 *
 * LOCKING:
 * Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	struct device *dev = pci_dev_to_dev(pdev);
	struct ata_host_set *host_set = dev_get_drvdata(dev);

	ata_host_set_remove(host_set);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	dev_set_drvdata(dev, NULL);
}
/* move to PCI subsystem */
int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
{
	unsigned long tmp = 0;

	switch (bits->width) {
	case 1: {
		u8 tmp8 = 0;
		pci_read_config_byte(pdev, bits->reg, &tmp8);
		tmp = tmp8;
		break;
	}
	case 2: {
		u16 tmp16 = 0;
		pci_read_config_word(pdev, bits->reg, &tmp16);
		tmp = tmp16;
		break;
	}
	case 4: {
		u32 tmp32 = 0;
		pci_read_config_dword(pdev, bits->reg, &tmp32);
		tmp = tmp32;
		break;
	}
	default:
		return -EINVAL;
	}

	tmp &= bits->mask;

	return (tmp == bits->val) ? 1 : 0;
}
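
/*
 * Illustrative sketch (register offsets and masks are made up): the usual
 * consumer of pci_test_config_bits() is a "port enabled?" check against a
 * bit in the controller's PCI configuration space.
 */
#if 0
static const struct pci_bits foo_port_enable_bits[] = {
	{ .reg = 0x41, .width = 1, .mask = 0x80, .val = 0x80 },	/* port 0 */
	{ .reg = 0x43, .width = 1, .mask = 0x80, .val = 0x80 },	/* port 1 */
};

static int foo_port_enabled(struct pci_dev *pdev, unsigned int port_no)
{
	/* returns 1 if the masked config bits match, 0 if not */
	return pci_test_config_bits(pdev, &foo_port_enable_bits[port_no]);
}
#endif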
int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

int ata_pci_device_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_device(pdev);
	pci_set_master(pdev);
	return 0;
}
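
/*
 * Illustrative sketch (hypothetical foo driver): the two helpers above are
 * meant to be plugged straight into a low-level driver's pci_driver, next to
 * ata_pci_remove_one().
 */
#if 0
static struct pci_driver foo_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= foo_pci_tbl,		/* hypothetical PCI ID table */
	.probe		= foo_init_one,		/* see the probe sketch above */
	.remove		= ata_pci_remove_one,
	.suspend	= ata_pci_device_suspend,
	.resume		= ata_pci_device_resume,
};
#endif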
#endif /* CONFIG_PCI */


static int __init ata_init(void)
{
	ata_wq = create_workqueue("ata");
	if (!ata_wq)
		return -ENOMEM;

	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;
}

static void __exit ata_exit(void)
{
	destroy_workqueue(ata_wq);
}

module_init(ata_init);
module_exit(ata_exit);
static unsigned long ratelimit_time;
static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;

int ata_ratelimit(void)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&ata_ratelimit_lock, flags);

	if (time_after(jiffies, ratelimit_time)) {
		rc = 1;
		ratelimit_time = jiffies + (HZ/5);
	} else
		rc = 0;

	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);

	return rc;
}
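
/*
 * Illustrative use of ata_ratelimit(): throttle a diagnostic that could fire
 * on every interrupt down to roughly five messages per second (HZ/5 above).
 */
#if 0
	if (ata_ratelimit())
		printk(KERN_WARNING "ata%u: spurious interrupt ignored\n", ap->id);
#endif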
/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_device_add);
EXPORT_SYMBOL_GPL(ata_host_set_remove);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(__ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_port_stop);
EXPORT_SYMBOL_GPL(ata_host_stop);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_probeinit);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_std_probe_reset);
EXPORT_SYMBOL_GPL(ata_drive_probe_reset);
EXPORT_SYMBOL_GPL(ata_dev_revalidate);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_port_queue_task);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_timed_out);
EXPORT_SYMBOL_GPL(ata_scsi_error);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_release);
EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);

EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);

#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_host_stop);
EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif /* CONFIG_PCI */

EXPORT_SYMBOL_GPL(ata_device_suspend);
EXPORT_SYMBOL_GPL(ata_device_resume);
EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
EXPORT_SYMBOL_GPL(ata_scsi_device_resume);