/*
 * Driver for sTec s1120 PCIe SSDs. sTec was acquired in 2013 by HGST and HGST
 * was acquired by Western Digital in 2012.
 *
 * Copyright 2012 sTec, Inc.
 * Copyright (c) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is part of the Linux kernel, and is made available under
 * the terms of the GNU General Public License version 2.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/hdreg.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/version.h>
#include <linux/err.h>
#include <linux/aer.h>
#include <linux/wait.h>
#include <linux/stringify.h>
#include <scsi/scsi.h>
#include <scsi/sg.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include "skd_s1120.h"
static int skd_dbg_level;
static int skd_isr_comp_limit = 4;

enum {
	SKD_FLUSH_INITIALIZER,
	SKD_FLUSH_ZERO_SIZE_FIRST,
	SKD_FLUSH_DATA_SECOND,
};
#define SKD_ASSERT(expr) \
	do { \
		if (unlikely(!(expr))) { \
			pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
			       #expr, __FILE__, __func__, __LINE__); \
		} \
	} while (0)

#define DRV_NAME "skd"
#define DRV_VERSION "2.2.1"
#define DRV_BUILD_ID "0260"
#define PFX DRV_NAME ": "
MODULE_LICENSE("GPL");

MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver (b" DRV_BUILD_ID ")");
MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);
#define PCI_VENDOR_ID_STEC	0x1B39
#define PCI_DEVICE_ID_S1120	0x0001

#define SKD_FUA_NV		(1 << 1)
#define SKD_MINORS_PER_DEVICE	16

#define SKD_MAX_QUEUE_DEPTH	200u

#define SKD_PAUSE_TIMEOUT	(5 * 1000)

#define SKD_N_FITMSG_BYTES	(512u)
#define SKD_MAX_REQ_PER_MSG	14

#define SKD_N_SPECIAL_FITMSG_BYTES	(128u)

/* SG elements are 32 bytes, so we can make this 4096 and still be under the
 * 128KB limit. That allows 4096 * 4K = 16M xfer size.
 */
#define SKD_N_SG_PER_REQ_DEFAULT 256u
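/*
 * For illustration of the limit above: at the 4096-entry maximum,
 * 4096 * 32 bytes of SG descriptors is exactly 128KB, and 4096 4K pages
 * give the 16M transfer mentioned there. The 256-entry default
 * correspondingly allows up to 256 * 4K = 1M per request.
 */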
#define SKD_N_COMPLETION_ENTRY	256u
#define SKD_N_READ_CAP_BYTES	(8u)

#define SKD_N_INTERNAL_BYTES	(512u)

#define SKD_SKCOMP_SIZE							\
	((sizeof(struct fit_completion_entry_v1) +			\
	  sizeof(struct fit_comp_error_info)) * SKD_N_COMPLETION_ENTRY)
/* 5 bits of uniquifier, 0xF800 */
#define SKD_ID_INCR		(0x400)
#define SKD_ID_TABLE_MASK	(3u << 8u)
#define  SKD_ID_RW_REQUEST	(0u << 8u)
#define  SKD_ID_INTERNAL	(1u << 8u)
#define  SKD_ID_FIT_MSG		(3u << 8u)
#define SKD_ID_SLOT_MASK	0x00FFu
#define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu
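/*
 * Example decomposition of a hypothetical id 0x0912: the slot is
 * 0x0912 & SKD_ID_SLOT_MASK = 0x12, the table is 0x0912 &
 * SKD_ID_TABLE_MASK = 0x0100 = SKD_ID_INTERNAL, and the high-order
 * uniquifier bits advance by SKD_ID_INCR each time the context is
 * reused so that stale completions can be detected.
 */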
#define SKD_N_TIMEOUT_SLOT	4u
#define SKD_TIMEOUT_SLOT_MASK	3u

#define SKD_N_MAX_SECTORS	2048u

#define SKD_MAX_RETRIES		2u

#define SKD_TIMER_SECONDS(seconds) (seconds)
#define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))

#define INQ_STD_NBYTES 36
enum skd_drvr_state {
	SKD_DRVR_STATE_LOAD,
	SKD_DRVR_STATE_IDLE,
	SKD_DRVR_STATE_BUSY,
	SKD_DRVR_STATE_STARTING,
	SKD_DRVR_STATE_ONLINE,
	SKD_DRVR_STATE_PAUSING,
	SKD_DRVR_STATE_PAUSED,
	SKD_DRVR_STATE_DRAINING_TIMEOUT,
	SKD_DRVR_STATE_RESTARTING,
	SKD_DRVR_STATE_RESUMING,
	SKD_DRVR_STATE_STOPPING,
	SKD_DRVR_STATE_FAULT,
	SKD_DRVR_STATE_DISAPPEARED,
	SKD_DRVR_STATE_PROTOCOL_MISMATCH,
	SKD_DRVR_STATE_BUSY_ERASE,
	SKD_DRVR_STATE_BUSY_SANITIZE,
	SKD_DRVR_STATE_BUSY_IMMINENT,
	SKD_DRVR_STATE_WAIT_BOOT,
	SKD_DRVR_STATE_SYNCING,
};
#define SKD_WAIT_BOOT_TIMO	SKD_TIMER_SECONDS(90u)
#define SKD_STARTING_TIMO	SKD_TIMER_SECONDS(8u)
#define SKD_RESTARTING_TIMO	SKD_TIMER_MINUTES(4u)
#define SKD_DRAINING_TIMO	SKD_TIMER_SECONDS(6u)
#define SKD_BUSY_TIMO		SKD_TIMER_MINUTES(20u)
#define SKD_STARTED_BUSY_TIMO	SKD_TIMER_SECONDS(60u)
#define SKD_START_WAIT_SECONDS	90u
enum skd_req_state {
	SKD_REQ_STATE_IDLE,
	SKD_REQ_STATE_SETUP,
	SKD_REQ_STATE_BUSY,
	SKD_REQ_STATE_COMPLETED,
	SKD_REQ_STATE_TIMEOUT,
};

enum skd_check_status_action {
	SKD_CHECK_STATUS_REPORT_GOOD,
	SKD_CHECK_STATUS_REPORT_SMART_ALERT,
	SKD_CHECK_STATUS_REQUEUE_REQUEST,
	SKD_CHECK_STATUS_REPORT_ERROR,
	SKD_CHECK_STATUS_BUSY_IMMINENT,
};
struct skd_msg_buf {
	struct fit_msg_hdr	fmh;
	struct skd_scsi_request	scsi[SKD_MAX_REQ_PER_MSG];
};

struct skd_fitmsg_context {
	u32 id;
	u32 length;

	struct skd_msg_buf *msg_buf;
	dma_addr_t mb_dma_address;
};
struct skd_request_context {
	enum skd_req_state state;

	u16 id;
	u32 fitmsg_id;

	struct request *req;
	u8 flush_cmd;

	u32 timeout_stamp;
	enum dma_data_direction data_dir;
	struct scatterlist *sg;
	u32 n_sg;
	u32 sg_byte_count;

	struct fit_sg_descriptor *sksg_list;
	dma_addr_t sksg_dma_address;

	struct fit_completion_entry_v1 completion;

	struct fit_comp_error_info err_info;
};

struct skd_special_context {
	struct skd_request_context req;

	void *data_buf;
	dma_addr_t db_dma_address;

	struct skd_msg_buf *msg_buf;
	dma_addr_t mb_dma_address;
};

typedef enum skd_irq_type {
	SKD_IRQ_LEGACY,
	SKD_IRQ_MSI,
	SKD_IRQ_MSIX
} skd_irq_type_t;

#define SKD_MAX_BARS 2
struct skd_device {
	void __iomem *mem_map[SKD_MAX_BARS];
	resource_size_t mem_phys[SKD_MAX_BARS];
	u32 mem_size[SKD_MAX_BARS];

	struct skd_msix_entry *msix_entries;

	struct pci_dev *pdev;
	int pcie_error_reporting_is_enabled;

	spinlock_t lock;
	struct gendisk *disk;
	struct request_queue *queue;
	struct device *class_dev;
	int gendisk_on;
	int sync_done;

	u32 devno;
	u32 major;
	char isr_name[30];

	enum skd_drvr_state state;
	u32 drive_state;

	u32 in_flight;
	u32 cur_max_queue_depth;
	u32 queue_low_water_mark;
	u32 dev_max_queue_depth;

	u32 num_fitmsg_context;
	u32 num_req_context;

	u32 timeout_slot[SKD_N_TIMEOUT_SLOT];
	u32 timeout_stamp;
	struct skd_fitmsg_context *skmsg_table;

	struct skd_request_context *skreq_table;

	struct skd_special_context internal_skspcl;
	u32 read_cap_blocksize;
	u32 read_cap_last_lba;
	int read_cap_is_valid;
	int inquiry_is_valid;
	u8 inq_serial_num[13];	/* 12 chars plus null term */

	u8 skcomp_cycle;
	u32 skcomp_ix;
	struct fit_completion_entry_v1 *skcomp_table;
	struct fit_comp_error_info *skerr_table;
	dma_addr_t cq_dma_address;

	wait_queue_head_t waitq;

	struct timer_list timer;
	u32 timer_countdown;
	u32 timer_substate;

	int sgs_per_request;

	u32 last_mtd;

	u32 proto_ver;

	int dbg_level;

	u32 connect_time_stamp;
	int connect_retries;
#define SKD_MAX_CONNECT_RETRIES 16
	u32 drive_jiffies;

	u32 timo_slot;

	struct work_struct completion_worker;
};
#define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
#define SKD_READL(DEV, OFF)      skd_reg_read32(DEV, OFF)
#define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF)

static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
{
	u32 val = readl(skdev->mem_map[1] + offset);

	if (unlikely(skdev->dbg_level >= 2))
		dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
	return val;
}

static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
				   u32 offset)
{
	writel(val, skdev->mem_map[1] + offset);
	if (unlikely(skdev->dbg_level >= 2))
		dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
}

static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
				   u32 offset)
{
	writeq(val, skdev->mem_map[1] + offset);
	if (unlikely(skdev->dbg_level >= 2))
		dev_dbg(&skdev->pdev->dev, "offset %x = %016llx\n", offset,
			val);
}
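
/*
 * All register traffic goes through BAR 1 (mem_map[1]). As an example of
 * the 64-bit accessor, the FIT command path below rings the command
 * doorbell with SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND).
 */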
#define SKD_IRQ_DEFAULT SKD_IRQ_MSI
static int skd_isr_type = SKD_IRQ_DEFAULT;
module_param(skd_isr_type, int, 0444);
MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
		 " (0==legacy, 1==MSI, 2==MSI-X, default==1)");

#define SKD_MAX_REQ_PER_MSG_DEFAULT 1
static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
module_param(skd_max_req_per_msg, int, 0444);
MODULE_PARM_DESC(skd_max_req_per_msg,
		 "Maximum SCSI requests packed in a single message."
		 " (1-" __stringify(SKD_MAX_REQ_PER_MSG) ", default==1)");

#define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
#define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
module_param(skd_max_queue_depth, int, 0444);
MODULE_PARM_DESC(skd_max_queue_depth,
		 "Maximum SCSI requests issued to s1120."
		 " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");

static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
module_param(skd_sgs_per_request, int, 0444);
MODULE_PARM_DESC(skd_sgs_per_request,
		 "Maximum SG elements per block request."
		 " (1-4096, default==256)");

static int skd_max_pass_thru = 1;
module_param(skd_max_pass_thru, int, 0444);
MODULE_PARM_DESC(skd_max_pass_thru,
		 "Maximum SCSI pass-thru at a time. IGNORED");

module_param(skd_dbg_level, int, 0444);
MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");

module_param(skd_isr_comp_limit, int, 0444);
MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");
/* Major device number dynamically assigned. */
static u32 skd_major;

static void skd_destruct(struct skd_device *skdev);
static const struct block_device_operations skd_blockdev_ops;
static void skd_send_fitmsg(struct skd_device *skdev,
			    struct skd_fitmsg_context *skmsg);
static void skd_send_special_fitmsg(struct skd_device *skdev,
				    struct skd_special_context *skspcl);
static void skd_request_fn(struct request_queue *rq);
static void skd_end_request(struct skd_device *skdev, struct request *req,
			    blk_status_t status);
static bool skd_preop_sg_list(struct skd_device *skdev,
			      struct skd_request_context *skreq);
static void skd_postop_sg_list(struct skd_device *skdev,
			       struct skd_request_context *skreq);
static void skd_restart_device(struct skd_device *skdev);
static int skd_quiesce_dev(struct skd_device *skdev);
static int skd_unquiesce_dev(struct skd_device *skdev);
static void skd_disable_interrupts(struct skd_device *skdev);
static void skd_isr_fwstate(struct skd_device *skdev);
static void skd_recover_requests(struct skd_device *skdev);
static void skd_soft_reset(struct skd_device *skdev);

const char *skd_drive_state_to_str(int state);
const char *skd_skdev_state_to_str(enum skd_drvr_state state);
static void skd_log_skdev(struct skd_device *skdev, const char *event);
static void skd_log_skreq(struct skd_device *skdev,
			  struct skd_request_context *skreq, const char *event);

/*
 *****************************************************************************
 * READ/WRITE REQUESTS
 *****************************************************************************
 */
static void skd_fail_all_pending(struct skd_device *skdev)
{
	struct request_queue *q = skdev->queue;
	struct request *req;

	for (;;) {
		req = blk_peek_request(q);
		if (req == NULL)
			break;
		WARN_ON_ONCE(blk_queue_start_tag(q, req));
		__blk_end_request_all(req, BLK_STS_IOERR);
	}
}
static void
skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
		int data_dir, unsigned lba,
		unsigned count)
{
	if (data_dir == READ)
		scsi_req->cdb[0] = READ_10;
	else
		scsi_req->cdb[0] = WRITE_10;

	scsi_req->cdb[1] = 0;
	scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
	scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
	scsi_req->cdb[4] = (lba & 0xff00) >> 8;
	scsi_req->cdb[5] = (lba & 0xff);
	scsi_req->cdb[6] = 0;
	scsi_req->cdb[7] = (count & 0xff00) >> 8;
	scsi_req->cdb[8] = count & 0xff;
	scsi_req->cdb[9] = 0;
}
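
/*
 * For example, lba = 0x00123456 and count = 8 produce the READ(10) CDB
 * 28 00 00 12 34 56 00 00 08 00: opcode, flags, big-endian LBA in bytes
 * 2-5, group/reserved, big-endian transfer length in bytes 7-8, control.
 */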
static void
skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
			    struct skd_request_context *skreq)
{
	skreq->flush_cmd = 1;

	scsi_req->cdb[0] = SYNCHRONIZE_CACHE;
	scsi_req->cdb[1] = 0;
	scsi_req->cdb[2] = 0;
	scsi_req->cdb[3] = 0;
	scsi_req->cdb[4] = 0;
	scsi_req->cdb[5] = 0;
	scsi_req->cdb[6] = 0;
	scsi_req->cdb[7] = 0;
	scsi_req->cdb[8] = 0;
	scsi_req->cdb[9] = 0;
}
/*
 * Return true if and only if all pending requests should be failed.
 */
static bool skd_fail_all(struct request_queue *q)
{
	struct skd_device *skdev = q->queuedata;

	SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);

	skd_log_skdev(skdev, "req_not_online");
	switch (skdev->state) {
	case SKD_DRVR_STATE_PAUSING:
	case SKD_DRVR_STATE_PAUSED:
	case SKD_DRVR_STATE_STARTING:
	case SKD_DRVR_STATE_RESTARTING:
	case SKD_DRVR_STATE_WAIT_BOOT:
	/* In case of starting, we haven't started the queue,
	 * so we can't get here... but requests are
	 * possibly hanging out waiting for us because we
	 * reported the dev/skd0 already. They'll wait
	 * forever if connect doesn't complete.
	 * What to do??? delay dev/skd0 ??
	 */
	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
	case SKD_DRVR_STATE_BUSY_ERASE:
	case SKD_DRVR_STATE_DRAINING_TIMEOUT:
		return false;

	case SKD_DRVR_STATE_BUSY_SANITIZE:
	case SKD_DRVR_STATE_STOPPING:
	case SKD_DRVR_STATE_SYNCING:
	case SKD_DRVR_STATE_FAULT:
	case SKD_DRVR_STATE_DISAPPEARED:
	default:
		return true;
	}
}
static void skd_request_fn(struct request_queue *q)
{
	struct skd_device *skdev = q->queuedata;
	struct skd_fitmsg_context *skmsg = NULL;
	struct fit_msg_hdr *fmh = NULL;
	struct skd_request_context *skreq;
	struct request *req = NULL;
	struct skd_scsi_request *scsi_req;
	unsigned long io_flags;
	u32 lba;
	u32 count;
	int data_dir;
	__be64 be_dmaa;
	u64 cmdctxt;
	u32 timo_slot;
	int flush, fua;
	u32 tag;

	if (skdev->state != SKD_DRVR_STATE_ONLINE) {
		if (skd_fail_all(q))
			skd_fail_all_pending(skdev);
		return;
	}

	if (blk_queue_stopped(skdev->queue)) {
		if (skdev->in_flight >= skdev->queue_low_water_mark)
			/* There is still some kind of shortage */
			return;

		queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
	}

	/*
	 * Stop conditions:
	 *  - There are no more native requests
	 *  - There are already the maximum number of requests in progress
	 *  - There are no more skd_request_context entries
	 *  - There are no more FIT msg buffers
	 */
	for (;;) {
		flush = fua = 0;

		req = blk_peek_request(q);

		/* Are there any native requests to start? */
		if (req == NULL)
			break;

		lba = (u32)blk_rq_pos(req);
		count = blk_rq_sectors(req);
		data_dir = rq_data_dir(req);
		io_flags = req->cmd_flags;

		if (req_op(req) == REQ_OP_FLUSH)
			flush++;

		if (io_flags & REQ_FUA)
			fua++;

		dev_dbg(&skdev->pdev->dev,
			"new req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
			req, lba, lba, count, count, data_dir);

		/* At this point we know there is a request */

		/* Are too many requests already in progress? */
		if (skdev->in_flight >= skdev->cur_max_queue_depth) {
			dev_dbg(&skdev->pdev->dev, "qdepth %d, limit %d\n",
				skdev->in_flight, skdev->cur_max_queue_depth);
			break;
		}

		/*
		 * OK to now dequeue request from q.
		 *
		 * At this point we are committed to either start or reject
		 * the native request. Note that skd_request_context is
		 * available but is still at the head of the free list.
		 */
		WARN_ON_ONCE(blk_queue_start_tag(q, req));
		tag = blk_mq_unique_tag(req);
		WARN_ONCE(tag >= skd_max_queue_depth,
			  "%#x > %#x (nr_requests = %lu)\n", tag,
			  skd_max_queue_depth, q->nr_requests);

		skreq = &skdev->skreq_table[tag];
		SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
		SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);

		skreq->id = tag + SKD_ID_RW_REQUEST;
		skreq->flush_cmd = 0;
		skreq->n_sg = 0;
		skreq->sg_byte_count = 0;

		skreq->req = req;
		skreq->fitmsg_id = 0;

		skreq->data_dir = data_dir == READ ? DMA_FROM_DEVICE :
			DMA_TO_DEVICE;

		if (req->bio && !skd_preop_sg_list(skdev, skreq)) {
			dev_dbg(&skdev->pdev->dev, "error Out\n");
			skd_end_request(skdev, skreq->req, BLK_STS_RESOURCE);
			continue;
		}

		/* Either a FIT msg is in progress or we have to start one. */
		if (skmsg == NULL) {
			skmsg = &skdev->skmsg_table[tag];

			/* Initialize the FIT msg header */
			fmh = &skmsg->msg_buf->fmh;
			memset(fmh, 0, sizeof(*fmh));
			fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
			skmsg->length = sizeof(*fmh);
		}

		skreq->fitmsg_id = skmsg->id;

		scsi_req =
			&skmsg->msg_buf->scsi[fmh->num_protocol_cmds_coalesced];
		memset(scsi_req, 0, sizeof(*scsi_req));

		be_dmaa = cpu_to_be64(skreq->sksg_dma_address);
		cmdctxt = skreq->id + SKD_ID_INCR;

		scsi_req->hdr.tag = cmdctxt;
		scsi_req->hdr.sg_list_dma_address = be_dmaa;

		if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
			skd_prep_zerosize_flush_cdb(scsi_req, skreq);
			SKD_ASSERT(skreq->flush_cmd == 1);
		} else {
			skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
		}

		if (fua)
			scsi_req->cdb[1] |= SKD_FUA_NV;

		scsi_req->hdr.sg_list_len_bytes =
			cpu_to_be32(skreq->sg_byte_count);

		/* Complete resource allocations. */
		skreq->state = SKD_REQ_STATE_BUSY;
		skreq->id += SKD_ID_INCR;
		skmsg->length += sizeof(struct skd_scsi_request);
		fmh->num_protocol_cmds_coalesced++;

		/*
		 * Update the active request counts.
		 * Capture the timeout timestamp.
		 */
		skreq->timeout_stamp = skdev->timeout_stamp;
		timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
		skdev->timeout_slot[timo_slot]++;
		skdev->in_flight++;
		dev_dbg(&skdev->pdev->dev, "req=0x%x busy=%d\n", skreq->id,
			skdev->in_flight);

		/*
		 * If the FIT msg buffer is full send it.
		 */
		if (fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
			skd_send_fitmsg(skdev, skmsg);
			skmsg = NULL;
			fmh = NULL;
		}
	}

	/* If the FIT msg buffer is not empty send what we got. */
	if (skmsg) {
		WARN_ON_ONCE(!fmh->num_protocol_cmds_coalesced);
		skd_send_fitmsg(skdev, skmsg);
		skmsg = NULL;
		fmh = NULL;
	}

	/*
	 * If req is non-NULL it means there is something to do but
	 * we are out of a resource.
	 */
	if (req)
		blk_stop_queue(skdev->queue);
}
static void skd_end_request(struct skd_device *skdev, struct request *req,
			    blk_status_t error)
{
	if (unlikely(error)) {
		char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
		u32 lba = (u32)blk_rq_pos(req);
		u32 count = blk_rq_sectors(req);

		dev_err(&skdev->pdev->dev,
			"Error cmd=%s sect=%u count=%u id=0x%x\n", cmd, lba,
			count, req->tag);
	} else
		dev_dbg(&skdev->pdev->dev, "id=0x%x error=%d\n", req->tag,
			error);

	__blk_end_request_all(req, error);
}
static bool skd_preop_sg_list(struct skd_device *skdev,
			      struct skd_request_context *skreq)
{
	struct request *req = skreq->req;
	struct scatterlist *sgl = &skreq->sg[0], *sg;
	int n_sg;
	int i;

	skreq->sg_byte_count = 0;

	WARN_ON_ONCE(skreq->data_dir != DMA_TO_DEVICE &&
		     skreq->data_dir != DMA_FROM_DEVICE);

	n_sg = blk_rq_map_sg(skdev->queue, req, sgl);
	if (n_sg <= 0)
		return false;

	/*
	 * Map scatterlist to PCI bus addresses.
	 * Note PCI might change the number of entries.
	 */
	n_sg = pci_map_sg(skdev->pdev, sgl, n_sg, skreq->data_dir);
	if (n_sg <= 0)
		return false;

	SKD_ASSERT(n_sg <= skdev->sgs_per_request);

	skreq->n_sg = n_sg;

	for_each_sg(sgl, sg, n_sg, i) {
		struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
		u32 cnt = sg_dma_len(sg);
		uint64_t dma_addr = sg_dma_address(sg);

		sgd->control = FIT_SGD_CONTROL_NOT_LAST;
		sgd->byte_count = cnt;
		skreq->sg_byte_count += cnt;
		sgd->host_side_addr = dma_addr;
		sgd->dev_side_addr = 0;
	}

	skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
	skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;

	if (unlikely(skdev->dbg_level > 1)) {
		dev_dbg(&skdev->pdev->dev,
			"skreq=%x sksg_list=%p sksg_dma=%llx\n",
			skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
		for (i = 0; i < n_sg; i++) {
			struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];

			dev_dbg(&skdev->pdev->dev,
				"  sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
				i, sgd->byte_count, sgd->control,
				sgd->host_side_addr, sgd->next_desc_ptr);
		}
	}

	return true;
}
static void skd_postop_sg_list(struct skd_device *skdev,
			       struct skd_request_context *skreq)
{
	/*
	 * restore the next ptr for next IO request so we
	 * don't have to set it every time.
	 */
	skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
		skreq->sksg_dma_address +
		((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
	pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, skreq->data_dir);
}

/*
 *****************************************************************************
 * TIMER
 *****************************************************************************
 */
static void skd_timer_tick_not_online(struct skd_device *skdev);

static void skd_timer_tick(ulong arg)
{
	struct skd_device *skdev = (struct skd_device *)arg;

	u32 timo_slot;
	unsigned long reqflags;
	u32 state;

	if (skdev->state == SKD_DRVR_STATE_FAULT)
		/* The driver has declared fault, and we want it to
		 * stay that way until driver is reloaded.
		 */
		return;

	spin_lock_irqsave(&skdev->lock, reqflags);

	state = SKD_READL(skdev, FIT_STATUS);
	state &= FIT_SR_DRIVE_STATE_MASK;
	if (state != skdev->drive_state)
		skd_isr_fwstate(skdev);

	if (skdev->state != SKD_DRVR_STATE_ONLINE) {
		skd_timer_tick_not_online(skdev);
		goto timer_func_out;
	}
	skdev->timeout_stamp++;
	timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
	/*
	 * All requests that happened during the previous use of
	 * this slot should be done by now. With SKD_N_TIMEOUT_SLOT
	 * slots and a one-second timer, the previous use of this slot
	 * ended one full rotation (over 3 seconds) ago.
	 */
	if (skdev->timeout_slot[timo_slot] == 0)
		goto timer_func_out;
	/* Something is overdue */
	dev_dbg(&skdev->pdev->dev, "found %d timeouts, draining busy=%d\n",
		skdev->timeout_slot[timo_slot], skdev->in_flight);
	dev_err(&skdev->pdev->dev, "Overdue IOs (%d), busy %d\n",
		skdev->timeout_slot[timo_slot], skdev->in_flight);
	skdev->timer_countdown = SKD_DRAINING_TIMO;
	skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
	skdev->timo_slot = timo_slot;
	blk_stop_queue(skdev->queue);

timer_func_out:
	mod_timer(&skdev->timer, (jiffies + HZ));

	spin_unlock_irqrestore(&skdev->lock, reqflags);
}
static void skd_timer_tick_not_online(struct skd_device *skdev)
{
	switch (skdev->state) {
	case SKD_DRVR_STATE_IDLE:
	case SKD_DRVR_STATE_LOAD:
		break;
	case SKD_DRVR_STATE_BUSY_SANITIZE:
		dev_dbg(&skdev->pdev->dev,
			"drive busy sanitize[%x], driver[%x]\n",
			skdev->drive_state, skdev->state);
		/* If we've been in sanitize for 3 seconds, we figure we're not
		 * going to get any more completions, so recover requests now
		 */
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		skd_recover_requests(skdev);
		break;

	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
	case SKD_DRVR_STATE_BUSY_ERASE:
		dev_dbg(&skdev->pdev->dev, "busy[%x], countdown=%d\n",
			skdev->state, skdev->timer_countdown);
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		dev_dbg(&skdev->pdev->dev,
			"busy[%x], timedout=%d, restarting device.",
			skdev->state, skdev->timer_countdown);
		skd_restart_device(skdev);
		break;

	case SKD_DRVR_STATE_WAIT_BOOT:
	case SKD_DRVR_STATE_STARTING:
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		/* For now, we fault the drive. Could attempt resets to
		 * recover at some point. */
		skdev->state = SKD_DRVR_STATE_FAULT;

		dev_err(&skdev->pdev->dev, "DriveFault Connect Timeout (%x)\n",
			skdev->drive_state);

		/* start the queue so we can respond with error to requests */
		/* wakeup anyone waiting for startup complete */
		blk_start_queue(skdev->queue);
		skdev->gendisk_on = -1;
		wake_up_interruptible(&skdev->waitq);
		break;

	case SKD_DRVR_STATE_ONLINE:
		/* shouldn't get here. */
		break;

	case SKD_DRVR_STATE_PAUSING:
	case SKD_DRVR_STATE_PAUSED:
		break;

	case SKD_DRVR_STATE_DRAINING_TIMEOUT:
		dev_dbg(&skdev->pdev->dev,
			"draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n",
			skdev->timo_slot, skdev->timer_countdown,
			skdev->in_flight,
			skdev->timeout_slot[skdev->timo_slot]);
		/* if the slot has cleared we can let the I/O continue */
		if (skdev->timeout_slot[skdev->timo_slot] == 0) {
			dev_dbg(&skdev->pdev->dev,
				"Slot drained, starting queue.\n");
			skdev->state = SKD_DRVR_STATE_ONLINE;
			blk_start_queue(skdev->queue);
			return;
		}
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		skd_restart_device(skdev);
		break;

	case SKD_DRVR_STATE_RESTARTING:
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		/* For now, we fault the drive. Could attempt resets to
		 * recover at some point. */
		skdev->state = SKD_DRVR_STATE_FAULT;
		dev_err(&skdev->pdev->dev,
			"DriveFault Reconnect Timeout (%x)\n",
			skdev->drive_state);

		/*
		 * Recovering does two things:
		 * 1. completes IO with error
		 * 2. reclaims dma resources
		 * When is it safe to recover requests?
		 * - if the drive state is faulted
		 * - if the state is still soft reset after our timeout
		 * - if the drive registers are dead (state = FF)
		 * If it is "unsafe", we still need to recover, so we will
		 * disable pci bus mastering and disable our interrupts.
		 */

		if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
		    (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
		    (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
			/* It never came out of soft reset. Try to
			 * recover the requests and then let them
			 * fail. This is to mitigate hung processes. */
			skd_recover_requests(skdev);
		else {
			dev_err(&skdev->pdev->dev, "Disable BusMaster (%x)\n",
				skdev->drive_state);
			pci_disable_device(skdev->pdev);
			skd_disable_interrupts(skdev);
			skd_recover_requests(skdev);
		}

		/* start the queue so we can respond with error to requests */
		/* wakeup anyone waiting for startup complete */
		blk_start_queue(skdev->queue);
		skdev->gendisk_on = -1;
		wake_up_interruptible(&skdev->waitq);
		break;

	case SKD_DRVR_STATE_RESUMING:
	case SKD_DRVR_STATE_STOPPING:
	case SKD_DRVR_STATE_SYNCING:
	case SKD_DRVR_STATE_FAULT:
	case SKD_DRVR_STATE_DISAPPEARED:
	default:
		break;
	}
}
static int skd_start_timer(struct skd_device *skdev)
{
	int rc;

	setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);

	rc = mod_timer(&skdev->timer, (jiffies + HZ));
	if (rc)
		dev_err(&skdev->pdev->dev, "failed to start timer %d\n", rc);
	return rc;
}

static void skd_kill_timer(struct skd_device *skdev)
{
	del_timer_sync(&skdev->timer);
}

/*
 *****************************************************************************
 * INTERNAL REQUESTS -- generated by driver itself
 *****************************************************************************
 */
static int skd_format_internal_skspcl(struct skd_device *skdev)
{
	struct skd_special_context *skspcl = &skdev->internal_skspcl;
	struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
	struct fit_msg_hdr *fmh;
	uint64_t dma_address;
	struct skd_scsi_request *scsi;

	fmh = &skspcl->msg_buf->fmh;
	fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
	fmh->num_protocol_cmds_coalesced = 1;

	scsi = &skspcl->msg_buf->scsi[0];
	memset(scsi, 0, sizeof(*scsi));
	dma_address = skspcl->req.sksg_dma_address;
	scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
	skspcl->req.n_sg = 1;
	sgd->control = FIT_SGD_CONTROL_LAST;
	sgd->byte_count = 0;
	sgd->host_side_addr = skspcl->db_dma_address;
	sgd->dev_side_addr = 0;
	sgd->next_desc_ptr = 0LL;

	return 1;
}
#define WR_BUF_SIZE SKD_N_INTERNAL_BYTES

static void skd_send_internal_skspcl(struct skd_device *skdev,
				     struct skd_special_context *skspcl,
				     u8 opcode)
{
	struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
	struct skd_scsi_request *scsi;
	unsigned char *buf = skspcl->data_buf;
	int i;

	if (skspcl->req.state != SKD_REQ_STATE_IDLE)
		/*
		 * A refresh is already in progress.
		 * Just wait for it to finish.
		 */
		return;

	SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0);
	skspcl->req.state = SKD_REQ_STATE_BUSY;
	skspcl->req.id += SKD_ID_INCR;

	scsi = &skspcl->msg_buf->scsi[0];
	scsi->hdr.tag = skspcl->req.id;

	memset(scsi->cdb, 0, sizeof(scsi->cdb));

	switch (opcode) {
	case TEST_UNIT_READY:
		scsi->cdb[0] = TEST_UNIT_READY;
		sgd->byte_count = 0;
		scsi->hdr.sg_list_len_bytes = 0;
		break;

	case READ_CAPACITY:
		scsi->cdb[0] = READ_CAPACITY;
		sgd->byte_count = SKD_N_READ_CAP_BYTES;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		break;

	case INQUIRY:
		scsi->cdb[0] = INQUIRY;
		scsi->cdb[1] = 0x01;	/* evpd */
		scsi->cdb[2] = 0x80;	/* serial number page */
		scsi->cdb[4] = 0x10;
		sgd->byte_count = 16;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		break;

	case SYNCHRONIZE_CACHE:
		scsi->cdb[0] = SYNCHRONIZE_CACHE;
		sgd->byte_count = 0;
		scsi->hdr.sg_list_len_bytes = 0;
		break;

	case WRITE_BUFFER:
		scsi->cdb[0] = WRITE_BUFFER;
		scsi->cdb[1] = 0x02;
		scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
		scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
		sgd->byte_count = WR_BUF_SIZE;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);

		/* fill incrementing byte pattern */
		for (i = 0; i < sgd->byte_count; i++)
			buf[i] = i & 0xFF;
		break;

	case READ_BUFFER:
		scsi->cdb[0] = READ_BUFFER;
		scsi->cdb[1] = 0x02;
		scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
		scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
		sgd->byte_count = WR_BUF_SIZE;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		memset(skspcl->data_buf, 0, sgd->byte_count);
		break;

	default:
		SKD_ASSERT("Don't know what to send");
		return;
	}
	skd_send_special_fitmsg(skdev, skspcl);
}
static void skd_refresh_device_data(struct skd_device *skdev)
{
	struct skd_special_context *skspcl = &skdev->internal_skspcl;

	skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
}

static int skd_chk_read_buf(struct skd_device *skdev,
			    struct skd_special_context *skspcl)
{
	unsigned char *buf = skspcl->data_buf;
	int i;

	/* check for incrementing byte pattern */
	for (i = 0; i < WR_BUF_SIZE; i++)
		if (buf[i] != (i & 0xFF))
			return 1;

	return 0;
}
static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
				 u8 code, u8 qual, u8 fruc)
{
	/* If the check condition is of special interest, log a message */
	if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
	    && (code == 0x04) && (qual == 0x06)) {
		dev_err(&skdev->pdev->dev,
			"*** LOST_WRITE_DATA ERROR *** key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
			key, code, qual, fruc);
	}
}
static void skd_complete_internal(struct skd_device *skdev,
				  struct fit_completion_entry_v1 *skcomp,
				  struct fit_comp_error_info *skerr,
				  struct skd_special_context *skspcl)
{
	u8 *buf = skspcl->data_buf;
	u8 status;
	int i;
	struct skd_scsi_request *scsi = &skspcl->msg_buf->scsi[0];

	lockdep_assert_held(&skdev->lock);

	SKD_ASSERT(skspcl == &skdev->internal_skspcl);

	dev_dbg(&skdev->pdev->dev, "complete internal %x\n", scsi->cdb[0]);

	skspcl->req.completion = *skcomp;
	skspcl->req.state = SKD_REQ_STATE_IDLE;
	skspcl->req.id += SKD_ID_INCR;

	status = skspcl->req.completion.status;

	skd_log_check_status(skdev, status, skerr->key, skerr->code,
			     skerr->qual, skerr->fruc);

	switch (scsi->cdb[0]) {
	case TEST_UNIT_READY:
		if (status == SAM_STAT_GOOD)
			skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
		else if ((status == SAM_STAT_CHECK_CONDITION) &&
			 (skerr->key == MEDIUM_ERROR))
			skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
		else {
			if (skdev->state == SKD_DRVR_STATE_STOPPING) {
				dev_dbg(&skdev->pdev->dev,
					"TUR failed, don't send any more, state 0x%x\n",
					skdev->state);
				return;
			}
			dev_dbg(&skdev->pdev->dev,
				"**** TUR failed, retry skerr\n");
			skd_send_internal_skspcl(skdev, skspcl,
						 TEST_UNIT_READY);
		}
		break;

	case WRITE_BUFFER:
		if (status == SAM_STAT_GOOD)
			skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
		else {
			if (skdev->state == SKD_DRVR_STATE_STOPPING) {
				dev_dbg(&skdev->pdev->dev,
					"write buffer failed, don't send any more, state 0x%x\n",
					skdev->state);
				return;
			}
			dev_dbg(&skdev->pdev->dev,
				"**** write buffer failed, retry skerr\n");
			skd_send_internal_skspcl(skdev, skspcl,
						 TEST_UNIT_READY);
		}
		break;

	case READ_BUFFER:
		if (status == SAM_STAT_GOOD) {
			if (skd_chk_read_buf(skdev, skspcl) == 0)
				skd_send_internal_skspcl(skdev, skspcl,
							 READ_CAPACITY);
			else {
				dev_err(&skdev->pdev->dev,
					"*** W/R Buffer mismatch %d ***\n",
					skdev->connect_retries);
				if (skdev->connect_retries <
				    SKD_MAX_CONNECT_RETRIES) {
					skdev->connect_retries++;
					skd_soft_reset(skdev);
				} else {
					dev_err(&skdev->pdev->dev,
						"W/R Buffer Connect Error\n");
					return;
				}
			}

		} else {
			if (skdev->state == SKD_DRVR_STATE_STOPPING) {
				dev_dbg(&skdev->pdev->dev,
					"read buffer failed, don't send any more, state 0x%x\n",
					skdev->state);
				return;
			}
			dev_dbg(&skdev->pdev->dev,
				"**** read buffer failed, retry skerr\n");
			skd_send_internal_skspcl(skdev, skspcl,
						 TEST_UNIT_READY);
		}
		break;

	case READ_CAPACITY:
		skdev->read_cap_is_valid = 0;
		if (status == SAM_STAT_GOOD) {
			skdev->read_cap_last_lba =
				(buf[0] << 24) | (buf[1] << 16) |
				(buf[2] << 8) | buf[3];
			skdev->read_cap_blocksize =
				(buf[4] << 24) | (buf[5] << 16) |
				(buf[6] << 8) | buf[7];

			dev_dbg(&skdev->pdev->dev, "last lba %d, bs %d\n",
				skdev->read_cap_last_lba,
				skdev->read_cap_blocksize);

			set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);

			skdev->read_cap_is_valid = 1;

			skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
		} else if ((status == SAM_STAT_CHECK_CONDITION) &&
			   (skerr->key == MEDIUM_ERROR)) {
			skdev->read_cap_last_lba = ~0;
			set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
			dev_dbg(&skdev->pdev->dev, "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n");
			skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
		} else {
			dev_dbg(&skdev->pdev->dev, "**** READCAP failed, retry TUR\n");
			skd_send_internal_skspcl(skdev, skspcl,
						 TEST_UNIT_READY);
		}
		break;

	case INQUIRY:
		skdev->inquiry_is_valid = 0;
		if (status == SAM_STAT_GOOD) {
			skdev->inquiry_is_valid = 1;

			for (i = 0; i < 12; i++)
				skdev->inq_serial_num[i] = buf[i + 4];
			skdev->inq_serial_num[12] = 0;
		}

		if (skd_unquiesce_dev(skdev) < 0)
			dev_dbg(&skdev->pdev->dev, "**** failed to ONLINE device\n");
		/* connection is complete */
		skdev->connect_retries = 0;
		break;

	case SYNCHRONIZE_CACHE:
		if (status == SAM_STAT_GOOD)
			skdev->sync_done = 1;
		else
			skdev->sync_done = -1;
		wake_up_interruptible(&skdev->waitq);
		break;

	default:
		SKD_ASSERT("we didn't send this");
	}
}
/*
 *****************************************************************************
 * FIT MESSAGES
 *****************************************************************************
 */

static void skd_send_fitmsg(struct skd_device *skdev,
			    struct skd_fitmsg_context *skmsg)
{
	u64 qcmd;

	dev_dbg(&skdev->pdev->dev, "dma address 0x%llx, busy=%d\n",
		skmsg->mb_dma_address, skdev->in_flight);
	dev_dbg(&skdev->pdev->dev, "msg_buf %p\n", skmsg->msg_buf);

	qcmd = skmsg->mb_dma_address;
	qcmd |= FIT_QCMD_QID_NORMAL;

	if (unlikely(skdev->dbg_level > 1)) {
		u8 *bp = (u8 *)skmsg->msg_buf;
		int i;
		for (i = 0; i < skmsg->length; i += 8) {
			dev_dbg(&skdev->pdev->dev, "msg[%2d] %8ph\n", i,
				&bp[i]);
			if (i == 0)
				i = 64 - 8;
		}
	}

	if (skmsg->length > 256)
		qcmd |= FIT_QCMD_MSGSIZE_512;
	else if (skmsg->length > 128)
		qcmd |= FIT_QCMD_MSGSIZE_256;
	else if (skmsg->length > 64)
		qcmd |= FIT_QCMD_MSGSIZE_128;
	else
		/*
		 * This makes no sense because the FIT msg header is
		 * 64 bytes. If the msg is only 64 bytes long it has
		 * no payload.
		 */
		qcmd |= FIT_QCMD_MSGSIZE_64;

	/* Make sure skd_msg_buf is written before the doorbell is triggered. */
	smp_wmb();

	SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}
static void skd_send_special_fitmsg(struct skd_device *skdev,
				    struct skd_special_context *skspcl)
{
	u64 qcmd;

	if (unlikely(skdev->dbg_level > 1)) {
		u8 *bp = (u8 *)skspcl->msg_buf;
		int i;

		for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
			dev_dbg(&skdev->pdev->dev, " spcl[%2d] %8ph\n", i,
				&bp[i]);
			if (i == 0)
				i = 64 - 8;
		}

		dev_dbg(&skdev->pdev->dev,
			"skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
			skspcl, skspcl->req.id, skspcl->req.sksg_list,
			skspcl->req.sksg_dma_address);
		for (i = 0; i < skspcl->req.n_sg; i++) {
			struct fit_sg_descriptor *sgd =
				&skspcl->req.sksg_list[i];

			dev_dbg(&skdev->pdev->dev,
				"  sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
				i, sgd->byte_count, sgd->control,
				sgd->host_side_addr, sgd->next_desc_ptr);
		}
	}

	/*
	 * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
	 * and one 64-byte SSDI command.
	 */
	qcmd = skspcl->mb_dma_address;
	qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;

	/* Make sure skd_msg_buf is written before the doorbell is triggered. */
	smp_wmb();

	SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}
/*
 *****************************************************************************
 * COMPLETION QUEUE
 *****************************************************************************
 */

static void skd_complete_other(struct skd_device *skdev,
			       struct fit_completion_entry_v1 *skcomp,
			       struct fit_comp_error_info *skerr);

struct sns_info {
	u8 type;
	u8 stat;
	u8 key;
	u8 asc;
	u8 ascq;
	u8 mask;
	enum skd_check_status_action action;
};
static struct sns_info skd_chkstat_table[] = {
	/* Good */
	{ 0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c,
	  SKD_CHECK_STATUS_REPORT_GOOD },

	/* Smart alerts */
	{ 0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E,	/* warnings */
	  SKD_CHECK_STATUS_REPORT_SMART_ALERT },
	{ 0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E,	/* thresholds */
	  SKD_CHECK_STATUS_REPORT_SMART_ALERT },
	{ 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F, /* temperature over trigger */
	  SKD_CHECK_STATUS_REPORT_SMART_ALERT },

	/* Retry (with limits) */
	{ 0x70, 0x02, 0x0B, 0, 0, 0x1C,		/* This one is for DMA ERROR */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },
	{ 0x70, 0x02, 0x06, 0x0B, 0x00, 0x1E,	/* warnings */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },
	{ 0x70, 0x02, 0x06, 0x5D, 0x00, 0x1E,	/* thresholds */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },
	{ 0x70, 0x02, 0x06, 0x80, 0x30, 0x1F,	/* backup power */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },

	/* Busy (or about to be) */
	{ 0x70, 0x02, 0x06, 0x3f, 0x01, 0x1F, /* fw changed */
	  SKD_CHECK_STATUS_BUSY_IMMINENT },
};
/*
 * Look up status and sense data to decide how to handle the error
 * from the device.
 * mask says which fields must match e.g., mask=0x18 means check
 * type and stat, ignore key, asc, ascq.
 */
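/*
 * For instance, the DMA-error entry in skd_chkstat_table above uses mask
 * 0x1C: type (0x10), stat (0x08) and key (0x04) must all match, while
 * asc (0x02) and ascq (0x01) are ignored.
 */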
static enum skd_check_status_action
skd_check_status(struct skd_device *skdev,
		 u8 cmp_status, struct fit_comp_error_info *skerr)
{
	int i;

	dev_err(&skdev->pdev->dev, "key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
		skerr->key, skerr->code, skerr->qual, skerr->fruc);

	dev_dbg(&skdev->pdev->dev,
		"stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
		skerr->type, cmp_status, skerr->key, skerr->code, skerr->qual,
		skerr->fruc);

	/* Does the info match an entry in the good category? */
	for (i = 0; i < ARRAY_SIZE(skd_chkstat_table); i++) {
		struct sns_info *sns = &skd_chkstat_table[i];

		if (sns->mask & 0x10)
			if (skerr->type != sns->type)
				continue;

		if (sns->mask & 0x08)
			if (cmp_status != sns->stat)
				continue;

		if (sns->mask & 0x04)
			if (skerr->key != sns->key)
				continue;

		if (sns->mask & 0x02)
			if (skerr->code != sns->asc)
				continue;

		if (sns->mask & 0x01)
			if (skerr->qual != sns->ascq)
				continue;

		if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
			dev_err(&skdev->pdev->dev,
				"SMART Alert: sense key/asc/ascq %02x/%02x/%02x\n",
				skerr->key, skerr->code, skerr->qual);
		}
		return sns->action;
	}

	/* No other match, so nonzero status means error,
	 * zero status means good
	 */
	if (cmp_status) {
		dev_dbg(&skdev->pdev->dev, "status check: error\n");
		return SKD_CHECK_STATUS_REPORT_ERROR;
	}

	dev_dbg(&skdev->pdev->dev, "status check good default\n");
	return SKD_CHECK_STATUS_REPORT_GOOD;
}
static void skd_resolve_req_exception(struct skd_device *skdev,
				      struct skd_request_context *skreq,
				      struct request *req)
{
	u8 cmp_status = skreq->completion.status;

	switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
	case SKD_CHECK_STATUS_REPORT_GOOD:
	case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
		skd_end_request(skdev, req, BLK_STS_OK);
		break;

	case SKD_CHECK_STATUS_BUSY_IMMINENT:
		skd_log_skreq(skdev, skreq, "retry(busy)");
		blk_requeue_request(skdev->queue, req);
		dev_info(&skdev->pdev->dev, "drive BUSY imminent\n");
		skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
		skdev->timer_countdown = SKD_TIMER_MINUTES(20);
		skd_quiesce_dev(skdev);
		break;

	case SKD_CHECK_STATUS_REQUEUE_REQUEST:
		if ((unsigned long) ++req->special < SKD_MAX_RETRIES) {
			skd_log_skreq(skdev, skreq, "retry");
			blk_requeue_request(skdev->queue, req);
			break;
		}
		/* fall through */

	case SKD_CHECK_STATUS_REPORT_ERROR:
	default:
		skd_end_request(skdev, req, BLK_STS_IOERR);
		break;
	}
}
/* assume spinlock is already held */
static void skd_release_skreq(struct skd_device *skdev,
			      struct skd_request_context *skreq)
{
	u32 timo_slot;

	/*
	 * Decrease the number of active requests.
	 * Also decrements the count in the timeout slot.
	 */
	SKD_ASSERT(skdev->in_flight > 0);
	skdev->in_flight -= 1;

	timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
	SKD_ASSERT(skdev->timeout_slot[timo_slot] > 0);
	skdev->timeout_slot[timo_slot] -= 1;

	/*
	 * Reset backpointer
	 */
	skreq->req = NULL;

	/*
	 * Reclaim the skd_request_context
	 */
	skreq->state = SKD_REQ_STATE_IDLE;
	skreq->id += SKD_ID_INCR;
}

static struct skd_request_context *skd_skreq_from_rq(struct skd_device *skdev,
						     struct request *rq)
{
	struct skd_request_context *skreq;
	int i;

	for (i = 0, skreq = skdev->skreq_table; i < skdev->num_req_context;
	     i++, skreq++)
		if (skreq->req == rq)
			return skreq;

	return NULL;
}
static int skd_isr_completion_posted(struct skd_device *skdev,
				     int limit, int *enqueued)
{
	struct fit_completion_entry_v1 *skcmp;
	struct fit_comp_error_info *skerr;
	u16 req_id;
	u32 tag;
	struct request *rq;
	struct skd_request_context *skreq;
	u16 cmp_cntxt;
	u8 cmp_status;
	u8 cmp_cycle;
	u32 cmp_bytes;
	int rc = 0;
	int processed = 0;

	lockdep_assert_held(&skdev->lock);

	for (;;) {
		SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);

		skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
		cmp_cycle = skcmp->cycle;
		cmp_cntxt = skcmp->tag;
		cmp_status = skcmp->status;
		cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);

		skerr = &skdev->skerr_table[skdev->skcomp_ix];

		dev_dbg(&skdev->pdev->dev,
			"cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d busy=%d rbytes=0x%x proto=%d\n",
			skdev->skcomp_cycle, skdev->skcomp_ix, cmp_cycle,
			cmp_cntxt, cmp_status, skdev->in_flight, cmp_bytes,
			skdev->proto_ver);

		if (cmp_cycle != skdev->skcomp_cycle) {
			dev_dbg(&skdev->pdev->dev, "end of completions\n");
			break;
		}
		/*
		 * Update the completion queue head index and possibly
		 * the completion cycle count. 8-bit wrap-around.
		 */
		skdev->skcomp_ix++;
		if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
			skdev->skcomp_ix = 0;
			skdev->skcomp_cycle++;
		}

		/*
		 * The command context is a unique 32-bit ID. The low order
		 * bits help locate the request. The request is usually a
		 * r/w request (see skd_request_fn() above) or a special
		 * request.
		 */
		req_id = cmp_cntxt;
		tag = req_id & SKD_ID_SLOT_AND_TABLE_MASK;

		/* Is this other than a r/w request? */
		if (tag >= skdev->num_req_context) {
			/*
			 * This is not a completion for a r/w request.
			 */
			WARN_ON_ONCE(blk_map_queue_find_tag(skdev->queue->
							    queue_tags, tag));
			skd_complete_other(skdev, skcmp, skerr);
			continue;
		}

		rq = blk_map_queue_find_tag(skdev->queue->queue_tags, tag);
		if (WARN(!rq, "No request for tag %#x -> %#x\n", cmp_cntxt,
			 tag))
			continue;
		skreq = skd_skreq_from_rq(skdev, rq);

		/*
		 * Make sure the request ID for the slot matches.
		 */
		if (skreq->id != req_id) {
			dev_dbg(&skdev->pdev->dev,
				"mismatch comp_id=0x%x req_id=0x%x\n", req_id,
				skreq->id);
			{
				u16 new_id = cmp_cntxt;
				dev_err(&skdev->pdev->dev,
					"Completion mismatch comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
					req_id, skreq->id, new_id);

				continue;
			}
		}

		SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);

		skreq->completion = *skcmp;
		if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
			skreq->err_info = *skerr;
			skd_log_check_status(skdev, cmp_status, skerr->key,
					     skerr->code, skerr->qual,
					     skerr->fruc);
		}
		/* Release DMA resources for the request. */
		if (skreq->n_sg > 0)
			skd_postop_sg_list(skdev, skreq);

		/* Mark the FIT msg and timeout slot as free. */
		skd_release_skreq(skdev, skreq);

		/*
		 * Capture the outcome and post it back to the native request.
		 */
		if (likely(cmp_status == SAM_STAT_GOOD))
			skd_end_request(skdev, rq, BLK_STS_OK);
		else
			skd_resolve_req_exception(skdev, skreq, rq);

		/* skd_isr_comp_limit equal zero means no limit */
		if (limit) {
			if (++processed >= limit) {
				rc = 1;
				break;
			}
		}
	}

	if ((skdev->state == SKD_DRVR_STATE_PAUSING)
	    && (skdev->in_flight) == 0) {
		skdev->state = SKD_DRVR_STATE_PAUSED;
		wake_up_interruptible(&skdev->waitq);
	}

	return rc;
}

static void skd_complete_other(struct skd_device *skdev,
                               struct fit_completion_entry_v1 *skcomp,
                               struct fit_comp_error_info *skerr)
{
    u32 req_id = 0;
    u32 req_table;
    u32 req_slot;
    struct skd_special_context *skspcl;

    lockdep_assert_held(&skdev->lock);

    req_id = skcomp->tag;
    req_table = req_id & SKD_ID_TABLE_MASK;
    req_slot = req_id & SKD_ID_SLOT_MASK;

    dev_dbg(&skdev->pdev->dev, "table=0x%x id=0x%x slot=%d\n", req_table,
            req_id, req_slot);

    /*
     * Based on the request id, determine how to dispatch this completion.
     * This switch/case is finding the good cases and forwarding the
     * completion entry. Errors are reported below the switch.
     */
    switch (req_table) {
    case SKD_ID_RW_REQUEST:
        /*
         * The caller, skd_isr_completion_posted() above,
         * handles r/w requests. The only way we get here
         * is if the req_slot is out of bounds.
         */
        break;

    case SKD_ID_INTERNAL:
        if (req_slot == 0) {
            skspcl = &skdev->internal_skspcl;
            if (skspcl->req.id == req_id &&
                skspcl->req.state == SKD_REQ_STATE_BUSY) {
                skd_complete_internal(skdev,
                                      skcomp, skerr, skspcl);
                return;
            }
        }
        break;

    case SKD_ID_FIT_MSG:
        /*
         * These id's should never appear in a completion record.
         */
        break;

    default:
        /*
         * These id's should never appear anywhere.
         */
        break;
    }

    /*
     * If we get here it is a bad or stale id.
     */
}

static void skd_reset_skcomp(struct skd_device *skdev)
{
    memset(skdev->skcomp_table, 0, SKD_SKCOMP_SIZE);

    skdev->skcomp_ix = 0;
    skdev->skcomp_cycle = 1;
}
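
/*
 * Firmware writes completions tagged with the current cycle value, so after
 * a reset the zeroed table (cycle 0 everywhere) compares unequal to the
 * expected cycle of 1 and skd_isr_completion_posted() correctly sees an
 * empty queue until the first real completion is posted.
 */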

/*
 *****************************************************************************
 * INTERRUPTS
 *****************************************************************************
 */
static void skd_completion_worker(struct work_struct *work)
{
    struct skd_device *skdev =
        container_of(work, struct skd_device, completion_worker);
    unsigned long flags;
    int flush_enqueued = 0;

    spin_lock_irqsave(&skdev->lock, flags);

    /*
     * pass in limit=0, which means no limit..
     * process everything in compq
     */
    skd_isr_completion_posted(skdev, 0, &flush_enqueued);
    blk_run_queue_async(skdev->queue);

    spin_unlock_irqrestore(&skdev->lock, flags);
}

static void skd_isr_msg_from_dev(struct skd_device *skdev);

static irqreturn_t
skd_isr(int irq, void *ptr)
{
    struct skd_device *skdev = ptr;
    u32 intstat;
    u32 ack;
    int rc = 0;
    int deferred = 0;
    int flush_enqueued = 0;

    spin_lock(&skdev->lock);

    for (;;) {
        intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);

        ack = FIT_INT_DEF_MASK;
        ack &= intstat;

        dev_dbg(&skdev->pdev->dev, "intstat=0x%x ack=0x%x\n", intstat,
                ack);

        /*
         * As long as there is an int pending on device, keep
         * running loop. When none, get out, but if we've never
         * done any processing, call completion handler?
         */
        if (ack == 0) {
            /*
             * No interrupts on device, but run the completion
             * processor anyway?
             */
            if (rc == 0)
                if (likely(skdev->state
                           == SKD_DRVR_STATE_ONLINE))
                    deferred = 1;
            break;
        }

        rc = IRQ_HANDLED;

        SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);

        if (likely((skdev->state != SKD_DRVR_STATE_LOAD) &&
                   (skdev->state != SKD_DRVR_STATE_STOPPING))) {
            if (intstat & FIT_ISH_COMPLETION_POSTED) {
                /*
                 * If we have already deferred completion
                 * processing, don't bother running it again
                 */
                if (deferred == 0)
                    deferred =
                        skd_isr_completion_posted(skdev,
                                                  skd_isr_comp_limit,
                                                  &flush_enqueued);
            }

            if (intstat & FIT_ISH_FW_STATE_CHANGE) {
                skd_isr_fwstate(skdev);
                if (skdev->state == SKD_DRVR_STATE_FAULT ||
                    skdev->state ==
                    SKD_DRVR_STATE_DISAPPEARED) {
                    spin_unlock(&skdev->lock);
                    return rc;
                }
            }

            if (intstat & FIT_ISH_MSG_FROM_DEV)
                skd_isr_msg_from_dev(skdev);
        }
    }

    if (unlikely(flush_enqueued))
        blk_run_queue_async(skdev->queue);

    if (deferred)
        schedule_work(&skdev->completion_worker);
    else if (!flush_enqueued)
        blk_run_queue_async(skdev->queue);

    spin_unlock(&skdev->lock);

    return rc;
}
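
/*
 * Two-stage completion handling: the hard irq path above caps its work at
 * skd_isr_comp_limit entries per interrupt, and when that cap makes
 * skd_isr_completion_posted() return nonzero the remainder of the queue is
 * drained from process context by skd_completion_worker(), which re-runs it
 * with limit=0.
 */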

static void skd_drive_fault(struct skd_device *skdev)
{
    skdev->state = SKD_DRVR_STATE_FAULT;
    dev_err(&skdev->pdev->dev, "Drive FAULT\n");
}

static void skd_drive_disappeared(struct skd_device *skdev)
{
    skdev->state = SKD_DRVR_STATE_DISAPPEARED;
    dev_err(&skdev->pdev->dev, "Drive DISAPPEARED\n");
}

static void skd_isr_fwstate(struct skd_device *skdev)
{
    u32 sense;
    u32 state;
    u32 mtd;
    int prev_driver_state = skdev->state;

    sense = SKD_READL(skdev, FIT_STATUS);
    state = sense & FIT_SR_DRIVE_STATE_MASK;

    dev_err(&skdev->pdev->dev, "s1120 state %s(%d)=>%s(%d)\n",
            skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
            skd_drive_state_to_str(state), state);

    skdev->drive_state = state;

    switch (skdev->drive_state) {
    case FIT_SR_DRIVE_INIT:
        if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
            skd_disable_interrupts(skdev);
            break;
        }
        if (skdev->state == SKD_DRVR_STATE_RESTARTING)
            skd_recover_requests(skdev);
        if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
            skdev->timer_countdown = SKD_STARTING_TIMO;
            skdev->state = SKD_DRVR_STATE_STARTING;
            skd_soft_reset(skdev);
            break;
        }
        mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
        SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
        skdev->last_mtd = mtd;
        break;

    case FIT_SR_DRIVE_ONLINE:
        skdev->cur_max_queue_depth = skd_max_queue_depth;
        if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth)
            skdev->cur_max_queue_depth = skdev->dev_max_queue_depth;

        skdev->queue_low_water_mark =
            skdev->cur_max_queue_depth * 2 / 3 + 1;
        if (skdev->queue_low_water_mark < 1)
            skdev->queue_low_water_mark = 1;

        dev_info(&skdev->pdev->dev,
                 "Queue depth limit=%d dev=%d lowat=%d\n",
                 skdev->cur_max_queue_depth,
                 skdev->dev_max_queue_depth,
                 skdev->queue_low_water_mark);

        skd_refresh_device_data(skdev);
        break;

    case FIT_SR_DRIVE_BUSY:
        skdev->state = SKD_DRVR_STATE_BUSY;
        skdev->timer_countdown = SKD_BUSY_TIMO;
        skd_quiesce_dev(skdev);
        break;

    case FIT_SR_DRIVE_BUSY_SANITIZE:
        /*
         * set timer for 3 seconds, we'll abort any unfinished
         * commands after that expires
         */
        skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
        skdev->timer_countdown = SKD_TIMER_SECONDS(3);
        blk_start_queue(skdev->queue);
        break;

    case FIT_SR_DRIVE_BUSY_ERASE:
        skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
        skdev->timer_countdown = SKD_BUSY_TIMO;
        break;

    case FIT_SR_DRIVE_OFFLINE:
        skdev->state = SKD_DRVR_STATE_IDLE;
        break;

    case FIT_SR_DRIVE_SOFT_RESET:
        switch (skdev->state) {
        case SKD_DRVR_STATE_STARTING:
        case SKD_DRVR_STATE_RESTARTING:
            /* Expected by a caller of skd_soft_reset() */
            break;
        default:
            skdev->state = SKD_DRVR_STATE_RESTARTING;
            break;
        }
        break;

    case FIT_SR_DRIVE_FW_BOOTING:
        dev_dbg(&skdev->pdev->dev, "ISR FIT_SR_DRIVE_FW_BOOTING\n");
        skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
        skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
        break;

    case FIT_SR_DRIVE_DEGRADED:
    case FIT_SR_PCIE_LINK_DOWN:
    case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
        break;

    case FIT_SR_DRIVE_FAULT:
        skd_drive_fault(skdev);
        skd_recover_requests(skdev);
        blk_start_queue(skdev->queue);
        break;

    /* PCIe bus returned all Fs? */
    case 0xFF:
        dev_info(&skdev->pdev->dev, "state=0x%x sense=0x%x\n", state,
                 sense);
        skd_drive_disappeared(skdev);
        skd_recover_requests(skdev);
        blk_start_queue(skdev->queue);
        break;

    default:
        /*
         * Unknown FW State. Wait for a state we recognize.
         */
        break;
    }

    dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
            skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
            skd_skdev_state_to_str(skdev->state), skdev->state);
}

static void skd_recover_requests(struct skd_device *skdev)
{
    int i;

    for (i = 0; i < skdev->num_req_context; i++) {
        struct skd_request_context *skreq = &skdev->skreq_table[i];
        struct request *req = skreq->req;

        if (skreq->state == SKD_REQ_STATE_BUSY) {
            skd_log_skreq(skdev, skreq, "recover");

            SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0);
            SKD_ASSERT(req != NULL);

            /* Release DMA resources for the request. */
            if (skreq->n_sg > 0)
                skd_postop_sg_list(skdev, skreq);

            skreq->req = NULL;
            skreq->state = SKD_REQ_STATE_IDLE;
            skreq->id += SKD_ID_INCR;

            skd_end_request(skdev, req, BLK_STS_IOERR);
        }
    }

    for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++)
        skdev->timeout_slot[i] = 0;

    skdev->in_flight = 0;
}
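
/*
 * Note the SKD_ID_INCR bump above: it changes the uniquifier bits of the
 * slot's id, so if the device later posts a completion for the aborted
 * incarnation it fails the skreq->id != req_id check in
 * skd_isr_completion_posted() instead of matching a recycled request.
 */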

static void skd_isr_msg_from_dev(struct skd_device *skdev)
{
    u32 mfd;
    u32 mtd;
    u32 data;

    mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);

    dev_dbg(&skdev->pdev->dev, "mfd=0x%x last_mtd=0x%x\n", mfd,
            skdev->last_mtd);

    /* ignore any mtd that is an ack for something we didn't send */
    if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
        return;

    switch (FIT_MXD_TYPE(mfd)) {
    case FIT_MTD_FITFW_INIT:
        skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);

        if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
            dev_err(&skdev->pdev->dev, "protocol mismatch\n");
            dev_err(&skdev->pdev->dev, "got=%d support=%d\n",
                    skdev->proto_ver, FIT_PROTOCOL_VERSION_1);
            dev_err(&skdev->pdev->dev, "please upgrade driver\n");
            skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
            skd_soft_reset(skdev);
            break;
        }
        mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
        SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
        skdev->last_mtd = mtd;
        break;

    case FIT_MTD_GET_CMDQ_DEPTH:
        skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd);
        mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
                           SKD_N_COMPLETION_ENTRY);
        SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
        skdev->last_mtd = mtd;
        break;

    case FIT_MTD_SET_COMPQ_DEPTH:
        SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG);
        mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
        SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
        skdev->last_mtd = mtd;
        break;

    case FIT_MTD_SET_COMPQ_ADDR:
        skd_reset_skcomp(skdev);
        mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno);
        SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
        skdev->last_mtd = mtd;
        break;

    case FIT_MTD_CMD_LOG_HOST_ID:
        skdev->connect_time_stamp = get_seconds();
        data = skdev->connect_time_stamp & 0xFFFF;
        mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
        SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
        skdev->last_mtd = mtd;
        break;

    case FIT_MTD_CMD_LOG_TIME_STAMP_LO:
        skdev->drive_jiffies = FIT_MXD_DATA(mfd);
        data = (skdev->connect_time_stamp >> 16) & 0xFFFF;
        mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data);
        SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
        skdev->last_mtd = mtd;
        break;

    case FIT_MTD_CMD_LOG_TIME_STAMP_HI:
        skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16);
        mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
        SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
        skdev->last_mtd = mtd;

        dev_err(&skdev->pdev->dev, "Time sync driver=0x%x device=0x%x\n",
                skdev->connect_time_stamp, skdev->drive_jiffies);
        break;

    case FIT_MTD_ARM_QUEUE:
        skdev->last_mtd = 0;
        /*
         * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
         */
        break;

    default:
        break;
    }
}
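
/*
 * The cases above implement the driver side of the FIT init handshake, a
 * strict request/ack chain where each acked step kicks off the next:
 *
 *   FITFW_INIT -> GET_CMDQ_DEPTH -> SET_COMPQ_DEPTH -> SET_COMPQ_ADDR ->
 *   CMD_LOG_HOST_ID -> CMD_LOG_TIME_STAMP_LO -> CMD_LOG_TIME_STAMP_HI ->
 *   ARM_QUEUE
 *
 * skdev->last_mtd records the outstanding request so stale acks are ignored.
 */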

static void skd_disable_interrupts(struct skd_device *skdev)
{
    u32 sense;

    sense = SKD_READL(skdev, FIT_CONTROL);
    sense &= ~FIT_CR_ENABLE_INTERRUPTS;
    SKD_WRITEL(skdev, sense, FIT_CONTROL);

    dev_dbg(&skdev->pdev->dev, "sense 0x%x\n", sense);

    /*
     * Note that all 1s are written. A 1-bit means
     * disable, a 0 means enable.
     */
    SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
}

static void skd_enable_interrupts(struct skd_device *skdev)
{
    u32 val;

    /* unmask interrupts first */
    val = FIT_ISH_FW_STATE_CHANGE +
          FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV;

    /*
     * Note that the complement of the mask is written. A 1-bit means
     * disable, a 0 means enable.
     */
    SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);

    dev_dbg(&skdev->pdev->dev, "interrupt mask=0x%x\n", ~val);

    val = SKD_READL(skdev, FIT_CONTROL);
    val |= FIT_CR_ENABLE_INTERRUPTS;

    dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);

    SKD_WRITEL(skdev, val, FIT_CONTROL);
}
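
/*
 * FIT_INT_MASK_HOST is thus a disable mask: skd_enable_interrupts() writes
 * ~val to leave only the state-change, completion-posted and
 * message-from-device sources unmasked, while skd_disable_interrupts()
 * masks every source with ~0 and additionally drops the global
 * FIT_CR_ENABLE_INTERRUPTS gate in FIT_CONTROL.
 */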

/*
 *****************************************************************************
 * START, STOP, RESTART, QUIESCE, UNQUIESCE
 *****************************************************************************
 */
static void skd_soft_reset(struct skd_device *skdev)
{
    u32 val;

    val = SKD_READL(skdev, FIT_CONTROL);
    val |= (FIT_CR_SOFT_RESET);

    dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);

    SKD_WRITEL(skdev, val, FIT_CONTROL);
}

static void skd_start_device(struct skd_device *skdev)
{
    unsigned long flags;
    u32 sense;
    u32 state;

    spin_lock_irqsave(&skdev->lock, flags);

    /* ack all ghost interrupts */
    SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);

    sense = SKD_READL(skdev, FIT_STATUS);

    dev_dbg(&skdev->pdev->dev, "initial status=0x%x\n", sense);

    state = sense & FIT_SR_DRIVE_STATE_MASK;
    skdev->drive_state = state;
    skdev->last_mtd = 0;

    skdev->state = SKD_DRVR_STATE_STARTING;
    skdev->timer_countdown = SKD_STARTING_TIMO;

    skd_enable_interrupts(skdev);

    switch (skdev->drive_state) {
    case FIT_SR_DRIVE_OFFLINE:
        dev_err(&skdev->pdev->dev, "Drive offline...\n");
        break;

    case FIT_SR_DRIVE_FW_BOOTING:
        dev_dbg(&skdev->pdev->dev, "FIT_SR_DRIVE_FW_BOOTING\n");
        skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
        skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
        break;

    case FIT_SR_DRIVE_BUSY_SANITIZE:
        dev_info(&skdev->pdev->dev, "Start: BUSY_SANITIZE\n");
        skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
        skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
        break;

    case FIT_SR_DRIVE_BUSY_ERASE:
        dev_info(&skdev->pdev->dev, "Start: BUSY_ERASE\n");
        skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
        skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
        break;

    case FIT_SR_DRIVE_INIT:
    case FIT_SR_DRIVE_ONLINE:
        skd_soft_reset(skdev);
        break;

    case FIT_SR_DRIVE_BUSY:
        dev_err(&skdev->pdev->dev, "Drive Busy...\n");
        skdev->state = SKD_DRVR_STATE_BUSY;
        skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
        break;

    case FIT_SR_DRIVE_SOFT_RESET:
        dev_err(&skdev->pdev->dev, "drive soft reset in prog\n");
        break;

    case FIT_SR_DRIVE_FAULT:
        /*
         * Fault state is bad...soft reset won't do it...
         * Hard reset, maybe, but does it work on device?
         * For now, just fault so the system doesn't hang.
         */
        skd_drive_fault(skdev);
        /* start the queue so we can respond with error to requests */
        dev_dbg(&skdev->pdev->dev, "starting queue\n");
        blk_start_queue(skdev->queue);
        skdev->gendisk_on = -1;
        wake_up_interruptible(&skdev->waitq);
        break;

    case 0xFF:
        /*
         * Most likely the device isn't there or isn't responding
         * to the BAR1 addresses.
         */
        skd_drive_disappeared(skdev);
        /* start the queue so we can respond with error to requests */
        dev_dbg(&skdev->pdev->dev,
                "starting queue to error-out reqs\n");
        blk_start_queue(skdev->queue);
        skdev->gendisk_on = -1;
        wake_up_interruptible(&skdev->waitq);
        break;

    default:
        dev_err(&skdev->pdev->dev, "Start: unknown state %x\n",
                skdev->drive_state);
        break;
    }

    state = SKD_READL(skdev, FIT_CONTROL);
    dev_dbg(&skdev->pdev->dev, "FIT Control Status=0x%x\n", state);

    state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
    dev_dbg(&skdev->pdev->dev, "Intr Status=0x%x\n", state);

    state = SKD_READL(skdev, FIT_INT_MASK_HOST);
    dev_dbg(&skdev->pdev->dev, "Intr Mask=0x%x\n", state);

    state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
    dev_dbg(&skdev->pdev->dev, "Msg from Dev=0x%x\n", state);

    state = SKD_READL(skdev, FIT_HW_VERSION);
    dev_dbg(&skdev->pdev->dev, "HW version=0x%x\n", state);

    spin_unlock_irqrestore(&skdev->lock, flags);
}

static void skd_stop_device(struct skd_device *skdev)
{
    unsigned long flags;
    struct skd_special_context *skspcl = &skdev->internal_skspcl;
    u32 dev_state;
    int i;

    spin_lock_irqsave(&skdev->lock, flags);

    if (skdev->state != SKD_DRVR_STATE_ONLINE) {
        dev_err(&skdev->pdev->dev, "%s not online no sync\n", __func__);
        goto stop_out;
    }

    if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
        dev_err(&skdev->pdev->dev, "%s no special\n", __func__);
        goto stop_out;
    }

    skdev->state = SKD_DRVR_STATE_SYNCING;
    skdev->sync_done = 0;

    skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);

    spin_unlock_irqrestore(&skdev->lock, flags);

    wait_event_interruptible_timeout(skdev->waitq,
                                     (skdev->sync_done), (10 * HZ));

    spin_lock_irqsave(&skdev->lock, flags);

    switch (skdev->sync_done) {
    case 0:
        dev_err(&skdev->pdev->dev, "%s no sync\n", __func__);
        break;
    case 1:
        dev_err(&skdev->pdev->dev, "%s sync done\n", __func__);
        break;
    default:
        dev_err(&skdev->pdev->dev, "%s sync error\n", __func__);
    }

stop_out:
    skdev->state = SKD_DRVR_STATE_STOPPING;
    spin_unlock_irqrestore(&skdev->lock, flags);

    skd_kill_timer(skdev);

    spin_lock_irqsave(&skdev->lock, flags);
    skd_disable_interrupts(skdev);

    /* ensure all ints on device are cleared */
    /* soft reset the device to unload with a clean slate */
    SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
    SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);

    spin_unlock_irqrestore(&skdev->lock, flags);

    /* poll every 100ms, 1 second timeout */
    for (i = 0; i < 10; i++) {
        dev_state =
            SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
        if (dev_state == FIT_SR_DRIVE_INIT)
            break;
        set_current_state(TASK_INTERRUPTIBLE);
        schedule_timeout(msecs_to_jiffies(100));
    }

    if (dev_state != FIT_SR_DRIVE_INIT)
        dev_err(&skdev->pdev->dev, "%s state error 0x%02x\n", __func__,
                dev_state);
}

/* assume spinlock is held */
static void skd_restart_device(struct skd_device *skdev)
{
    u32 state;

    /* ack all ghost interrupts */
    SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);

    state = SKD_READL(skdev, FIT_STATUS);

    dev_dbg(&skdev->pdev->dev, "drive status=0x%x\n", state);

    state &= FIT_SR_DRIVE_STATE_MASK;
    skdev->drive_state = state;
    skdev->last_mtd = 0;

    skdev->state = SKD_DRVR_STATE_RESTARTING;
    skdev->timer_countdown = SKD_RESTARTING_TIMO;

    skd_soft_reset(skdev);
}

/* assume spinlock is held */
static int skd_quiesce_dev(struct skd_device *skdev)
{
    int rc = 0;

    switch (skdev->state) {
    case SKD_DRVR_STATE_BUSY:
    case SKD_DRVR_STATE_BUSY_IMMINENT:
        dev_dbg(&skdev->pdev->dev, "stopping queue\n");
        blk_stop_queue(skdev->queue);
        break;
    case SKD_DRVR_STATE_ONLINE:
    case SKD_DRVR_STATE_STOPPING:
    case SKD_DRVR_STATE_SYNCING:
    case SKD_DRVR_STATE_PAUSING:
    case SKD_DRVR_STATE_PAUSED:
    case SKD_DRVR_STATE_STARTING:
    case SKD_DRVR_STATE_RESTARTING:
    case SKD_DRVR_STATE_RESUMING:
    default:
        rc = -EINVAL;
        dev_dbg(&skdev->pdev->dev, "state [%d] not implemented\n",
                skdev->state);
    }
    return rc;
}

/* assume spinlock is held */
static int skd_unquiesce_dev(struct skd_device *skdev)
{
    int prev_driver_state = skdev->state;

    skd_log_skdev(skdev, "unquiesce");
    if (skdev->state == SKD_DRVR_STATE_ONLINE) {
        dev_dbg(&skdev->pdev->dev, "**** device already ONLINE\n");
        return 0;
    }
    if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
        /*
         * If there has been a state change to other than
         * ONLINE, we will rely on controller state change
         * to come back online and restart the queue.
         * The BUSY state means that driver is ready to
         * continue normal processing but waiting for controller
         * to become available.
         */
        skdev->state = SKD_DRVR_STATE_BUSY;
        dev_dbg(&skdev->pdev->dev, "drive BUSY state\n");
        return 0;
    }

    /*
     * Drive has just come online, driver is either in startup,
     * paused performing a task, or busy waiting for hardware.
     */
    switch (skdev->state) {
    case SKD_DRVR_STATE_PAUSED:
    case SKD_DRVR_STATE_BUSY:
    case SKD_DRVR_STATE_BUSY_IMMINENT:
    case SKD_DRVR_STATE_BUSY_ERASE:
    case SKD_DRVR_STATE_STARTING:
    case SKD_DRVR_STATE_RESTARTING:
    case SKD_DRVR_STATE_FAULT:
    case SKD_DRVR_STATE_IDLE:
    case SKD_DRVR_STATE_LOAD:
        skdev->state = SKD_DRVR_STATE_ONLINE;
        dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
                skd_skdev_state_to_str(prev_driver_state),
                prev_driver_state, skd_skdev_state_to_str(skdev->state),
                skdev->state);
        dev_dbg(&skdev->pdev->dev,
                "**** device ONLINE...starting block queue\n");
        dev_dbg(&skdev->pdev->dev, "starting queue\n");
        dev_info(&skdev->pdev->dev, "STEC s1120 ONLINE\n");
        blk_start_queue(skdev->queue);
        skdev->gendisk_on = 1;
        wake_up_interruptible(&skdev->waitq);
        break;

    case SKD_DRVR_STATE_DISAPPEARED:
    default:
        dev_dbg(&skdev->pdev->dev,
                "**** driver state %d, not implemented\n",
                skdev->state);
        return -EBUSY;
    }
    return 0;
}

/*
 *****************************************************************************
 * PCIe MSI/MSI-X INTERRUPT HANDLERS
 *****************************************************************************
 */
static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
{
    struct skd_device *skdev = skd_host_data;
    unsigned long flags;

    spin_lock_irqsave(&skdev->lock, flags);
    dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
            SKD_READL(skdev, FIT_INT_STATUS_HOST));
    dev_err(&skdev->pdev->dev, "MSIX reserved irq %d = 0x%x\n", irq,
            SKD_READL(skdev, FIT_INT_STATUS_HOST));
    SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
    spin_unlock_irqrestore(&skdev->lock, flags);
    return IRQ_HANDLED;
}

static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
{
    struct skd_device *skdev = skd_host_data;
    unsigned long flags;

    spin_lock_irqsave(&skdev->lock, flags);
    dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
            SKD_READL(skdev, FIT_INT_STATUS_HOST));
    SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
    skd_isr_fwstate(skdev);
    spin_unlock_irqrestore(&skdev->lock, flags);
    return IRQ_HANDLED;
}

static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
{
    struct skd_device *skdev = skd_host_data;
    unsigned long flags;
    int flush_enqueued = 0;
    int deferred;

    spin_lock_irqsave(&skdev->lock, flags);
    dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
            SKD_READL(skdev, FIT_INT_STATUS_HOST));
    SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
    deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
                                         &flush_enqueued);
    if (flush_enqueued)
        blk_run_queue_async(skdev->queue);

    if (deferred)
        schedule_work(&skdev->completion_worker);
    else if (!flush_enqueued)
        blk_run_queue_async(skdev->queue);

    spin_unlock_irqrestore(&skdev->lock, flags);

    return IRQ_HANDLED;
}

static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
{
    struct skd_device *skdev = skd_host_data;
    unsigned long flags;

    spin_lock_irqsave(&skdev->lock, flags);
    dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
            SKD_READL(skdev, FIT_INT_STATUS_HOST));
    SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
    skd_isr_msg_from_dev(skdev);
    spin_unlock_irqrestore(&skdev->lock, flags);
    return IRQ_HANDLED;
}

static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
{
    struct skd_device *skdev = skd_host_data;
    unsigned long flags;

    spin_lock_irqsave(&skdev->lock, flags);
    dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
            SKD_READL(skdev, FIT_INT_STATUS_HOST));
    SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
    spin_unlock_irqrestore(&skdev->lock, flags);
    return IRQ_HANDLED;
}

/*
 *****************************************************************************
 * PCIe MSI/MSI-X SETUP
 *****************************************************************************
 */

struct skd_msix_entry {
    char isr_name[30];
};

struct skd_init_msix_entry {
    const char *name;
    irq_handler_t handler;
};

#define SKD_MAX_MSIX_COUNT 13
#define SKD_MIN_MSIX_COUNT 7
#define SKD_BASE_MSIX_IRQ 4

static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
    { "(DMA 0)",        skd_reserved_isr },
    { "(DMA 1)",        skd_reserved_isr },
    { "(DMA 2)",        skd_reserved_isr },
    { "(DMA 3)",        skd_reserved_isr },
    { "(State Change)", skd_statec_isr   },
    { "(COMPL_Q)",      skd_comp_q       },
    { "(MSG)",          skd_msg_isr      },
    { "(Reserved)",     skd_reserved_isr },
    { "(Reserved)",     skd_reserved_isr },
    { "(Queue Full 0)", skd_qfull_isr    },
    { "(Queue Full 1)", skd_qfull_isr    },
    { "(Queue Full 2)", skd_qfull_isr    },
    { "(Queue Full 3)", skd_qfull_isr    },
};

static int skd_acquire_msix(struct skd_device *skdev)
{
    int i, rc;
    struct pci_dev *pdev = skdev->pdev;

    rc = pci_alloc_irq_vectors(pdev, SKD_MAX_MSIX_COUNT, SKD_MAX_MSIX_COUNT,
                               PCI_IRQ_MSIX);
    if (rc < 0) {
        dev_err(&skdev->pdev->dev, "failed to enable MSI-X %d\n", rc);
        goto out;
    }

    skdev->msix_entries = kcalloc(SKD_MAX_MSIX_COUNT,
                                  sizeof(struct skd_msix_entry), GFP_KERNEL);
    if (!skdev->msix_entries) {
        rc = -ENOMEM;
        dev_err(&skdev->pdev->dev, "msix table allocation error\n");
        goto out;
    }

    /* Enable MSI-X vectors for the base queue */
    for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
        struct skd_msix_entry *qentry = &skdev->msix_entries[i];

        snprintf(qentry->isr_name, sizeof(qentry->isr_name),
                 "%s%d-msix %s", DRV_NAME, skdev->devno,
                 msix_entries[i].name);

        rc = devm_request_irq(&skdev->pdev->dev,
                              pci_irq_vector(skdev->pdev, i),
                              msix_entries[i].handler, 0,
                              qentry->isr_name, skdev);
        if (rc) {
            dev_err(&skdev->pdev->dev,
                    "Unable to register(%d) MSI-X handler %d: %s\n",
                    rc, i, qentry->isr_name);
            goto msix_out;
        }
    }

    dev_dbg(&skdev->pdev->dev, "%d msix irq(s) enabled\n",
            SKD_MAX_MSIX_COUNT);
    return 0;

msix_out:
    while (--i >= 0)
        devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), skdev);
out:
    kfree(skdev->msix_entries);
    skdev->msix_entries = NULL;
    return rc;
}

static int skd_acquire_irq(struct skd_device *skdev)
{
    struct pci_dev *pdev = skdev->pdev;
    unsigned int irq_flag = PCI_IRQ_LEGACY;
    int rc;

    if (skd_isr_type == SKD_IRQ_MSIX) {
        rc = skd_acquire_msix(skdev);
        if (!rc)
            return 0;

        dev_err(&skdev->pdev->dev,
                "failed to enable MSI-X, re-trying with MSI %d\n", rc);
    }

    snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d", DRV_NAME,
             skdev->devno);

    if (skd_isr_type != SKD_IRQ_LEGACY)
        irq_flag |= PCI_IRQ_MSI;
    rc = pci_alloc_irq_vectors(pdev, 1, 1, irq_flag);
    if (rc < 0) {
        dev_err(&skdev->pdev->dev,
                "failed to allocate the MSI interrupt %d\n", rc);
        return rc;
    }

    rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
                          pdev->msi_enabled ? 0 : IRQF_SHARED,
                          skdev->isr_name, skdev);
    if (rc) {
        pci_free_irq_vectors(pdev);
        dev_err(&skdev->pdev->dev, "failed to allocate interrupt %d\n",
                rc);
        return rc;
    }

    return 0;
}
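
/*
 * Interrupt setup degrades gracefully: a full MSI-X vector set (one entry
 * per msix_entries[] slot, each with its own handler) is tried first, then
 * a single MSI vector, and finally a shared legacy INTx line; the last two
 * are both serviced by the multiplexing skd_isr() handler.
 */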

static void skd_release_irq(struct skd_device *skdev)
{
    struct pci_dev *pdev = skdev->pdev;

    if (skdev->msix_entries) {
        int i;

        for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
            devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i),
                          skdev);
        }

        kfree(skdev->msix_entries);
        skdev->msix_entries = NULL;
    } else {
        devm_free_irq(&pdev->dev, pdev->irq, skdev);
    }

    pci_free_irq_vectors(pdev);
}

/*
 *****************************************************************************
 * CONSTRUCT
 *****************************************************************************
 */

static int skd_cons_skcomp(struct skd_device *skdev)
{
    int rc = 0;
    struct fit_completion_entry_v1 *skcomp;

    dev_dbg(&skdev->pdev->dev,
            "comp pci_alloc, total bytes %zd entries %d\n",
            SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY);

    skcomp = pci_zalloc_consistent(skdev->pdev, SKD_SKCOMP_SIZE,
                                   &skdev->cq_dma_address);
    if (skcomp == NULL) {
        rc = -ENOMEM;
        goto err_out;
    }

    skdev->skcomp_table = skcomp;
    skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
                                                        sizeof(*skcomp) *
                                                        SKD_N_COMPLETION_ENTRY);

err_out:
    return rc;
}

static int skd_cons_skmsg(struct skd_device *skdev)
{
    int rc = 0;
    u32 i;

    dev_dbg(&skdev->pdev->dev,
            "skmsg_table kcalloc, struct %lu, count %u total %lu\n",
            sizeof(struct skd_fitmsg_context), skdev->num_fitmsg_context,
            sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);

    skdev->skmsg_table = kcalloc(skdev->num_fitmsg_context,
                                 sizeof(struct skd_fitmsg_context),
                                 GFP_KERNEL);
    if (skdev->skmsg_table == NULL) {
        rc = -ENOMEM;
        goto err_out;
    }

    for (i = 0; i < skdev->num_fitmsg_context; i++) {
        struct skd_fitmsg_context *skmsg;

        skmsg = &skdev->skmsg_table[i];

        skmsg->id = i + SKD_ID_FIT_MSG;

        skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
                                              SKD_N_FITMSG_BYTES,
                                              &skmsg->mb_dma_address);
        if (skmsg->msg_buf == NULL) {
            rc = -ENOMEM;
            goto err_out;
        }

        WARN(((uintptr_t)skmsg->msg_buf | skmsg->mb_dma_address) &
             (FIT_QCMD_ALIGN - 1),
             "not aligned: msg_buf %p mb_dma_address %#llx\n",
             skmsg->msg_buf, skmsg->mb_dma_address);
        memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
    }

err_out:
    return rc;
}

static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
                                                  u32 n_sg,
                                                  dma_addr_t *ret_dma_addr)
{
    struct fit_sg_descriptor *sg_list;
    u32 nbytes;

    nbytes = sizeof(*sg_list) * n_sg;

    sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr);

    if (sg_list != NULL) {
        uint64_t dma_address = *ret_dma_addr;
        u32 i;

        memset(sg_list, 0, nbytes);

        for (i = 0; i < n_sg - 1; i++) {
            uint64_t ndp_off;
            ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);

            sg_list[i].next_desc_ptr = dma_address + ndp_off;
        }
        sg_list[i].next_desc_ptr = 0LL;
    }

    return sg_list;
}
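
/*
 * The list is pre-chained once at allocation time: each descriptor's
 * next_desc_ptr holds the bus address of its successor inside the same
 * coherent buffer and the last entry is terminated with 0, so per-request
 * setup only needs to fill in data addresses and byte counts.
 */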

static int skd_cons_skreq(struct skd_device *skdev)
{
    int rc = 0;
    u32 i;

    dev_dbg(&skdev->pdev->dev,
            "skreq_table kcalloc, struct %lu, count %u total %lu\n",
            sizeof(struct skd_request_context), skdev->num_req_context,
            sizeof(struct skd_request_context) * skdev->num_req_context);

    skdev->skreq_table = kcalloc(skdev->num_req_context,
                                 sizeof(struct skd_request_context),
                                 GFP_KERNEL);
    if (skdev->skreq_table == NULL) {
        rc = -ENOMEM;
        goto err_out;
    }

    dev_dbg(&skdev->pdev->dev, "alloc sg_table sg_per_req %u scatlist %lu total %lu\n",
            skdev->sgs_per_request, sizeof(struct scatterlist),
            skdev->sgs_per_request * sizeof(struct scatterlist));

    for (i = 0; i < skdev->num_req_context; i++) {
        struct skd_request_context *skreq;

        skreq = &skdev->skreq_table[i];

        skreq->state = SKD_REQ_STATE_IDLE;

        skreq->sg = kcalloc(skdev->sgs_per_request,
                            sizeof(struct scatterlist), GFP_KERNEL);
        if (skreq->sg == NULL) {
            rc = -ENOMEM;
            goto err_out;
        }
        sg_init_table(skreq->sg, skdev->sgs_per_request);

        skreq->sksg_list = skd_cons_sg_list(skdev,
                                            skdev->sgs_per_request,
                                            &skreq->sksg_dma_address);
        if (skreq->sksg_list == NULL) {
            rc = -ENOMEM;
            goto err_out;
        }
    }

err_out:
    return rc;
}

static int skd_cons_sksb(struct skd_device *skdev)
{
    int rc = 0;
    struct skd_special_context *skspcl;
    u32 nbytes;

    skspcl = &skdev->internal_skspcl;

    skspcl->req.id = 0 + SKD_ID_INTERNAL;
    skspcl->req.state = SKD_REQ_STATE_IDLE;

    nbytes = SKD_N_INTERNAL_BYTES;

    skspcl->data_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
                                             &skspcl->db_dma_address);
    if (skspcl->data_buf == NULL) {
        rc = -ENOMEM;
        goto err_out;
    }

    nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
    skspcl->msg_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
                                            &skspcl->mb_dma_address);
    if (skspcl->msg_buf == NULL) {
        rc = -ENOMEM;
        goto err_out;
    }

    skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
                                             &skspcl->req.sksg_dma_address);
    if (skspcl->req.sksg_list == NULL) {
        rc = -ENOMEM;
        goto err_out;
    }

    if (!skd_format_internal_skspcl(skdev)) {
        rc = -EINVAL;
        goto err_out;
    }

err_out:
    return rc;
}

static int skd_cons_disk(struct skd_device *skdev)
{
    int rc = 0;
    struct gendisk *disk;
    struct request_queue *q;
    unsigned long flags;

    disk = alloc_disk(SKD_MINORS_PER_DEVICE);
    if (!disk) {
        rc = -ENOMEM;
        goto err_out;
    }

    skdev->disk = disk;
    sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno);

    disk->major = skdev->major;
    disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE;
    disk->fops = &skd_blockdev_ops;
    disk->private_data = skdev;

    q = blk_init_queue(skd_request_fn, &skdev->lock);
    if (!q) {
        rc = -ENOMEM;
        goto err_out;
    }
    blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
    q->nr_requests = skd_max_queue_depth / 2;
    blk_queue_init_tags(q, skd_max_queue_depth, NULL, BLK_TAG_ALLOC_FIFO);

    skdev->queue = q;
    disk->queue = q;
    q->queuedata = skdev;

    blk_queue_write_cache(q, true, true);
    blk_queue_max_segments(q, skdev->sgs_per_request);
    blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);

    /* set optimal I/O size to 8KB */
    blk_queue_io_opt(q, 8192);

    queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
    queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);

    spin_lock_irqsave(&skdev->lock, flags);
    dev_dbg(&skdev->pdev->dev, "stopping queue\n");
    blk_stop_queue(skdev->queue);
    spin_unlock_irqrestore(&skdev->lock, flags);

err_out:
    return rc;
}
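
/*
 * The queue is deliberately created stopped; requests are only dispatched
 * once the drive can answer them, normally when skd_unquiesce_dev() sees
 * FIT_SR_DRIVE_ONLINE and calls blk_start_queue() (the fault paths also
 * restart it so queued requests can be errored out).
 */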

#define SKD_N_DEV_TABLE 16u
static u32 skd_next_devno;

static struct skd_device *skd_construct(struct pci_dev *pdev)
{
    struct skd_device *skdev;
    int blk_major = skd_major;
    int rc;

    skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);

    if (!skdev) {
        dev_err(&pdev->dev, "memory alloc failure\n");
        return NULL;
    }

    skdev->state = SKD_DRVR_STATE_LOAD;
    skdev->pdev = pdev;
    skdev->devno = skd_next_devno++;
    skdev->major = blk_major;
    skdev->dev_max_queue_depth = 0;

    skdev->num_req_context = skd_max_queue_depth;
    skdev->num_fitmsg_context = skd_max_queue_depth;
    skdev->cur_max_queue_depth = 1;
    skdev->queue_low_water_mark = 1;
    skdev->proto_ver = 99;
    skdev->sgs_per_request = skd_sgs_per_request;
    skdev->dbg_level = skd_dbg_level;

    spin_lock_init(&skdev->lock);

    INIT_WORK(&skdev->completion_worker, skd_completion_worker);

    dev_dbg(&skdev->pdev->dev, "skcomp\n");
    rc = skd_cons_skcomp(skdev);
    if (rc < 0)
        goto err_out;

    dev_dbg(&skdev->pdev->dev, "skmsg\n");
    rc = skd_cons_skmsg(skdev);
    if (rc < 0)
        goto err_out;

    dev_dbg(&skdev->pdev->dev, "skreq\n");
    rc = skd_cons_skreq(skdev);
    if (rc < 0)
        goto err_out;

    dev_dbg(&skdev->pdev->dev, "sksb\n");
    rc = skd_cons_sksb(skdev);
    if (rc < 0)
        goto err_out;

    dev_dbg(&skdev->pdev->dev, "disk\n");
    rc = skd_cons_disk(skdev);
    if (rc < 0)
        goto err_out;

    dev_dbg(&skdev->pdev->dev, "VICTORY\n");
    return skdev;

err_out:
    dev_dbg(&skdev->pdev->dev, "construct failed\n");
    skd_destruct(skdev);
    return NULL;
}

/*
 *****************************************************************************
 * DESTRUCT (FREE)
 *****************************************************************************
 */

static void skd_free_skcomp(struct skd_device *skdev)
{
    if (skdev->skcomp_table)
        pci_free_consistent(skdev->pdev, SKD_SKCOMP_SIZE,
                            skdev->skcomp_table, skdev->cq_dma_address);

    skdev->skcomp_table = NULL;
    skdev->cq_dma_address = 0;
}

static void skd_free_skmsg(struct skd_device *skdev)
{
    u32 i;

    if (skdev->skmsg_table == NULL)
        return;

    for (i = 0; i < skdev->num_fitmsg_context; i++) {
        struct skd_fitmsg_context *skmsg;

        skmsg = &skdev->skmsg_table[i];

        if (skmsg->msg_buf != NULL) {
            pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
                                skmsg->msg_buf,
                                skmsg->mb_dma_address);
        }
        skmsg->msg_buf = NULL;
        skmsg->mb_dma_address = 0;
    }

    kfree(skdev->skmsg_table);
    skdev->skmsg_table = NULL;
}

static void skd_free_sg_list(struct skd_device *skdev,
                             struct fit_sg_descriptor *sg_list,
                             u32 n_sg, dma_addr_t dma_addr)
{
    if (sg_list != NULL) {
        u32 nbytes;

        nbytes = sizeof(*sg_list) * n_sg;

        pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr);
    }
}

static void skd_free_skreq(struct skd_device *skdev)
{
    u32 i;

    if (skdev->skreq_table == NULL)
        return;

    for (i = 0; i < skdev->num_req_context; i++) {
        struct skd_request_context *skreq;

        skreq = &skdev->skreq_table[i];

        skd_free_sg_list(skdev, skreq->sksg_list,
                         skdev->sgs_per_request,
                         skreq->sksg_dma_address);

        skreq->sksg_list = NULL;
        skreq->sksg_dma_address = 0;

        kfree(skreq->sg);
    }

    kfree(skdev->skreq_table);
    skdev->skreq_table = NULL;
}

static void skd_free_sksb(struct skd_device *skdev)
{
    struct skd_special_context *skspcl;
    u32 nbytes;

    skspcl = &skdev->internal_skspcl;

    if (skspcl->data_buf != NULL) {
        nbytes = SKD_N_INTERNAL_BYTES;

        pci_free_consistent(skdev->pdev, nbytes,
                            skspcl->data_buf, skspcl->db_dma_address);
    }

    skspcl->data_buf = NULL;
    skspcl->db_dma_address = 0;

    if (skspcl->msg_buf != NULL) {
        nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
        pci_free_consistent(skdev->pdev, nbytes,
                            skspcl->msg_buf, skspcl->mb_dma_address);
    }

    skspcl->msg_buf = NULL;
    skspcl->mb_dma_address = 0;

    skd_free_sg_list(skdev, skspcl->req.sksg_list, 1,
                     skspcl->req.sksg_dma_address);

    skspcl->req.sksg_list = NULL;
    skspcl->req.sksg_dma_address = 0;
}

static void skd_free_disk(struct skd_device *skdev)
{
    struct gendisk *disk = skdev->disk;

    if (disk && (disk->flags & GENHD_FL_UP))
        del_gendisk(disk);

    if (skdev->queue) {
        blk_cleanup_queue(skdev->queue);
        skdev->queue = NULL;
        disk->queue = NULL;
    }

    put_disk(disk);
    skdev->disk = NULL;
}

static void skd_destruct(struct skd_device *skdev)
{
    if (skdev == NULL)
        return;

    dev_dbg(&skdev->pdev->dev, "disk\n");
    skd_free_disk(skdev);

    dev_dbg(&skdev->pdev->dev, "sksb\n");
    skd_free_sksb(skdev);

    dev_dbg(&skdev->pdev->dev, "skreq\n");
    skd_free_skreq(skdev);

    dev_dbg(&skdev->pdev->dev, "skmsg\n");
    skd_free_skmsg(skdev);

    dev_dbg(&skdev->pdev->dev, "skcomp\n");
    skd_free_skcomp(skdev);

    dev_dbg(&skdev->pdev->dev, "skdev\n");
    kfree(skdev);
}

/*
 *****************************************************************************
 * BLOCK DEVICE (BDEV) GLUE
 *****************************************************************************
 */

static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
    struct skd_device *skdev;
    u64 capacity;

    skdev = bdev->bd_disk->private_data;

    dev_dbg(&skdev->pdev->dev, "%s: CMD[%s] getgeo device\n",
            bdev->bd_disk->disk_name, current->comm);

    if (skdev->read_cap_is_valid) {
        capacity = get_capacity(skdev->disk);
        geo->heads = 64;
        geo->sectors = 255;
        geo->cylinders = (capacity) / (255 * 64);
        return 0;
    }
    return -EIO;
}

static int skd_bdev_attach(struct device *parent, struct skd_device *skdev)
{
    dev_dbg(&skdev->pdev->dev, "add_disk\n");
    device_add_disk(parent, skdev->disk);
    return 0;
}

static const struct block_device_operations skd_blockdev_ops = {
    .owner   = THIS_MODULE,
    .getgeo  = skd_bdev_getgeo,
};

/*
 *****************************************************************************
 * PCIe DRIVER GLUE
 *****************************************************************************
 */

static const struct pci_device_id skd_pci_tbl[] = {
    { PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120,
      PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
    { 0 }                     /* terminate list */
};

MODULE_DEVICE_TABLE(pci, skd_pci_tbl);

static char *skd_pci_info(struct skd_device *skdev, char *str)
{
    int pcie_reg;

    strcpy(str, "PCIe (");
    pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);

    if (pcie_reg) {
        char lwstr[6];
        uint16_t pcie_lstat, lspeed, lwidth;

        pcie_reg += 0x12;
        pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
        lspeed = pcie_lstat & (0xF);
        lwidth = (pcie_lstat & 0x3F0) >> 4;

        if (lspeed == 1)
            strcat(str, "2.5GT/s ");
        else if (lspeed == 2)
            strcat(str, "5.0GT/s ");
        else
            strcat(str, "<unknown> ");
        snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
        strcat(str, lwstr);
    }
    return str;
}
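
/*
 * The 0x12 added above is the offset of the Link Status register within the
 * PCI Express capability: bits 3:0 encode the link speed (1 = 2.5GT/s,
 * 2 = 5.0GT/s) and bits 9:4 the negotiated width, yielding strings such as
 * "PCIe (5.0GT/s 4X)".
 */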

static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
    int i;
    int rc = 0;
    char pci_str[32];
    struct skd_device *skdev;

    dev_info(&pdev->dev, "STEC s1120 Driver(%s) version %s-b%s\n",
             DRV_NAME, DRV_VERSION, DRV_BUILD_ID);
    dev_info(&pdev->dev, "vendor=%04X device=%04x\n", pdev->vendor,
             pdev->device);

    rc = pci_enable_device(pdev);
    if (rc)
        return rc;
    rc = pci_request_regions(pdev, DRV_NAME);
    if (rc)
        goto err_out;
    rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
    if (!rc) {
        if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
            dev_err(&pdev->dev, "consistent DMA mask error %d\n",
                    rc);
        }
    } else {
        rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
        if (rc) {
            dev_err(&pdev->dev, "DMA mask error %d\n", rc);
            goto err_out_regions;
        }
    }

    if (!skd_major) {
        rc = register_blkdev(0, DRV_NAME);
        if (rc < 0)
            goto err_out_regions;
        BUG_ON(!rc);
        skd_major = rc;
    }

    skdev = skd_construct(pdev);
    if (skdev == NULL) {
        rc = -ENOMEM;
        goto err_out_regions;
    }

    skd_pci_info(skdev, pci_str);
    dev_info(&pdev->dev, "%s 64bit\n", pci_str);

    pci_set_master(pdev);
    rc = pci_enable_pcie_error_reporting(pdev);
    if (rc) {
        dev_err(&pdev->dev,
                "bad enable of PCIe error reporting rc=%d\n", rc);
        skdev->pcie_error_reporting_is_enabled = 0;
    } else
        skdev->pcie_error_reporting_is_enabled = 1;

    pci_set_drvdata(pdev, skdev);

    for (i = 0; i < SKD_MAX_BARS; i++) {
        skdev->mem_phys[i] = pci_resource_start(pdev, i);
        skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
        skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
                                    skdev->mem_size[i]);
        if (!skdev->mem_map[i]) {
            dev_err(&pdev->dev,
                    "Unable to map adapter memory!\n");
            rc = -ENODEV;
            goto err_out_iounmap;
        }
        dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n",
                skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
                skdev->mem_size[i]);
    }

    rc = skd_acquire_irq(skdev);
    if (rc) {
        dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
        goto err_out_iounmap;
    }

    rc = skd_start_timer(skdev);
    if (rc)
        goto err_out_timer;

    init_waitqueue_head(&skdev->waitq);

    skd_start_device(skdev);

    rc = wait_event_interruptible_timeout(skdev->waitq,
                                          (skdev->gendisk_on),
                                          (SKD_START_WAIT_SECONDS * HZ));
    if (skdev->gendisk_on > 0) {
        /* device came on-line after reset */
        skd_bdev_attach(&pdev->dev, skdev);
        rc = 0;
    } else {
        /*
         * we timed out, something is wrong with the device,
         * don't add the disk structure
         */
        dev_err(&pdev->dev, "error: waiting for s1120 timed out %d!\n",
                rc);
        /* in case of no error; we timeout with ENXIO */
        if (!rc)
            rc = -ENXIO;
        goto err_out_timer;
    }

    return rc;

err_out_timer:
    skd_stop_device(skdev);
    skd_release_irq(skdev);

err_out_iounmap:
    for (i = 0; i < SKD_MAX_BARS; i++)
        if (skdev->mem_map[i])
            iounmap(skdev->mem_map[i]);

    if (skdev->pcie_error_reporting_is_enabled)
        pci_disable_pcie_error_reporting(pdev);

    skd_destruct(skdev);

err_out_regions:
    pci_release_regions(pdev);

err_out:
    pci_disable_device(pdev);
    pci_set_drvdata(pdev, NULL);
    return rc;
}

static void skd_pci_remove(struct pci_dev *pdev)
{
    int i;
    struct skd_device *skdev;

    skdev = pci_get_drvdata(pdev);
    if (!skdev) {
        dev_err(&pdev->dev, "no device data for PCI\n");
        return;
    }
    skd_stop_device(skdev);
    skd_release_irq(skdev);

    for (i = 0; i < SKD_MAX_BARS; i++)
        if (skdev->mem_map[i])
            iounmap(skdev->mem_map[i]);

    if (skdev->pcie_error_reporting_is_enabled)
        pci_disable_pcie_error_reporting(pdev);

    skd_destruct(skdev);

    pci_release_regions(pdev);
    pci_disable_device(pdev);
    pci_set_drvdata(pdev, NULL);
}
static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int i;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		dev_err(&pdev->dev, "no device data for PCI\n");
		return -EIO;
	}

	skd_stop_device(skdev);
	skd_release_irq(skdev);

	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
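
/*
 * skd_pci_resume - reverse of skd_pci_suspend(): restore PCI state,
 * re-enable the device, redo the DMA mask, BAR mapping and IRQ setup,
 * and restart the device.
 */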
static int skd_pci_resume(struct pci_dev *pdev)
{
	int i;
	int rc = 0;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		dev_err(&pdev->dev, "no device data for PCI\n");
		return -1;
	}

	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;
	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out;
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!rc) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
			dev_err(&pdev->dev, "consistent DMA mask error\n");
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(&pdev->dev, "DMA mask error %d\n", rc);
			goto err_out_regions;
		}
	}

	pci_set_master(pdev);
	rc = pci_enable_pcie_error_reporting(pdev);
	if (rc) {
		dev_err(&pdev->dev,
			"bad enable of PCIe error reporting rc=%d\n", rc);
		skdev->pcie_error_reporting_is_enabled = 0;
	} else
		skdev->pcie_error_reporting_is_enabled = 1;

	for (i = 0; i < SKD_MAX_BARS; i++) {
		skdev->mem_phys[i] = pci_resource_start(pdev, i);
		skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
		skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
					    skdev->mem_size[i]);
		if (!skdev->mem_map[i]) {
			dev_err(&pdev->dev, "Unable to map adapter memory!\n");
			rc = -ENODEV;
			goto err_out_iounmap;
		}
		dev_dbg(&pdev->dev, "mem_map=%p, phys=%016llx, size=%d\n",
			skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
			skdev->mem_size[i]);
	}
	rc = skd_acquire_irq(skdev);
	if (rc) {
		dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
		goto err_out_iounmap;
	}

	rc = skd_start_timer(skdev);
	if (rc)
		goto err_out_timer;

	init_waitqueue_head(&skdev->waitq);

	skd_start_device(skdev);

	return rc;

err_out_timer:
	skd_stop_device(skdev);
	skd_release_irq(skdev);

err_out_iounmap:
	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

err_out_regions:
	pci_release_regions(pdev);

err_out:
	pci_disable_device(pdev);
	return rc;
}
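
/*
 * skd_pci_shutdown - called on system shutdown/reboot; just stop the
 * device so it is quiescent when the machine goes down.
 */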
static void skd_pci_shutdown(struct pci_dev *pdev)
{
	struct skd_device *skdev;

	dev_err(&pdev->dev, "%s called\n", __func__);

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		dev_err(&pdev->dev, "no device data for PCI\n");
		return;
	}

	dev_err(&pdev->dev, "calling stop\n");
	skd_stop_device(skdev);
}
static struct pci_driver skd_driver = {
	.name		= DRV_NAME,
	.id_table	= skd_pci_tbl,
	.probe		= skd_pci_probe,
	.remove		= skd_pci_remove,
	.suspend	= skd_pci_suspend,
	.resume		= skd_pci_resume,
	.shutdown	= skd_pci_shutdown,
};
/*
 *****************************************************************************
 * LOGGING SUPPORT
 *****************************************************************************
 */
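
/*
 * skd_drive_state_to_str - map a FIT drive state code to a short
 * human-readable name for log and debug output.
 */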
const char *skd_drive_state_to_str(int state)
{
	switch (state) {
	case FIT_SR_DRIVE_OFFLINE:
		return "OFFLINE";
	case FIT_SR_DRIVE_INIT:
		return "INIT";
	case FIT_SR_DRIVE_ONLINE:
		return "ONLINE";
	case FIT_SR_DRIVE_BUSY:
		return "BUSY";
	case FIT_SR_DRIVE_FAULT:
		return "FAULT";
	case FIT_SR_DRIVE_DEGRADED:
		return "DEGRADED";
	case FIT_SR_PCIE_LINK_DOWN:
		return "LINK_DOWN";
	case FIT_SR_DRIVE_SOFT_RESET:
		return "SOFT_RESET";
	case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
		return "NEED_FW";
	case FIT_SR_DRIVE_INIT_FAULT:
		return "INIT_FAULT";
	case FIT_SR_DRIVE_BUSY_SANITIZE:
		return "BUSY_SANITIZE";
	case FIT_SR_DRIVE_BUSY_ERASE:
		return "BUSY_ERASE";
	case FIT_SR_DRIVE_FW_BOOTING:
		return "FW_BOOTING";
	default:
		return "???";
	}
}
const char *skd_skdev_state_to_str(enum skd_drvr_state state)
{
	switch (state) {
	case SKD_DRVR_STATE_LOAD:
		return "LOAD";
	case SKD_DRVR_STATE_IDLE:
		return "IDLE";
	case SKD_DRVR_STATE_BUSY:
		return "BUSY";
	case SKD_DRVR_STATE_STARTING:
		return "STARTING";
	case SKD_DRVR_STATE_ONLINE:
		return "ONLINE";
	case SKD_DRVR_STATE_PAUSING:
		return "PAUSING";
	case SKD_DRVR_STATE_PAUSED:
		return "PAUSED";
	case SKD_DRVR_STATE_DRAINING_TIMEOUT:
		return "DRAINING_TIMEOUT";
	case SKD_DRVR_STATE_RESTARTING:
		return "RESTARTING";
	case SKD_DRVR_STATE_RESUMING:
		return "RESUMING";
	case SKD_DRVR_STATE_STOPPING:
		return "STOPPING";
	case SKD_DRVR_STATE_SYNCING:
		return "SYNCING";
	case SKD_DRVR_STATE_FAULT:
		return "FAULT";
	case SKD_DRVR_STATE_DISAPPEARED:
		return "DISAPPEARED";
	case SKD_DRVR_STATE_BUSY_ERASE:
		return "BUSY_ERASE";
	case SKD_DRVR_STATE_BUSY_SANITIZE:
		return "BUSY_SANITIZE";
	case SKD_DRVR_STATE_BUSY_IMMINENT:
		return "BUSY_IMMINENT";
	case SKD_DRVR_STATE_WAIT_BOOT:
		return "WAIT_BOOT";
	default:
		return "???";
	}
}
static const char *skd_skreq_state_to_str(enum skd_req_state state)
{
	switch (state) {
	case SKD_REQ_STATE_IDLE:
		return "IDLE";
	case SKD_REQ_STATE_SETUP:
		return "SETUP";
	case SKD_REQ_STATE_BUSY:
		return "BUSY";
	case SKD_REQ_STATE_COMPLETED:
		return "COMPLETED";
	case SKD_REQ_STATE_TIMEOUT:
		return "TIMEOUT";
	default:
		return "???";
	}
}
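
/*
 * skd_log_skdev - dump device-wide state (drive state, driver state,
 * queue-depth counters and completion-queue position) at debug level.
 */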
static void skd_log_skdev(struct skd_device *skdev, const char *event)
{
	dev_dbg(&skdev->pdev->dev, "skdev=%p event='%s'\n", skdev, event);
	dev_dbg(&skdev->pdev->dev, "drive_state=%s(%d) driver_state=%s(%d)\n",
		skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
		skd_skdev_state_to_str(skdev->state), skdev->state);
	dev_dbg(&skdev->pdev->dev, "busy=%d limit=%d dev=%d lowat=%d\n",
		skdev->in_flight, skdev->cur_max_queue_depth,
		skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
	dev_dbg(&skdev->pdev->dev, "timestamp=0x%x cycle=%d cycle_ix=%d\n",
		skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix);
}
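
/*
 * skd_log_skreq - dump the state of a single request context and, if
 * one is attached, its block-layer request (LBA, sector count,
 * direction) at debug level.
 */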
static void skd_log_skreq(struct skd_device *skdev,
			  struct skd_request_context *skreq, const char *event)
{
	dev_dbg(&skdev->pdev->dev, "skreq=%p event='%s'\n", skreq, event);
	dev_dbg(&skdev->pdev->dev, "state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
		skd_skreq_state_to_str(skreq->state), skreq->state, skreq->id,
		skreq->fitmsg_id);
	dev_dbg(&skdev->pdev->dev, "timo=0x%x sg_dir=%d n_sg=%d\n",
		skreq->timeout_stamp, skreq->data_dir, skreq->n_sg);

	if (skreq->req != NULL) {
		struct request *req = skreq->req;
		u32 lba = (u32)blk_rq_pos(req);
		u32 count = blk_rq_sectors(req);

		dev_dbg(&skdev->pdev->dev,
			"req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req,
			lba, lba, count, count, (int)rq_data_dir(req));
	} else
		dev_dbg(&skdev->pdev->dev, "req=NULL\n");
}
/*
 *****************************************************************************
 * MODULE GLUE
 *****************************************************************************
 */
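
/*
 * skd_init - module entry point. The BUILD_BUG_ON()s pin the on-the-wire
 * FIT structure sizes and the skd_msg_buf layout at compile time; the
 * rest validates the module parameters, falling back to defaults for
 * any out-of-range value, before registering the PCI driver.
 */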
static int __init skd_init(void)
{
	BUILD_BUG_ON(sizeof(struct fit_completion_entry_v1) != 8);
	BUILD_BUG_ON(sizeof(struct fit_comp_error_info) != 32);
	BUILD_BUG_ON(sizeof(struct skd_command_header) != 16);
	BUILD_BUG_ON(sizeof(struct skd_scsi_request) != 32);
	BUILD_BUG_ON(sizeof(struct driver_inquiry_data) != 44);
	BUILD_BUG_ON(offsetof(struct skd_msg_buf, fmh) != 0);
	BUILD_BUG_ON(offsetof(struct skd_msg_buf, scsi) != 64);
	BUILD_BUG_ON(sizeof(struct skd_msg_buf) != SKD_N_FITMSG_BYTES);

	pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID);

	switch (skd_isr_type) {
	case SKD_IRQ_LEGACY:
	case SKD_IRQ_MSI:
	case SKD_IRQ_MSIX:
		break;
	default:
		pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n",
		       skd_isr_type, SKD_IRQ_DEFAULT);
		skd_isr_type = SKD_IRQ_DEFAULT;
	}

	if (skd_max_queue_depth < 1 ||
	    skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
		pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n",
		       skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
		skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
	}

	if (skd_max_req_per_msg < 1 ||
	    skd_max_req_per_msg > SKD_MAX_REQ_PER_MSG) {
		pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n",
		       skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
		skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
	}

	if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
		pr_err(PFX "skd_sgs_per_request %d invalid, re-set to %d\n",
		       skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
		skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
	}

	if (skd_dbg_level < 0 || skd_dbg_level > 2) {
		pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n",
		       skd_dbg_level, 0);
		skd_dbg_level = 0;
	}

	if (skd_isr_comp_limit < 0) {
		pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n",
		       skd_isr_comp_limit, 0);
		skd_isr_comp_limit = 0;
	}

	return pci_register_driver(&skd_driver);
}
static void __exit skd_exit(void)
{
	pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID);

	pci_unregister_driver(&skd_driver);

	if (skd_major)
		unregister_blkdev(skd_major, DRV_NAME);
}

module_init(skd_init);
module_exit(skd_exit);