// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Filename: dev.c
*
* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
*	Philip Kelleher <pjk1939@linux.vnet.ibm.com>
*
* (C) Copyright 2013 IBM Corporation
*/
# include <linux/kernel.h>
# include <linux/interrupt.h>
# include <linux/module.h>
# include <linux/pci.h>
# include <linux/slab.h>
# include <linux/hdreg.h>
# include <linux/genhd.h>
# include <linux/blkdev.h>
# include <linux/bio.h>
# include <linux/fs.h>
# include "rsxx_priv.h"
/* Number of minor numbers (and therefore partitions) reserved per card. */
static unsigned int blkdev_minors = 64;
module_param(blkdev_minors, uint, 0444);
MODULE_PARM_DESC(blkdev_minors, "Number of minors(partitions)");

/*
 * For now I'm making this tweakable in case any applications hit this limit.
 * If you see a "bio too big" error in the log you will need to raise this
 * value.
 */
static unsigned int blkdev_max_hw_sectors = 1024;
module_param(blkdev_max_hw_sectors, uint, 0444);
MODULE_PARM_DESC(blkdev_max_hw_sectors, "Max hw sectors for a single BIO");

/* Set to 0 to disable the block device interface entirely. */
static unsigned int enable_blkdev = 1;
module_param(enable_blkdev, uint, 0444);
MODULE_PARM_DESC(enable_blkdev, "Enable block device interfaces");
/* Per-bio bookkeeping shared by every DMA issued on behalf of that bio. */
struct rsxx_bio_meta {
	struct bio	*bio;		/* the original bio being serviced */
	atomic_t	pending_dmas;	/* DMAs still outstanding for this bio */
	atomic_t	error;		/* set non-zero if any DMA failed */
	unsigned long	start_time;	/* bio_start_io_acct() timestamp */
};

/* Slab cache for struct rsxx_bio_meta allocations. */
static struct kmem_cache *bio_meta_pool;

static void rsxx_submit_bio(struct bio *bio);

/*----------------- Block Device Operations -----------------*/
/*
 * Driver-private ioctls: RSXX_GETREG copies a card register to user
 * memory, RSXX_SETREG writes one from user memory.  Anything else is
 * rejected with -ENOTTY.
 */
static int rsxx_blkdev_ioctl(struct block_device *bdev,
			     fmode_t mode,
			     unsigned int cmd,
			     unsigned long arg)
{
	struct rsxx_cardinfo *card = bdev->bd_disk->private_data;

	switch (cmd) {
	case RSXX_GETREG:
		return rsxx_reg_access(card, (void __user *)arg, 1);
	case RSXX_SETREG:
		return rsxx_reg_access(card, (void __user *)arg, 0);
	default:
		return -ENOTTY;
	}
}
static int rsxx_getgeo ( struct block_device * bdev , struct hd_geometry * geo )
{
struct rsxx_cardinfo * card = bdev - > bd_disk - > private_data ;
u64 blocks = card - > size8 > > 9 ;
/*
* get geometry : Fake it . I haven ' t found any drivers that set
* geo - > start , so we won ' t either .
*/
if ( card - > size8 ) {
geo - > heads = 64 ;
geo - > sectors = 16 ;
do_div ( blocks , ( geo - > heads * geo - > sectors ) ) ;
geo - > cylinders = blocks ;
} else {
geo - > heads = 0 ;
geo - > sectors = 0 ;
geo - > cylinders = 0 ;
}
return 0 ;
}
/* Block device entry points for the rsxx gendisk. */
static const struct block_device_operations rsxx_fops = {
	.owner		= THIS_MODULE,
	.submit_bio	= rsxx_submit_bio,
	.getgeo		= rsxx_getgeo,
	.ioctl		= rsxx_blkdev_ioctl,
};
/*
 * DMA completion callback: invoked once per DMA issued for a bio.
 * Any failure latches meta->error; when the last outstanding DMA
 * completes, I/O accounting is finished and the original bio is
 * completed (with an I/O error if any DMA failed).
 */
static void bio_dma_done_cb(struct rsxx_cardinfo *card,
			    void *cb_data,
			    unsigned int error)
{
	struct rsxx_bio_meta *meta = cb_data;

	if (error)
		atomic_set(&meta->error, 1);

	if (atomic_dec_and_test(&meta->pending_dmas)) {
		/* Skip accounting during EEH recovery or if the disk is gone. */
		if (!card->eeh_state && card->gendisk)
			bio_end_io_acct(meta->bio, meta->start_time);

		if (atomic_read(&meta->error))
			bio_io_error(meta->bio);
		else
			bio_endio(meta->bio);

		kmem_cache_free(bio_meta_pool, meta);
	}
}
2021-10-12 14:12:24 +03:00
static void rsxx_submit_bio ( struct bio * bio )
2013-02-05 17:15:02 +04:00
{
2021-01-24 13:02:34 +03:00
struct rsxx_cardinfo * card = bio - > bi_bdev - > bd_disk - > private_data ;
2013-02-05 17:15:02 +04:00
struct rsxx_bio_meta * bio_meta ;
2017-06-03 10:38:06 +03:00
blk_status_t st = BLK_STS_IOERR ;
2013-02-05 17:15:02 +04:00
2020-07-01 11:59:39 +03:00
blk_queue_split ( & bio ) ;
2015-04-24 08:37:18 +03:00
2013-02-05 17:15:02 +04:00
might_sleep ( ) ;
2013-06-18 23:46:04 +04:00
if ( ! card )
goto req_err ;
2013-10-12 02:44:27 +04:00
if ( bio_end_sector ( bio ) > get_capacity ( card - > gendisk ) )
2013-06-18 23:48:38 +04:00
goto req_err ;
2017-06-03 10:38:06 +03:00
if ( unlikely ( card - > halt ) )
2013-02-05 17:15:02 +04:00
goto req_err ;
2017-06-03 10:38:06 +03:00
if ( unlikely ( card - > dma_fault ) )
2013-02-05 17:15:02 +04:00
goto req_err ;
2013-10-12 02:44:27 +04:00
if ( bio - > bi_iter . bi_size = = 0 ) {
2013-02-05 17:15:02 +04:00
dev_err ( CARD_TO_DEV ( card ) , " size zero BIO! \n " ) ;
goto req_err ;
}
bio_meta = kmem_cache_alloc ( bio_meta_pool , GFP_KERNEL ) ;
if ( ! bio_meta ) {
2017-06-03 10:38:06 +03:00
st = BLK_STS_RESOURCE ;
2013-02-05 17:15:02 +04:00
goto req_err ;
}
bio_meta - > bio = bio ;
atomic_set ( & bio_meta - > error , 0 ) ;
atomic_set ( & bio_meta - > pending_dmas , 0 ) ;
2013-06-18 23:36:26 +04:00
if ( ! unlikely ( card - > halt ) )
2020-05-27 08:24:06 +03:00
bio_meta - > start_time = bio_start_io_acct ( bio ) ;
2013-02-05 17:15:02 +04:00
dev_dbg ( CARD_TO_DEV ( card ) , " BIO[%c]: meta: %p addr8: x%llx size: %d \n " ,
bio_data_dir ( bio ) ? ' W ' : ' R ' , bio_meta ,
2013-10-12 02:44:27 +04:00
( u64 ) bio - > bi_iter . bi_sector < < 9 , bio - > bi_iter . bi_size ) ;
2013-02-05 17:15:02 +04:00
st = rsxx_dma_queue_bio ( card , bio , & bio_meta - > pending_dmas ,
bio_dma_done_cb , bio_meta ) ;
if ( st )
goto queue_err ;
2021-10-12 14:12:24 +03:00
return ;
2013-02-05 17:15:02 +04:00
queue_err :
kmem_cache_free ( bio_meta_pool , bio_meta ) ;
req_err :
2015-07-20 16:29:37 +03:00
if ( st )
2017-06-03 10:38:06 +03:00
bio - > bi_status = st ;
2015-07-20 16:29:37 +03:00
bio_endio ( bio ) ;
2013-02-05 17:15:02 +04:00
}
/*----------------- Device Setup -------------------*/
static bool rsxx_discard_supported ( struct rsxx_cardinfo * card )
{
unsigned char pci_rev ;
pci_read_config_byte ( card - > dev , PCI_REVISION_ID , & pci_rev ) ;
return ( pci_rev > = RSXX_DISCARD_SUPPORT ) ;
}
/*
 * Make the gendisk visible to the block layer once the card is ready.
 * Capacity comes from the card config when valid, 0 otherwise.  If
 * device_add_disk() fails, the disk is torn down here and the error
 * is returned to the caller.
 */
int rsxx_attach_dev(struct rsxx_cardinfo *card)
{
	int err = 0;

	mutex_lock(&card->dev_lock);

	/* The block device requires the stripe size from the config. */
	if (enable_blkdev) {
		if (card->config_valid)
			set_capacity(card->gendisk, card->size8 >> 9);
		else
			set_capacity(card->gendisk, 0);
		err = device_add_disk(CARD_TO_DEV(card), card->gendisk, NULL);
		if (err == 0)
			card->bdev_attached = 1;
	}

	mutex_unlock(&card->dev_lock);

	/* Cleanup runs outside the lock; the disk was never published. */
	if (err)
		blk_cleanup_disk(card->gendisk);

	return err;
}
void rsxx_detach_dev ( struct rsxx_cardinfo * card )
{
mutex_lock ( & card - > dev_lock ) ;
if ( card - > bdev_attached ) {
del_gendisk ( card - > gendisk ) ;
card - > bdev_attached = 0 ;
}
mutex_unlock ( & card - > dev_lock ) ;
}
int rsxx_setup_dev ( struct rsxx_cardinfo * card )
{
unsigned short blk_size ;
mutex_init ( & card - > dev_lock ) ;
if ( ! enable_blkdev )
return 0 ;
card - > major = register_blkdev ( 0 , DRIVER_NAME ) ;
if ( card - > major < 0 ) {
dev_err ( CARD_TO_DEV ( card ) , " Failed to get major number \n " ) ;
return - ENOMEM ;
}
2021-05-21 08:50:59 +03:00
card - > gendisk = blk_alloc_disk ( blkdev_minors ) ;
2013-02-05 17:15:02 +04:00
if ( ! card - > gendisk ) {
dev_err ( CARD_TO_DEV ( card ) , " Failed disk alloc \n " ) ;
unregister_blkdev ( card - > major , DRIVER_NAME ) ;
return - ENOMEM ;
}
2013-10-19 02:12:35 +04:00
if ( card - > config_valid ) {
blk_size = card - > config . data . block_size ;
2021-05-21 08:50:59 +03:00
blk_queue_dma_alignment ( card - > gendisk - > queue , blk_size - 1 ) ;
blk_queue_logical_block_size ( card - > gendisk - > queue , blk_size ) ;
2013-10-19 02:12:35 +04:00
}
2013-02-05 17:15:02 +04:00
2021-05-21 08:50:59 +03:00
blk_queue_max_hw_sectors ( card - > gendisk - > queue , blkdev_max_hw_sectors ) ;
blk_queue_physical_block_size ( card - > gendisk - > queue , RSXX_HW_BLK_SIZE ) ;
2013-02-05 17:15:02 +04:00
2021-05-21 08:50:59 +03:00
blk_queue_flag_set ( QUEUE_FLAG_NONROT , card - > gendisk - > queue ) ;
blk_queue_flag_clear ( QUEUE_FLAG_ADD_RANDOM , card - > gendisk - > queue ) ;
2013-02-05 17:15:02 +04:00
if ( rsxx_discard_supported ( card ) ) {
2021-05-21 08:50:59 +03:00
blk_queue_flag_set ( QUEUE_FLAG_DISCARD , card - > gendisk - > queue ) ;
blk_queue_max_discard_sectors ( card - > gendisk - > queue ,
2013-02-05 17:15:02 +04:00
RSXX_HW_BLK_SIZE > > 9 ) ;
2021-05-21 08:50:59 +03:00
card - > gendisk - > queue - > limits . discard_granularity =
RSXX_HW_BLK_SIZE ;
card - > gendisk - > queue - > limits . discard_alignment =
RSXX_HW_BLK_SIZE ;
2013-02-05 17:15:02 +04:00
}
snprintf ( card - > gendisk - > disk_name , sizeof ( card - > gendisk - > disk_name ) ,
" rsxx%d " , card - > disk_id ) ;
card - > gendisk - > major = card - > major ;
2021-05-21 08:50:59 +03:00
card - > gendisk - > minors = blkdev_minors ;
2013-02-05 17:15:02 +04:00
card - > gendisk - > fops = & rsxx_fops ;
card - > gendisk - > private_data = card ;
return 0 ;
}
/* Tear down the gendisk and release the major number. */
void rsxx_destroy_dev(struct rsxx_cardinfo *card)
{
	if (!enable_blkdev)
		return;

	blk_cleanup_disk(card->gendisk);
	card->gendisk = NULL;

	unregister_blkdev(card->major, DRIVER_NAME);
}
/* Create the bio-meta slab cache; called once at module load. */
int rsxx_dev_init(void)
{
	bio_meta_pool = KMEM_CACHE(rsxx_bio_meta, SLAB_HWCACHE_ALIGN);

	return bio_meta_pool ? 0 : -ENOMEM;
}
/* Destroy the bio-meta slab cache; called at module unload. */
void rsxx_dev_cleanup(void)
{
	kmem_cache_destroy(bio_meta_pool);
}