/*
 * Block driver for s390 storage class memory.
 *
 * Copyright IBM Corp. 2012
 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
 */

#define KMSG_COMPONENT "scm_block"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/eadm.h>
#include "scm_blk.h"

debug_info_t *scm_debug;
static int scm_major;
static mempool_t *aidaw_pool;
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(inactive_requests);
static unsigned int nr_requests = 64;
static unsigned int nr_requests_per_io = 8;
static atomic_t nr_devices = ATOMIC_INIT(0);
module_param(nr_requests, uint, S_IRUGO);
MODULE_PARM_DESC(nr_requests, "Number of parallel requests.");
module_param(nr_requests_per_io, uint, S_IRUGO);
MODULE_PARM_DESC(nr_requests_per_io, "Number of requests per IO.");
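
/*
 * Both parameters are read-only at runtime (S_IRUGO); they can only be
 * set at load time, e.g. (assuming the module is built under its usual
 * name, scm_block):
 *	modprobe scm_block nr_requests=32 nr_requests_per_io=16
 */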

MODULE_DESCRIPTION("Block driver for s390 storage class memory.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("scm:scmdev*");
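
/*
 * scm_request handling: a fixed pool of requests (each with its own
 * zeroed aob page) is allocated up front by scm_alloc_rqs() and parked
 * on the inactive_requests list. scm_request_fetch() and
 * scm_request_done() move requests out of and back onto that list
 * under list_lock.
 */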

static void __scm_free_rq(struct scm_request *scmrq)
{
	struct aob_rq_header *aobrq = to_aobrq(scmrq);

	free_page((unsigned long) scmrq->aob);
	kfree(scmrq->request);
	kfree(aobrq);
}

static void scm_free_rqs(void)
{
	struct list_head *iter, *safe;
	struct scm_request *scmrq;

	spin_lock_irq(&list_lock);
	list_for_each_safe(iter, safe, &inactive_requests) {
		scmrq = list_entry(iter, struct scm_request, list);
		list_del(&scmrq->list);
		__scm_free_rq(scmrq);
	}
	spin_unlock_irq(&list_lock);

	mempool_destroy(aidaw_pool);
}

static int __scm_alloc_rq(void)
{
	struct aob_rq_header *aobrq;
	struct scm_request *scmrq;

	aobrq = kzalloc(sizeof(*aobrq) + sizeof(*scmrq), GFP_KERNEL);
	if (!aobrq)
		return -ENOMEM;

	scmrq = (void *) aobrq->data;
	scmrq->aob = (void *) get_zeroed_page(GFP_DMA);
	if (!scmrq->aob)
		goto free;

	scmrq->request = kcalloc(nr_requests_per_io, sizeof(scmrq->request[0]),
				 GFP_KERNEL);
	if (!scmrq->request)
		goto free;

	INIT_LIST_HEAD(&scmrq->list);
	spin_lock_irq(&list_lock);
	list_add(&scmrq->list, &inactive_requests);
	spin_unlock_irq(&list_lock);

	return 0;
free:
	__scm_free_rq(scmrq);
	return -ENOMEM;
}

static int scm_alloc_rqs(unsigned int nrqs)
{
	int ret = 0;

	aidaw_pool = mempool_create_page_pool(max(nrqs / 8, 1U), 0);
	if (!aidaw_pool)
		return -ENOMEM;

	while (nrqs-- && !ret)
		ret = __scm_alloc_rq();

	return ret;
}

static struct scm_request *scm_request_fetch(void)
{
	struct scm_request *scmrq = NULL;

	spin_lock_irq(&list_lock);
	if (list_empty(&inactive_requests))
		goto out;
	scmrq = list_first_entry(&inactive_requests, struct scm_request, list);
	list_del(&scmrq->list);
out:
	spin_unlock_irq(&list_lock);
	return scmrq;
}
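
/*
 * Put a request back onto the inactive list. Aidaw pages taken from
 * aidaw_pool (page aligned, with MSB_FLAG_IDA set) are returned to the
 * mempool; aidaws placed in the unused tail of the aob page are freed
 * together with the aob itself.
 */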
static void scm_request_done(struct scm_request *scmrq)
{
	unsigned long flags;
	struct msb *msb;
	u64 aidaw;
	int i;

	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
		msb = &scmrq->aob->msb[i];
		aidaw = msb->data_addr;

		if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
		    IS_ALIGNED(aidaw, PAGE_SIZE))
			mempool_free(virt_to_page(aidaw), aidaw_pool);
	}

	spin_lock_irqsave(&list_lock, flags);
	list_add(&scmrq->list, &inactive_requests);
	spin_unlock_irqrestore(&list_lock, flags);
}

static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
{
	return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
}
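
/*
 * Aidaws (indirect data address words) map the data pages of a request;
 * each aidaw addresses one 4K page. scm_aidaw_fetch() first tries to
 * reuse the aidaw slots left before the next page boundary and only
 * takes a fresh page from aidaw_pool when those cannot cover the
 * requested number of bytes.
 */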

static inline struct aidaw *scm_aidaw_alloc(void)
{
	struct page *page = mempool_alloc(aidaw_pool, GFP_ATOMIC);

	return page ? page_address(page) : NULL;
}

static inline unsigned long scm_aidaw_bytes(struct aidaw *aidaw)
{
	unsigned long _aidaw = (unsigned long) aidaw;
	unsigned long bytes = ALIGN(_aidaw, PAGE_SIZE) - _aidaw;

	return (bytes / sizeof(*aidaw)) * PAGE_SIZE;
}

struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes)
{
	struct aidaw *aidaw;

	if (scm_aidaw_bytes(scmrq->next_aidaw) >= bytes)
		return scmrq->next_aidaw;

	aidaw = scm_aidaw_alloc();
	if (aidaw)
		memset(aidaw, 0, PAGE_SIZE);
	return aidaw;
}
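
/*
 * Fill the next free msb slot of the aob from the most recently set
 * block layer request (see scm_request_set()): one msb per request,
 * one aidaw per bio segment. Returns -ENOMEM when no aidaw space can
 * be obtained.
 */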
static int scm_request_prepare(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	struct scm_device *scmdev = bdev->gendisk->private_data;
	int pos = scmrq->aob->request.msb_count;
	struct msb *msb = &scmrq->aob->msb[pos];
	struct request *req = scmrq->request[pos];
	struct req_iterator iter;
	struct aidaw *aidaw;
	struct bio_vec bv;

	aidaw = scm_aidaw_fetch(scmrq, blk_rq_bytes(req));
	if (!aidaw)
		return -ENOMEM;

	msb->bs = MSB_BS_4K;
	scmrq->aob->request.msb_count++;
	msb->scm_addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
	msb->oc = (rq_data_dir(req) == READ) ? MSB_OC_READ : MSB_OC_WRITE;
	msb->flags |= MSB_FLAG_IDA;
	msb->data_addr = (u64) aidaw;

	rq_for_each_segment(bv, req, iter) {
		WARN_ON(bv.bv_offset);
		msb->blk_count += bv.bv_len >> 12;
		aidaw->data_addr = (u64) page_address(bv.bv_page);
		aidaw++;
	}

	scmrq->next_aidaw = aidaw;
	return 0;
}

static inline void scm_request_set(struct scm_request *scmrq,
				   struct request *req)
{
	scmrq->request[scmrq->aob->request.msb_count] = req;
}

static inline void scm_request_init(struct scm_blk_dev *bdev,
				    struct scm_request *scmrq)
{
	struct aob_rq_header *aobrq = to_aobrq(scmrq);
	struct aob *aob = scmrq->aob;

	memset(scmrq->request, 0,
	       nr_requests_per_io * sizeof(scmrq->request[0]));
	memset(aob, 0, sizeof(*aob));
	aobrq->scmdev = bdev->scmdev;
	aob->request.cmd_code = ARQB_CMD_MOVE;
	aob->request.data = (u64) aobrq;
	scmrq->bdev = bdev;
	scmrq->retries = 4;
	scmrq->error = BLK_STS_OK;
	/* We don't use all msbs - place aidaws at the end of the aob page. */
	scmrq->next_aidaw = (void *) &aob->msb[nr_requests_per_io];
}

static void scm_request_requeue(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	int i;

	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
		blk_mq_requeue_request(scmrq->request[i], false);

	atomic_dec(&bdev->queued_reqs);
	scm_request_done(scmrq);
	blk_mq_kick_requeue_list(bdev->rq);
}

static void scm_request_finish(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	blk_status_t *error;
	int i;

	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
		error = blk_mq_rq_to_pdu(scmrq->request[i]);
		*error = scmrq->error;
		blk_mq_complete_request(scmrq->request[i]);
	}

	atomic_dec(&bdev->queued_reqs);
	scm_request_done(scmrq);
}

static void scm_request_start(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;

	atomic_inc(&bdev->queued_reqs);
	if (eadm_start_aob(scmrq->aob)) {
		SCM_LOG(5, "no subchannel");
		scm_request_requeue(scmrq);
	}
}

struct scm_queue {
	struct scm_request *scmrq;
	spinlock_t lock;
};
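
/*
 * queue_rq callback: requests arriving on a hardware queue are gathered
 * into the per-queue scm_request until either the aob is full
 * (msb_count == nr_requests_per_io) or the block layer signals the end
 * of the current batch (qd->last); only then is the aob started via
 * scm_request_start().
 */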
static blk_status_t scm_blk_request(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *qd)
{
	struct scm_device *scmdev = hctx->queue->queuedata;
	struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
	struct scm_queue *sq = hctx->driver_data;
	struct request *req = qd->rq;
	struct scm_request *scmrq;

	spin_lock(&sq->lock);
	if (!scm_permit_request(bdev, req)) {
		spin_unlock(&sq->lock);
		return BLK_STS_RESOURCE;
	}

	scmrq = sq->scmrq;
	if (!scmrq) {
		scmrq = scm_request_fetch();
		if (!scmrq) {
			SCM_LOG(5, "no request");
			spin_unlock(&sq->lock);
			return BLK_STS_RESOURCE;
		}
		scm_request_init(bdev, scmrq);
		sq->scmrq = scmrq;
	}
	scm_request_set(scmrq, req);

	if (scm_request_prepare(scmrq)) {
		SCM_LOG(5, "aidaw alloc failed");
		scm_request_set(scmrq, NULL);

		if (scmrq->aob->request.msb_count)
			scm_request_start(scmrq);

		sq->scmrq = NULL;
		spin_unlock(&sq->lock);
		return BLK_STS_RESOURCE;
	}
	blk_mq_start_request(req);

	if (qd->last || scmrq->aob->request.msb_count == nr_requests_per_io) {
		scm_request_start(scmrq);
		sq->scmrq = NULL;
	}
	spin_unlock(&sq->lock);
	return BLK_STS_OK;
}

static int scm_blk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			     unsigned int idx)
{
	struct scm_queue *qd = kzalloc(sizeof(*qd), GFP_KERNEL);

	if (!qd)
		return -ENOMEM;

	spin_lock_init(&qd->lock);
	hctx->driver_data = qd;
	return 0;
}

static void scm_blk_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
{
	struct scm_queue *qd = hctx->driver_data;

	WARN_ON(qd->scmrq);
	kfree(hctx->driver_data);
	hctx->driver_data = NULL;
}

static void __scmrq_log_error(struct scm_request *scmrq)
{
	struct aob *aob = scmrq->aob;

	if (scmrq->error == BLK_STS_TIMEOUT)
		SCM_LOG(1, "Request timeout");
	else {
		SCM_LOG(1, "Request error");
		SCM_LOG_HEX(1, &aob->response, sizeof(aob->response));
	}
	if (scmrq->retries)
		SCM_LOG(1, "Retry request");
	else
		pr_err("An I/O operation to SCM failed with rc=%d\n",
		       scmrq->error);
}

static void scm_blk_handle_error(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	unsigned long flags;

	if (scmrq->error != BLK_STS_IOERR)
		goto restart;

	/* For -EIO the response block is valid. */
	switch (scmrq->aob->response.eqc) {
	case EQC_WR_PROHIBIT:
		spin_lock_irqsave(&bdev->lock, flags);
		if (bdev->state != SCM_WR_PROHIBIT)
			pr_info("%lx: Write access to the SCM increment is suspended\n",
				(unsigned long) bdev->scmdev->address);
		bdev->state = SCM_WR_PROHIBIT;
		spin_unlock_irqrestore(&bdev->lock, flags);
		goto requeue;
	default:
		break;
	}

restart:
	if (!eadm_start_aob(scmrq->aob))
		return;

requeue:
	scm_request_requeue(scmrq);
}
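
/*
 * Completion callback, invoked on interrupt once an aob started via
 * eadm_start_aob() has finished. Failed requests are retried
 * (scmrq->retries times) before the error is propagated to the block
 * layer.
 */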
void scm_blk_irq(struct scm_device *scmdev, void *data, blk_status_t error)
{
	struct scm_request *scmrq = data;

	scmrq->error = error;
	if (error) {
		__scmrq_log_error(scmrq);
		if (scmrq->retries-- > 0) {
			scm_blk_handle_error(scmrq);
			return;
		}
	}

	scm_request_finish(scmrq);
}

static void scm_blk_request_done(struct request *req)
{
	blk_status_t *error = blk_mq_rq_to_pdu(req);

	blk_mq_end_request(req, *error);
}

static const struct block_device_operations scm_blk_devops = {
	.owner = THIS_MODULE,
};

static const struct blk_mq_ops scm_mq_ops = {
	.queue_rq = scm_blk_request,
	.complete = scm_blk_request_done,
	.init_hctx = scm_blk_init_hctx,
	.exit_hctx = scm_blk_exit_hctx,
};
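
/*
 * Set up one block device per scm_device: allocate a blk-mq tag set
 * with one hardware queue per parallel request and room for
 * nr_requests_per_io requests on each, then the request queue and the
 * gendisk. Disk names run scma..scmz, then scmaa..scmzz, allowing for
 * 702 devices.
 */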
int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
{
	unsigned int devindex, nr_max_blk;
	struct request_queue *rq;
	int len, ret;

	devindex = atomic_inc_return(&nr_devices) - 1;
	/* scma..scmz + scmaa..scmzz */
	if (devindex > 701) {
		ret = -ENODEV;
		goto out;
	}

	bdev->scmdev = scmdev;
	bdev->state = SCM_OPER;
	spin_lock_init(&bdev->lock);
	atomic_set(&bdev->queued_reqs, 0);

	bdev->tag_set.ops = &scm_mq_ops;
	bdev->tag_set.cmd_size = sizeof(blk_status_t);
	bdev->tag_set.nr_hw_queues = nr_requests;
	bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests;
	bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;

	ret = blk_mq_alloc_tag_set(&bdev->tag_set);
	if (ret)
		goto out;

	rq = blk_mq_init_queue(&bdev->tag_set);
	if (IS_ERR(rq)) {
		ret = PTR_ERR(rq);
		goto out_tag;
	}
	bdev->rq = rq;
	nr_max_blk = min(scmdev->nr_max_block,
			 (unsigned int) (PAGE_SIZE / sizeof(struct aidaw)));

	blk_queue_logical_block_size(rq, 1 << 12);
	blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */
	blk_queue_max_segments(rq, nr_max_blk);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rq);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, rq);

	bdev->gendisk = alloc_disk(SCM_NR_PARTS);
	if (!bdev->gendisk) {
		ret = -ENOMEM;
		goto out_queue;
	}
	rq->queuedata = scmdev;
	bdev->gendisk->private_data = scmdev;
	bdev->gendisk->fops = &scm_blk_devops;
	bdev->gendisk->queue = rq;
	bdev->gendisk->major = scm_major;
	bdev->gendisk->first_minor = devindex * SCM_NR_PARTS;

	len = snprintf(bdev->gendisk->disk_name, DISK_NAME_LEN, "scm");
	if (devindex > 25) {
		len += snprintf(bdev->gendisk->disk_name + len,
				DISK_NAME_LEN - len, "%c",
				'a' + (devindex / 26) - 1);
		devindex = devindex % 26;
	}
	snprintf(bdev->gendisk->disk_name + len, DISK_NAME_LEN - len, "%c",
		 'a' + devindex);

	/* 512 byte sectors */
	set_capacity(bdev->gendisk, scmdev->size >> 9);
	device_add_disk(&scmdev->dev, bdev->gendisk);
	return 0;

out_queue:
	blk_cleanup_queue(rq);
out_tag:
	blk_mq_free_tag_set(&bdev->tag_set);
out:
	atomic_dec(&nr_devices);
	return ret;
}

void scm_blk_dev_cleanup(struct scm_blk_dev *bdev)
{
	del_gendisk(bdev->gendisk);
	blk_cleanup_queue(bdev->gendisk->queue);
	blk_mq_free_tag_set(&bdev->tag_set);
	put_disk(bdev->gendisk);
}

void scm_blk_set_available(struct scm_blk_dev *bdev)
{
	unsigned long flags;

	spin_lock_irqsave(&bdev->lock, flags);
	if (bdev->state == SCM_WR_PROHIBIT)
		pr_info("%lx: Write access to the SCM increment is restored\n",
			(unsigned long) bdev->scmdev->address);
	bdev->state = SCM_OPER;
	spin_unlock_irqrestore(&bdev->lock, flags);
}
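
/*
 * Sanity check of the module parameters. The cap of 64 on
 * nr_requests_per_io presumably leaves part of the single aob page free
 * for the inline aidaws placed after the last msb (see
 * scm_request_init()).
 */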
static bool __init scm_blk_params_valid(void)
{
	if (!nr_requests_per_io || nr_requests_per_io > 64)
		return false;

	return true;
}

static int __init scm_blk_init(void)
{
	int ret = -EINVAL;

	if (!scm_blk_params_valid())
		goto out;

	ret = register_blkdev(0, "scm");
	if (ret < 0)
		goto out;

	scm_major = ret;
	ret = scm_alloc_rqs(nr_requests);
	if (ret)
		goto out_free;

	scm_debug = debug_register("scm_log", 16, 1, 16);
	if (!scm_debug) {
		ret = -ENOMEM;
		goto out_free;
	}

	debug_register_view(scm_debug, &debug_hex_ascii_view);
	debug_set_level(scm_debug, 2);

	ret = scm_drv_init();
	if (ret)
		goto out_dbf;

	return ret;

out_dbf:
	debug_unregister(scm_debug);
out_free:
	scm_free_rqs();
	unregister_blkdev(scm_major, "scm");
out:
	return ret;
}
module_init(scm_blk_init);

static void __exit scm_blk_cleanup(void)
{
	scm_drv_cleanup();
	debug_unregister(scm_debug);
	scm_free_rqs();
	unregister_blkdev(scm_major, "scm");
}
module_exit(scm_blk_cleanup);