/*
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#ifndef _NVME_H
#define _NVME_H

#include <linux/nvme.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>

enum {
	/*
	 * Driver internal status code for commands that were cancelled due
	 * to timeouts or controller shutdown.  The value is negative so
	 * that it a) doesn't overlap with the unsigned hardware error codes,
	 * and b) can easily be tested for.
	 */
	NVME_SC_CANCELLED		= -EINTR,
};

extern unsigned char nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)

extern unsigned char admin_timeout;
#define ADMIN_TIMEOUT	(admin_timeout * HZ)

extern unsigned char shutdown_timeout;
#define SHUTDOWN_TIMEOUT	(shutdown_timeout * HZ)

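/*
 * Namespace types: plain logical-block namespaces vs. namespaces that expose
 * the LightNVM (Open-Channel SSD) interface.
 */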
enum {
	NVME_NS_LBA		= 0,
	NVME_NS_LIGHTNVM	= 1,
};

/*
 * List of workarounds for devices that required behavior not specified in
 * the standard.
 */
enum nvme_quirks {
	/*
	 * Prefers I/O aligned to a stripe size specified in a vendor
	 * specific Identify field.
	 */
	NVME_QUIRK_STRIPE_SIZE			= (1 << 0),

	/*
	 * The controller doesn't handle Identify values other than 0 or 1
	 * correctly.
	 */
	NVME_QUIRK_IDENTIFY_CNS			= (1 << 1),
};

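/*
 * Common per-controller state shared between the core and the transport
 * driver; low-level register access, reset and teardown are delegated to
 * the transport through nvme_ctrl_ops.
 */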
struct nvme_ctrl {
	const struct nvme_ctrl_ops *ops;
	struct request_queue *admin_q;
	struct device *dev;
	struct kref kref;
	int instance;
	struct blk_mq_tag_set *tagset;
	struct list_head namespaces;
	struct device *device;	/* char device */
	struct list_head node;

	char name[12];
	char serial[20];
	char model[40];
	char firmware_rev[8];

	u32 ctrl_config;

	u32 page_size;
	u32 max_hw_sectors;
	u32 stripe_size;
	u16 oncs;
	atomic_t abort_limit;
	u8 event_limit;
	u8 vwc;
	u32 vs;
	bool subsystem;
	unsigned long quirks;
};

/*
 * An NVM Express namespace is equivalent to a SCSI LUN.
 */
struct nvme_ns {
	struct list_head list;

	struct nvme_ctrl *ctrl;
	struct request_queue *queue;
	struct gendisk *disk;
	struct kref kref;

	unsigned ns_id;
	int lba_shift;
	u16 ms;
	bool ext;
	u8 pi_type;
	int type;
	u64 mode_select_num_blocks;
	u32 mode_select_block_len;
};

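/*
 * Callbacks a transport driver supplies so the core can access controller
 * registers, reset the controller, and release its resources.
 */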
struct nvme_ctrl_ops {
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
	bool (*io_incapable)(struct nvme_ctrl *ctrl);
	int (*reset_ctrl)(struct nvme_ctrl *ctrl);
	void (*free_ctrl)(struct nvme_ctrl *ctrl);
};

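/*
 * A minimal sketch of how a memory-mapped transport might back reg_read32(),
 * reading straight from its remapped register BAR.  The to_nvme_dev() helper
 * and the ->bar field are assumptions about the transport's private
 * structure, not part of this header:
 *
 *	static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
 *	{
 *		*val = readl(to_nvme_dev(ctrl)->bar + off);
 *		return 0;
 *	}
 */

/* Check CSTS.RDY through the transport's register accessor. */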
static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
{
	u32 val = 0;

	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
		return false;
	return val & NVME_CSTS_RDY;
}

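/*
 * Report whether the controller can no longer process I/O, either because
 * the transport says so or because CSTS.CFS (controller fatal status) is set.
 */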
static inline bool nvme_io_incapable(struct nvme_ctrl *ctrl)
{
	u32 val = 0;

	if (ctrl->ops->io_incapable(ctrl))
		return true;
	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
		return false;
	return val & NVME_CSTS_CFS;
}

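/*
 * Trigger an NVM subsystem reset by writing the magic value "NVMe"
 * (0x4E564D65) to the NSSR register, provided the controller supports it.
 */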
static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
	if (!ctrl->subsystem)
		return -ENOTTY;
	return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
}

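/*
 * Convert a 512-byte block layer sector number into a namespace LBA, e.g. a
 * namespace formatted with 4096-byte blocks (lba_shift == 12) maps sector 8
 * to LBA 1.
 */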
static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
{
	return (sector >> (ns->lba_shift - 9));
}

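/* Build a Flush command targeting the given namespace. */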
static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.nsid = cpu_to_le32(ns->ns_id);
}

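/*
 * Translate a block layer read/write request into an NVMe command: starting
 * LBA, zeroes-based length, FUA/limited-retry/prefetch hints, and end-to-end
 * protection settings derived from the namespace metadata format.
 */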
static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	u16 control = 0;
	u32 dsmgmt = 0;

	if (req->cmd_flags & REQ_FUA)
		control |= NVME_RW_FUA;
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	if (req->cmd_flags & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
	cmnd->rw.command_id = req->tag;
	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

	if (ns->ms) {
		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE3:
			control |= NVME_RW_PRINFO_PRCHK_GUARD;
			break;
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			control |= NVME_RW_PRINFO_PRCHK_GUARD |
					NVME_RW_PRINFO_PRCHK_REF;
			cmnd->rw.reftag = cpu_to_le32(
					nvme_block_nr(ns, blk_rq_pos(req)));
			break;
		}
		if (!blk_integrity_rq(req))
			control |= NVME_RW_PRINFO_PRACT;
	}

	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
}

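/* Map an NVMe completion status code onto a Linux errno. */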
static inline int nvme_error_status(u16 status)
{
	switch (status & 0x7ff) {
	case NVME_SC_SUCCESS:
		return 0;
	case NVME_SC_CAP_EXCEEDED:
		return -ENOSPC;
	default:
		return -EIO;
	}
}

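/*
 * A failed command is retried unless the controller set the Do Not Retry
 * bit, the block layer marked the request as no-retry, or the request has
 * already run past its timeout.
 */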
static inline bool nvme_req_needs_retry(struct request *req, u16 status)
{
	return !(status & NVME_SC_DNR || blk_noretry_request(req)) &&
		(jiffies - req->start_time) < req->timeout;
}

int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_put_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_identify(struct nvme_ctrl *ctrl);

void nvme_scan_namespaces(struct nvme_ctrl *ctrl);
void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, unsigned int flags);
void nvme_requeue_req(struct request *req);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen, u32 *result, unsigned timeout);
int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen, u32 *result,
		unsigned timeout);

int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen,
		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
		u32 *result, unsigned timeout);

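/*
 * A minimal usage sketch of the synchronous path above, roughly what
 * nvme_identify_ctrl() below does internally; id is assumed to point to a
 * kmalloc()ed struct nvme_id_ctrl:
 *
 *	struct nvme_command c = { };
 *
 *	c.identify.opcode = nvme_admin_identify;
 *	c.identify.cns = cpu_to_le32(1);
 *	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
 */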
int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id);
int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
		struct nvme_id_ns **id);
int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log);
int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
		dma_addr_t dma_addr, u32 *result);
int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
		dma_addr_t dma_addr, u32 *result);

int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);

extern spinlock_t dev_list_lock;

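/* SG_IO support: SCSI-to-NVMe command translation. */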
struct sg_io_hdr;

int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg);
int nvme_sg_get_version_num(int __user *ip);

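/* LightNVM (Open-Channel SSD) registration hooks. */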
int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id);
int nvme_nvm_register(struct request_queue *q, char *disk_name);
void nvme_nvm_unregister(struct request_queue *q, char *disk_name);

int __init nvme_core_init(void);
void nvme_core_exit(void);

#endif /* _NVME_H */