/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "nvme.h"
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, unsigned int flags)
{
	bool write = cmd->common.opcode & 1;
	struct request *req;

	req = blk_mq_alloc_request(q, write, flags);
	if (IS_ERR(req))
		return req;

	req->cmd_type = REQ_TYPE_DRV_PRIV;
	req->cmd_flags |= REQ_FAILFAST_DRIVER;
	req->__data_len = 0;
	req->__sector = (sector_t) -1;
	req->bio = req->biotail = NULL;

	req->cmd = (unsigned char *)cmd;
	req->cmd_len = sizeof(struct nvme_command);
	req->special = (void *)0;

	return req;
}

/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen, u32 *result, unsigned timeout)
{
	struct request *req;
	int ret;

	req = nvme_alloc_request(q, cmd, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;

	if (buffer && bufflen) {
		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
		if (ret)
			goto out;
	}

	blk_execute_rq(req->q, NULL, req, 0);
	if (result)
		*result = (u32)(uintptr_t)req->special;
	ret = req->errors;
 out:
	blk_mq_free_request(req);
	return ret;
}
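
/*
 * Convenience wrapper for callers that need neither the command result nor
 * a non-default timeout.
 */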
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen)
{
	return __nvme_submit_sync_cmd(q, cmd, buffer, bufflen, NULL, 0);
}
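
/*
 * Execute a command with a user space data buffer.  If the namespace has a
 * block device and a metadata buffer is supplied, a kernel bounce buffer is
 * attached as integrity metadata; it is copied back to user space after a
 * successful read.
 */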
int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen,
		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
		u32 *result, unsigned timeout)
{
	bool write = cmd->common.opcode & 1;
	struct nvme_ns *ns = q->queuedata;
	struct gendisk *disk = ns ? ns->disk : NULL;
	struct request *req;
	struct bio *bio = NULL;
	void *meta = NULL;
	int ret;

	req = nvme_alloc_request(q, cmd, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;

	if (ubuffer && bufflen) {
		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
				GFP_KERNEL);
		if (ret)
			goto out;
		bio = req->bio;

		if (!disk)
			goto submit;
		bio->bi_bdev = bdget_disk(disk, 0);
		if (!bio->bi_bdev) {
			ret = -ENODEV;
			goto out_unmap;
		}

		if (meta_buffer) {
			struct bio_integrity_payload *bip;

			meta = kmalloc(meta_len, GFP_KERNEL);
			if (!meta) {
				ret = -ENOMEM;
				goto out_unmap;
			}

			if (write) {
				if (copy_from_user(meta, meta_buffer,
						meta_len)) {
					ret = -EFAULT;
					goto out_free_meta;
				}
			}

			bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
			if (!bip) {
				ret = -ENOMEM;
				goto out_free_meta;
			}

			bip->bip_iter.bi_size = meta_len;
			bip->bip_iter.bi_sector = meta_seed;

			ret = bio_integrity_add_page(bio, virt_to_page(meta),
					meta_len, offset_in_page(meta));
			if (ret != meta_len) {
				ret = -ENOMEM;
				goto out_free_meta;
			}
		}
	}
 submit:
	blk_execute_rq(req->q, disk, req, 0);
	ret = req->errors;
	if (result)
		*result = (u32)(uintptr_t)req->special;
	if (meta && !ret && !write) {
		if (copy_to_user(meta_buffer, meta, meta_len))
			ret = -EFAULT;
	}
 out_free_meta:
	kfree(meta);
 out_unmap:
	if (bio) {
		if (disk && bio->bi_bdev)
			bdput(bio->bi_bdev);
		blk_rq_unmap_user(bio);
	}
 out:
	blk_mq_free_request(req);
	return ret;
}
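
/*
 * Wrapper for user commands that do not use a separate metadata buffer.
 */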
int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen, u32 *result,
		unsigned timeout)
{
	return __nvme_submit_user_cmd(q, cmd, ubuffer, bufflen, NULL, 0, 0,
			result, timeout);
}
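
/*
 * Issue an Identify Controller command (CNS 0x01).  On success *id points
 * to a kmalloc'ed nvme_id_ctrl structure that the caller must free.
 */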
int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = cpu_to_le32(1);

	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ctrl));
	if (error)
		kfree(*id);
	return error;
}
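
/*
 * Issue an Identify Namespace command for nsid.  On success *id points to a
 * kmalloc'ed nvme_id_ns structure that the caller must free.
 */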
int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
		struct nvme_id_ns **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);

	*id = kmalloc(sizeof(struct nvme_id_ns), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ns));
	if (error)
		kfree(*id);
	return error;
}
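
/*
 * Read feature fid for nsid.  The completion result is returned through
 * *result; dma_addr is placed in PRP1 for features that transfer a data
 * buffer.
 */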
int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
		dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(nsid);
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);

	return __nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0, result, 0);
}
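
/*
 * Write feature fid with value dword11.  The completion result is returned
 * through *result; dma_addr is placed in PRP1 for features that transfer a
 * data buffer.
 */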
int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
		dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	return __nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0, result, 0);
}
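
/*
 * Read the controller-wide SMART / health information log page.  On success
 * *log points to a kmalloc'ed nvme_smart_log structure that the caller must
 * free.
 */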
int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log)
{
	struct nvme_command c = { };
	int error;

	c.common.opcode = nvme_admin_get_log_page;
	c.common.nsid = cpu_to_le32(0xFFFFFFFF);
	c.common.cdw10[0] = cpu_to_le32(
			(((sizeof(struct nvme_smart_log) / 4) - 1) << 16) |
			NVME_LOG_SMART);

	*log = kmalloc(sizeof(struct nvme_smart_log), GFP_KERNEL);
	if (!*log)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *log,
			sizeof(struct nvme_smart_log));
	if (error)
		kfree(*log);
	return error;
}