// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "label.h"
#include "pmem.h"
#include "nd.h"

static DEFINE_IDA(dimm_ida);

/*
 * Retrieve the dimm handle and return whether the dimm supports
 * get_config_data commands: a labeling-capable dimm that lacks the
 * command is an error (-ENXIO), all others are simply unsupported
 * (-ENOTTY).
 */
int nvdimm_check_config_data(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!nvdimm->cmd_mask ||
	    !test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
		if (test_bit(NDD_LABELING, &nvdimm->flags))
			return -ENXIO;
		else
			return -ENOTTY;
	}

	return 0;
}

static int validate_dimm(struct nvdimm_drvdata *ndd)
{
	int rc;

	if (!ndd)
		return -EINVAL;

	rc = nvdimm_check_config_data(ndd->dev);
	if (rc)
		dev_dbg(ndd->dev, "%ps: %s error: %d\n",
				__builtin_return_address(0), __func__, rc);
	return rc;
}

/**
 * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area
 * @ndd: dimm to initialize
 */
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
{
	struct nd_cmd_get_config_size *cmd = &ndd->nsarea;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc;
	int rc = validate_dimm(ndd);
	int cmd_rc = 0;

	if (rc)
		return rc;

	if (cmd->config_size)
		return 0; /* already valid */

	memset(cmd, 0, sizeof(*cmd));
	nd_desc = nvdimm_bus->nd_desc;
	rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
			ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}
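
/**
 * nvdimm_get_config_data - read a range of a dimm's label storage area
 * @ndd: dimm to read
 * @buf: destination buffer, at least @len bytes
 * @offset: offset into the label storage area
 * @len: number of bytes to read
 *
 * Reads are issued in max_xfer-sized chunks. Illustrative caller sketch
 * only (the buffer size and offset below are hypothetical, not from
 * this file):
 *
 *	u8 nsindex[256];
 *	int rc = nvdimm_get_config_data(ndd, nsindex, 0, sizeof(nsindex));
 */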
int nvdimm_get_config_data(struct nvdimm_drvdata *ndd, void *buf,
		size_t offset, size_t len)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	int rc = validate_dimm(ndd), cmd_rc = 0;
	struct nd_cmd_get_config_data_hdr *cmd;
	size_t max_cmd_size, buf_offset;

	if (rc)
		return rc;

	if (offset + len > ndd->nsarea.config_size)
		return -ENXIO;

	max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
	cmd = kvzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	for (buf_offset = 0; len;
	     len -= cmd->in_length, buf_offset += cmd->in_length) {
		size_t cmd_size;

		cmd->in_offset = offset + buf_offset;
		cmd->in_length = min(max_cmd_size, len);

		cmd_size = sizeof(*cmd) + cmd->in_length;

		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_GET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
		if (rc < 0)
			break;
		if (cmd_rc < 0) {
			rc = cmd_rc;
			break;
		}

		/* out_buf should be valid, copy it into our output buffer */
		memcpy(buf + buf_offset, cmd->out_buf, cmd->in_length);
	}
	kvfree(cmd);

	return rc;
}
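
/**
 * nvdimm_set_config_data - write a range of a dimm's label storage area
 * @ndd: dimm to write
 * @offset: offset into the label storage area
 * @buf: source buffer, at least @len bytes
 * @len: number of bytes to write
 *
 * Like reads, writes are chunked to ndd->nsarea.max_xfer; each chunk
 * carries a trailing 4-byte status word in the command buffer.
 */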
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
		void *buf, size_t len)
{
	size_t max_cmd_size, buf_offset;
	struct nd_cmd_set_config_hdr *cmd;
	int rc = validate_dimm(ndd), cmd_rc = 0;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

	if (rc)
		return rc;

	if (offset + len > ndd->nsarea.config_size)
		return -ENXIO;

	max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
	cmd = kvzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	for (buf_offset = 0; len; len -= cmd->in_length,
			buf_offset += cmd->in_length) {
		size_t cmd_size;

		cmd->in_offset = offset + buf_offset;
		cmd->in_length = min(max_cmd_size, len);
		memcpy(cmd->in_buf, buf + buf_offset, cmd->in_length);

		/* status is output in the last 4-bytes of the command buffer */
		cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);

		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
		if (rc < 0)
			break;
		if (cmd_rc < 0) {
			rc = cmd_rc;
			break;
		}
	}
	kvfree(cmd);

	return rc;
}

void nvdimm_set_labeling(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_LABELING, &nvdimm->flags);
}

void nvdimm_set_locked(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_LOCKED, &nvdimm->flags);
}

void nvdimm_clear_locked(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	clear_bit(NDD_LOCKED, &nvdimm->flags);
}

static void nvdimm_release(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	ida_simple_remove(&dimm_ida, nvdimm->id);
	kfree(nvdimm);
}

struct nvdimm *to_nvdimm(struct device *dev)
{
	struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev);

	WARN_ON(!is_nvdimm(dev));
	return nvdimm;
}
EXPORT_SYMBOL_GPL(to_nvdimm);
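
/* the caller must hold the nvdimm bus lock, as asserted below */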
struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
{
	struct nvdimm *nvdimm = nd_mapping->nvdimm;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

	return dev_get_drvdata(&nvdimm->dev);
}
EXPORT_SYMBOL(to_ndd);

void nvdimm_drvdata_release(struct kref *kref)
{
	struct nvdimm_drvdata *ndd = container_of(kref, typeof(*ndd), kref);
	struct device *dev = ndd->dev;
	struct resource *res, *_r;

	dev_dbg(dev, "trace\n");
	nvdimm_bus_lock(dev);
	for_each_dpa_resource_safe(ndd, res, _r)
		nvdimm_free_dpa(ndd, res);
	nvdimm_bus_unlock(dev);

	kvfree(ndd->data);
	kfree(ndd);
	put_device(dev);
}

void get_ndd(struct nvdimm_drvdata *ndd)
{
	kref_get(&ndd->kref);
}

void put_ndd(struct nvdimm_drvdata *ndd)
{
	if (ndd)
		kref_put(&ndd->kref, nvdimm_drvdata_release);
}

const char *nvdimm_name(struct nvdimm *nvdimm)
{
	return dev_name(&nvdimm->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_name);

struct kobject *nvdimm_kobj(struct nvdimm *nvdimm)
{
	return &nvdimm->dev.kobj;
}
EXPORT_SYMBOL_GPL(nvdimm_kobj);

unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm)
{
	return nvdimm->cmd_mask;
}
EXPORT_SYMBOL_GPL(nvdimm_cmd_mask);

void *nvdimm_provider_data(struct nvdimm *nvdimm)
{
	if (nvdimm)
		return nvdimm->provider_data;
	return NULL;
}
EXPORT_SYMBOL_GPL(nvdimm_provider_data);

static ssize_t commands_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	int cmd, len = 0;

	if (!nvdimm->cmd_mask)
		return sprintf(buf, "\n");

	for_each_set_bit(cmd, &nvdimm->cmd_mask, BITS_PER_LONG)
		len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd));
	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR_RO(commands);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%s%s\n",
			test_bit(NDD_LABELING, &nvdimm->flags) ? "label " : "",
			test_bit(NDD_LOCKED, &nvdimm->flags) ? "lock " : "");
}
static DEVICE_ATTR_RO(flags);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	/*
	 * The state may be in the process of changing; userspace should
	 * quiesce probing if it wants a static answer
	 */
	nvdimm_bus_lock(dev);
	nvdimm_bus_unlock(dev);
	return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
			? "active" : "idle");
}
static DEVICE_ATTR_RO(state);

static ssize_t __available_slots_show(struct nvdimm_drvdata *ndd, char *buf)
{
	struct device *dev;
	ssize_t rc;
	u32 nfree;

	if (!ndd)
		return -ENXIO;

	dev = ndd->dev;
	nvdimm_bus_lock(dev);
	nfree = nd_label_nfree(ndd);
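	/*
	 * One slot is held in reserve (likely so an in-flight label
	 * update always has a free slot to land in); nfree - 1 > nfree
	 * catches the nfree == 0 underflow.
	 */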
	if (nfree - 1 > nfree) {
		dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
		nfree = 0;
	} else
		nfree--;
	rc = sprintf(buf, "%d\n", nfree);
	nvdimm_bus_unlock(dev);
	return rc;
}

static ssize_t available_slots_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	ssize_t rc;

	device_lock(dev);
	rc = __available_slots_show(dev_get_drvdata(dev), buf);
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(available_slots);

__weak ssize_t security_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (test_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags))
		return sprintf(buf, "overwrite\n");
	if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
		return sprintf(buf, "disabled\n");
	if (test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.flags))
		return sprintf(buf, "unlocked\n");
	if (test_bit(NVDIMM_SECURITY_LOCKED, &nvdimm->sec.flags))
		return sprintf(buf, "locked\n");
	return -ENOTTY;
}

static ssize_t frozen_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%d\n", test_bit(NVDIMM_SECURITY_FROZEN,
				&nvdimm->sec.flags));
}
static DEVICE_ATTR_RO(frozen);

static ssize_t security_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	ssize_t rc;

	/*
	 * Require all userspace triggered security management to be
	 * done while probing is idle and the DIMM is not in active use
	 * in any region.
	 */
	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = nvdimm_security_store(dev, buf, len);
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RW(security);
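
/*
 * Illustrative only: the accepted command grammar is implemented by
 * nvdimm_security_store(); e.g. writing "freeze" requests that the
 * dimm's security state be frozen (sysfs path shown for a hypothetical
 * nmem0):
 *
 *	echo freeze > /sys/bus/nd/devices/nmem0/security
 */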

static struct attribute *nvdimm_attributes[] = {
	&dev_attr_state.attr,
	&dev_attr_flags.attr,
	&dev_attr_commands.attr,
	&dev_attr_available_slots.attr,
	&dev_attr_security.attr,
	&dev_attr_frozen.attr,
	NULL,
};

static umode_t nvdimm_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (a != &dev_attr_security.attr && a != &dev_attr_frozen.attr)
		return a->mode;
	if (!nvdimm->sec.flags)
		return 0;

	if (a == &dev_attr_security.attr) {
		/* Are there any state mutation ops (make writable)? */
		if (nvdimm->sec.ops->freeze || nvdimm->sec.ops->disable
				|| nvdimm->sec.ops->change_key
				|| nvdimm->sec.ops->erase
				|| nvdimm->sec.ops->overwrite)
			return a->mode;
		return 0444;
	}

	if (nvdimm->sec.ops->freeze)
		return a->mode;
	return 0;
}

static const struct attribute_group nvdimm_attribute_group = {
	.attrs = nvdimm_attributes,
	.is_visible = nvdimm_visible,
};

static ssize_t result_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_result result;

	if (!nvdimm->fw_ops)
		return -EOPNOTSUPP;

	nvdimm_bus_lock(dev);
	result = nvdimm->fw_ops->activate_result(nvdimm);
	nvdimm_bus_unlock(dev);

	switch (result) {
	case NVDIMM_FWA_RESULT_NONE:
		return sprintf(buf, "none\n");
	case NVDIMM_FWA_RESULT_SUCCESS:
		return sprintf(buf, "success\n");
	case NVDIMM_FWA_RESULT_FAIL:
		return sprintf(buf, "fail\n");
	case NVDIMM_FWA_RESULT_NOTSTAGED:
		return sprintf(buf, "not_staged\n");
	case NVDIMM_FWA_RESULT_NEEDRESET:
		return sprintf(buf, "need_reset\n");
	default:
		return -ENXIO;
	}
}
static DEVICE_ATTR_ADMIN_RO(result);

static ssize_t activate_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_state state;

	if (!nvdimm->fw_ops)
		return -EOPNOTSUPP;

	nvdimm_bus_lock(dev);
	state = nvdimm->fw_ops->activate_state(nvdimm);
	nvdimm_bus_unlock(dev);

	switch (state) {
	case NVDIMM_FWA_IDLE:
		return sprintf(buf, "idle\n");
	case NVDIMM_FWA_BUSY:
		return sprintf(buf, "busy\n");
	case NVDIMM_FWA_ARMED:
		return sprintf(buf, "armed\n");
	default:
		return -ENXIO;
	}
}

static ssize_t activate_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_trigger arg;
	int rc;

	if (!nvdimm->fw_ops)
		return -EOPNOTSUPP;

	if (sysfs_streq(buf, "arm"))
		arg = NVDIMM_FWA_ARM;
	else if (sysfs_streq(buf, "disarm"))
		arg = NVDIMM_FWA_DISARM;
	else
		return -EINVAL;

	nvdimm_bus_lock(dev);
	rc = nvdimm->fw_ops->arm(nvdimm, arg);
	nvdimm_bus_unlock(dev);

	if (rc < 0)
		return rc;
	return len;
}
static DEVICE_ATTR_ADMIN_RW(activate);
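
/*
 * Illustrative only: with bus and dimm fw_ops present and a capable
 * platform, userspace arms activation and reads back the result via
 * sysfs (path shown for a hypothetical nmem0):
 *
 *	echo arm > /sys/bus/nd/devices/nmem0/firmware/activate
 *	cat /sys/bus/nd/devices/nmem0/firmware/result
 */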

static struct attribute *nvdimm_firmware_attributes[] = {
	&dev_attr_activate.attr,
	&dev_attr_result.attr,
	NULL,
};

static umode_t nvdimm_firmware_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_capability cap;

	if (!nd_desc->fw_ops)
		return 0;
	if (!nvdimm->fw_ops)
		return 0;

	nvdimm_bus_lock(dev);
	cap = nd_desc->fw_ops->capability(nd_desc);
	nvdimm_bus_unlock(dev);

	if (cap < NVDIMM_FWA_CAP_QUIESCE)
		return 0;

	return a->mode;
}

static const struct attribute_group nvdimm_firmware_attribute_group = {
	.name = "firmware",
	.attrs = nvdimm_firmware_attributes,
	.is_visible = nvdimm_firmware_visible,
};

static const struct attribute_group *nvdimm_attribute_groups[] = {
	&nd_device_attribute_group,
	&nvdimm_attribute_group,
	&nvdimm_firmware_attribute_group,
	NULL,
};

static const struct device_type nvdimm_device_type = {
	.name = "nvdimm",
	.release = nvdimm_release,
	.groups = nvdimm_attribute_groups,
};

bool is_nvdimm(struct device *dev)
{
	return dev->type == &nvdimm_device_type;
}

static struct lock_class_key nvdimm_key;
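
/**
 * __nvdimm_create - instantiate and register a dimm device on @nvdimm_bus
 * @nvdimm_bus: parent bus for the new dimm
 * @provider_data: opaque data retrievable via nvdimm_provider_data()
 * @groups: optional provider-specific sysfs attribute groups
 * @flags: initial NDD_* flags, e.g. NDD_LABELING, NDD_LOCKED
 * @cmd_mask: mask of ND_CMD_* commands this dimm supports
 * @num_flush: number of entries in @flush_wpq
 * @flush_wpq: write-pending-queue flush hint resources
 * @dimm_id: provider-unique id string
 * @sec_ops: optional security operations
 * @fw_ops: optional firmware-activation operations
 */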
struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
		void *provider_data, const struct attribute_group **groups,
		unsigned long flags, unsigned long cmd_mask, int num_flush,
		struct resource *flush_wpq, const char *dimm_id,
		const struct nvdimm_security_ops *sec_ops,
		const struct nvdimm_fw_ops *fw_ops)
{
	struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
	struct device *dev;

	if (!nvdimm)
		return NULL;

	nvdimm->id = ida_simple_get(&dimm_ida, 0, 0, GFP_KERNEL);
	if (nvdimm->id < 0) {
		kfree(nvdimm);
		return NULL;
	}

	nvdimm->dimm_id = dimm_id;
	nvdimm->provider_data = provider_data;
	nvdimm->flags = flags;
	nvdimm->cmd_mask = cmd_mask;
	nvdimm->num_flush = num_flush;
	nvdimm->flush_wpq = flush_wpq;
	atomic_set(&nvdimm->busy, 0);
	dev = &nvdimm->dev;
	dev_set_name(dev, "nmem%d", nvdimm->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = &nvdimm_device_type;
	dev->devt = MKDEV(nvdimm_major, nvdimm->id);
	dev->groups = groups;
	nvdimm->sec.ops = sec_ops;
	nvdimm->fw_ops = fw_ops;
	nvdimm->sec.overwrite_tmo = 0;
	INIT_DELAYED_WORK(&nvdimm->dwork, nvdimm_security_overwrite_query);
	/*
	 * Security state must be initialized before device_add() for
	 * attribute visibility: fetch both the user and the extended
	 * (master) state.
	 */
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
	nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &nvdimm_key);
	nd_device_register(dev);

	return nvdimm;
}
EXPORT_SYMBOL_GPL(__nvdimm_create);

void nvdimm_delete(struct nvdimm *nvdimm)
{
	struct device *dev = &nvdimm->dev;
	bool dev_put = false;

	/* We are shutting down. Make state frozen artificially. */
	nvdimm_bus_lock(dev);
	set_bit(NVDIMM_SECURITY_FROZEN, &nvdimm->sec.flags);
	if (test_and_clear_bit(NDD_WORK_PENDING, &nvdimm->flags))
		dev_put = true;
	nvdimm_bus_unlock(dev);
	cancel_delayed_work_sync(&nvdimm->dwork);
	if (dev_put)
		put_device(dev);
	nd_device_unregister(dev, ND_SYNC);
}
EXPORT_SYMBOL_GPL(nvdimm_delete);

static void shutdown_security_notify(void *data)
{
	struct nvdimm *nvdimm = data;

	sysfs_put(nvdimm->sec.overwrite_state);
}

int nvdimm_security_setup_events(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!nvdimm->sec.flags || !nvdimm->sec.ops
			|| !nvdimm->sec.ops->overwrite)
		return 0;
	nvdimm->sec.overwrite_state = sysfs_get_dirent(dev->kobj.sd, "security");
	if (!nvdimm->sec.overwrite_state)
		return -ENOMEM;

	return devm_add_action_or_reset(dev, shutdown_security_notify, nvdimm);
}
EXPORT_SYMBOL_GPL(nvdimm_security_setup_events);

int nvdimm_in_overwrite(struct nvdimm *nvdimm)
{
	return test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
}
EXPORT_SYMBOL_GPL(nvdimm_in_overwrite);

int nvdimm_security_freeze(struct nvdimm *nvdimm)
{
	int rc;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->freeze)
		return -EOPNOTSUPP;

	if (!nvdimm->sec.flags)
		return -EIO;

	if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
		dev_warn(&nvdimm->dev, "Overwrite operation in progress.\n");
		return -EBUSY;
	}

	rc = nvdimm->sec.ops->freeze(nvdimm);
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);

	return rc;
}
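
/*
 * An interleaved region stripes capacity evenly across its mappings, so
 * the per-dimm allocation granularity is the region alignment divided
 * by the number of mappings.
 */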
static unsigned long dpa_align(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;

	if (dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev),
				"bus lock required for capacity provision\n"))
		return 0;
	if (dev_WARN_ONCE(dev, !nd_region->ndr_mappings || nd_region->align
				% nd_region->ndr_mappings,
				"invalid region align %#lx mappings: %d\n",
				nd_region->align, nd_region->ndr_mappings))
		return 0;
	return nd_region->align / nd_region->ndr_mappings;
}

/**
 * nd_pmem_max_contiguous_dpa - For the given dimm+region, return the max
 *		contiguous unallocated dpa range.
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 */
resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nvdimm_bus *nvdimm_bus;
	resource_size_t max = 0;
	struct resource *res;
	unsigned long align;

	/* if a dimm is disabled the available capacity is zero */
	if (!ndd)
		return 0;

	align = dpa_align(nd_region);
	if (!align)
		return 0;

	nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	if (__reserve_free_pmem(&nd_region->dev, nd_mapping->nvdimm))
		return 0;
	for_each_dpa_resource(ndd, res) {
		resource_size_t start, end;

		if (strcmp(res->name, "pmem-reserve") != 0)
			continue;
		/* trim free space relative to current alignment setting */
		start = ALIGN(res->start, align);
		end = ALIGN_DOWN(res->end + 1, align) - 1;
		if (end < start)
			continue;
		if (end - start + 1 > max)
			max = end - start + 1;
	}
	release_free_pmem(nvdimm_bus, nd_mapping);
	return max;
}

/**
 * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 *
 * Validate that a PMEM label, if present, aligns with the start of an
 * interleave set.
 */
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	resource_size_t map_start, map_end, busy = 0;
	struct resource *res;
	unsigned long align;

	if (!ndd)
		return 0;

	align = dpa_align(nd_region);
	if (!align)
		return 0;

	map_start = nd_mapping->start;
	map_end = map_start + nd_mapping->size - 1;
	for_each_dpa_resource(ndd, res) {
		resource_size_t start, end;

		start = ALIGN_DOWN(res->start, align);
		end = ALIGN(res->end + 1, align) - 1;
		if (start >= map_start && start < map_end) {
			if (end > map_end) {
				nd_dbg_dpa(nd_region, ndd, res,
						"misaligned to iset\n");
				return 0;
			}
			busy += end - start + 1;
		} else if (end >= map_start && end <= map_end) {
			busy += end - start + 1;
		} else if (map_start > start && map_start < end) {
			/* total eclipse of the mapping */
			busy += nd_mapping->size;
		}
	}

	if (busy < nd_mapping->size)
		return ALIGN_DOWN(nd_mapping->size - busy, align);

	return 0;
}

void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res)
{
	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	kfree(res->name);
	__release_region(&ndd->dpa, res->start, resource_size(res));
}

struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id, resource_size_t start,
		resource_size_t n)
{
	char *name = kmemdup(label_id, sizeof(*label_id), GFP_KERNEL);
	struct resource *res;

	if (!name)
		return NULL;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	res = __request_region(&ndd->dpa, start, n, name, 0);
	if (!res)
		kfree(name);
	return res;
}

/**
 * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
 * @ndd: container of dpa-resource-root + labels
 * @label_id: dpa resource name of the form pmem-<human readable uuid>
 */
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id)
{
	resource_size_t allocated = 0;
	struct resource *res;

	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id->id) == 0)
			allocated += resource_size(res);

	return allocated;
}

static int count_dimms(struct device *dev, void *c)
{
	int *count = c;

	if (is_nvdimm(dev))
		(*count)++;
	return 0;
}

int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
{
	int count = 0;

	/* Flush any possible dimm registration failures */
	nd_synchronize();

	device_for_each_child(&nvdimm_bus->dev, &count, count_dimms);
	dev_dbg(&nvdimm_bus->dev, "count: %d\n", count);
	if (count != dimm_count)
		return -ENXIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_check_dimm_count);

void __exit nvdimm_devs_exit(void)
{
	ida_destroy(&dimm_ida);
}