/*
 * Copyright (c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "label.h"
#include "pmem.h"
#include "nd.h"

static DEFINE_IDA(dimm_ida);

/*
 * Retrieve bus and dimm handle and return if this bus supports
 * get_config_data commands
 */
int nvdimm_check_config_data(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!nvdimm->cmd_mask ||
	    !test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
		if (test_bit(NDD_ALIASING, &nvdimm->flags))
			return -ENXIO;
		else
			return -ENOTTY;
	}

	return 0;
}

static int validate_dimm(struct nvdimm_drvdata *ndd)
{
	int rc;

	if (!ndd)
		return -EINVAL;

	rc = nvdimm_check_config_data(ndd->dev);
	if (rc)
		dev_dbg(ndd->dev, "%pf: %s error: %d\n",
				__builtin_return_address(0), __func__, rc);
	return rc;
}

/**
 * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area
 * @ndd: dimm driver-data to initialize
 */
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
{
	struct nd_cmd_get_config_size *cmd = &ndd->nsarea;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc;
	int rc = validate_dimm(ndd);
	int cmd_rc = 0;

	if (rc)
		return rc;

	if (cmd->config_size)
		return 0; /* already valid */

	memset(cmd, 0, sizeof(*cmd));
	nd_desc = nvdimm_bus->nd_desc;
	rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
			ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}

int nvdimm_get_config_data(struct nvdimm_drvdata *ndd, void *buf,
		size_t offset, size_t len)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	int rc = validate_dimm(ndd), cmd_rc = 0;
	struct nd_cmd_get_config_data_hdr *cmd;
	size_t max_cmd_size, buf_offset;

	if (rc)
		return rc;

	if (offset + len > ndd->nsarea.config_size)
		return -ENXIO;

	max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
	cmd = kvzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

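	/*
	 * Read the requested range in chunks no larger than the dimm's
	 * reported max_xfer, advancing in_offset/in_length each pass.
	 */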
	for (buf_offset = 0; len;
			len -= cmd->in_length, buf_offset += cmd->in_length) {
		size_t cmd_size;

		cmd->in_offset = offset + buf_offset;
		cmd->in_length = min(max_cmd_size, len);
		cmd_size = sizeof(*cmd) + cmd->in_length;

		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_GET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
		if (rc < 0)
			break;
		if (cmd_rc < 0) {
			rc = cmd_rc;
			break;
		}

		/* out_buf should be valid, copy it into our output buffer */
		memcpy(buf + buf_offset, cmd->out_buf, cmd->in_length);
	}
	kvfree(cmd);

	return rc;
}

int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
		void *buf, size_t len)
{
	size_t max_cmd_size, buf_offset;
	struct nd_cmd_set_config_hdr *cmd;
	int rc = validate_dimm(ndd), cmd_rc = 0;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

	if (rc)
		return rc;

	if (offset + len > ndd->nsarea.config_size)
		return -ENXIO;

	max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
	cmd = kvzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

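	/*
	 * Write the buffer in max_xfer-sized chunks; each command carries
	 * in_length payload bytes plus a trailing 4-byte status word.
	 */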
	for (buf_offset = 0; len; len -= cmd->in_length,
			buf_offset += cmd->in_length) {
		size_t cmd_size;

		cmd->in_offset = offset + buf_offset;
		cmd->in_length = min(max_cmd_size, len);
		memcpy(cmd->in_buf, buf + buf_offset, cmd->in_length);

		/* status is output in the last 4-bytes of the command buffer */
		cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);

		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
		if (rc < 0)
			break;
		if (cmd_rc < 0) {
			rc = cmd_rc;
			break;
		}
	}
	kvfree(cmd);

	return rc;
}

void nvdimm_set_aliasing(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_ALIASING, &nvdimm->flags);
}

void nvdimm_set_locked(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_LOCKED, &nvdimm->flags);
}

void nvdimm_clear_locked(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	clear_bit(NDD_LOCKED, &nvdimm->flags);
}

static void nvdimm_release(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	ida_simple_remove(&dimm_ida, nvdimm->id);
	kfree(nvdimm);
}

static struct device_type nvdimm_device_type = {
	.name = "nvdimm",
	.release = nvdimm_release,
};

bool is_nvdimm(struct device *dev)
{
	return dev->type == &nvdimm_device_type;
}

struct nvdimm *to_nvdimm(struct device *dev)
{
	struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev);

	WARN_ON(!is_nvdimm(dev));
	return nvdimm;
}
EXPORT_SYMBOL_GPL(to_nvdimm);

struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr)
{
	struct nd_region *nd_region = &ndbr->nd_region;
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];

	return nd_mapping->nvdimm;
}
EXPORT_SYMBOL_GPL(nd_blk_region_to_dimm);

unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr)
{
	/* pmem mapping properties are private to libnvdimm */
	return ARCH_MEMREMAP_PMEM;
}
EXPORT_SYMBOL_GPL(nd_blk_memremap_flags);

struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
{
	struct nvdimm *nvdimm = nd_mapping->nvdimm;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

	return dev_get_drvdata(&nvdimm->dev);
}
EXPORT_SYMBOL(to_ndd);

void nvdimm_drvdata_release(struct kref *kref)
{
	struct nvdimm_drvdata *ndd = container_of(kref, typeof(*ndd), kref);
	struct device *dev = ndd->dev;
	struct resource *res, *_r;

	dev_dbg(dev, "trace\n");

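	/* release any remaining dpa allocations made against this dimm */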
	nvdimm_bus_lock(dev);
	for_each_dpa_resource_safe(ndd, res, _r)
		nvdimm_free_dpa(ndd, res);
	nvdimm_bus_unlock(dev);

	kvfree(ndd->data);
	kfree(ndd);
	put_device(dev);
}

void get_ndd(struct nvdimm_drvdata *ndd)
{
	kref_get(&ndd->kref);
}

void put_ndd(struct nvdimm_drvdata *ndd)
{
	if (ndd)
		kref_put(&ndd->kref, nvdimm_drvdata_release);
}

const char *nvdimm_name(struct nvdimm *nvdimm)
{
	return dev_name(&nvdimm->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_name);

struct kobject *nvdimm_kobj(struct nvdimm *nvdimm)
{
	return &nvdimm->dev.kobj;
}
EXPORT_SYMBOL_GPL(nvdimm_kobj);

unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm)
{
	return nvdimm->cmd_mask;
}
EXPORT_SYMBOL_GPL(nvdimm_cmd_mask);

void *nvdimm_provider_data(struct nvdimm *nvdimm)
{
	if (nvdimm)
		return nvdimm->provider_data;
	return NULL;
}
EXPORT_SYMBOL_GPL(nvdimm_provider_data);

static ssize_t commands_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	int cmd, len = 0;

	if (!nvdimm->cmd_mask)
		return sprintf(buf, "\n");

	for_each_set_bit(cmd, &nvdimm->cmd_mask, BITS_PER_LONG)
		len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd));
	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR_RO(commands);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%s%s\n",
			test_bit(NDD_ALIASING, &nvdimm->flags) ? "alias " : "",
			test_bit(NDD_LOCKED, &nvdimm->flags) ? "lock " : "");
}
static DEVICE_ATTR_RO(flags);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	/*
	 * The state may be in the process of changing, userspace should
	 * quiesce probing if it wants a static answer
	 */
	nvdimm_bus_lock(dev);
	nvdimm_bus_unlock(dev);
	return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
			? "active" : "idle");
}
static DEVICE_ATTR_RO(state);

static ssize_t available_slots_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
	ssize_t rc;
	u32 nfree;

	if (!ndd)
		return -ENXIO;

	nvdimm_bus_lock(dev);
	nfree = nd_label_nfree(ndd);
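	/*
	 * One label slot is held in reserve; nfree is unsigned, so the
	 * "nfree - 1 > nfree" test catches the underflow when no slots
	 * remain.
	 */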
	if (nfree - 1 > nfree) {
		dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
		nfree = 0;
	} else
		nfree--;
	rc = sprintf(buf, "%d\n", nfree);
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(available_slots);

static struct attribute *nvdimm_attributes[] = {
	&dev_attr_state.attr,
	&dev_attr_flags.attr,
	&dev_attr_commands.attr,
	&dev_attr_available_slots.attr,
	NULL,
};

struct attribute_group nvdimm_attribute_group = {
	.attrs = nvdimm_attributes,
};
EXPORT_SYMBOL_GPL(nvdimm_attribute_group);

struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
		const struct attribute_group **groups, unsigned long flags,
		unsigned long cmd_mask, int num_flush,
		struct resource *flush_wpq)
{
	struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
	struct device *dev;

	if (!nvdimm)
		return NULL;

	nvdimm->id = ida_simple_get(&dimm_ida, 0, 0, GFP_KERNEL);
	if (nvdimm->id < 0) {
		kfree(nvdimm);
		return NULL;
	}
	nvdimm->provider_data = provider_data;
	nvdimm->flags = flags;
	nvdimm->cmd_mask = cmd_mask;
	nvdimm->num_flush = num_flush;
	nvdimm->flush_wpq = flush_wpq;
	atomic_set(&nvdimm->busy, 0);
	dev = &nvdimm->dev;
	dev_set_name(dev, "nmem%d", nvdimm->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = &nvdimm_device_type;
	dev->devt = MKDEV(nvdimm_major, nvdimm->id);
	dev->groups = groups;
	nd_device_register(dev);

	return nvdimm;
}
EXPORT_SYMBOL_GPL(nvdimm_create);

int alias_dpa_busy(struct device *dev, void *data)
{
	resource_size_t map_end, blk_start, new;
	struct blk_alloc_info *info = data;
	struct nd_mapping *nd_mapping;
	struct nd_region *nd_region;
	struct nvdimm_drvdata *ndd;
	struct resource *res;
	int i;

	if (!is_memory(dev))
		return 0;

	nd_region = to_nd_region(dev);
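	/* find this dimm's mapping in the region, if any */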
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nd_mapping = &nd_region->mapping[i];
		if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
			break;
	}

	if (i >= nd_region->ndr_mappings)
		return 0;

	ndd = to_ndd(nd_mapping);
	map_end = nd_mapping->start + nd_mapping->size - 1;
	blk_start = nd_mapping->start;

	/*
	 * In the allocation case ->res is set to the free space that we are
	 * looking to validate against PMEM aliasing collision rules
	 * (i.e. BLK is allocated after all aliased PMEM).
	 */
	if (info->res) {
		if (info->res->start >= nd_mapping->start
				&& info->res->start < map_end)
			/* pass */;
		else
			return 0;
	}

 retry:
	/*
	 * Find the free dpa from the end of the last pmem allocation to
	 * the end of the interleave-set mapping.
	 */
	for_each_dpa_resource(ndd, res) {
		if (strncmp(res->name, "pmem", 4) != 0)
			continue;
		if ((res->start >= blk_start && res->start < map_end)
				|| (res->end >= blk_start
					&& res->end <= map_end)) {
			new = max(blk_start, min(map_end + 1, res->end + 1));
			if (new != blk_start) {
				blk_start = new;
				goto retry;
			}
		}
	}

	/* update the free space range with the probed blk_start */
	if (info->res && blk_start > info->res->start) {
		info->res->start = max(info->res->start, blk_start);
		if (info->res->start > info->res->end)
			info->res->end = info->res->start - 1;
		return 1;
	}

	info->available -= blk_start - nd_mapping->start;

	return 0;
}

/**
 * nd_blk_available_dpa - account the unused dpa of BLK region
 * @nd_region: container of dpa-resource-root + labels
 *
 * Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges, but
 * we arrange for them to never start at a lower dpa than the last
 * PMEM allocation in an aliased region.
 */
resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct blk_alloc_info info = {
		.nd_mapping = nd_mapping,
		.available = nd_mapping->size,
		.res = NULL,
	};
	struct resource *res;

	if (!ndd)
		return 0;

	device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);

	/* now account for busy blk allocations in unaliased dpa */
	for_each_dpa_resource(ndd, res) {
		if (strncmp(res->name, "blk", 3) != 0)
			continue;
		info.available -= resource_size(res);
	}

	return info.available;
}

/**
 * nd_pmem_max_contiguous_dpa - For the given dimm+region, return the max
 *			contiguous unallocated dpa range.
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 */
resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nvdimm_bus *nvdimm_bus;
	resource_size_t max = 0;
	struct resource *res;

	/* if a dimm is disabled the available capacity is zero */
	if (!ndd)
		return 0;

	nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
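	/*
	 * __reserve_free_pmem() is expected to tag each free pmem extent
	 * on this dimm with a temporary "pmem-reserve" resource; the loop
	 * below sizes the largest one before release_free_pmem() drops
	 * them again.
	 */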
	if (__reserve_free_pmem(&nd_region->dev, nd_mapping->nvdimm))
		return 0;

	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, "pmem-reserve") != 0)
			continue;
		if (resource_size(res) > max)
			max = resource_size(res);
	}
	release_free_pmem(nvdimm_bus, nd_mapping);
	return max;
}

/**
 * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
 * @nd_mapping: container of dpa-resource-root + labels
 * @nd_region: constrain available space check to this reference region
 * @overlap: calculate available space assuming this level of overlap
 *
 * Validate that a PMEM label, if present, aligns with the start of an
 * interleave set and truncate the available size at the lowest BLK
 * overlap point.
 *
 * The expectation is that this routine is called multiple times as it
 * probes for the largest BLK encroachment for any single member DIMM of
 * the interleave set.  Once that value is determined the PMEM-limit for
 * the set can be established.
 */
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, resource_size_t *overlap)
{
	resource_size_t map_start, map_end, busy = 0, available, blk_start;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res;
	const char *reason;

	if (!ndd)
		return 0;

	map_start = nd_mapping->start;
	map_end = map_start + nd_mapping->size - 1;
	blk_start = max(map_start, map_end + 1 - *overlap);
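	/*
	 * Walk this dimm's allocations: BLK allocations pull blk_start
	 * down toward map_start, while PMEM allocations count as busy
	 * space unless they are misaligned to the interleave set.
	 */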
	for_each_dpa_resource(ndd, res) {
		if (res->start >= map_start && res->start < map_end) {
			if (strncmp(res->name, "blk", 3) == 0)
				blk_start = min(blk_start,
						max(map_start, res->start));
			else if (res->end > map_end) {
				reason = "misaligned to iset";
				goto err;
			} else
				busy += resource_size(res);
		} else if (res->end >= map_start && res->end <= map_end) {
			if (strncmp(res->name, "blk", 3) == 0) {
				/*
				 * If a BLK allocation overlaps the start of
				 * PMEM the entire interleave set may now only
				 * be used for BLK.
				 */
				blk_start = map_start;
			} else
				busy += resource_size(res);
		} else if (map_start > res->start && map_start < res->end) {
			/* total eclipse of the mapping */
			busy += nd_mapping->size;
			blk_start = map_start;
		}
	}

	*overlap = map_end + 1 - blk_start;
	available = blk_start - map_start;
	if (busy < available)
		return available - busy;
	return 0;

 err:
	nd_dbg_dpa(nd_region, ndd, res, "%s\n", reason);
	return 0;
}

void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res)
{
	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	kfree(res->name);
	__release_region(&ndd->dpa, res->start, resource_size(res));
}

struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id, resource_size_t start,
		resource_size_t n)
{
	char *name = kmemdup(label_id, sizeof(*label_id), GFP_KERNEL);
	struct resource *res;

	if (!name)
		return NULL;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
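	/* on success the resource holds @name; nvdimm_free_dpa() frees it */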
	res = __request_region(&ndd->dpa, start, n, name, 0);
	if (!res)
		kfree(name);
	return res;
}

/**
 * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
 * @ndd: container of dpa-resource-root + labels
 * @label_id: dpa resource name of the form {pmem|blk}-<human readable uuid>
 */
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id)
{
	resource_size_t allocated = 0;
	struct resource *res;

	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id->id) == 0)
			allocated += resource_size(res);

	return allocated;
}

static int count_dimms(struct device *dev, void *c)
{
	int *count = c;

	if (is_nvdimm(dev))
		(*count)++;
	return 0;
}

int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
{
	int count = 0;

	/* Flush any possible dimm registration failures */
	nd_synchronize();

	device_for_each_child(&nvdimm_bus->dev, &count, count_dimms);
	dev_dbg(&nvdimm_bus->dev, "count: %d\n", count);
	if (count != dimm_count)
		return -ENXIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_check_dimm_count);

void __exit nvdimm_devs_exit(void)
{
	ida_destroy(&dimm_ida);
}