/*
 * Copyright (c) 2013-2015 Intel Corporation.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "label.h"
#include "nd.h"

static DEFINE_IDA(dimm_ida);

/*
 * Check whether this dimm supports the get_config_data command, i.e.
 * whether its label configuration area can be read.  An aliased dimm
 * that lacks the command is a hard error (-ENXIO); otherwise the
 * command is simply unsupported (-ENOTTY).
 */
int nvdimm_check_config_data(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        if (!nvdimm->cmd_mask ||
            !test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
                if (test_bit(NDD_ALIASING, &nvdimm->flags))
                        return -ENXIO;
                else
                        return -ENOTTY;
        }

        return 0;
}
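
/* ensure a dimm is present and its bus can service config-data commands */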
static int validate_dimm(struct nvdimm_drvdata *ndd)
{
        int rc;

        if (!ndd)
                return -EINVAL;

        rc = nvdimm_check_config_data(ndd->dev);
        if (rc)
                dev_dbg(ndd->dev, "%pf: %s error: %d\n",
                                __builtin_return_address(0), __func__, rc);
        return rc;
}

/**
 * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area
 * @ndd: dimm to initialize
 */
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
{
        struct nd_cmd_get_config_size *cmd = &ndd->nsarea;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
        struct nvdimm_bus_descriptor *nd_desc;
        int rc = validate_dimm(ndd);
        int cmd_rc = 0;

        if (rc)
                return rc;

        if (cmd->config_size)
                return 0; /* already valid */

        memset(cmd, 0, sizeof(*cmd));
        nd_desc = nvdimm_bus->nd_desc;
        rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
                        ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), &cmd_rc);
        if (rc < 0)
                return rc;
        return cmd_rc;
}
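
/**
 * nvdimm_init_config_data - cache a dimm's label configuration area
 * @ndd: dimm to read
 *
 * Allocate ndd->data and fill it with the dimm's config area, issuing
 * ND_CMD_GET_CONFIG_DATA in chunks bounded by PAGE_SIZE and
 * ->nsarea.max_xfer.  A no-op if the data was already read.
 */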
int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
        struct nd_cmd_get_config_data_hdr *cmd;
        struct nvdimm_bus_descriptor *nd_desc;
        int rc = validate_dimm(ndd);
        u32 max_cmd_size, config_size;
        size_t offset;

        if (rc)
                return rc;

        if (ndd->data)
                return 0;

        if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0
                        || ndd->nsarea.config_size < ND_LABEL_MIN_SIZE) {
                dev_dbg(ndd->dev, "failed to init config data area: (%d:%d)\n",
                                ndd->nsarea.max_xfer, ndd->nsarea.config_size);
                return -ENXIO;
        }

        ndd->data = kvmalloc(ndd->nsarea.config_size, GFP_KERNEL);
        if (!ndd->data)
                return -ENOMEM;

        max_cmd_size = min_t(u32, PAGE_SIZE, ndd->nsarea.max_xfer);
        cmd = kzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        nd_desc = nvdimm_bus->nd_desc;
        for (config_size = ndd->nsarea.config_size, offset = 0;
                        config_size; config_size -= cmd->in_length,
                        offset += cmd->in_length) {
                cmd->in_length = min(config_size, max_cmd_size);
                cmd->in_offset = offset;
                rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
                                ND_CMD_GET_CONFIG_DATA, cmd,
                                cmd->in_length + sizeof(*cmd), NULL);
                if (rc || cmd->status) {
                        rc = -ENXIO;
                        break;
                }
                memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length);
        }
        dev_dbg(ndd->dev, "%s: len: %zu rc: %d\n", __func__, offset, rc);
        kfree(cmd);

        return rc;
}
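
/**
 * nvdimm_set_config_data - write back part of a dimm's config area
 * @ndd: dimm to update
 * @offset: starting offset into the config area
 * @buf: data to write
 * @len: number of bytes to write
 *
 * Issue ND_CMD_SET_CONFIG_DATA in chunks bounded by PAGE_SIZE and
 * ->nsarea.max_xfer; writes that would run past config_size fail with
 * -ENXIO.
 */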
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
                void *buf, size_t len)
{
        int rc = validate_dimm(ndd);
        size_t max_cmd_size, buf_offset;
        struct nd_cmd_set_config_hdr *cmd;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
        struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

        if (rc)
                return rc;

        if (!ndd->data)
                return -ENXIO;

        if (offset + len > ndd->nsarea.config_size)
                return -ENXIO;

        max_cmd_size = min_t(u32, PAGE_SIZE, len);
        max_cmd_size = min_t(u32, max_cmd_size, ndd->nsarea.max_xfer);
        cmd = kzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        for (buf_offset = 0; len; len -= cmd->in_length,
                        buf_offset += cmd->in_length) {
                size_t cmd_size;
                u32 *status;

                cmd->in_offset = offset + buf_offset;
                cmd->in_length = min(max_cmd_size, len);
                memcpy(cmd->in_buf, buf + buf_offset, cmd->in_length);

                /* status is output in the last 4-bytes of the command buffer */
                cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);
                status = ((void *) cmd) + cmd_size - sizeof(u32);

                rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
                                ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, NULL);
                if (rc || *status) {
                        rc = rc ? rc : -ENXIO;
                        break;
                }
        }
        kfree(cmd);

        return rc;
}
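
/* helpers for bus providers to set dimm state flags */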
void nvdimm_set_aliasing(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        set_bit(NDD_ALIASING, &nvdimm->flags);
}

void nvdimm_set_locked(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        set_bit(NDD_LOCKED, &nvdimm->flags);
}

static void nvdimm_release(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        ida_simple_remove(&dimm_ida, nvdimm->id);
        kfree(nvdimm);
}

static struct device_type nvdimm_device_type = {
        .name = "nvdimm",
        .release = nvdimm_release,
};

bool is_nvdimm(struct device *dev)
{
        return dev->type == &nvdimm_device_type;
}

struct nvdimm *to_nvdimm(struct device *dev)
{
        struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev);

        WARN_ON(!is_nvdimm(dev));
        return nvdimm;
}
EXPORT_SYMBOL_GPL(to_nvdimm);

struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr)
{
        struct nd_region *nd_region = &ndbr->nd_region;
        struct nd_mapping *nd_mapping = &nd_region->mapping[0];

        return nd_mapping->nvdimm;
}
EXPORT_SYMBOL_GPL(nd_blk_region_to_dimm);
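
/* caller must hold the nvdimm_bus lock to stabilize the drvdata */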
struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
{
        struct nvdimm *nvdimm = nd_mapping->nvdimm;

        WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

        return dev_get_drvdata(&nvdimm->dev);
}
EXPORT_SYMBOL(to_ndd);
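
/*
 * Final kref_put() callback for a dimm's driver data: release all dpa
 * reservations, free the cached config data, and drop the dimm device
 * reference held by the drvdata.
 */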
void nvdimm_drvdata_release(struct kref *kref)
{
        struct nvdimm_drvdata *ndd = container_of(kref, typeof(*ndd), kref);
        struct device *dev = ndd->dev;
        struct resource *res, *_r;

        dev_dbg(dev, "%s\n", __func__);

        nvdimm_bus_lock(dev);
        for_each_dpa_resource_safe(ndd, res, _r)
                nvdimm_free_dpa(ndd, res);
        nvdimm_bus_unlock(dev);

        kvfree(ndd->data);
        kfree(ndd);
        put_device(dev);
}

void get_ndd(struct nvdimm_drvdata *ndd)
{
        kref_get(&ndd->kref);
}

void put_ndd(struct nvdimm_drvdata *ndd)
{
        if (ndd)
                kref_put(&ndd->kref, nvdimm_drvdata_release);
}

const char *nvdimm_name(struct nvdimm *nvdimm)
{
        return dev_name(&nvdimm->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_name);

struct kobject *nvdimm_kobj(struct nvdimm *nvdimm)
{
        return &nvdimm->dev.kobj;
}
EXPORT_SYMBOL_GPL(nvdimm_kobj);

unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm)
{
        return nvdimm->cmd_mask;
}
EXPORT_SYMBOL_GPL(nvdimm_cmd_mask);

void *nvdimm_provider_data(struct nvdimm *nvdimm)
{
        if (nvdimm)
                return nvdimm->provider_data;
        return NULL;
}
EXPORT_SYMBOL_GPL(nvdimm_provider_data);

static ssize_t commands_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);
        int cmd, len = 0;

        if (!nvdimm->cmd_mask)
                return sprintf(buf, "\n");

        for_each_set_bit(cmd, &nvdimm->cmd_mask, BITS_PER_LONG)
                len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd));
        len += sprintf(buf + len, "\n");
        return len;
}
static DEVICE_ATTR_RO(commands);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
                char *buf)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        /*
         * The state may be in the process of changing, userspace should
         * quiesce probing if it wants a static answer
         */
        nvdimm_bus_lock(dev);
        nvdimm_bus_unlock(dev);
        return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
                        ? "active" : "idle");
}
static DEVICE_ATTR_RO(state);
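
/*
 * Report the number of label slots still available, holding one slot
 * in reserve; if no slots remain the count is clamped to zero with a
 * warning.
 */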
static ssize_t available_slots_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
        ssize_t rc;
        u32 nfree;

        if (!ndd)
                return -ENXIO;

        nvdimm_bus_lock(dev);
        nfree = nd_label_nfree(ndd);
        if (nfree - 1 > nfree) {
                dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
                nfree = 0;
        } else
                nfree--;
        rc = sprintf(buf, "%d\n", nfree);
        nvdimm_bus_unlock(dev);
        return rc;
}
static DEVICE_ATTR_RO(available_slots);

static struct attribute *nvdimm_attributes[] = {
        &dev_attr_state.attr,
        &dev_attr_commands.attr,
        &dev_attr_available_slots.attr,
        NULL,
};

struct attribute_group nvdimm_attribute_group = {
        .attrs = nvdimm_attributes,
};
EXPORT_SYMBOL_GPL(nvdimm_attribute_group);
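
/**
 * nvdimm_create - register a new dimm device on @nvdimm_bus
 * @nvdimm_bus: parent bus for the new "nmem%d" device
 * @provider_data: opaque pointer returned by nvdimm_provider_data()
 * @groups: sysfs attribute groups for the new device
 * @flags: NDD_* state bits, e.g. NDD_ALIASING
 * @cmd_mask: ND_CMD_* commands the bus implements for this dimm
 * @num_flush: number of entries in @flush_wpq
 * @flush_wpq: write-pending-queue flush hint resources, if any
 *
 * Returns the new dimm, or NULL on allocation failure.
 */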
struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
                const struct attribute_group **groups, unsigned long flags,
                unsigned long cmd_mask, int num_flush,
                struct resource *flush_wpq)
{
        struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
        struct device *dev;

        if (!nvdimm)
                return NULL;

        nvdimm->id = ida_simple_get(&dimm_ida, 0, 0, GFP_KERNEL);
        if (nvdimm->id < 0) {
                kfree(nvdimm);
                return NULL;
        }
        nvdimm->provider_data = provider_data;
        nvdimm->flags = flags;
        nvdimm->cmd_mask = cmd_mask;
        nvdimm->num_flush = num_flush;
        nvdimm->flush_wpq = flush_wpq;
        atomic_set(&nvdimm->busy, 0);
        dev = &nvdimm->dev;
        dev_set_name(dev, "nmem%d", nvdimm->id);
        dev->parent = &nvdimm_bus->dev;
        dev->type = &nvdimm_device_type;
        dev->devt = MKDEV(nvdimm_major, nvdimm->id);
        dev->groups = groups;
        nd_device_register(dev);

        return nvdimm;
}
EXPORT_SYMBOL_GPL(nvdimm_create);
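
/*
 * device_for_each_child() callback: for each pmem region that shares a
 * dimm with info->nd_mapping, advance blk_start past aliased pmem
 * allocations (BLK capacity is only valid above all PMEM in an aliased
 * region) and debit, or trim, the free space accordingly.
 */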
int alias_dpa_busy(struct device *dev, void *data)
{
        resource_size_t map_end, blk_start, new;
        struct blk_alloc_info *info = data;
        struct nd_mapping *nd_mapping;
        struct nd_region *nd_region;
        struct nvdimm_drvdata *ndd;
        struct resource *res;
        int i;

        if (!is_nd_pmem(dev))
                return 0;

        nd_region = to_nd_region(dev);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                nd_mapping = &nd_region->mapping[i];
                if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
                        break;
        }

        if (i >= nd_region->ndr_mappings)
                return 0;

        ndd = to_ndd(nd_mapping);
        map_end = nd_mapping->start + nd_mapping->size - 1;
        blk_start = nd_mapping->start;

        /*
         * In the allocation case ->res is set to the free space that we
         * are looking to validate against PMEM aliasing collision rules
         * (i.e. BLK is allocated after all aliased PMEM).
         */
        if (info->res) {
                if (info->res->start >= nd_mapping->start
                                && info->res->start < map_end)
                        /* pass */;
                else
                        return 0;
        }

 retry:
        /*
         * Find the free dpa from the end of the last pmem allocation to
         * the end of the interleave-set mapping.
         */
        for_each_dpa_resource(ndd, res) {
                if (strncmp(res->name, "pmem", 4) != 0)
                        continue;
                if ((res->start >= blk_start && res->start < map_end)
                                || (res->end >= blk_start
                                        && res->end <= map_end)) {
                        new = max(blk_start, min(map_end + 1, res->end + 1));
                        if (new != blk_start) {
                                blk_start = new;
                                goto retry;
                        }
                }
        }

        /* update the free space range with the probed blk_start */
        if (info->res && blk_start > info->res->start) {
                info->res->start = max(info->res->start, blk_start);
                if (info->res->start > info->res->end)
                        info->res->end = info->res->start - 1;
                return 1;
        }

        info->available -= blk_start - nd_mapping->start;

        return 0;
}

/**
 * nd_blk_available_dpa - account the unused dpa of BLK region
 * @nd_region: container of dpa-resource-root + labels
 *
 * Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges, but
 * we arrange for them to never start at a lower dpa than the last
 * PMEM allocation in an aliased region.
 */
resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
        struct nd_mapping *nd_mapping = &nd_region->mapping[0];
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct blk_alloc_info info = {
                .nd_mapping = nd_mapping,
                .available = nd_mapping->size,
                .res = NULL,
        };
        struct resource *res;

        if (!ndd)
                return 0;

        device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);

        /* now account for busy blk allocations in unaliased dpa */
        for_each_dpa_resource(ndd, res) {
                if (strncmp(res->name, "blk", 3) != 0)
                        continue;
                info.available -= resource_size(res);
        }

        return info.available;
}

/**
 * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 * @overlap: calculate available space assuming this level of overlap
 *
 * Validate that a PMEM label, if present, aligns with the start of an
 * interleave set and truncate the available size at the lowest BLK
 * overlap point.
 *
 * The expectation is that this routine is called multiple times as it
 * probes for the largest BLK encroachment for any single member DIMM of
 * the interleave set.  Once that value is determined the PMEM-limit for
 * the set can be established.
 */
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
                struct nd_mapping *nd_mapping, resource_size_t *overlap)
{
        resource_size_t map_start, map_end, busy = 0, available, blk_start;
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct resource *res;
        const char *reason;

        if (!ndd)
                return 0;

        map_start = nd_mapping->start;
        map_end = map_start + nd_mapping->size - 1;
        blk_start = max(map_start, map_end + 1 - *overlap);
        for_each_dpa_resource(ndd, res) {
                if (res->start >= map_start && res->start < map_end) {
                        if (strncmp(res->name, "blk", 3) == 0)
                                blk_start = min(blk_start,
                                                max(map_start, res->start));
                        else if (res->end > map_end) {
                                reason = "misaligned to iset";
                                goto err;
                        } else
                                busy += resource_size(res);
                } else if (res->end >= map_start && res->end <= map_end) {
                        if (strncmp(res->name, "blk", 3) == 0) {
                                /*
                                 * If a BLK allocation overlaps the start of
                                 * PMEM the entire interleave set may now only
                                 * be used for BLK.
                                 */
                                blk_start = map_start;
                        } else
                                busy += resource_size(res);
                } else if (map_start > res->start && map_start < res->end) {
                        /* total eclipse of the mapping */
                        busy += nd_mapping->size;
                        blk_start = map_start;
                }
        }

        *overlap = map_end + 1 - blk_start;
        available = blk_start - map_start;
        if (busy < available)
                return available - busy;
        return 0;

 err:
        nd_dbg_dpa(nd_region, ndd, res, "%s\n", reason);
        return 0;
}
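
/* release a dpa reservation made by nvdimm_allocate_dpa() */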
void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res)
{
        WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
        kfree(res->name);
        __release_region(&ndd->dpa, res->start, resource_size(res));
}
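
/*
 * Reserve [start, start + n) in the dimm's dpa resource tree, named
 * after @label_id so per-label allocations can later be summed.
 */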
struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
                struct nd_label_id *label_id, resource_size_t start,
                resource_size_t n)
{
        char *name = kmemdup(label_id, sizeof(*label_id), GFP_KERNEL);
        struct resource *res;

        if (!name)
                return NULL;

        WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
        res = __request_region(&ndd->dpa, start, n, name, 0);
        if (!res)
                kfree(name);
        return res;
}

/**
 * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
 * @ndd: container of dpa-resource-root + labels
 * @label_id: dpa resource name of the form {pmem|blk}-<human readable uuid>
 */
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
                struct nd_label_id *label_id)
{
        resource_size_t allocated = 0;
        struct resource *res;

        for_each_dpa_resource(ndd, res)
                if (strcmp(res->name, label_id->id) == 0)
                        allocated += resource_size(res);

        return allocated;
}
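
/* device_for_each_child() callback: tally registered dimm devices */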
static int count_dimms(struct device *dev, void *c)
{
        int *count = c;

        if (is_nvdimm(dev))
                (*count)++;
        return 0;
}

int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
{
        int count = 0;

        /* Flush any possible dimm registration failures */
        nd_synchronize();

        device_for_each_child(&nvdimm_bus->dev, &count, count_dimms);
        dev_dbg(&nvdimm_bus->dev, "%s: count: %d\n", __func__, count);

        if (count != dimm_count)
                return -ENXIO;
        return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_check_dimm_count);

void __exit nvdimm_devs_exit(void)
{
        ida_destroy(&dimm_ida);
}