/*
 * Copyright (c) 2013-2016 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
2016-03-22 10:29:43 +03:00
# include <linux/memremap.h>
2015-07-31 00:57:47 +03:00
# include <linux/blkdev.h>
# include <linux/device.h>
# include <linux/genhd.h>
# include <linux/sizes.h>
# include <linux/slab.h>
# include <linux/fs.h>
# include <linux/mm.h>
# include "nd-core.h"
# include "pfn.h"
# include "nd.h"
static void nd_pfn_release ( struct device * dev )
{
struct nd_region * nd_region = to_nd_region ( dev - > parent ) ;
struct nd_pfn * nd_pfn = to_nd_pfn ( dev ) ;
dev_dbg ( dev , " %s \n " , __func__ ) ;
nd_detach_ndns ( & nd_pfn - > dev , & nd_pfn - > ndns ) ;
ida_simple_remove ( & nd_region - > pfn_ida , nd_pfn - > id ) ;
kfree ( nd_pfn - > uuid ) ;
kfree ( nd_pfn ) ;
}
/* Device type for pfn instances; identifies them for is_nd_pfn(). */
static struct device_type nd_pfn_device_type = {
	.name = "nd_pfn",
	.release = nd_pfn_release,
};
bool is_nd_pfn ( struct device * dev )
{
return dev ? dev - > type = = & nd_pfn_device_type : false ;
}
EXPORT_SYMBOL ( is_nd_pfn ) ;
/* Convert a struct device to its containing nd_pfn; warns on misuse. */
struct nd_pfn *to_nd_pfn(struct device *dev)
{
	struct nd_pfn *nd_pfn;

	nd_pfn = container_of(dev, struct nd_pfn, dev);
	WARN_ON(!is_nd_pfn(dev));
	return nd_pfn;
}
EXPORT_SYMBOL(to_nd_pfn);
2016-03-11 21:15:36 +03:00
static struct nd_pfn * to_nd_pfn_safe ( struct device * dev )
{
/*
* pfn device attributes are re - used by dax device instances , so we
* need to be careful to correct device - to - nd_pfn conversion .
*/
if ( is_nd_pfn ( dev ) )
return to_nd_pfn ( dev ) ;
if ( is_nd_dax ( dev ) ) {
struct nd_dax * nd_dax = to_nd_dax ( dev ) ;
return & nd_dax - > nd_pfn ;
}
WARN_ON ( 1 ) ;
return NULL ;
}
2015-07-31 00:57:47 +03:00
static ssize_t mode_show ( struct device * dev ,
struct device_attribute * attr , char * buf )
{
2016-03-11 21:15:36 +03:00
struct nd_pfn * nd_pfn = to_nd_pfn_safe ( dev ) ;
2015-07-31 00:57:47 +03:00
switch ( nd_pfn - > mode ) {
case PFN_MODE_RAM :
return sprintf ( buf , " ram \n " ) ;
case PFN_MODE_PMEM :
return sprintf ( buf , " pmem \n " ) ;
default :
return sprintf ( buf , " none \n " ) ;
}
}
/*
 * sysfs "mode" writer: accepts "ram", "pmem", or "none" (with or
 * without a trailing newline).  Rejected with -EBUSY while a driver is
 * bound, -EINVAL for any other string.
 */
static ssize_t mode_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
	ssize_t rc = 0;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	if (dev->driver)
		rc = -EBUSY;
	else {
		/*
		 * Fix: the previous strncmp(buf, "...", len - 1) scheme
		 * matched on any prefix (writing "p" selected pmem) and
		 * underflowed the compare length for a zero-length write.
		 * sysfs_streq() does an exact match while tolerating an
		 * optional trailing newline.
		 */
		if (sysfs_streq(buf, "pmem"))
			nd_pfn->mode = PFN_MODE_PMEM;
		else if (sysfs_streq(buf, "ram"))
			nd_pfn->mode = PFN_MODE_RAM;
		else if (sysfs_streq(buf, "none"))
			nd_pfn->mode = PFN_MODE_NONE;
		else
			rc = -EINVAL;
	}
	dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
			rc, buf, buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc ? rc : len;
}
static DEVICE_ATTR_RW(mode);
2015-12-11 01:45:23 +03:00
static ssize_t align_show ( struct device * dev ,
struct device_attribute * attr , char * buf )
{
2016-03-11 21:15:36 +03:00
struct nd_pfn * nd_pfn = to_nd_pfn_safe ( dev ) ;
2015-12-11 01:45:23 +03:00
return sprintf ( buf , " %lx \n " , nd_pfn - > align ) ;
}
static ssize_t __align_store ( struct nd_pfn * nd_pfn , const char * buf )
{
unsigned long val ;
int rc ;
rc = kstrtoul ( buf , 0 , & val ) ;
if ( rc )
return rc ;
if ( ! is_power_of_2 ( val ) | | val < PAGE_SIZE | | val > SZ_1G )
return - EINVAL ;
if ( nd_pfn - > dev . driver )
return - EBUSY ;
else
nd_pfn - > align = val ;
return 0 ;
}
/* sysfs "align" writer: delegates to __align_store() under the locks. */
static ssize_t align_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
	const char *sep;
	ssize_t rc;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	rc = __align_store(nd_pfn, buf);
	sep = buf[len - 1] == '\n' ? "" : "\n";
	dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__, rc, buf, sep);
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc ? rc : len;
}
static DEVICE_ATTR_RW(align);
2015-07-31 00:57:47 +03:00
static ssize_t uuid_show ( struct device * dev ,
struct device_attribute * attr , char * buf )
{
2016-03-11 21:15:36 +03:00
struct nd_pfn * nd_pfn = to_nd_pfn_safe ( dev ) ;
2015-07-31 00:57:47 +03:00
if ( nd_pfn - > uuid )
return sprintf ( buf , " %pUb \n " , nd_pfn - > uuid ) ;
return sprintf ( buf , " \n " ) ;
}
/* sysfs "uuid" writer: parses/validates via nd_uuid_store(). */
static ssize_t uuid_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
	const char *sep;
	ssize_t rc;

	device_lock(dev);
	rc = nd_uuid_store(dev, &nd_pfn->uuid, buf, len);
	sep = buf[len - 1] == '\n' ? "" : "\n";
	dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__, rc, buf, sep);
	device_unlock(dev);

	return rc ? rc : len;
}
static DEVICE_ATTR_RW(uuid);
static ssize_t namespace_show ( struct device * dev ,
struct device_attribute * attr , char * buf )
{
2016-03-11 21:15:36 +03:00
struct nd_pfn * nd_pfn = to_nd_pfn_safe ( dev ) ;
2015-07-31 00:57:47 +03:00
ssize_t rc ;
nvdimm_bus_lock ( dev ) ;
rc = sprintf ( buf , " %s \n " , nd_pfn - > ndns
? dev_name ( & nd_pfn - > ndns - > dev ) : " " ) ;
nvdimm_bus_unlock ( dev ) ;
return rc ;
}
/* sysfs "namespace" writer: (re)assign the backing namespace. */
static ssize_t namespace_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
	const char *sep;
	ssize_t rc;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len);
	sep = buf[len - 1] == '\n' ? "" : "\n";
	dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__, rc, buf, sep);
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RW(namespace);
2016-03-03 20:46:04 +03:00
static ssize_t resource_show ( struct device * dev ,
struct device_attribute * attr , char * buf )
{
2016-03-11 21:15:36 +03:00
struct nd_pfn * nd_pfn = to_nd_pfn_safe ( dev ) ;
2016-03-03 20:46:04 +03:00
ssize_t rc ;
device_lock ( dev ) ;
if ( dev - > driver ) {
struct nd_pfn_sb * pfn_sb = nd_pfn - > pfn_sb ;
u64 offset = __le64_to_cpu ( pfn_sb - > dataoff ) ;
struct nd_namespace_common * ndns = nd_pfn - > ndns ;
u32 start_pad = __le32_to_cpu ( pfn_sb - > start_pad ) ;
struct nd_namespace_io * nsio = to_nd_namespace_io ( & ndns - > dev ) ;
rc = sprintf ( buf , " %#llx \n " , ( unsigned long long ) nsio - > res . start
+ start_pad + offset ) ;
} else {
/* no address to convey if the pfn instance is disabled */
rc = - ENXIO ;
}
device_unlock ( dev ) ;
return rc ;
}
static DEVICE_ATTR_RO ( resource ) ;
static ssize_t size_show ( struct device * dev ,
struct device_attribute * attr , char * buf )
{
2016-03-11 21:15:36 +03:00
struct nd_pfn * nd_pfn = to_nd_pfn_safe ( dev ) ;
2016-03-03 20:46:04 +03:00
ssize_t rc ;
device_lock ( dev ) ;
if ( dev - > driver ) {
struct nd_pfn_sb * pfn_sb = nd_pfn - > pfn_sb ;
u64 offset = __le64_to_cpu ( pfn_sb - > dataoff ) ;
struct nd_namespace_common * ndns = nd_pfn - > ndns ;
u32 start_pad = __le32_to_cpu ( pfn_sb - > start_pad ) ;
u32 end_trunc = __le32_to_cpu ( pfn_sb - > end_trunc ) ;
struct nd_namespace_io * nsio = to_nd_namespace_io ( & ndns - > dev ) ;
rc = sprintf ( buf , " %llu \n " , ( unsigned long long )
resource_size ( & nsio - > res ) - start_pad
- end_trunc - offset ) ;
} else {
/* no size to convey if the pfn instance is disabled */
rc = - ENXIO ;
}
device_unlock ( dev ) ;
return rc ;
}
static DEVICE_ATTR_RO ( size ) ;
/*
 * sysfs attributes shared by pfn instances (and re-used by dax
 * instances via to_nd_pfn_safe()).
 */
static struct attribute *nd_pfn_attributes[] = {
	&dev_attr_mode.attr,
	&dev_attr_namespace.attr,
	&dev_attr_uuid.attr,
	&dev_attr_align.attr,
	&dev_attr_resource.attr,
	&dev_attr_size.attr,
	NULL,
};

/* Non-static: presumably also referenced by the dax code — see nd.h */
struct attribute_group nd_pfn_attribute_group = {
	.attrs = nd_pfn_attributes,
};

static const struct attribute_group *nd_pfn_attribute_groups[] = {
	&nd_pfn_attribute_group,
	&nd_device_attribute_group,
	&nd_numa_attribute_group,
	NULL,
};
2016-03-11 21:15:36 +03:00
struct device * nd_pfn_devinit ( struct nd_pfn * nd_pfn ,
2015-07-31 00:57:47 +03:00
struct nd_namespace_common * ndns )
{
2016-03-11 21:15:36 +03:00
struct device * dev = & nd_pfn - > dev ;
2015-07-31 00:57:47 +03:00
2016-03-11 21:15:36 +03:00
if ( ! nd_pfn )
return NULL ;
nd_pfn - > mode = PFN_MODE_NONE ;
nd_pfn - > align = HPAGE_SIZE ;
dev = & nd_pfn - > dev ;
device_initialize ( & nd_pfn - > dev ) ;
if ( ndns & & ! __nd_attach_ndns ( & nd_pfn - > dev , ndns , & nd_pfn - > ndns ) ) {
dev_dbg ( & ndns - > dev , " %s failed, already claimed by %s \n " ,
__func__ , dev_name ( ndns - > claim ) ) ;
put_device ( dev ) ;
2015-07-31 00:57:47 +03:00
return NULL ;
2016-03-11 21:15:36 +03:00
}
return dev ;
}
static struct nd_pfn * nd_pfn_alloc ( struct nd_region * nd_region )
{
struct nd_pfn * nd_pfn ;
struct device * dev ;
2015-07-31 00:57:47 +03:00
nd_pfn = kzalloc ( sizeof ( * nd_pfn ) , GFP_KERNEL ) ;
if ( ! nd_pfn )
return NULL ;
nd_pfn - > id = ida_simple_get ( & nd_region - > pfn_ida , 0 , 0 , GFP_KERNEL ) ;
if ( nd_pfn - > id < 0 ) {
kfree ( nd_pfn ) ;
return NULL ;
}
dev = & nd_pfn - > dev ;
dev_set_name ( dev , " pfn%d.%d " , nd_region - > id , nd_pfn - > id ) ;
dev - > groups = nd_pfn_attribute_groups ;
2016-03-11 21:15:36 +03:00
dev - > type = & nd_pfn_device_type ;
dev - > parent = & nd_region - > dev ;
return nd_pfn ;
2015-07-31 00:57:47 +03:00
}
struct device * nd_pfn_create ( struct nd_region * nd_region )
{
2016-03-11 21:15:36 +03:00
struct nd_pfn * nd_pfn ;
struct device * dev ;
if ( ! is_nd_pmem ( & nd_region - > dev ) )
return NULL ;
nd_pfn = nd_pfn_alloc ( nd_region ) ;
dev = nd_pfn_devinit ( nd_pfn , NULL ) ;
2015-07-31 00:57:47 +03:00
2016-03-11 21:15:36 +03:00
__nd_device_register ( dev ) ;
2015-07-31 00:57:47 +03:00
return dev ;
}
2016-05-19 00:50:12 +03:00
int nd_pfn_validate ( struct nd_pfn * nd_pfn , const char * sig )
2015-07-31 00:57:47 +03:00
{
u64 checksum , offset ;
2015-12-13 03:09:14 +03:00
struct nd_namespace_io * nsio ;
struct nd_pfn_sb * pfn_sb = nd_pfn - > pfn_sb ;
struct nd_namespace_common * ndns = nd_pfn - > ndns ;
const u8 * parent_uuid = nd_dev_to_uuid ( & ndns - > dev ) ;
2015-07-31 00:57:47 +03:00
if ( ! pfn_sb | | ! ndns )
return - ENODEV ;
if ( ! is_nd_pmem ( nd_pfn - > dev . parent ) )
return - ENODEV ;
if ( nvdimm_read_bytes ( ndns , SZ_4K , pfn_sb , sizeof ( * pfn_sb ) ) )
return - ENXIO ;
2016-05-19 00:50:12 +03:00
if ( memcmp ( pfn_sb - > signature , sig , PFN_SIG_LEN ) ! = 0 )
2015-07-31 00:57:47 +03:00
return - ENODEV ;
checksum = le64_to_cpu ( pfn_sb - > checksum ) ;
pfn_sb - > checksum = 0 ;
if ( checksum ! = nd_sb_checksum ( ( struct nd_gen_sb * ) pfn_sb ) )
return - ENODEV ;
pfn_sb - > checksum = cpu_to_le64 ( checksum ) ;
2015-12-13 03:09:14 +03:00
if ( memcmp ( pfn_sb - > parent_uuid , parent_uuid , 16 ) ! = 0 )
return - ENODEV ;
2016-03-03 20:38:00 +03:00
if ( __le16_to_cpu ( pfn_sb - > version_minor ) < 1 ) {
pfn_sb - > start_pad = 0 ;
pfn_sb - > end_trunc = 0 ;
}
2016-04-01 01:41:18 +03:00
if ( __le16_to_cpu ( pfn_sb - > version_minor ) < 2 )
pfn_sb - > align = 0 ;
2015-07-31 00:57:47 +03:00
switch ( le32_to_cpu ( pfn_sb - > mode ) ) {
case PFN_MODE_RAM :
case PFN_MODE_PMEM :
2016-01-30 04:42:51 +03:00
break ;
2015-07-31 00:57:47 +03:00
default :
return - ENXIO ;
}
if ( ! nd_pfn - > uuid ) {
/* from probe we allocate */
nd_pfn - > uuid = kmemdup ( pfn_sb - > uuid , 16 , GFP_KERNEL ) ;
if ( ! nd_pfn - > uuid )
return - ENOMEM ;
} else {
/* from init we validate */
if ( memcmp ( nd_pfn - > uuid , pfn_sb - > uuid , 16 ) ! = 0 )
2016-04-08 05:59:27 +03:00
return - ENODEV ;
2015-07-31 00:57:47 +03:00
}
2015-12-11 01:45:23 +03:00
if ( nd_pfn - > align > nvdimm_namespace_capacity ( ndns ) ) {
dev_err ( & nd_pfn - > dev , " alignment: %lx exceeds capacity %llx \n " ,
nd_pfn - > align , nvdimm_namespace_capacity ( ndns ) ) ;
return - EINVAL ;
}
2015-07-31 00:57:47 +03:00
/*
* These warnings are verbose because they can only trigger in
* the case where the physical address alignment of the
* namespace has changed since the pfn superblock was
* established .
*/
offset = le64_to_cpu ( pfn_sb - > dataoff ) ;
nsio = to_nd_namespace_io ( & ndns - > dev ) ;
2015-12-11 02:14:20 +03:00
if ( offset > = resource_size ( & nsio - > res ) ) {
2015-07-31 00:57:47 +03:00
dev_err ( & nd_pfn - > dev , " pfn array size exceeds capacity of %s \n " ,
dev_name ( & ndns - > dev ) ) ;
return - EBUSY ;
}
2016-04-01 01:41:18 +03:00
nd_pfn - > align = le32_to_cpu ( pfn_sb - > align ) ;
2015-12-11 01:45:23 +03:00
if ( ! is_power_of_2 ( offset ) | | offset < PAGE_SIZE ) {
dev_err ( & nd_pfn - > dev , " bad offset: %#llx dax disabled \n " ,
offset ) ;
return - ENXIO ;
}
2015-07-31 00:57:47 +03:00
return 0 ;
}
2015-08-01 09:16:37 +03:00
EXPORT_SYMBOL ( nd_pfn_validate ) ;
2015-07-31 00:57:47 +03:00
2016-03-22 10:22:16 +03:00
int nd_pfn_probe ( struct device * dev , struct nd_namespace_common * ndns )
2015-07-31 00:57:47 +03:00
{
int rc ;
struct nd_pfn * nd_pfn ;
2016-03-18 04:16:15 +03:00
struct device * pfn_dev ;
2015-07-31 00:57:47 +03:00
struct nd_pfn_sb * pfn_sb ;
struct nd_region * nd_region = to_nd_region ( ndns - > dev . parent ) ;
if ( ndns - > force_raw )
return - ENODEV ;
nvdimm_bus_lock ( & ndns - > dev ) ;
2016-03-11 21:15:36 +03:00
nd_pfn = nd_pfn_alloc ( nd_region ) ;
pfn_dev = nd_pfn_devinit ( nd_pfn , ndns ) ;
2015-07-31 00:57:47 +03:00
nvdimm_bus_unlock ( & ndns - > dev ) ;
2016-03-18 04:16:15 +03:00
if ( ! pfn_dev )
2015-07-31 00:57:47 +03:00
return - ENOMEM ;
2016-03-18 04:16:15 +03:00
pfn_sb = devm_kzalloc ( dev , sizeof ( * pfn_sb ) , GFP_KERNEL ) ;
nd_pfn = to_nd_pfn ( pfn_dev ) ;
2015-07-31 00:57:47 +03:00
nd_pfn - > pfn_sb = pfn_sb ;
2016-05-19 00:50:12 +03:00
rc = nd_pfn_validate ( nd_pfn , PFN_SIG ) ;
2016-03-18 04:16:15 +03:00
dev_dbg ( dev , " %s: pfn: %s \n " , __func__ ,
rc = = 0 ? dev_name ( pfn_dev ) : " <none> " ) ;
2015-07-31 00:57:47 +03:00
if ( rc < 0 ) {
2016-03-18 04:16:15 +03:00
__nd_detach_ndns ( pfn_dev , & nd_pfn - > ndns ) ;
put_device ( pfn_dev ) ;
2015-07-31 00:57:47 +03:00
} else
2016-03-18 04:16:15 +03:00
__nd_device_register ( pfn_dev ) ;
2015-07-31 00:57:47 +03:00
return rc ;
}
EXPORT_SYMBOL ( nd_pfn_probe ) ;
2016-03-22 10:29:43 +03:00
/*
* We hotplug memory at section granularity , pad the reserved area from
* the previous section base to the namespace base address .
*/
static unsigned long init_altmap_base ( resource_size_t base )
{
unsigned long base_pfn = PHYS_PFN ( base ) ;
return PFN_SECTION_ALIGN_DOWN ( base_pfn ) ;
}
/* Reserve: the 8K info-block area plus the section-alignment pad. */
static unsigned long init_altmap_reserve(resource_size_t base)
{
	unsigned long base_pfn = PHYS_PFN(base);
	unsigned long pad = base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);

	return PHYS_PFN(SZ_8K) + pad;
}
/*
 * Translate a validated pfn superblock into the effective resource
 * range and, for PFN_MODE_PMEM, a vmem_altmap describing the on-media
 * page-map reserve.
 *
 * @res is overwritten with the namespace resource trimmed by
 * start_pad/end_trunc.  Returns @altmap (filled in) for PFN_MODE_PMEM,
 * NULL for PFN_MODE_RAM, or ERR_PTR(-EINVAL/-ENXIO) for a bad offset
 * or unknown mode.
 */
static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
		struct resource *res, struct vmem_altmap *altmap)
{
	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
	u64 offset = le64_to_cpu(pfn_sb->dataoff);
	u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
	u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	resource_size_t base = nsio->res.start + start_pad;
	struct vmem_altmap __altmap = {
		.base_pfn = init_altmap_base(base),
		.reserve = init_altmap_reserve(base),
	};

	/* trim the usable range by the padding recorded in the superblock */
	memcpy(res, &nsio->res, sizeof(*res));
	res->start += start_pad;
	res->end -= end_trunc;

	nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode);
	if (nd_pfn->mode == PFN_MODE_RAM) {
		/* page map is in RAM; offset must still cover the 8K reserve */
		if (offset < SZ_8K)
			return ERR_PTR(-EINVAL);
		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
		altmap = NULL;
	} else if (nd_pfn->mode == PFN_MODE_PMEM) {
		/* recompute npfns from the trimmed range and warn on shrink */
		nd_pfn->npfns = (resource_size(res) - offset) / PAGE_SIZE;
		if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
			dev_info(&nd_pfn->dev,
					"number of pfns truncated from %lld to %ld\n",
					le64_to_cpu(nd_pfn->pfn_sb->npfns),
					nd_pfn->npfns);
		memcpy(altmap, &__altmap, sizeof(*altmap));
		/* pages available for altmap allocation beyond the 8K reserve */
		altmap->free = PHYS_PFN(offset - SZ_8K);
		altmap->alloc = 0;
	} else
		return ERR_PTR(-ENXIO);

	return altmap;
}
/*
 * Validate an existing pfn/dax info block, or write a fresh one when
 * none exists.  Returns 0 on success, negative errno on failure.  The
 * ordering of the section-collision and layout math below is delicate;
 * only comments are added here.
 */
static int nd_pfn_init(struct nd_pfn *nd_pfn)
{
	/* dax instances reserve extra label space after the info block */
	u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? SZ_128K : 0;
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	u32 start_pad = 0, end_trunc = 0;
	resource_size_t start, size;
	struct nd_namespace_io *nsio;
	struct nd_region *nd_region;
	struct nd_pfn_sb *pfn_sb;
	unsigned long npfns;
	phys_addr_t offset;
	const char *sig;
	u64 checksum;
	int rc;

	pfn_sb = devm_kzalloc(&nd_pfn->dev, sizeof(*pfn_sb), GFP_KERNEL);
	if (!pfn_sb)
		return -ENOMEM;

	nd_pfn->pfn_sb = pfn_sb;
	/* pfn and dax share the info-block format but not the signature */
	if (is_nd_dax(&nd_pfn->dev))
		sig = DAX_SIG;
	else
		sig = PFN_SIG;
	rc = nd_pfn_validate(nd_pfn, sig);
	/* only -ENODEV ("no info block") falls through to initialization */
	if (rc != -ENODEV)
		return rc;

	/* no info block, do init */;
	nd_region = to_nd_region(nd_pfn->dev.parent);
	if (nd_region->ro) {
		dev_info(&nd_pfn->dev,
				"%s is read-only, unable to init metadata\n",
				dev_name(&nd_region->dev));
		return -ENXIO;
	}

	memset(pfn_sb, 0, sizeof(*pfn_sb));

	/*
	 * Check if pmem collides with 'System RAM' when section aligned and
	 * trim it accordingly
	 */
	nsio = to_nd_namespace_io(&ndns->dev);
	/* pad the front if rounding the start down collides with RAM */
	start = PHYS_SECTION_ALIGN_DOWN(nsio->res.start);
	size = resource_size(&nsio->res);
	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
				IORES_DESC_NONE) == REGION_MIXED) {
		start = nsio->res.start;
		start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
	}

	/* truncate the tail if rounding the end up collides with RAM */
	start = nsio->res.start;
	size = PHYS_SECTION_ALIGN_UP(start + size) - start;
	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
				IORES_DESC_NONE) == REGION_MIXED) {
		size = resource_size(&nsio->res);
		end_trunc = start + size - PHYS_SECTION_ALIGN_DOWN(start + size);
	}

	if (start_pad + end_trunc)
		dev_info(&nd_pfn->dev, "%s section collision, truncate %d bytes\n",
				dev_name(&ndns->dev), start_pad + end_trunc);

	/*
	 * Note, we use 64 here for the standard size of struct page,
	 * debugging options may cause it to be larger in which case the
	 * implementation will limit the pfns advertised through
	 * ->direct_access() to those that are included in the memmap.
	 */
	start += start_pad;
	size = resource_size(&nsio->res);
	npfns = (size - start_pad - end_trunc - SZ_8K) / SZ_4K;
	if (nd_pfn->mode == PFN_MODE_PMEM)
		/* PMEM: the memmap itself lives on media before the data */
		offset = ALIGN(start + SZ_8K + 64 * npfns + dax_label_reserve,
				nd_pfn->align) - start;
	else if (nd_pfn->mode == PFN_MODE_RAM)
		/* RAM: only the 8K info block (plus dax labels) precedes data */
		offset = ALIGN(start + SZ_8K + dax_label_reserve,
				nd_pfn->align) - start;
	else
		return -ENXIO;

	if (offset + start_pad + end_trunc >= size) {
		dev_err(&nd_pfn->dev, "%s unable to satisfy requested alignment\n",
				dev_name(&ndns->dev));
		return -ENXIO;
	}

	/* recompute npfns for the space that remains after the metadata */
	npfns = (size - offset - start_pad - end_trunc) / SZ_4K;
	pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
	pfn_sb->dataoff = cpu_to_le64(offset);
	pfn_sb->npfns = cpu_to_le64(npfns);
	memcpy(pfn_sb->signature, sig, PFN_SIG_LEN);
	memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
	memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
	pfn_sb->version_major = cpu_to_le16(1);
	pfn_sb->version_minor = cpu_to_le16(2);
	pfn_sb->start_pad = cpu_to_le32(start_pad);
	pfn_sb->end_trunc = cpu_to_le32(end_trunc);
	pfn_sb->align = cpu_to_le32(nd_pfn->align);
	/* checksum computed over the block with the checksum field zeroed */
	checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
	pfn_sb->checksum = cpu_to_le64(checksum);

	return nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb));
}
/*
* Determine the effective resource range and vmem_altmap from an nd_pfn
* instance .
*/
struct vmem_altmap * nvdimm_setup_pfn ( struct nd_pfn * nd_pfn ,
struct resource * res , struct vmem_altmap * altmap )
{
int rc ;
if ( ! nd_pfn - > uuid | | ! nd_pfn - > ndns )
return ERR_PTR ( - ENODEV ) ;
rc = nd_pfn_init ( nd_pfn ) ;
if ( rc )
return ERR_PTR ( rc ) ;
/* we need a valid pfn_sb before we can init a vmem_altmap */
return __nvdimm_setup_pfn ( nd_pfn , res , altmap ) ;
}
EXPORT_SYMBOL_GPL ( nvdimm_setup_pfn ) ;