// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013-2016 Intel Corporation. All rights reserved.
 */
#include <linux/device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "pfn.h"
#include "nd.h"

static void nd_dax_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_dax *nd_dax = to_nd_dax(dev);
	struct nd_pfn *nd_pfn = &nd_dax->nd_pfn;

	dev_dbg(dev, "trace\n");
	nd_detach_ndns(dev, &nd_pfn->ndns);
	ida_simple_remove(&nd_region->dax_ida, nd_pfn->id);
	kfree(nd_pfn->uuid);
	kfree(nd_dax);
}

static struct device_type nd_dax_device_type = {
	.name = "nd_dax",
	.release = nd_dax_release,
};

bool is_nd_dax(struct device *dev)
{
	return dev ? dev->type == &nd_dax_device_type : false;
}
EXPORT_SYMBOL(is_nd_dax);

struct nd_dax *to_nd_dax(struct device *dev)
{
	struct nd_dax *nd_dax = container_of(dev, struct nd_dax, nd_pfn.dev);

	WARN_ON(!is_nd_dax(dev));
	return nd_dax;
}
EXPORT_SYMBOL(to_nd_dax);

static const struct attribute_group *nd_dax_attribute_groups[] = {
	&nd_pfn_attribute_group,
	&nd_device_attribute_group,
	&nd_numa_attribute_group,
	NULL,
};
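
/*
 * Allocate an nd_dax instance, claim an id from the region's dax_ida,
 * and set up the embedded nd_pfn device as "dax<region-id>.<id>".
 */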
static struct nd_dax *nd_dax_alloc(struct nd_region *nd_region)
{
	struct nd_pfn *nd_pfn;
	struct nd_dax *nd_dax;
	struct device *dev;

	nd_dax = kzalloc(sizeof(*nd_dax), GFP_KERNEL);
	if (!nd_dax)
		return NULL;

	nd_pfn = &nd_dax->nd_pfn;
	nd_pfn->id = ida_simple_get(&nd_region->dax_ida, 0, 0, GFP_KERNEL);
	if (nd_pfn->id < 0) {
		kfree(nd_dax);
		return NULL;
	}

	dev = &nd_pfn->dev;
	dev_set_name(dev, "dax%d.%d", nd_region->id, nd_pfn->id);
	dev->groups = nd_dax_attribute_groups;
	dev->type = &nd_dax_device_type;
	dev->parent = &nd_region->dev;

	return nd_dax;
}
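
/*
 * Create the dax seed device for @nd_region. Only regions that pass
 * is_memory() can host device-dax; others get no seed device.
 */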
struct device *nd_dax_create(struct nd_region *nd_region)
{
	struct device *dev = NULL;
	struct nd_dax *nd_dax;

	if (!is_memory(&nd_region->dev))
		return NULL;

	nd_dax = nd_dax_alloc(nd_region);
	if (nd_dax)
		dev = nd_pfn_devinit(&nd_dax->nd_pfn, NULL);
	__nd_device_register(dev);
	return dev;
}
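
/*
 * Probe @ndns for an existing DAX info block: allocate a transient dax
 * device, validate the info block against DAX_SIG, and register the
 * device on success or tear it down on failure.
 */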
int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns)
{
	int rc;
	struct nd_dax *nd_dax;
	struct device *dax_dev;
	struct nd_pfn *nd_pfn;
	struct nd_pfn_sb *pfn_sb;
	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);

	if (ndns->force_raw)
		return -ENODEV;

	switch (ndns->claim_class) {
	case NVDIMM_CCLASS_NONE:
	case NVDIMM_CCLASS_DAX:
		break;
	default:
		return -ENODEV;
	}

	nvdimm_bus_lock(&ndns->dev);
	nd_dax = nd_dax_alloc(nd_region);
	nd_pfn = &nd_dax->nd_pfn;
	dax_dev = nd_pfn_devinit(nd_pfn, ndns);
	nvdimm_bus_unlock(&ndns->dev);
	if (!dax_dev)
		return -ENOMEM;
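
	/*
	 * Read back and validate the DAX info block stored in the namespace.
	 * nd_pfn_validate() rejects a NULL pfn_sb, so a failed allocation
	 * simply takes the error path below.
	 */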
	pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
	nd_pfn->pfn_sb = pfn_sb;
	rc = nd_pfn_validate(nd_pfn, DAX_SIG);
	dev_dbg(dev, "dax: %s\n", rc == 0 ? dev_name(dax_dev) : "<none>");
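
	/*
	 * A valid info block means the namespace already holds a DAX
	 * configuration: register the dax device. Otherwise drop the
	 * namespace claim and the reference taken by nd_pfn_devinit().
	 */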
	if (rc < 0) {
		nd_detach_ndns(dax_dev, &nd_pfn->ndns);
		put_device(dax_dev);
	} else
		__nd_device_register(dax_dev);

	return rc;
}
EXPORT_SYMBOL(nd_dax_probe);