// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/libnvdimm.h>
#include <linux/badblocks.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/ndctl.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/io.h>
#include "nd-core.h"
#include "nd.h"

LIST_HEAD(nvdimm_bus_list);
DEFINE_MUTEX(nvdimm_bus_list_mutex);

void nvdimm_bus_lock(struct device *dev)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

        if (!nvdimm_bus)
                return;
        mutex_lock(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(nvdimm_bus_lock);

void nvdimm_bus_unlock(struct device *dev)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

        if (!nvdimm_bus)
                return;
        mutex_unlock(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(nvdimm_bus_unlock);

bool is_nvdimm_bus_locked(struct device *dev)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

        if (!nvdimm_bus)
                return false;
        return mutex_is_locked(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(is_nvdimm_bus_locked);
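
/*
 * Illustrative locking pattern (a sketch, not from this file): callers that
 * reconfigure bus-visible state bracket the update with these helpers so
 * that scans of the bus see a consistent view. The attribute routine below
 * is hypothetical.
 *
 *      static ssize_t example_store(struct device *dev,
 *                      struct device_attribute *attr, const char *buf,
 *                      size_t len)
 *      {
 *              nvdimm_bus_lock(dev);
 *              // ...update state that bus scans may observe...
 *              nvdimm_bus_unlock(dev);
 *              return len;
 *      }
 */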

struct nvdimm_map {
        struct nvdimm_bus *nvdimm_bus;
        struct list_head list;
        resource_size_t offset;
        unsigned long flags;
        size_t size;
        union {
                void *mem;
                void __iomem *iomem;
        };
        struct kref kref;
};

static struct nvdimm_map *find_nvdimm_map(struct device *dev,
                resource_size_t offset)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
        struct nvdimm_map *nvdimm_map;

        list_for_each_entry(nvdimm_map, &nvdimm_bus->mapping_list, list)
                if (nvdimm_map->offset == offset)
                        return nvdimm_map;

        return NULL;
}

static struct nvdimm_map *alloc_nvdimm_map(struct device *dev,
                resource_size_t offset, size_t size, unsigned long flags)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
        struct nvdimm_map *nvdimm_map;

        nvdimm_map = kzalloc(sizeof(*nvdimm_map), GFP_KERNEL);
        if (!nvdimm_map)
                return NULL;

        INIT_LIST_HEAD(&nvdimm_map->list);
        nvdimm_map->nvdimm_bus = nvdimm_bus;
        nvdimm_map->offset = offset;
        nvdimm_map->flags = flags;
        nvdimm_map->size = size;
        kref_init(&nvdimm_map->kref);

        if (!request_mem_region(offset, size, dev_name(&nvdimm_bus->dev))) {
                dev_err(&nvdimm_bus->dev, "failed to request %pa + %zd for %s\n",
                                &offset, size, dev_name(dev));
                goto err_request_region;
        }

        if (flags)
                nvdimm_map->mem = memremap(offset, size, flags);
        else
                nvdimm_map->iomem = ioremap(offset, size);

        if (!nvdimm_map->mem)
                goto err_map;

        dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev), "%s: bus unlocked!",
                        __func__);
        list_add(&nvdimm_map->list, &nvdimm_bus->mapping_list);

        return nvdimm_map;

 err_map:
        release_mem_region(offset, size);
 err_request_region:
        kfree(nvdimm_map);
        return NULL;
}

static void nvdimm_map_release(struct kref *kref)
{
        struct nvdimm_bus *nvdimm_bus;
        struct nvdimm_map *nvdimm_map;

        nvdimm_map = container_of(kref, struct nvdimm_map, kref);
        nvdimm_bus = nvdimm_map->nvdimm_bus;

        dev_dbg(&nvdimm_bus->dev, "%pa\n", &nvdimm_map->offset);
        list_del(&nvdimm_map->list);
        if (nvdimm_map->flags)
                memunmap(nvdimm_map->mem);
        else
                iounmap(nvdimm_map->iomem);
        release_mem_region(nvdimm_map->offset, nvdimm_map->size);
        kfree(nvdimm_map);
}

static void nvdimm_map_put(void *data)
{
        struct nvdimm_map *nvdimm_map = data;
        struct nvdimm_bus *nvdimm_bus = nvdimm_map->nvdimm_bus;

        nvdimm_bus_lock(&nvdimm_bus->dev);
        kref_put(&nvdimm_map->kref, nvdimm_map_release);
        nvdimm_bus_unlock(&nvdimm_bus->dev);
}

/**
 * devm_nvdimm_memremap - map a resource that is shared across regions
 * @dev: device that will own a reference to the shared mapping
 * @offset: physical base address of the mapping
 * @size: mapping size
 * @flags: memremap flags, or, if zero, perform an ioremap instead
 */
void *devm_nvdimm_memremap(struct device *dev, resource_size_t offset,
                size_t size, unsigned long flags)
{
        struct nvdimm_map *nvdimm_map;

        nvdimm_bus_lock(dev);
        nvdimm_map = find_nvdimm_map(dev, offset);
        if (!nvdimm_map)
                nvdimm_map = alloc_nvdimm_map(dev, offset, size, flags);
        else
                kref_get(&nvdimm_map->kref);
        nvdimm_bus_unlock(dev);

        if (!nvdimm_map)
                return NULL;

        if (devm_add_action_or_reset(dev, nvdimm_map_put, nvdimm_map))
                return NULL;

        return nvdimm_map->mem;
}
EXPORT_SYMBOL_GPL(devm_nvdimm_memremap);
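
/*
 * Usage sketch (illustrative; the 'res' resource variable below is
 * hypothetical): a region driver maps a shared control area and lets devm
 * drop the reference automatically on driver detach:
 *
 *      void *base = devm_nvdimm_memremap(dev, res->start,
 *                      resource_size(res), MEMREMAP_WB);
 *      if (!base)
 *              return -ENXIO;
 */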

u64 nd_fletcher64(void *addr, size_t len, bool le)
{
        u32 *buf = addr;
        u32 lo32 = 0;
        u64 hi32 = 0;
        int i;

        for (i = 0; i < len / sizeof(u32); i++) {
                lo32 += le ? le32_to_cpu((__le32) buf[i]) : buf[i];
                hi32 += lo32;
        }

        return hi32 << 32 | lo32;
}
EXPORT_SYMBOL_GPL(nd_fletcher64);
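
/*
 * Checksum sketch (illustrative; the label layout below is hypothetical):
 * callers compute the Fletcher-64 sum with the checksum field zeroed, then
 * compare against the stored value on read:
 *
 *      u64 sum, saved = label->checksum;
 *
 *      label->checksum = 0;
 *      sum = nd_fletcher64(label, sizeof(*label), true);
 *      label->checksum = saved;
 *      if (sum != saved)
 *              return false;   // stale or corrupt label
 */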

struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus)
{
        /* struct nvdimm_bus definition is private to libnvdimm */
        return nvdimm_bus->nd_desc;
}
EXPORT_SYMBOL_GPL(to_nd_desc);

struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus)
{
        /* struct nvdimm_bus definition is private to libnvdimm */
        return &nvdimm_bus->dev;
}
EXPORT_SYMBOL_GPL(to_nvdimm_bus_dev);

static bool is_uuid_sep(char sep)
{
        if (sep == '\n' || sep == '-' || sep == ':' || sep == '\0')
                return true;
        return false;
}

static int nd_uuid_parse(struct device *dev, u8 *uuid_out, const char *buf,
                size_t len)
{
        const char *str = buf;
        u8 uuid[16];
        int i;

        for (i = 0; i < 16; i++) {
                if (!isxdigit(str[0]) || !isxdigit(str[1])) {
                        dev_dbg(dev, "pos: %d buf[%zd]: %c buf[%zd]: %c\n",
                                        i, str - buf, str[0],
                                        str + 1 - buf, str[1]);
                        return -EINVAL;
                }

                uuid[i] = (hex_to_bin(str[0]) << 4) | hex_to_bin(str[1]);
                str += 2;
                if (is_uuid_sep(*str))
                        str++;
        }

        memcpy(uuid_out, uuid, sizeof(uuid));
        return 0;
}

/**
 * nd_uuid_store: common implementation for writing 'uuid' sysfs attributes
 * @dev: container device for the uuid property
 * @uuid_out: uuid buffer to replace
 * @buf: raw sysfs buffer to parse
 * @len: length of @buf
 *
 * Enforce that uuids can only be changed while the device is disabled
 * (driver detached)
 * LOCKING: expects nd_device_lock() is held on entry
 */
int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf,
                size_t len)
{
        u8 uuid[16];
        int rc;

        if (dev->driver)
                return -EBUSY;

        rc = nd_uuid_parse(dev, uuid, buf, len);
        if (rc)
                return rc;

        kfree(*uuid_out);
        *uuid_out = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
        if (!(*uuid_out))
                return -ENOMEM;

        return 0;
}
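
/*
 * Typical caller shape (a sketch; the attribute routine and 'example_ns'
 * object are hypothetical): a namespace 'uuid' store takes the device lock,
 * then delegates parsing and the driver-attached check:
 *
 *      static ssize_t uuid_store(struct device *dev,
 *                      struct device_attribute *attr, const char *buf,
 *                      size_t len)
 *      {
 *              ssize_t rc;
 *
 *              nd_device_lock(dev);
 *              rc = nd_uuid_store(dev, &example_ns->uuid, buf, len);
 *              nd_device_unlock(dev);
 *              return rc < 0 ? rc : len;
 *      }
 */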

ssize_t nd_size_select_show(unsigned long current_size,
                const unsigned long *supported, char *buf)
{
        ssize_t len = 0;
        int i;

        for (i = 0; supported[i]; i++)
                if (current_size == supported[i])
                        len += sprintf(buf + len, "[%ld] ", supported[i]);
                else
                        len += sprintf(buf + len, "%ld ", supported[i]);
        len += sprintf(buf + len, "\n");
        return len;
}

ssize_t nd_size_select_store(struct device *dev, const char *buf,
                unsigned long *current_size, const unsigned long *supported)
{
        unsigned long lbasize;
        int rc, i;

        if (dev->driver)
                return -EBUSY;

        rc = kstrtoul(buf, 0, &lbasize);
        if (rc)
                return rc;

        for (i = 0; supported[i]; i++)
                if (lbasize == supported[i])
                        break;

        if (supported[i]) {
                *current_size = lbasize;
                return 0;
        } else {
                return -EINVAL;
        }
}
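
/*
 * Usage sketch (illustrative; 'nsblk' and its fields are hypothetical):
 * a driver exposes a 'sector_size' attribute by pairing these helpers with
 * a zero-terminated table of supported sizes:
 *
 *      static const unsigned long ns_lbasize_supported[] = { 512, 4096, 0 };
 *
 *      // show: prints "512 [4096]" with the current selection bracketed
 *      return nd_size_select_show(nsblk->lbasize, ns_lbasize_supported, buf);
 *
 *      // store: validates input against the table while the device is
 *      // disabled
 *      rc = nd_size_select_store(dev, buf, &nsblk->lbasize,
 *                      ns_lbasize_supported);
 */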

static ssize_t commands_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        int cmd, len = 0;
        struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
        struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

        for_each_set_bit(cmd, &nd_desc->cmd_mask, BITS_PER_LONG)
                len += sprintf(buf + len, "%s ", nvdimm_bus_cmd_name(cmd));
        len += sprintf(buf + len, "\n");
        return len;
}
static DEVICE_ATTR_RO(commands);

static const char *nvdimm_bus_provider(struct nvdimm_bus *nvdimm_bus)
{
        struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
        struct device *parent = nvdimm_bus->dev.parent;

        if (nd_desc->provider_name)
                return nd_desc->provider_name;
        else if (parent)
                return dev_name(parent);
        else
                return "unknown";
}

static ssize_t provider_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);

        return sprintf(buf, "%s\n", nvdimm_bus_provider(nvdimm_bus));
}
static DEVICE_ATTR_RO(provider);

static int flush_namespaces(struct device *dev, void *data)
{
        nd_device_lock(dev);
        nd_device_unlock(dev);
        return 0;
}

static int flush_regions_dimms(struct device *dev, void *data)
{
        nd_device_lock(dev);
        nd_device_unlock(dev);
        device_for_each_child(dev, NULL, flush_namespaces);
        return 0;
}

static ssize_t wait_probe_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
        struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
        int rc;

        if (nd_desc->flush_probe) {
                rc = nd_desc->flush_probe(nd_desc);
                if (rc)
                        return rc;
        }
        nd_synchronize();
        device_for_each_child(dev, NULL, flush_regions_dimms);
        return sprintf(buf, "1\n");
}
static DEVICE_ATTR_RO(wait_probe);

static struct attribute *nvdimm_bus_attributes[] = {
        &dev_attr_commands.attr,
        &dev_attr_wait_probe.attr,
        &dev_attr_provider.attr,
        NULL,
};

static const struct attribute_group nvdimm_bus_attribute_group = {
        .attrs = nvdimm_bus_attributes,
};

static ssize_t capability_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
        struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
        enum nvdimm_fwa_capability cap;

        if (!nd_desc->fw_ops)
                return -EOPNOTSUPP;

        nvdimm_bus_lock(dev);
        cap = nd_desc->fw_ops->capability(nd_desc);
        nvdimm_bus_unlock(dev);

        switch (cap) {
        case NVDIMM_FWA_CAP_QUIESCE:
                return sprintf(buf, "quiesce\n");
        case NVDIMM_FWA_CAP_LIVE:
                return sprintf(buf, "live\n");
        default:
                return -EOPNOTSUPP;
        }
}
static DEVICE_ATTR_RO(capability);

static ssize_t activate_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
        struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
        enum nvdimm_fwa_capability cap;
        enum nvdimm_fwa_state state;

        if (!nd_desc->fw_ops)
                return -EOPNOTSUPP;

        nvdimm_bus_lock(dev);
        cap = nd_desc->fw_ops->capability(nd_desc);
        state = nd_desc->fw_ops->activate_state(nd_desc);
        nvdimm_bus_unlock(dev);

        if (cap < NVDIMM_FWA_CAP_QUIESCE)
                return -EOPNOTSUPP;

        switch (state) {
        case NVDIMM_FWA_IDLE:
                return sprintf(buf, "idle\n");
        case NVDIMM_FWA_BUSY:
                return sprintf(buf, "busy\n");
        case NVDIMM_FWA_ARMED:
                return sprintf(buf, "armed\n");
        case NVDIMM_FWA_ARM_OVERFLOW:
                return sprintf(buf, "overflow\n");
        default:
                return -ENXIO;
        }
}

static int exec_firmware_activate(void *data)
{
        struct nvdimm_bus_descriptor *nd_desc = data;

        return nd_desc->fw_ops->activate(nd_desc);
}

static ssize_t activate_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
        struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
        enum nvdimm_fwa_state state;
        bool quiesce;
        ssize_t rc;

        if (!nd_desc->fw_ops)
                return -EOPNOTSUPP;

        if (sysfs_streq(buf, "live"))
                quiesce = false;
        else if (sysfs_streq(buf, "quiesce"))
                quiesce = true;
        else
                return -EINVAL;

        nvdimm_bus_lock(dev);
        state = nd_desc->fw_ops->activate_state(nd_desc);

        switch (state) {
        case NVDIMM_FWA_BUSY:
                rc = -EBUSY;
                break;
        case NVDIMM_FWA_ARMED:
        case NVDIMM_FWA_ARM_OVERFLOW:
                if (quiesce)
                        rc = hibernate_quiet_exec(exec_firmware_activate, nd_desc);
                else
                        rc = nd_desc->fw_ops->activate(nd_desc);
                break;
        case NVDIMM_FWA_IDLE:
        default:
                rc = -ENXIO;
        }
        nvdimm_bus_unlock(dev);

        if (rc == 0)
                rc = len;
        return rc;
}
static DEVICE_ATTR_ADMIN_RW(activate);
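
/*
 * Example sysfs flow (illustrative; the bus device name depends on the
 * platform):
 *
 *      # cat /sys/bus/nd/devices/ndbus0/firmware/capability
 *      quiesce
 *      # echo quiesce > /sys/bus/nd/devices/ndbus0/firmware/activate
 *
 * With "quiesce" the activation runs via hibernate_quiet_exec(), freezing
 * devices and processes for the duration of the firmware update.
 */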

static umode_t nvdimm_bus_firmware_visible(struct kobject *kobj, struct attribute *a, int n)
{
        struct device *dev = container_of(kobj, typeof(*dev), kobj);
        struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
        struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
        enum nvdimm_fwa_capability cap;

        /*
         * Both 'activate' and 'capability' disappear when no ops
         * detected, or a negative capability is indicated.
         */
        if (!nd_desc->fw_ops)
                return 0;

        nvdimm_bus_lock(dev);
        cap = nd_desc->fw_ops->capability(nd_desc);
        nvdimm_bus_unlock(dev);

        if (cap < NVDIMM_FWA_CAP_QUIESCE)
                return 0;

        return a->mode;
}

static struct attribute *nvdimm_bus_firmware_attributes[] = {
        &dev_attr_activate.attr,
        &dev_attr_capability.attr,
        NULL,
};

static const struct attribute_group nvdimm_bus_firmware_attribute_group = {
        .name = "firmware",
        .attrs = nvdimm_bus_firmware_attributes,
        .is_visible = nvdimm_bus_firmware_visible,
};

const struct attribute_group *nvdimm_bus_attribute_groups[] = {
        &nvdimm_bus_attribute_group,
        &nvdimm_bus_firmware_attribute_group,
        NULL,
};
int nvdimm_bus_add_badrange ( struct nvdimm_bus * nvdimm_bus , u64 addr , u64 length )
2016-01-09 18:48:43 +03:00
{
2017-08-23 22:48:26 +03:00
return badrange_add ( & nvdimm_bus - > badrange , addr , length ) ;
2016-01-09 18:48:43 +03:00
}
2017-08-23 22:48:26 +03:00
EXPORT_SYMBOL_GPL ( nvdimm_bus_add_badrange ) ;
2016-10-01 02:19:31 +03:00
2015-06-25 11:21:52 +03:00
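
/*
 * Caller sketch (illustrative; 'spa' is a hypothetical physical-address
 * range reported by platform firmware): bus providers record media errors
 * so regions and namespaces can translate them into badblocks:
 *
 *      if (nvdimm_bus_add_badrange(nvdimm_bus, spa->start, spa->length))
 *              return -ENOMEM;
 */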

#ifdef CONFIG_BLK_DEV_INTEGRITY
int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
{
        struct blk_integrity bi;

        if (meta_size == 0)
                return 0;

        memset(&bi, 0, sizeof(bi));

        bi.tuple_size = meta_size;
        bi.tag_size = meta_size;

        blk_integrity_register(disk, &bi);
        blk_queue_max_integrity_segments(disk->queue, 1);

        return 0;
}
EXPORT_SYMBOL(nd_integrity_init);

#else /* CONFIG_BLK_DEV_INTEGRITY */
int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
{
        return 0;
}
EXPORT_SYMBOL(nd_integrity_init);

#endif
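
/*
 * Caller sketch (illustrative; 'btt' and btt_meta_size() are assumptions
 * about the BTT driver): a disk with per-sector metadata registers it as a
 * block-integrity tag before the disk goes live:
 *
 *      rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));
 *      if (rc)
 *              return rc;
 */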

static __init int libnvdimm_init(void)
{
        int rc;

        rc = nvdimm_bus_init();
        if (rc)
                return rc;
        rc = nvdimm_init();
        if (rc)
                goto err_dimm;
        rc = nd_region_init();
        if (rc)
                goto err_region;

        nd_label_init();

        return 0;
 err_region:
        nvdimm_exit();
 err_dimm:
        nvdimm_bus_exit();
        return rc;
}

static __exit void libnvdimm_exit(void)
{
        WARN_ON(!list_empty(&nvdimm_bus_list));
        nd_region_exit();
        nvdimm_exit();
        nvdimm_bus_exit();
        nvdimm_devs_exit();
}

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
subsys_initcall(libnvdimm_init);
module_exit(libnvdimm_exit);