// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 */
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
#include <linux/genhd.h>
#include <linux/pfn_t.h>
#include <linux/cdev.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include "dax-private.h"

static dev_t dax_devt;
DEFINE_STATIC_SRCU(dax_srcu);
static struct vfsmount *dax_mnt;
static DEFINE_IDA(dax_minor_ida);
static struct kmem_cache *dax_cache __read_mostly;
static struct super_block *dax_superblock __read_mostly;

#define DAX_HASH_SIZE (PAGE_SIZE / sizeof(struct hlist_head))
static struct hlist_head dax_host_list[DAX_HASH_SIZE];
static DEFINE_SPINLOCK(dax_host_lock);

int dax_read_lock(void)
{
	return srcu_read_lock(&dax_srcu);
}
EXPORT_SYMBOL_GPL(dax_read_lock);

void dax_read_unlock(int id)
{
	srcu_read_unlock(&dax_srcu, id);
}
EXPORT_SYMBOL_GPL(dax_read_unlock);

#ifdef CONFIG_BLOCK
#include <linux/blkdev.h>

int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size,
		pgoff_t *pgoff)
{
	phys_addr_t phys_off = (get_start_sect(bdev) + sector) * 512;

	if (pgoff)
		*pgoff = PHYS_PFN(phys_off);
	if (phys_off % PAGE_SIZE || size % PAGE_SIZE)
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL(bdev_dax_pgoff);

#if IS_ENABLED(CONFIG_FS_DAX)
struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
{
	if (!blk_queue_dax(bdev->bd_queue))
		return NULL;
	return fs_dax_get_by_host(bdev->bd_disk->disk_name);
}
EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
#endif

bool __generic_fsdax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t sectors)
{
	bool dax_enabled = false;
	pgoff_t pgoff, pgoff_end;
	char buf[BDEVNAME_SIZE];
	void *kaddr, *end_kaddr;
	pfn_t pfn, end_pfn;
	sector_t last_page;
	long len, len2;
	int err, id;

	if (blocksize != PAGE_SIZE) {
		pr_debug("%s: error: unsupported blocksize for dax\n",
				bdevname(bdev, buf));
		return false;
	}

	err = bdev_dax_pgoff(bdev, start, PAGE_SIZE, &pgoff);
	if (err) {
		pr_debug("%s: error: unaligned partition for dax\n",
				bdevname(bdev, buf));
		return false;
	}

	last_page = PFN_DOWN((start + sectors - 1) * 512) * PAGE_SIZE / 512;
	err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end);
	if (err) {
		pr_debug("%s: error: unaligned partition for dax\n",
				bdevname(bdev, buf));
		return false;
	}

	id = dax_read_lock();
	len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
	len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn);
	dax_read_unlock(id);

	if (len < 1 || len2 < 1) {
		pr_debug("%s: error: dax access failed (%ld)\n",
				bdevname(bdev, buf), len < 1 ? len : len2);
		return false;
	}

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED) && pfn_t_special(pfn)) {
		/*
		 * An arch that has enabled the pmem api should also
		 * have its drivers support pfn_t_devmap()
		 *
		 * This is a developer warning and should not trigger in
		 * production. dax_flush() will crash since it depends
		 * on being able to do (page_address(pfn_to_page())).
		 */
		WARN_ON(IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API));
		dax_enabled = true;
	} else if (pfn_t_devmap(pfn) && pfn_t_devmap(end_pfn)) {
		struct dev_pagemap *pgmap, *end_pgmap;

		pgmap = get_dev_pagemap(pfn_t_to_pfn(pfn), NULL);
		end_pgmap = get_dev_pagemap(pfn_t_to_pfn(end_pfn), NULL);
		if (pgmap && pgmap == end_pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX
				&& pfn_t_to_page(pfn)->pgmap == pgmap
				&& pfn_t_to_page(end_pfn)->pgmap == pgmap
				&& pfn_t_to_pfn(pfn) == PHYS_PFN(__pa(kaddr))
				&& pfn_t_to_pfn(end_pfn) == PHYS_PFN(__pa(end_kaddr)))
			dax_enabled = true;
		put_dev_pagemap(pgmap);
		put_dev_pagemap(end_pgmap);
	}

	if (!dax_enabled) {
		pr_debug("%s: error: dax support not enabled\n",
				bdevname(bdev, buf));
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(__generic_fsdax_supported);

/**
 * __bdev_dax_supported() - Check if the device supports dax for filesystem
 * @bdev: block device to check
 * @blocksize: The block size of the device
 *
 * This is a library function for filesystems to check if the block device
 * can be mounted with dax option.
 *
 * Return: true if supported, false if unsupported
 */
bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
{
	struct dax_device *dax_dev;
	struct request_queue *q;
	char buf[BDEVNAME_SIZE];
	bool ret;
	int id;

	q = bdev_get_queue(bdev);
	if (!q || !blk_queue_dax(q)) {
		pr_debug("%s: error: request queue doesn't support dax\n",
				bdevname(bdev, buf));
		return false;
	}

	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	if (!dax_dev) {
		pr_debug("%s: error: device does not support dax\n",
				bdevname(bdev, buf));
		return false;
	}

	id = dax_read_lock();
	ret = dax_supported(dax_dev, bdev, blocksize, 0,
			i_size_read(bdev->bd_inode) / 512);
	dax_read_unlock(id);

	put_dax(dax_dev);

	return ret;
}
EXPORT_SYMBOL_GPL(__bdev_dax_supported);
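
/*
 * Illustrative sketch (not part of the driver): how a filesystem's mount
 * path might gate its "-o dax" option on the helper above. The function
 * name example_fs_check_dax() and the use of PAGE_SIZE as the block size
 * are hypothetical; filesystems typically reach this through the
 * bdev_dax_supported() wrapper in <linux/dax.h>.
 */
static bool __maybe_unused example_fs_check_dax(struct super_block *sb)
{
	/* Require the block device to expose whole, page-aligned pages. */
	return __bdev_dax_supported(sb->s_bdev, PAGE_SIZE);
}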

#endif

enum dax_device_flags {
	/* !alive + rcu grace period == no new operations / mappings */
	DAXDEV_ALIVE,
	/* gate whether dax_flush() calls the low level flush routine */
	DAXDEV_WRITE_CACHE,
	/* flag to check if device supports synchronous flush */
	DAXDEV_SYNC,
};

/**
 * struct dax_device - anchor object for dax services
 * @inode: core vfs
 * @cdev: optional character interface for "device dax"
 * @host: optional name for lookups where the device path is not available
 * @private: dax driver private data
 * @flags: state and boolean properties
 */
struct dax_device {
	struct hlist_node list;
	struct inode inode;
	struct cdev cdev;
	const char *host;
	void *private;
	unsigned long flags;
	const struct dax_operations *ops;
};

static ssize_t write_cache_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));
	ssize_t rc;

	WARN_ON_ONCE(!dax_dev);
	if (!dax_dev)
		return -ENXIO;

	rc = sprintf(buf, "%d\n", !!dax_write_cache_enabled(dax_dev));
	put_dax(dax_dev);
	return rc;
}

static ssize_t write_cache_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool write_cache;
	int rc = strtobool(buf, &write_cache);
	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));

	WARN_ON_ONCE(!dax_dev);
	if (!dax_dev)
		return -ENXIO;

	if (rc)
		len = rc;
	else
		dax_write_cache(dax_dev, write_cache);

	put_dax(dax_dev);
	return len;
}
static DEVICE_ATTR_RW(write_cache);

static umode_t dax_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));

	WARN_ON_ONCE(!dax_dev);
	if (!dax_dev)
		return 0;

#ifndef CONFIG_ARCH_HAS_PMEM_API
	if (a == &dev_attr_write_cache.attr)
		return 0;
#endif
	return a->mode;
}

static struct attribute *dax_attributes[] = {
	&dev_attr_write_cache.attr,
	NULL,
};

struct attribute_group dax_attribute_group = {
	.name = "dax",
	.attrs = dax_attributes,
	.is_visible = dax_visible,
};
EXPORT_SYMBOL_GPL(dax_attribute_group);

/**
 * dax_direct_access() - translate a device pgoff to an absolute pfn
 * @dax_dev: a dax_device instance representing the logical memory range
 * @pgoff: offset in pages from the start of the device to translate
 * @nr_pages: number of consecutive pages caller can handle relative to @pfn
 * @kaddr: output parameter that returns a virtual address mapping of pfn
 * @pfn: output parameter that returns an absolute pfn translation of @pgoff
 *
 * Return: negative errno if an error occurs, otherwise the number of
 * pages accessible at the device relative @pgoff.
 */
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
		void **kaddr, pfn_t *pfn)
{
	long avail;

	if (!dax_dev)
		return -EOPNOTSUPP;

	if (!dax_alive(dax_dev))
		return -ENXIO;

	if (nr_pages < 0)
		return nr_pages;

	avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages,
			kaddr, pfn);
	if (!avail)
		return -ERANGE;
	return min(avail, nr_pages);
}
EXPORT_SYMBOL_GPL(dax_direct_access);
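
/*
 * Illustrative sketch (not part of the driver): the SRCU read-side pattern
 * a caller is expected to follow around dax_direct_access(). The helper
 * name example_peek_pages() and the single-page request are hypothetical.
 */
static long __maybe_unused example_peek_pages(struct dax_device *dax_dev,
		pgoff_t pgoff)
{
	void *kaddr;
	pfn_t pfn;
	long nr_pages;
	int id;

	id = dax_read_lock();
	/* Ask for one page; a positive return is the number of pages mapped. */
	nr_pages = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
	dax_read_unlock(id);

	return nr_pages;
}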

bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
		int blocksize, sector_t start, sector_t len)
{
	if (!dax_alive(dax_dev))
		return false;

	return dax_dev->ops->dax_supported(dax_dev, bdev, blocksize, start, len);
}

size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i)
{
	if (!dax_alive(dax_dev))
		return 0;

	return dax_dev->ops->copy_from_iter(dax_dev, pgoff, addr, bytes, i);
}
EXPORT_SYMBOL_GPL(dax_copy_from_iter);

size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i)
{
	if (!dax_alive(dax_dev))
		return 0;

	return dax_dev->ops->copy_to_iter(dax_dev, pgoff, addr, bytes, i);
}
EXPORT_SYMBOL_GPL(dax_copy_to_iter);

#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
	if (unlikely(!dax_write_cache_enabled(dax_dev)))
		return;

	arch_wb_cache_pmem(addr, size);
}
#else
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
}
#endif
EXPORT_SYMBOL_GPL(dax_flush);

void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
	if (wc)
		set_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
	else
		clear_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache);

bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
	return test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache_enabled);
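
/*
 * Illustrative sketch (not part of the driver): a provider that advertises
 * a volatile write cache opts in with dax_write_cache(), after which
 * dax_flush() writes back CPU caches for the given range; otherwise the
 * flush is a nop. example_make_durable() and its arguments are hypothetical.
 */
static void __maybe_unused example_make_durable(struct dax_device *dax_dev,
		void *kaddr, size_t len)
{
	dax_write_cache(dax_dev, true);
	/* No-op unless DAXDEV_WRITE_CACHE is set and the arch has a pmem API. */
	dax_flush(dax_dev, kaddr, len);
}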

bool __dax_synchronous(struct dax_device *dax_dev)
{
	return test_bit(DAXDEV_SYNC, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(__dax_synchronous);

void __set_dax_synchronous(struct dax_device *dax_dev)
{
	set_bit(DAXDEV_SYNC, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(__set_dax_synchronous);

bool dax_alive(struct dax_device *dax_dev)
{
	lockdep_assert_held(&dax_srcu);
	return test_bit(DAXDEV_ALIVE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_alive);

static int dax_host_hash(const char *host)
{
	return hashlen_hash(hashlen_string("DAX", host)) % DAX_HASH_SIZE;
}

/*
 * Note, rcu is not protecting the liveness of dax_dev, rcu is ensuring
 * that any fault handlers or operations that might have seen
 * dax_alive(), have completed.  Any operations that start after
 * synchronize_srcu() has run will abort upon seeing !dax_alive().
 */
void kill_dax(struct dax_device *dax_dev)
{
	if (!dax_dev)
		return;

	clear_bit(DAXDEV_ALIVE, &dax_dev->flags);

	synchronize_srcu(&dax_srcu);

	spin_lock(&dax_host_lock);
	hlist_del_init(&dax_dev->list);
	spin_unlock(&dax_host_lock);
}
EXPORT_SYMBOL_GPL(kill_dax);

void run_dax(struct dax_device *dax_dev)
{
	set_bit(DAXDEV_ALIVE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(run_dax);

static struct inode *dax_alloc_inode(struct super_block *sb)
{
	struct dax_device *dax_dev;
	struct inode *inode;

	dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL);
	if (!dax_dev)
		return NULL;

	inode = &dax_dev->inode;
	inode->i_rdev = 0;
	return inode;
}

static struct dax_device *to_dax_dev(struct inode *inode)
{
	return container_of(inode, struct dax_device, inode);
}

static void dax_free_inode(struct inode *inode)
{
	struct dax_device *dax_dev = to_dax_dev(inode);

	kfree(dax_dev->host);
	dax_dev->host = NULL;
	if (inode->i_rdev)
		ida_simple_remove(&dax_minor_ida, MINOR(inode->i_rdev));
	kmem_cache_free(dax_cache, dax_dev);
}

static void dax_destroy_inode(struct inode *inode)
{
	struct dax_device *dax_dev = to_dax_dev(inode);

	WARN_ONCE(test_bit(DAXDEV_ALIVE, &dax_dev->flags),
			"kill_dax() must be called before final iput()\n");
}

static const struct super_operations dax_sops = {
	.statfs = simple_statfs,
	.alloc_inode = dax_alloc_inode,
	.destroy_inode = dax_destroy_inode,
	.free_inode = dax_free_inode,
	.drop_inode = generic_delete_inode,
};

static int dax_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, DAXFS_MAGIC);

	if (!ctx)
		return -ENOMEM;
	ctx->ops = &dax_sops;
	return 0;
}

static struct file_system_type dax_fs_type = {
	.name = "dax",
	.init_fs_context = dax_init_fs_context,
	.kill_sb = kill_anon_super,
};

static int dax_test(struct inode *inode, void *data)
{
	dev_t devt = *(dev_t *) data;

	return inode->i_rdev == devt;
}

static int dax_set(struct inode *inode, void *data)
{
	dev_t devt = *(dev_t *) data;

	inode->i_rdev = devt;
	return 0;
}

static struct dax_device *dax_dev_get(dev_t devt)
{
	struct dax_device *dax_dev;
	struct inode *inode;

	inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
			dax_test, dax_set, &devt);
	if (!inode)
		return NULL;

	dax_dev = to_dax_dev(inode);
	if (inode->i_state & I_NEW) {
		set_bit(DAXDEV_ALIVE, &dax_dev->flags);
		inode->i_cdev = &dax_dev->cdev;
		inode->i_mode = S_IFCHR;
		inode->i_flags = S_DAX;
		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
		unlock_new_inode(inode);
	}

	return dax_dev;
}

static void dax_add_host(struct dax_device *dax_dev, const char *host)
{
	int hash;

	/*
	 * Unconditionally init dax_dev since it's coming from a
	 * non-zeroed slab cache
	 */
	INIT_HLIST_NODE(&dax_dev->list);
	dax_dev->host = host;
	if (!host)
		return;

	hash = dax_host_hash(host);
	spin_lock(&dax_host_lock);
	hlist_add_head(&dax_dev->list, &dax_host_list[hash]);
	spin_unlock(&dax_host_lock);
}

struct dax_device *alloc_dax(void *private, const char *__host,
		const struct dax_operations *ops, unsigned long flags)
{
	struct dax_device *dax_dev;
	const char *host;
	dev_t devt;
	int minor;

	host = kstrdup(__host, GFP_KERNEL);
	if (__host && !host)
		return NULL;

	minor = ida_simple_get(&dax_minor_ida, 0, MINORMASK + 1, GFP_KERNEL);
	if (minor < 0)
		goto err_minor;

	devt = MKDEV(MAJOR(dax_devt), minor);
	dax_dev = dax_dev_get(devt);
	if (!dax_dev)
		goto err_dev;

	dax_add_host(dax_dev, host);
	dax_dev->ops = ops;
	dax_dev->private = private;
	if (flags & DAXDEV_F_SYNC)
		set_dax_synchronous(dax_dev);

	return dax_dev;

 err_dev:
	ida_simple_remove(&dax_minor_ida, minor);
 err_minor:
	kfree(host);
	return NULL;
}
EXPORT_SYMBOL_GPL(alloc_dax);

void put_dax(struct dax_device *dax_dev)
{
	if (!dax_dev)
		return;
	iput(&dax_dev->inode);
}
EXPORT_SYMBOL_GPL(put_dax);
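
/*
 * Illustrative sketch (not part of the driver): the typical provider
 * lifecycle around alloc_dax()/kill_dax()/put_dax(). The "example" host
 * name and the example_* function names are hypothetical.
 */
static struct dax_device * __maybe_unused example_register(void *drv_data,
		const struct dax_operations *ops)
{
	/* DAXDEV_F_SYNC marks the device as supporting synchronous faults. */
	return alloc_dax(drv_data, "example", ops, DAXDEV_F_SYNC);
}

static void __maybe_unused example_unregister(struct dax_device *dax_dev)
{
	/* Stop new operations, wait for in-flight ones, then drop the ref. */
	kill_dax(dax_dev);
	put_dax(dax_dev);
}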

/**
 * dax_get_by_host() - temporary lookup mechanism for filesystem-dax
 * @host: alternate name for the device registered by a dax driver
 */
struct dax_device *dax_get_by_host(const char *host)
{
	struct dax_device *dax_dev, *found = NULL;
	int hash, id;

	if (!host)
		return NULL;

	hash = dax_host_hash(host);

	id = dax_read_lock();
	spin_lock(&dax_host_lock);
	hlist_for_each_entry(dax_dev, &dax_host_list[hash], list) {
		if (!dax_alive(dax_dev)
				|| strcmp(host, dax_dev->host) != 0)
			continue;

		if (igrab(&dax_dev->inode))
			found = dax_dev;
		break;
	}
	spin_unlock(&dax_host_lock);
	dax_read_unlock(id);

	return found;
}
EXPORT_SYMBOL_GPL(dax_get_by_host);
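
/*
 * Illustrative sketch (not part of the driver): resolving a dax_device by
 * its registered host name and dropping the reference when done. The name
 * example_has_live_dax() is hypothetical.
 */
static bool __maybe_unused example_has_live_dax(const char *host)
{
	struct dax_device *dax_dev = dax_get_by_host(host);

	if (!dax_dev)
		return false;
	put_dax(dax_dev);
	return true;
}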

/**
 * inode_dax: convert a public inode into its dax_dev
 * @inode: An inode with i_cdev pointing to a dax_dev
 *
 * Note this is not equivalent to to_dax_dev() which is for private
 * internal use where we know the inode filesystem type == dax_fs_type.
 */
struct dax_device *inode_dax(struct inode *inode)
{
	struct cdev *cdev = inode->i_cdev;

	return container_of(cdev, struct dax_device, cdev);
}
EXPORT_SYMBOL_GPL(inode_dax);
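
/*
 * Illustrative sketch (not part of the driver): a character-device open()
 * handler recovering the dax_device from the public inode it was handed.
 * example_open() and its use of private_data are hypothetical.
 */
static int __maybe_unused example_open(struct inode *inode, struct file *filp)
{
	struct dax_device *dax_dev = inode_dax(inode);

	filp->private_data = dax_dev;
	return 0;
}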

struct inode *dax_inode(struct dax_device *dax_dev)
{
	return &dax_dev->inode;
}
EXPORT_SYMBOL_GPL(dax_inode);

void *dax_get_private(struct dax_device *dax_dev)
{
	if (!test_bit(DAXDEV_ALIVE, &dax_dev->flags))
		return NULL;
	return dax_dev->private;
}
EXPORT_SYMBOL_GPL(dax_get_private);

static void init_once(void *_dax_dev)
{
	struct dax_device *dax_dev = _dax_dev;
	struct inode *inode = &dax_dev->inode;

	memset(dax_dev, 0, sizeof(*dax_dev));
	inode_init_once(inode);
}

static int dax_fs_init(void)
{
	int rc;

	dax_cache = kmem_cache_create("dax_cache", sizeof(struct dax_device), 0,
			(SLAB_HWCACHE_ALIGN | SLAB_RECLAIM_ACCOUNT |
			 SLAB_MEM_SPREAD | SLAB_ACCOUNT),
			init_once);
	if (!dax_cache)
		return -ENOMEM;

	dax_mnt = kern_mount(&dax_fs_type);
	if (IS_ERR(dax_mnt)) {
		rc = PTR_ERR(dax_mnt);
		goto err_mount;
	}
	dax_superblock = dax_mnt->mnt_sb;

	return 0;

 err_mount:
	kmem_cache_destroy(dax_cache);

	return rc;
}

static void dax_fs_exit(void)
{
	kern_unmount(dax_mnt);
	kmem_cache_destroy(dax_cache);
}

static int __init dax_core_init(void)
{
	int rc;

	rc = dax_fs_init();
	if (rc)
		return rc;

	rc = alloc_chrdev_region(&dax_devt, 0, MINORMASK + 1, "dax");
	if (rc)
		goto err_chrdev;

	rc = dax_bus_init();
	if (rc)
		goto err_bus;
	return 0;

 err_bus:
	unregister_chrdev_region(dax_devt, MINORMASK + 1);
 err_chrdev:
	dax_fs_exit();
	/* Propagate the failure instead of reporting a successful init. */
	return rc;
}

static void __exit dax_core_exit(void)
{
	unregister_chrdev_region(dax_devt, MINORMASK + 1);
	ida_destroy(&dax_minor_ida);
	dax_fs_exit();
}

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
subsys_initcall(dax_core_init);
module_exit(dax_core_exit);