/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#ifndef __ND_CORE_H__
#define __ND_CORE_H__
#include <linux/libnvdimm.h>
#include <linux/device.h>
#include <linux/sizes.h>
#include <linux/mutex.h>
#include <linux/nd.h>
#include "nd.h"

extern struct list_head nvdimm_bus_list;
extern struct mutex nvdimm_bus_list_mutex;
extern int nvdimm_major;
extern struct workqueue_struct *nvdimm_wq;

struct nvdimm_bus {
        struct nvdimm_bus_descriptor *nd_desc;
        wait_queue_head_t wait;
        struct list_head list;
        struct device dev;
        int id, probe_active;
        atomic_t ioctl_active;
        struct list_head mapping_list;
        struct mutex reconfig_mutex;
        struct badrange badrange;
};

struct nvdimm {
        unsigned long flags;
        void *provider_data;
        unsigned long cmd_mask;
        struct device dev;
        atomic_t busy;
        int id, num_flush;
        struct resource *flush_wpq;
        const char *dimm_id;
        struct {
                const struct nvdimm_security_ops *ops;
                unsigned long flags;
                unsigned long ext_flags;
                unsigned int overwrite_tmo;
                struct kernfs_node *overwrite_state;
        } sec;
        struct delayed_work dwork;
        const struct nvdimm_fw_ops *fw_ops;
};

static inline unsigned long nvdimm_security_flags(
                struct nvdimm *nvdimm, enum nvdimm_passphrase_type ptype)
{
        u64 flags;
        const u64 state_flags = 1UL << NVDIMM_SECURITY_DISABLED
                | 1UL << NVDIMM_SECURITY_LOCKED
                | 1UL << NVDIMM_SECURITY_UNLOCKED
                | 1UL << NVDIMM_SECURITY_OVERWRITE;

        if (!nvdimm->sec.ops)
                return 0;

        flags = nvdimm->sec.ops->get_flags(nvdimm, ptype);
        /* disabled, locked, unlocked, and overwrite are mutually exclusive */
        dev_WARN_ONCE(&nvdimm->dev, hweight64(flags & state_flags) > 1,
                        "reported invalid security state: %#llx\n",
                        (unsigned long long) flags);
        return flags;
}
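
/*
 * Usage sketch (illustrative only, not a call site in this header): the
 * returned value is a bit mask, so a probe-like path can test individual
 * NVDIMM_SECURITY_* bits and bail out while the DIMM's label area is still
 * inaccessible. NVDIMM_USER and the NVDIMM_SECURITY_* bits come from
 * <linux/libnvdimm.h>; the surrounding function is assumed, not shown.
 *
 *	unsigned long flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
 *
 *	if (test_bit(NVDIMM_SECURITY_LOCKED, &flags))
 *		return -EACCES;
 */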
int nvdimm_security_freeze(struct nvdimm *nvdimm);
#if IS_ENABLED(CONFIG_NVDIMM_KEYS)
ssize_t nvdimm_security_store(struct device *dev, const char *buf, size_t len);
void nvdimm_security_overwrite_query(struct work_struct *work);
#else
static inline ssize_t nvdimm_security_store(struct device *dev,
                const char *buf, size_t len)
{
        return -EOPNOTSUPP;
}
static inline void nvdimm_security_overwrite_query(struct work_struct *work)
{
}
#endif

/**
 * struct blk_alloc_info - tracking info for BLK dpa scanning
 * @nd_mapping: blk region mapping boundaries
 * @available: decremented in alias_dpa_busy as aliased PMEM is scanned
 * @busy: decremented in blk_dpa_busy to account for ranges already
 *	  handled by alias_dpa_busy
 * @res: alias_dpa_busy interprets this as a free space range that needs
 *	 to be truncated to the valid BLK allocation starting DPA,
 *	 blk_dpa_busy treats it as a busy range that needs the aliased
 *	 PMEM ranges truncated.
 */
struct blk_alloc_info {
        struct nd_mapping *nd_mapping;
        resource_size_t available, busy;
        struct resource *res;
};
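
/*
 * A condensed sketch of how the structure above is typically driven (the
 * real scan lives in dimm_devs.c; the initial @available value and the
 * post-processing are simplified here):
 *
 *	struct blk_alloc_info info = {
 *		.nd_mapping = nd_mapping,
 *		.available = nd_mapping->size,
 *	};
 *
 *	device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);
 *
 * Each region device visited by alias_dpa_busy() subtracts the PMEM ranges
 * that alias this BLK mapping, leaving @available as the remaining BLK
 * capacity.
 */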

bool is_nvdimm(struct device *dev);
bool is_nd_pmem(struct device *dev);
bool is_nd_volatile(struct device *dev);
bool is_nd_blk(struct device *dev);
static inline bool is_nd_region(struct device *dev)
{
        return is_nd_pmem(dev) || is_nd_blk(dev) || is_nd_volatile(dev);
}
static inline bool is_memory(struct device *dev)
{
        return is_nd_pmem(dev) || is_nd_volatile(dev);
}
struct nvdimm_bus *walk_to_nvdimm_bus(struct device *nd_dev);
int __init nvdimm_bus_init(void);
void nvdimm_bus_exit(void);
void nvdimm_devs_exit(void);
struct nd_region;
void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev);
void nd_region_create_ns_seed(struct nd_region *nd_region);
void nd_region_create_btt_seed(struct nd_region *nd_region);
void nd_region_create_pfn_seed(struct nd_region *nd_region);
void nd_region_create_dax_seed(struct nd_region *nd_region);
int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus);
void nvdimm_bus_destroy_ndctl(struct nvdimm_bus *nvdimm_bus);
void nd_synchronize(void);
void __nd_device_register(struct device *dev);
struct nd_label_id;
char *nd_label_gen_id(struct nd_label_id *label_id, u8 *uuid, u32 flags);
bool nd_is_uuid_unique(struct device *dev, u8 *uuid);
struct nd_region;
struct nvdimm_drvdata;
struct nd_mapping;
void nd_mapping_free_labels(struct nd_mapping *nd_mapping);
int __reserve_free_pmem(struct device *dev, void *data);
void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
                struct nd_mapping *nd_mapping);
resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
                struct nd_mapping *nd_mapping);
resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region);
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
                struct nd_mapping *nd_mapping, resource_size_t *overlap);
resource_size_t nd_blk_available_dpa(struct nd_region *nd_region);
resource_size_t nd_region_available_dpa(struct nd_region *nd_region);
int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
                resource_size_t size);
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
                struct nd_label_id *label_id);
int alias_dpa_busy(struct device *dev, void *data);
struct resource *nsblk_add_resource(struct nd_region *nd_region,
                struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk,
                resource_size_t start);
int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd);
void get_ndd(struct nvdimm_drvdata *ndd);
resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns);
void nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns);
void __nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns);
bool nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
                struct nd_namespace_common **_ndns);
bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
                struct nd_namespace_common **_ndns);
ssize_t nd_namespace_store(struct device *dev,
                struct nd_namespace_common **_ndns, const char *buf,
                size_t len);
struct nd_pfn *to_nd_pfn_safe(struct device *dev);
bool is_nvdimm_bus(struct device *dev);

#if IS_ENABLED(CONFIG_ND_CLAIM)
int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio,
                resource_size_t size);
void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio);
#else
static inline int devm_nsio_enable(struct device *dev,
                struct nd_namespace_io *nsio, resource_size_t size)
{
        return -ENXIO;
}
static inline void devm_nsio_disable(struct device *dev,
                struct nd_namespace_io *nsio)
{
}
#endif

#ifdef CONFIG_PROVE_LOCKING
extern struct class *nd_class;

enum {
        LOCK_BUS,
        LOCK_NDCTL,
        LOCK_REGION,
        LOCK_DIMM = LOCK_REGION,
        LOCK_NAMESPACE,
        LOCK_CLAIM,
};

static inline void debug_nvdimm_lock(struct device *dev)
{
        if (is_nd_region(dev))
                mutex_lock_nested(&dev->lockdep_mutex, LOCK_REGION);
        else if (is_nvdimm(dev))
                mutex_lock_nested(&dev->lockdep_mutex, LOCK_DIMM);
        else if (is_nd_btt(dev) || is_nd_pfn(dev) || is_nd_dax(dev))
                mutex_lock_nested(&dev->lockdep_mutex, LOCK_CLAIM);
        else if (dev->parent && (is_nd_region(dev->parent)))
                mutex_lock_nested(&dev->lockdep_mutex, LOCK_NAMESPACE);
        else if (is_nvdimm_bus(dev))
                mutex_lock_nested(&dev->lockdep_mutex, LOCK_BUS);
        else if (dev->class && dev->class == nd_class)
                mutex_lock_nested(&dev->lockdep_mutex, LOCK_NDCTL);
        else
                dev_WARN(dev, "unknown lock level\n");
}

static inline void debug_nvdimm_unlock(struct device *dev)
{
        mutex_unlock(&dev->lockdep_mutex);
}

static inline void nd_device_lock(struct device *dev)
{
        device_lock(dev);
        debug_nvdimm_lock(dev);
}

static inline void nd_device_unlock(struct device *dev)
{
        debug_nvdimm_unlock(dev);
        device_unlock(dev);
}
#else
static inline void nd_device_lock(struct device *dev)
{
        device_lock(dev);
}

static inline void nd_device_unlock(struct device *dev)
{
        device_unlock(dev);
}

static inline void debug_nvdimm_lock(struct device *dev)
{
}

static inline void debug_nvdimm_unlock(struct device *dev)
{
}
#endif
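
/*
 * Pairing sketch (illustrative): nd_device_lock()/nd_device_unlock() wrap
 * device_lock()/device_unlock() and, under CONFIG_PROVE_LOCKING, also take
 * a shadow lockdep_mutex at the LOCK_* subclass matching the device type,
 * giving lockdep a way to distinguish nesting levels that plain
 * device_lock() does not expose. do_reconfig() below is a stand-in for
 * whatever work a call site performs while the device is locked:
 *
 *	nd_device_lock(dev);
 *	rc = do_reconfig(dev);
 *	nd_device_unlock(dev);
 */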
#endif /* __ND_CORE_H__ */