/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright(c) 2016 Intel Corporation. All rights reserved.
 */
#ifndef __DAX_PRIVATE_H__
#define __DAX_PRIVATE_H__

#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/idr.h>

/* private routines between core files */
struct dax_device;
struct dax_device *inode_dax(struct inode *inode);
struct inode *dax_inode(struct dax_device *dax_dev);

int dax_bus_init(void);
void dax_bus_exit(void);
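
/*
 * Illustrative sketch (not part of this header): the expected pairing of
 * the bus hooks from the core module init/exit path. The surrounding
 * routine names here are hypothetical.
 *
 *	static int __init dax_core_init(void)
 *	{
 *		int rc;
 *
 *		rc = dax_bus_init();
 *		if (rc)
 *			return rc;
 *		...
 *	}
 *
 *	static void __exit dax_core_exit(void)
 *	{
 *		...
 *		dax_bus_exit();
 *	}
 */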

/**
 * struct dax_region - mapping infrastructure for dax devices
 * @id: kernel-wide unique region for a memory range
 * @target_node: effective numa node if this memory range is onlined
 * @kref: to pin while other agents have a need to do lookups
 * @dev: parent device backing this region
 * @align: allocation and mapping alignment for child dax devices
 * @ida: instance id allocator
 * @res: resource tree to track instance allocations
 * @seed: allow userspace to find the first unbound seed device
 * @youngest: allow userspace to find the most recently created device
 */
struct dax_region {
	int id;
	int target_node;
	struct kref kref;
	struct device *dev;
	unsigned int align;
	struct ida ida;
	struct resource res;
	struct device *seed;
	struct device *youngest;
};
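
/*
 * Illustrative sketch (not part of this header): @kref pins the region
 * while other agents perform lookups. A release callback pairs with
 * kref_put(); the callback shown is a hypothetical stand-in for the
 * real get/put helpers in the dax bus code.
 *
 *	static void dax_region_free(struct kref *kref)
 *	{
 *		struct dax_region *dax_region;
 *
 *		dax_region = container_of(kref, struct dax_region, kref);
 *		kfree(dax_region);
 *	}
 *
 *	kref_get(&dax_region->kref);
 *	...
 *	kref_put(&dax_region->kref, dax_region_free);
 */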

struct dax_mapping {
	struct device dev;
	int range_id;
	int id;
};

/**
 * struct dev_dax - instance data for a subdivision of a dax region, and
 * data while the device is activated in the driver.
 * @region: parent region
 * @dax_dev: core dax functionality
 * @align: allocation and mapping alignment for the instance
 * @target_node: effective numa node if dev_dax memory range is onlined
 * @dyn_id: is this a dynamic or statically created instance
 * @id: ida allocated id when the dax_region is not static
 * @ida: mapping id allocator
 * @dev: device core
 * @pgmap: pgmap for memmap setup / lifetime (driver owned)
 * @nr_range: size of @ranges
 * @ranges: resource-span + pgoff tuples for the instance
 */
struct dev_dax {
	struct dax_region *region;
	struct dax_device *dax_dev;
	unsigned int align;
	int target_node;
	bool dyn_id;
	int id;
	struct ida ida;
	struct device dev;
	struct dev_pagemap *pgmap;
	int nr_range;
	struct dev_dax_range {
		unsigned long pgoff;
		struct range range;
		struct dax_mapping *mapping;
	} *ranges;
};
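
/*
 * Illustrative sketch (not part of this header): walking the range
 * tuples of an instance. Each entry pairs a device-relative pgoff with
 * the physical address span described by @range.
 *
 *	int i;
 *
 *	for (i = 0; i < dev_dax->nr_range; i++) {
 *		struct range *range = &dev_dax->ranges[i].range;
 *		unsigned long pgoff = dev_dax->ranges[i].pgoff;
 *
 *		...
 *	}
 */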

/*
 * While run_dax() is potentially a generic operation that could be
 * defined in include/linux/dax.h we don't want to grow any users
 * outside of drivers/dax/
 */
void run_dax(struct dax_device *dax_dev);

static inline struct dev_dax *to_dev_dax(struct device *dev)
{
	return container_of(dev, struct dev_dax, dev);
}

static inline struct dax_mapping *to_dax_mapping(struct device *dev)
{
	return container_of(dev, struct dax_mapping, dev);
}
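
/*
 * Illustrative sketch (not part of this header): the helpers above are
 * the usual container_of() accessors for callbacks that only receive
 * the embedded struct device, e.g. sysfs attributes. The attribute
 * below is hypothetical.
 *
 *	static ssize_t id_show(struct device *dev,
 *			struct device_attribute *attr, char *buf)
 *	{
 *		struct dax_mapping *mapping = to_dax_mapping(dev);
 *
 *		return sysfs_emit(buf, "%d\n", mapping->id);
 *	}
 */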

phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff, unsigned long size);
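
/*
 * Illustrative sketch (not part of this header): how a fault path might
 * resolve a file-relative pgoff to a physical address before inserting
 * a mapping; -1 indicates the pgoff does not fall in any range. The
 * surrounding fault handler is assumed, not shown.
 *
 *	phys_addr_t phys;
 *	pfn_t pfn;
 *
 *	phys = dax_pgoff_to_phys(dev_dax, vmf->pgoff, PAGE_SIZE);
 *	if (phys == -1)
 *		return VM_FAULT_SIGBUS;
 *	pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
 *	return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
 */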

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline bool dax_align_valid(unsigned long align)
{
	if (align == PUD_SIZE && IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
		return true;
	if (align == PMD_SIZE && has_transparent_hugepage())
		return true;
	if (align == PAGE_SIZE)
		return true;
	return false;
}
#else
static inline bool dax_align_valid(unsigned long align)
{
	return align == PAGE_SIZE;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
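
/*
 * Illustrative sketch (not part of this header): validating a requested
 * alignment before applying it to an instance, e.g. in a sysfs "align"
 * store path. @val is a hypothetical, already-parsed input.
 *
 *	if (!dax_align_valid(val))
 *		return -EINVAL;
 *	dev_dax->align = val;
 */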

#endif