// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2016-2018 Intel Corporation. All rights reserved. */
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pfn_t.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include "dax-private.h"
#include "bus.h"

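/*
 * The percpu_ref below tracks outstanding references to device-dax
 * mapped pages. dev_dax_percpu_kill() initiates teardown,
 * dev_dax_percpu_release() fires once the reference count drops to
 * zero, and dev_dax_percpu_exit() waits for that release before
 * tearing the ref down.
 */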
static struct dev_dax *ref_to_dev_dax(struct percpu_ref *ref)
{
	return container_of(ref, struct dev_dax, ref);
}

static void dev_dax_percpu_release(struct percpu_ref *ref)
{
	struct dev_dax *dev_dax = ref_to_dev_dax(ref);

	dev_dbg(&dev_dax->dev, "%s\n", __func__);
	complete(&dev_dax->cmp);
}

static void dev_dax_percpu_exit(void *data)
{
	struct percpu_ref *ref = data;
	struct dev_dax *dev_dax = ref_to_dev_dax(ref);

	dev_dbg(&dev_dax->dev, "%s\n", __func__);
	wait_for_completion(&dev_dax->cmp);
	percpu_ref_exit(ref);
}

static void dev_dax_percpu_kill(struct percpu_ref *data)
{
	struct percpu_ref *ref = data;
	struct dev_dax *dev_dax = ref_to_dev_dax(ref);

	dev_dbg(&dev_dax->dev, "%s\n", __func__);
	percpu_ref_kill(ref);
}

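/*
 * Validate that a vma is eligible to map device-dax capacity: the
 * backing dax_device must be alive, the mapping must be shared, the
 * vma start/end must be aligned to the region alignment, and the vma
 * must be DAX capable.
 */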
static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
		const char *func)
{
	struct dax_region *dax_region = dev_dax->region;
	struct device *dev = &dev_dax->dev;
	unsigned long mask;

	if (!dax_alive(dev_dax->dax_dev))
		return -ENXIO;

	/* prevent private mappings from being established */
	if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
		dev_info_ratelimited(dev,
				"%s: %s: fail, attempted private mapping\n",
				current->comm, func);
		return -EINVAL;
	}

	mask = dax_region->align - 1;
	if (vma->vm_start & mask || vma->vm_end & mask) {
		dev_info_ratelimited(dev,
				"%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
				current->comm, func, vma->vm_start, vma->vm_end,
				mask);
		return -EINVAL;
	}

	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV
			&& (vma->vm_flags & VM_DONTCOPY) == 0) {
		dev_info_ratelimited(dev,
				"%s: %s: fail, dax range requires MADV_DONTFORK\n",
				current->comm, func);
		return -EINVAL;
	}

	if (!vma_is_dax(vma)) {
		dev_info_ratelimited(dev,
				"%s: %s: fail, vma is not DAX capable\n",
				current->comm, func);
		return -EINVAL;
	}

	return 0;
}

/* see "strong" declaration in tools/testing/nvdimm/dax-dev.c */
__weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
		unsigned long size)
{
	struct resource *res = &dev_dax->region->res;
	phys_addr_t phys;

	phys = pgoff * PAGE_SIZE + res->start;
	if (phys >= res->start && phys <= res->end) {
		if (phys + size - 1 <= res->end)
			return phys;
	}

	return -1;
}

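/*
 * The __dev_dax_{pte,pmd,pud}_fault() helpers below share a common
 * shape: validate the vma, check that the fault size matches the
 * region alignment, translate the faulting offset to a physical
 * address, and install a mapping of the corresponding size.
 */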
static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf, pfn_t *pfn)
{
	struct device *dev = &dev_dax->dev;
	struct dax_region *dax_region;
	phys_addr_t phys;
	unsigned int fault_size = PAGE_SIZE;

	if (check_vma(dev_dax, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

	dax_region = dev_dax->region;
	if (dax_region->align > PAGE_SIZE) {
		dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
			dax_region->align, fault_size);
		return VM_FAULT_SIGBUS;
	}

	if (fault_size != dax_region->align)
		return VM_FAULT_SIGBUS;

	phys = dax_pgoff_to_phys(dev_dax, vmf->pgoff, PAGE_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", vmf->pgoff);
		return VM_FAULT_SIGBUS;
	}

	*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

	return vmf_insert_mixed(vmf->vma, vmf->address, *pfn);
}

static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf, pfn_t *pfn)
{
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct device *dev = &dev_dax->dev;
	struct dax_region *dax_region;
	phys_addr_t phys;
	pgoff_t pgoff;
	unsigned int fault_size = PMD_SIZE;

	if (check_vma(dev_dax, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

	dax_region = dev_dax->region;
	if (dax_region->align > PMD_SIZE) {
		dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
			dax_region->align, fault_size);
		return VM_FAULT_SIGBUS;
	}

	/* dax pmd mappings require pfn_t_devmap() */
	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
		dev_dbg(dev, "region lacks devmap flags\n");
		return VM_FAULT_SIGBUS;
	}

	if (fault_size < dax_region->align)
		return VM_FAULT_SIGBUS;
	else if (fault_size > dax_region->align)
		return VM_FAULT_FALLBACK;

	/* if we are outside of the VMA */
	if (pmd_addr < vmf->vma->vm_start ||
			(pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
		return VM_FAULT_SIGBUS;

	pgoff = linear_page_index(vmf->vma, pmd_addr);
	phys = dax_pgoff_to_phys(dev_dax, pgoff, PMD_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", pgoff);
		return VM_FAULT_SIGBUS;
	}

	*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

	return vmf_insert_pfn_pmd(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf, pfn_t *pfn)
{
	unsigned long pud_addr = vmf->address & PUD_MASK;
	struct device *dev = &dev_dax->dev;
	struct dax_region *dax_region;
	phys_addr_t phys;
	pgoff_t pgoff;
	unsigned int fault_size = PUD_SIZE;

	if (check_vma(dev_dax, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

	dax_region = dev_dax->region;
	if (dax_region->align > PUD_SIZE) {
		dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
			dax_region->align, fault_size);
		return VM_FAULT_SIGBUS;
	}

	/* dax pud mappings require pfn_t_devmap() */
	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
		dev_dbg(dev, "region lacks devmap flags\n");
		return VM_FAULT_SIGBUS;
	}

	if (fault_size < dax_region->align)
		return VM_FAULT_SIGBUS;
	else if (fault_size > dax_region->align)
		return VM_FAULT_FALLBACK;

	/* if we are outside of the VMA */
	if (pud_addr < vmf->vma->vm_start ||
			(pud_addr + PUD_SIZE) > vmf->vma->vm_end)
		return VM_FAULT_SIGBUS;

	pgoff = linear_page_index(vmf->vma, pud_addr);
	phys = dax_pgoff_to_phys(dev_dax, pgoff, PUD_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", pgoff);
		return VM_FAULT_SIGBUS;
	}

	*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

	return vmf_insert_pfn_pud(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
}
#else
static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf, pfn_t *pfn)
{
	return VM_FAULT_FALLBACK;
}
#endif /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	struct file *filp = vmf->vma->vm_file;
	unsigned long fault_size;
	vm_fault_t rc = VM_FAULT_SIGBUS;
	int id;
	pfn_t pfn;
	struct dev_dax *dev_dax = filp->private_data;

	dev_dbg(&dev_dax->dev, "%s: %s (%#lx - %#lx) size = %d\n", current->comm,
			(vmf->flags & FAULT_FLAG_WRITE) ? "write" : "read",
			vmf->vma->vm_start, vmf->vma->vm_end, pe_size);

	id = dax_read_lock();
	switch (pe_size) {
	case PE_SIZE_PTE:
		fault_size = PAGE_SIZE;
		rc = __dev_dax_pte_fault(dev_dax, vmf, &pfn);
		break;
	case PE_SIZE_PMD:
		fault_size = PMD_SIZE;
		rc = __dev_dax_pmd_fault(dev_dax, vmf, &pfn);
		break;
	case PE_SIZE_PUD:
		fault_size = PUD_SIZE;
		rc = __dev_dax_pud_fault(dev_dax, vmf, &pfn);
		break;
	default:
		rc = VM_FAULT_SIGBUS;
	}

	if (rc == VM_FAULT_NOPAGE) {
		unsigned long i;
		pgoff_t pgoff;

		/*
		 * In the device-dax case the only possibility for a
		 * VM_FAULT_NOPAGE result is when device-dax capacity is
		 * mapped. No need to consider the zero page, or racing
		 * conflicting mappings.
		 */
		pgoff = linear_page_index(vmf->vma, vmf->address
				& ~(fault_size - 1));
		for (i = 0; i < fault_size / PAGE_SIZE; i++) {
			struct page *page;

			page = pfn_to_page(pfn_t_to_pfn(pfn) + i);
			if (page->mapping)
				continue;
			page->mapping = filp->f_mapping;
			page->index = pgoff + i;
		}
	}
	dax_read_unlock(id);

	return rc;
}

static vm_fault_t dev_dax_fault(struct vm_fault *vmf)
{
	return dev_dax_huge_fault(vmf, PE_SIZE_PTE);
}

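/*
 * Disallow vma splits that are not aligned to the region alignment,
 * e.g. an attempt to munmap() a sub-range of a huge mapping;
 * otherwise the remaining pieces could no longer be faulted at the
 * supported granularity.
 */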
static int dev_dax_split(struct vm_area_struct *vma, unsigned long addr)
{
	struct file *filp = vma->vm_file;
	struct dev_dax *dev_dax = filp->private_data;
	struct dax_region *dax_region = dev_dax->region;

	if (!IS_ALIGNED(addr, dax_region->align))
		return -EINVAL;
	return 0;
}

static unsigned long dev_dax_pagesize(struct vm_area_struct *vma)
{
	struct file *filp = vma->vm_file;
	struct dev_dax *dev_dax = filp->private_data;
	struct dax_region *dax_region = dev_dax->region;

	return dax_region->align;
}

static const struct vm_operations_struct dax_vm_ops = {
	.fault = dev_dax_fault,
	.huge_fault = dev_dax_huge_fault,
	.split = dev_dax_split,
	.pagesize = dev_dax_pagesize,
};

static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct dev_dax *dev_dax = filp->private_data;
	int rc, id;

	dev_dbg(&dev_dax->dev, "trace\n");

	/*
	 * We lock to check dax_dev liveness and will re-check at
	 * fault time.
	 */
	id = dax_read_lock();
	rc = check_vma(dev_dax, vma, __func__);
	dax_read_unlock(id);
	if (rc)
		return rc;

	vma->vm_ops = &dax_vm_ops;
	vma->vm_flags |= VM_HUGEPAGE;
	return 0;
}

/* return an unmapped area aligned to the dax region specified alignment */
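/*
 * Over-allocate the search window by one alignment unit so that a
 * suitably aligned address is guaranteed to exist within whatever
 * range the generic allocator returns, then shift the result up to
 * the aligned address that is congruent with the file offset.
 */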
static unsigned long dax_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags)
{
	unsigned long off, off_end, off_align, len_align, addr_align, align;
	struct dev_dax *dev_dax = filp ? filp->private_data : NULL;
	struct dax_region *dax_region;

	if (!dev_dax || addr)
		goto out;

	dax_region = dev_dax->region;
	align = dax_region->align;
	off = pgoff << PAGE_SHIFT;
	off_end = off + len;
	off_align = round_up(off, align);

	if ((off_end <= off_align) || ((off_end - off_align) < align))
		goto out;

	len_align = len + align;
	if ((off + len_align) < off)
		goto out;

	addr_align = current->mm->get_unmapped_area(filp, addr, len_align,
			pgoff, flags);
	if (!IS_ERR_VALUE(addr_align)) {
		addr_align += (off - addr_align) & (align - 1);
		return addr_align;
	}
 out:
	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}

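/*
 * Device-dax pages get a page->mapping at fault time (see
 * dev_dax_huge_fault()) but never enter the page cache, so no-op
 * address_space operations suffice.
 */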
static const struct address_space_operations dev_dax_aops = {
	.set_page_dirty = noop_set_page_dirty,
	.invalidatepage = noop_invalidatepage,
};

static int dax_open(struct inode *inode, struct file *filp)
{
	struct dax_device *dax_dev = inode_dax(inode);
	struct inode *__dax_inode = dax_inode(dax_dev);
	struct dev_dax *dev_dax = dax_get_private(dax_dev);

	dev_dbg(&dev_dax->dev, "trace\n");
	inode->i_mapping = __dax_inode->i_mapping;
	inode->i_mapping->host = __dax_inode;
	inode->i_mapping->a_ops = &dev_dax_aops;
	filp->f_mapping = inode->i_mapping;
	filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
	filp->private_data = dev_dax;
	inode->i_flags = S_DAX;

	return 0;
}

static int dax_release(struct inode *inode, struct file *filp)
{
	struct dev_dax *dev_dax = filp->private_data;

	dev_dbg(&dev_dax->dev, "trace\n");
	return 0;
}

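/*
 * Device-dax has no filesystem metadata that could require syncing,
 * so MAP_SYNC mappings can be supported unconditionally via
 * ->mmap_supported_flags.
 */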
static const struct file_operations dax_fops = {
	.llseek = noop_llseek,
	.owner = THIS_MODULE,
	.open = dax_open,
	.release = dax_release,
	.get_unmapped_area = dax_get_unmapped_area,
	.mmap = dax_mmap,
	.mmap_supported_flags = MAP_SYNC,
};

static void dev_dax_cdev_del(void *cdev)
{
	cdev_del(cdev);
}

static void dev_dax_kill(void *dev_dax)
{
	kill_dev_dax(dev_dax);
}

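/*
 * Probe order: reserve the region's physical address range, establish
 * struct page coverage via devm_memremap_pages(), then publish the
 * character device. Each step registers a devm action, which is why
 * dev_dax_remove() has nothing to unwind by hand.
 */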
int dev_dax_probe(struct device *dev)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_device *dax_dev = dev_dax->dax_dev;
	struct resource *res = &dev_dax->region->res;
	struct inode *inode;
	struct cdev *cdev;
	void *addr;
	int rc;

	/* 1:1 map region resource range to device-dax instance range */
	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				dev_name(dev))) {
		dev_warn(dev, "could not reserve region %pR\n", res);
		return -EBUSY;
	}

	init_completion(&dev_dax->cmp);
	rc = percpu_ref_init(&dev_dax->ref, dev_dax_percpu_release, 0,
			GFP_KERNEL);
	if (rc)
		return rc;

	rc = devm_add_action_or_reset(dev, dev_dax_percpu_exit, &dev_dax->ref);
	if (rc)
		return rc;

	dev_dax->pgmap.ref = &dev_dax->ref;
	dev_dax->pgmap.kill = dev_dax_percpu_kill;
	addr = devm_memremap_pages(dev, &dev_dax->pgmap);
	if (IS_ERR(addr)) {
		devm_remove_action(dev, dev_dax_percpu_exit, &dev_dax->ref);
		percpu_ref_exit(&dev_dax->ref);
		return PTR_ERR(addr);
	}

	inode = dax_inode(dax_dev);
	cdev = inode->i_cdev;
	cdev_init(cdev, &dax_fops);
	if (dev->class) {
		/* for the CONFIG_DEV_DAX_PMEM_COMPAT case */
		cdev->owner = dev->parent->driver->owner;
	} else
		cdev->owner = dev->driver->owner;
	cdev_set_parent(cdev, &dev->kobj);
	rc = cdev_add(cdev, dev->devt, 1);
	if (rc)
		return rc;

	rc = devm_add_action_or_reset(dev, dev_dax_cdev_del, cdev);
	if (rc)
		return rc;

	run_dax(dax_dev);
	return devm_add_action_or_reset(dev, dev_dax_kill, dev_dax);
}
EXPORT_SYMBOL_GPL(dev_dax_probe);

static int dev_dax_remove(struct device *dev)
{
	/* all probe actions are unwound by devm */
	return 0;
}

static struct dax_device_driver device_dax_driver = {
	.drv = {
		.probe = dev_dax_probe,
		.remove = dev_dax_remove,
	},
	.match_always = 1,
};

static int __init dax_init(void)
{
	return dax_driver_register(&device_dax_driver);
}

static void __exit dax_exit(void)
{
	dax_driver_unregister(&device_dax_driver);
}

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
module_init(dax_init);
module_exit(dax_exit);
MODULE_ALIAS_DAX_DEVICE(0);