// SPDX-License-Identifier: GPL-2.0-or-later

/*
 *    Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
 *			 <benh@kernel.crashing.org>
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/memblock.h>
#include <linux/syscalls.h>
#include <linux/time_namespace.h>
#include <vdso/datapage.h>

#include <asm/syscall.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/vdso.h>
#include <asm/vdso_datapage.h>
#include <asm/setup.h>

/* The alignment of the vDSO */
#define VDSO_ALIGNMENT	(1 << 16)

extern char vdso32_start, vdso32_end;
extern char vdso64_start, vdso64_end;

/*
 * The vdso data page (aka. systemcfg for old ppc64 fans) is here.
 * Once the early boot kernel code no longer needs to muck around
 * with it, it will become dynamically allocated
 */
static union {
	struct vdso_arch_data	data;
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_arch_data *vdso_data = &vdso_data_store.data;

enum vvar_pages {
	VVAR_DATA_PAGE_OFFSET,
	VVAR_TIMENS_PAGE_OFFSET,
	VVAR_NR_PAGES,
};
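
/*
 * Note on layout: the union above pads vdso_arch_data out to exactly one
 * page so it can be mapped into userspace, and the per-process vvar area
 * spans VVAR_NR_PAGES pages: the vDSO data page at VVAR_DATA_PAGE_OFFSET
 * and a second slot at VVAR_TIMENS_PAGE_OFFSET that is only backed when
 * the task is in a time namespace (see vvar_fault() below).
 */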

static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma,
		       unsigned long text_size)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;

	if (new_size != text_size)
		return -EINVAL;

	current->mm->context.vdso = (void __user *)new_vma->vm_start;

	return 0;
}

static int vdso32_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
{
	return vdso_mremap(sm, new_vma, &vdso32_end - &vdso32_start);
}

static int vdso64_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
{
	return vdso_mremap(sm, new_vma, &vdso64_end - &vdso64_start);
}
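
/*
 * The vm_special_mapping descriptors below give the mappings their
 * /proc/<pid>/maps names ("[vvar]", "[vdso]") and wire up the fault and
 * mremap handlers above, so that e.g. a checkpoint/restore style mremap()
 * of the vDSO keeps mm->context.vdso pointing at the relocated text.
 */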

static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf);

static struct vm_special_mapping vvar_spec __ro_after_init = {
	.name = "[vvar]",
	.fault = vvar_fault,
};

static struct vm_special_mapping vdso32_spec __ro_after_init = {
	.name = "[vdso]",
	.mremap = vdso32_mremap,
};

static struct vm_special_mapping vdso64_spec __ro_after_init = {
	.name = "[vdso]",
	.mremap = vdso64_mremap,
};

#ifdef CONFIG_TIME_NS
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return ((struct vdso_arch_data *)vvar_page)->data;
}

/*
 * The vvar mapping contains data for a specific time namespace, so when a task
 * changes namespace we must unmap its vvar data for the old namespace.
 * Subsequent faults will map in data for the new namespace.
 *
 * For more details see timens_setup_vdso_data().
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;

	mmap_read_lock(mm);

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long size = vma->vm_end - vma->vm_start;

		if (vma_is_special_mapping(vma, &vvar_spec))
			zap_page_range(vma, vma->vm_start, size);
	}

	mmap_read_unlock(mm);
	return 0;
}

static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	if (likely(vma->vm_mm == current->mm))
		return current->nsproxy->time_ns->vvar_page;

	/*
	 * VM_PFNMAP | VM_IO protect .fault() handler from being called
	 * through interfaces like /proc/$pid/mem or
	 * process_vm_{readv,writev}() as long as there's no .access()
	 * in special_mapping_vmops.
	 * For more details check_vma_flags() and __access_remote_vm()
	 */
	WARN(1, "vvar_page accessed remotely");

	return NULL;
}
#else
static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	return NULL;
}
#endif

static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *timens_page = find_timens_vvar_page(vma);
	unsigned long pfn;

	switch (vmf->pgoff) {
	case VVAR_DATA_PAGE_OFFSET:
		if (timens_page)
			pfn = page_to_pfn(timens_page);
		else
			pfn = virt_to_pfn(vdso_data);
		break;
#ifdef CONFIG_TIME_NS
	case VVAR_TIMENS_PAGE_OFFSET:
		/*
		 * If a task belongs to a time namespace then a namespace
		 * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
		 * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
		 * offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (!timens_page)
			return VM_FAULT_SIGBUS;
		pfn = virt_to_pfn(vdso_data);
		break;
#endif /* CONFIG_TIME_NS */
	default:
		return VM_FAULT_SIGBUS;
	}

	return vmf_insert_pfn(vma, vmf->address, pfn);
}

/*
 * This is called from binfmt_elf, we create the special vma for the
 * vDSO and insert it into the mm struct tree
 */
static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	unsigned long vdso_size, vdso_base, mappings_size;
	struct vm_special_mapping *vdso_spec;
	unsigned long vvar_size = VVAR_NR_PAGES * PAGE_SIZE;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (is_32bit_task()) {
		vdso_spec = &vdso32_spec;
		vdso_size = &vdso32_end - &vdso32_start;
		vdso_base = VDSO32_MBASE;
	} else {
		vdso_spec = &vdso64_spec;
		vdso_size = &vdso64_end - &vdso64_start;
		/*
		 * On 64bit we don't have a preferred map address. This
		 * allows get_unmapped_area to find an area near other mmaps
		 * and most likely share a SLB entry.
		 */
		vdso_base = 0;
	}

	mappings_size = vdso_size + vvar_size;
	mappings_size += (VDSO_ALIGNMENT - 1) & PAGE_MASK;
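
	/*
	 * Layout of the reservation made below: [vvar pages (vvar_size)]
	 * [vDSO text (vdso_size)], plus alignment slack; e.g. with 4K pages
	 * the slack is 0xf000, while with 64K pages it is zero because any
	 * page-aligned address is already VDSO_ALIGNMENT-aligned.
	 */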

	/*
	 * pick a base address for the vDSO in process space. We try to put it
	 * at vdso_base which is the "natural" base for it, but we might fail
	 * and end up putting it elsewhere.
	 * Add enough to the size so that the result can be aligned.
	 */
	vdso_base = get_unmapped_area(NULL, vdso_base, mappings_size, 0, 0);
	if (IS_ERR_VALUE(vdso_base))
		return vdso_base;

	/* Add required alignment. */
	vdso_base = ALIGN(vdso_base, VDSO_ALIGNMENT);

	/*
	 * Put vDSO base into mm struct. We need to do this before calling
	 * install_special_mapping or the perf counter mmap tracking code
	 * will fail to recognise it as a vDSO.
	 */
	mm->context.vdso = (void __user *)vdso_base + vvar_size;

	vma = _install_special_mapping(mm, vdso_base, vvar_size,
				       VM_READ | VM_MAYREAD | VM_IO |
				       VM_DONTDUMP | VM_PFNMAP, &vvar_spec);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/*
	 * our vma flags don't have VM_WRITE so by default, the process isn't
	 * allowed to write those pages.
	 * gdb can break that with ptrace interface, and thus trigger COW on
	 * those pages but it's then your responsibility to never do that on
	 * the "data" page of the vDSO or you'll stop getting kernel updates
	 * and your nice userland gettimeofday will be totally dead.
	 * It's fine to use that for setting breakpoints in the vDSO code
	 * pages though.
	 */
	vma = _install_special_mapping(mm, vdso_base + vvar_size, vdso_size,
				       VM_READ | VM_EXEC | VM_MAYREAD |
				       VM_MAYWRITE | VM_MAYEXEC, vdso_spec);
	if (IS_ERR(vma))
		do_munmap(mm, vdso_base, vvar_size, NULL);

	return PTR_ERR_OR_ZERO(vma);
}
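
/*
 * Entry point called from the ELF loader: take the mmap lock and set up the
 * vDSO mappings, leaving mm->context.vdso NULL if that fails.
 */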
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int rc;

	mm->context.vdso = NULL;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	rc = __arch_setup_additional_pages(bprm, uses_interp);
	if (rc)
		mm->context.vdso = NULL;

	mmap_write_unlock(mm);
	return rc;
}

#define VDSO_DO_FIXUPS(type, value, bits, sec) do {					\
	void *__start = (void *)VDSO##bits##_SYMBOL(&vdso##bits##_start, sec##_start);	\
	void *__end = (void *)VDSO##bits##_SYMBOL(&vdso##bits##_start, sec##_end);	\
											\
	do_##type##_fixups((value), __start, __end);					\
} while (0)
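
/*
 * As an illustration, VDSO_DO_FIXUPS(feature, cur_cpu_spec->cpu_features, 64, ftr_fixup)
 * expands to a call of the form:
 *
 *	do_feature_fixups(cur_cpu_spec->cpu_features,
 *			  (void *)VDSO64_SYMBOL(&vdso64_start, ftr_fixup_start),
 *			  (void *)VDSO64_SYMBOL(&vdso64_start, ftr_fixup_end));
 */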

static void __init vdso_fixup_features(void)
{
#ifdef CONFIG_PPC64
	VDSO_DO_FIXUPS(feature, cur_cpu_spec->cpu_features, 64, ftr_fixup);
	VDSO_DO_FIXUPS(feature, cur_cpu_spec->mmu_features, 64, mmu_ftr_fixup);
	VDSO_DO_FIXUPS(feature, powerpc_firmware_features, 64, fw_ftr_fixup);
	VDSO_DO_FIXUPS(lwsync, cur_cpu_spec->cpu_features, 64, lwsync_fixup);
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_VDSO32
	VDSO_DO_FIXUPS(feature, cur_cpu_spec->cpu_features, 32, ftr_fixup);
	VDSO_DO_FIXUPS(feature, cur_cpu_spec->mmu_features, 32, mmu_ftr_fixup);
#ifdef CONFIG_PPC64
	VDSO_DO_FIXUPS(feature, powerpc_firmware_features, 32, fw_ftr_fixup);
#endif /* CONFIG_PPC64 */
	VDSO_DO_FIXUPS(lwsync, cur_cpu_spec->cpu_features, 32, lwsync_fixup);
#endif
}

/*
 * Called from vdso_init() to initialize the bitmap of available
 * syscalls in the systemcfg page
 */
static void __init vdso_setup_syscall_map(void)
{
	unsigned int i;

	for (i = 0; i < NR_syscalls; i++) {
		if (sys_call_table[i] != (unsigned long)&sys_ni_syscall)
			vdso_data->syscall_map[i >> 5] |= 0x80000000UL >> (i & 0x1f);
		if (IS_ENABLED(CONFIG_COMPAT) &&
		    compat_sys_call_table[i] != (unsigned long)&sys_ni_syscall)
			vdso_data->compat_syscall_map[i >> 5] |= 0x80000000UL >> (i & 0x1f);
	}
}
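
/*
 * For example, syscall number 35 lands in word 35 >> 5 == 1 of syscall_map,
 * with mask 0x80000000UL >> (35 & 0x1f) == 0x10000000; within each 32-bit
 * word the most significant bit corresponds to the lowest syscall number.
 */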

#ifdef CONFIG_PPC64
int vdso_getcpu_init(void)
{
	unsigned long cpu, node, val;

	/*
	 * SPRG_VDSO contains the CPU in the bottom 16 bits and the NUMA node
	 * in the next 16 bits.  The VDSO uses this to implement getcpu().
	 */
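	/*
	 * For example, CPU 5 on NUMA node 2 is encoded as
	 * (5 & 0xffff) | ((2 & 0xffff) << 16) == 0x00020005.
	 */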
	cpu = get_cpu();
	WARN_ON_ONCE(cpu > 0xffff);

	node = cpu_to_node(cpu);
	WARN_ON_ONCE(node > 0xffff);

	val = (cpu & 0xffff) | ((node & 0xffff) << 16);
	mtspr(SPRN_SPRG_VDSO_WRITE, val);
	get_paca()->sprg_vdso = val;

	put_cpu();

	return 0;
}

/* We need to call this before SMP init */
early_initcall(vdso_getcpu_init);
#endif
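
/*
 * Build the NULL-terminated array of struct page pointers describing one
 * vDSO image; the extra slot from kcalloc(pages + 1, ...) stays zeroed and
 * terminates the list.
 */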
static struct page ** __init vdso_setup_pages(void *start, void *end)
{
	int i;
	struct page **pagelist;
	int pages = (end - start) >> PAGE_SHIFT;

	pagelist = kcalloc(pages + 1, sizeof(struct page *), GFP_KERNEL);
	if (!pagelist)
		panic("%s: Cannot allocate page list for VDSO", __func__);

	for (i = 0; i < pages; i++)
		pagelist[i] = virt_to_page(start + i * PAGE_SIZE);

	return pagelist;
}

static int __init vdso_init(void)
{
#ifdef CONFIG_PPC64
	/*
	 * Fill up the "systemcfg" stuff for backward compatibility
	 */
	strcpy((char *)vdso_data->eye_catcher, "SYSTEMCFG:PPC64");
	vdso_data->version.major = SYSTEMCFG_MAJOR;
	vdso_data->version.minor = SYSTEMCFG_MINOR;
	vdso_data->processor = mfspr(SPRN_PVR);

	/*
	 * Fake the old platform number for pSeries and add
	 * in LPAR bit if necessary
	 */
	vdso_data->platform = 0x100;
	if (firmware_has_feature(FW_FEATURE_LPAR))
		vdso_data->platform |= 1;
	vdso_data->physicalMemorySize = memblock_phys_mem_size();

	vdso_data->dcache_size = ppc64_caches.l1d.size;
	vdso_data->dcache_line_size = ppc64_caches.l1d.line_size;
	vdso_data->icache_size = ppc64_caches.l1i.size;
	vdso_data->icache_line_size = ppc64_caches.l1i.line_size;
	vdso_data->dcache_block_size = ppc64_caches.l1d.block_size;
	vdso_data->icache_block_size = ppc64_caches.l1i.block_size;
	vdso_data->dcache_log_block_size = ppc64_caches.l1d.log_block_size;
	vdso_data->icache_log_block_size = ppc64_caches.l1i.log_block_size;
#endif /* CONFIG_PPC64 */

	vdso_setup_syscall_map();

	vdso_fixup_features();

	if (IS_ENABLED(CONFIG_VDSO32))
		vdso32_spec.pages = vdso_setup_pages(&vdso32_start, &vdso32_end);

	if (IS_ENABLED(CONFIG_PPC64))
		vdso64_spec.pages = vdso_setup_pages(&vdso64_start, &vdso64_end);

	smp_wmb();

	return 0;
}
arch_initcall(vdso_init);