// SPDX-License-Identifier: GPL-2.0
/*
 * vdso setup for s390
 *
 *  Copyright IBM Corp. 2008
 *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */
#include <linux/binfmts.h>
#include <linux/compat.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/time_namespace.h>
#include <vdso/datapage.h>
#include <asm/vdso.h>
/* Start/end markers of the vdso images, provided by the linker scripts. */
extern char vdso64_start[], vdso64_end[];
extern char vdso32_start[], vdso32_end[];

/* Forward declaration; defined below, needed by vdso_join_timens(). */
static struct vm_special_mapping vvar_mapping;

/*
 * Backing store for the vdso data: the union pads the vdso_data array
 * to exactly one page so it can be mapped into user space as a whole
 * page (__page_aligned_data places it page aligned in .data).
 */
static union {
	struct vdso_data data[CS_BASES];
	u8 page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data;

/* Page offsets within the per-process [vvar] special mapping. */
enum vvar_pages {
	VVAR_DATA_PAGE_OFFSET,
	VVAR_TIMENS_PAGE_OFFSET,
	VVAR_NR_PAGES,
};
# ifdef CONFIG_TIME_NS
/*
 * Return the vdso_data contained in a vvar page. The page holds the
 * vdso_data array directly, so this is a plain pointer conversion.
 */
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return vvar_page;
}
static struct page * find_timens_vvar_page ( struct vm_area_struct * vma )
{
if ( likely ( vma - > vm_mm = = current - > mm ) )
return current - > nsproxy - > time_ns - > vvar_page ;
/*
* VM_PFNMAP | VM_IO protect . fault ( ) handler from being called
* through interfaces like / proc / $ pid / mem or
* process_vm_ { readv , writev } ( ) as long as there ' s no . access ( )
* in special_mapping_vmops ( ) .
* For more details check_vma_flags ( ) and __access_remote_vm ( )
*/
WARN ( 1 , " vvar_page accessed remotely " ) ;
return NULL ;
}
/*
* The VVAR page layout depends on whether a task belongs to the root or
* non - root time namespace . Whenever a task changes its namespace , the VVAR
* page tables are cleared and then they will be re - faulted with a
* corresponding layout .
* See also the comment near timens_setup_vdso_data ( ) for details .
*/
int vdso_join_timens ( struct task_struct * task , struct time_namespace * ns )
{
struct mm_struct * mm = task - > mm ;
struct vm_area_struct * vma ;
mmap_read_lock ( mm ) ;
for ( vma = mm - > mmap ; vma ; vma = vma - > vm_next ) {
unsigned long size = vma - > vm_end - vma - > vm_start ;
if ( ! vma_is_special_mapping ( vma , & vvar_mapping ) )
continue ;
zap_page_range ( vma , vma - > vm_start , size ) ;
break ;
}
mmap_read_unlock ( mm ) ;
return 0 ;
}
# else
/* Without CONFIG_TIME_NS there never is a time namespace vvar page. */
static inline struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	return NULL;
}
# endif
/*
 * Fault handler for the [vvar] special mapping.
 *
 * Resolves the page frame backing the faulting offset:
 *  - VVAR_DATA_PAGE_OFFSET: the vdso data page, replaced by the time
 *    namespace page when the task is in a non-root time namespace.
 *  - VVAR_TIMENS_PAGE_OFFSET: the real vdso data page; only valid for
 *    tasks in a non-root time namespace.
 * Any other offset raises SIGBUS.
 */
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *timens_page = find_timens_vvar_page(vma);
	unsigned long addr, pfn;
	vm_fault_t err;

	switch (vmf->pgoff) {
	case VVAR_DATA_PAGE_OFFSET:
		pfn = virt_to_pfn(vdso_data);
		if (timens_page) {
			/*
			 * Fault in VVAR page too, since it will be accessed
			 * to get clock data anyway.
			 */
			addr = vmf->address + VVAR_TIMENS_PAGE_OFFSET * PAGE_SIZE;
			err = vmf_insert_pfn(vma, addr, pfn);
			if (unlikely(err & VM_FAULT_ERROR))
				return err;
			/* Map the namespace page at the data page offset. */
			pfn = page_to_pfn(timens_page);
		}
		break;
#ifdef CONFIG_TIME_NS
	case VVAR_TIMENS_PAGE_OFFSET:
		/*
		 * If a task belongs to a time namespace then a namespace
		 * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
		 * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
		 * offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (!timens_page)
			return VM_FAULT_SIGBUS;
		pfn = virt_to_pfn(vdso_data);
		break;
#endif /* CONFIG_TIME_NS */
	default:
		return VM_FAULT_SIGBUS;
	}
	return vmf_insert_pfn(vma, vmf->address, pfn);
}
2017-05-15 10:23:38 +02:00
static int vdso_mremap ( const struct vm_special_mapping * sm ,
struct vm_area_struct * vma )
{
current - > mm - > context . vdso_base = vma - > vm_start ;
return 0 ;
}
2021-01-24 22:01:16 +01:00
/* Per-process vvar pages; backed on demand by vvar_fault(). */
static struct vm_special_mapping vvar_mapping = {
	.name = "[vvar]",
	.fault = vvar_fault,
};

/* 64-bit vdso text mapping. */
static struct vm_special_mapping vdso64_mapping = {
	.name = "[vdso]",
	.mremap = vdso_mremap,
};

/* Compat (32-bit) vdso text mapping. */
static struct vm_special_mapping vdso32_mapping = {
	.name = "[vdso]",
	.mremap = vdso_mremap,
};
/*
 * Store the CPU number in the TOD programmable field so the vdso
 * getcpu code can read it from user space.
 */
int vdso_getcpu_init(void)
{
	set_tod_programmable_field(smp_processor_id());
	return 0;
}
early_initcall(vdso_getcpu_init); /* Must be called before SMP init */
int arch_setup_additional_pages ( struct linux_binprm * bprm , int uses_interp )
{
2021-02-05 16:09:14 +01:00
unsigned long vdso_text_len , vdso_mapping_len ;
unsigned long vvar_start , vdso_text_start ;
2021-06-25 14:50:08 +02:00
struct vm_special_mapping * vdso_mapping ;
2008-12-25 13:38:36 +01:00
struct mm_struct * mm = current - > mm ;
2017-05-15 10:23:38 +02:00
struct vm_area_struct * vma ;
2008-12-25 13:38:36 +01:00
int rc ;
2021-02-05 16:19:32 +01:00
BUILD_BUG_ON ( VVAR_NR_PAGES ! = __VVAR_PAGES ) ;
2020-06-08 21:33:25 -07:00
if ( mmap_write_lock_killable ( mm ) )
2016-05-23 16:25:54 -07:00
return - EINTR ;
2021-06-25 14:50:08 +02:00
if ( is_compat_task ( ) ) {
vdso_text_len = vdso32_end - vdso32_start ;
vdso_mapping = & vdso32_mapping ;
} else {
vdso_text_len = vdso64_end - vdso64_start ;
vdso_mapping = & vdso64_mapping ;
}
2021-02-05 16:19:32 +01:00
vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE ;
2021-02-05 16:09:14 +01:00
vvar_start = get_unmapped_area ( NULL , 0 , vdso_mapping_len , 0 , 0 ) ;
rc = vvar_start ;
if ( IS_ERR_VALUE ( vvar_start ) )
2021-01-24 20:57:08 +01:00
goto out ;
2021-02-05 16:19:32 +01:00
vma = _install_special_mapping ( mm , vvar_start , VVAR_NR_PAGES * PAGE_SIZE ,
VM_READ | VM_MAYREAD | VM_IO | VM_DONTDUMP |
VM_PFNMAP ,
2021-02-05 16:09:14 +01:00
& vvar_mapping ) ;
rc = PTR_ERR ( vma ) ;
if ( IS_ERR ( vma ) )
goto out ;
2021-02-05 16:19:32 +01:00
vdso_text_start = vvar_start + VVAR_NR_PAGES * PAGE_SIZE ;
2021-01-24 22:01:16 +01:00
/* VM_MAYWRITE for COW so gdb can set breakpoints */
2021-02-05 16:09:14 +01:00
vma = _install_special_mapping ( mm , vdso_text_start , vdso_text_len ,
2017-05-15 10:23:38 +02:00
VM_READ | VM_EXEC |
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC ,
2021-06-25 14:50:08 +02:00
vdso_mapping ) ;
2021-01-24 22:01:16 +01:00
if ( IS_ERR ( vma ) ) {
2021-02-05 16:09:14 +01:00
do_munmap ( mm , vvar_start , PAGE_SIZE , NULL ) ;
2021-01-24 22:01:16 +01:00
rc = PTR_ERR ( vma ) ;
} else {
current - > mm - > context . vdso_base = vdso_text_start ;
rc = 0 ;
}
2021-01-24 20:57:08 +01:00
out :
2020-06-08 21:33:25 -07:00
mmap_write_unlock ( mm ) ;
2008-12-25 13:38:36 +01:00
return rc ;
}
2021-06-25 14:50:08 +02:00
/*
 * Build a NULL-terminated array of struct page pointers covering the
 * vdso image between @start and @end. Panics on allocation failure:
 * user space cannot run without the vdso page lists.
 */
static struct page **__init vdso_setup_pages(void *start, void *end)
{
	int npages = (end - start) >> PAGE_SHIFT;
	struct page **list;
	int idx;

	list = kcalloc(npages + 1, sizeof(struct page *), GFP_KERNEL);
	if (!list)
		panic("%s: Cannot allocate page list for VDSO", __func__);
	for (idx = 0; idx < npages; idx++)
		list[idx] = virt_to_page(start + idx * PAGE_SIZE);
	return list;
}
/*
 * Boot-time initialization: populate the page lists backing the vdso
 * special mappings (the compat list only when COMPAT is configured).
 */
static int __init vdso_init(void)
{
	if (IS_ENABLED(CONFIG_COMPAT))
		vdso32_mapping.pages = vdso_setup_pages(vdso32_start, vdso32_end);
	vdso64_mapping.pages = vdso_setup_pages(vdso64_start, vdso64_end);
	return 0;
}
arch_initcall(vdso_init);