// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
 *                    <benh@kernel.crashing.org>
 * Copyright (C) 2012 ARM Limited
 * Copyright (C) 2015 Regents of the University of California
*/

#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/binfmts.h>
#include <linux/err.h>
#include <asm/page.h>
#ifdef CONFIG_GENERIC_TIME_VSYSCALL
#include <vdso/datapage.h>
#else
#include <asm/vdso.h>
#endif
extern char vdso_start[], vdso_end[];

static unsigned int vdso_pages __ro_after_init;
static struct page **vdso_pagelist __ro_after_init;

/*
 * The vDSO data page.
*/
static union {
	struct vdso_data	data;
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;
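
/*
 * Build the list of pages backing the vDSO image once at boot.  One
 * extra slot is reserved at the end of vdso_pagelist for the shared
 * vDSO data page.
 */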
static int __init vdso_init(void)
{
	unsigned int i;

	vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
	vdso_pagelist =
		kcalloc(vdso_pages + 1, sizeof(struct page *), GFP_KERNEL);
	if (unlikely(vdso_pagelist == NULL)) {
		pr_err("vdso: pagelist allocation failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < vdso_pages; i++) {
		struct page *pg;

		pg = virt_to_page(vdso_start + (i << PAGE_SHIFT));
		vdso_pagelist[i] = pg;
	}
	vdso_pagelist[i] = virt_to_page(vdso_data);

	return 0;
}
arch_initcall(vdso_init);
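
/*
 * Map the vDSO into a new process at exec time: the code pages first,
 * then the read-only data page directly after them.
 */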
int arch_setup_additional_pages(struct linux_binprm *bprm,
	int uses_interp)
{
	struct mm_struct *mm = current->mm;
	unsigned long vdso_base, vdso_len;
	int ret;

	vdso_len = (vdso_pages + 1) << PAGE_SHIFT;

	mmap_write_lock(mm);
	vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = vdso_base;
		goto end;
	}

	/*
	 * Put vDSO base into mm struct. We need to do this before calling
	 * install_special_mapping or the perf counter mmap tracking code
	 * will fail to recognise it as a vDSO (since arch_vma_name fails).
	 */
	mm->context.vdso = (void *)vdso_base;
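
	/*
	 * VM_MAYWRITE is included so that a debugger can still plant
	 * breakpoints in the vDSO text: ptrace writes break COW on this
	 * private mapping instead of touching the shared pages.
	 */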
	ret = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
		(VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
		vdso_pagelist);
	if (unlikely(ret)) {
		mm->context.vdso = NULL;
		goto end;
	}
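
	/* The data page is mapped read-only, right after the code pages. */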
	vdso_base += (vdso_pages << PAGE_SHIFT);
	ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
		(VM_READ | VM_MAYREAD), &vdso_pagelist[vdso_pages]);
	if (unlikely(ret))
		mm->context.vdso = NULL;

end:
	mmap_write_unlock(mm);
	return ret;
}
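
/*
 * Name the two vDSO mappings so they show up as "[vdso]" and
 * "[vdso_data]" in /proc/<pid>/maps and in perf mmap events.
 */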
const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso))
		return "[vdso]";
	if (vma->vm_mm && (vma->vm_start ==
			   (long)vma->vm_mm->context.vdso + PAGE_SIZE))
		return "[vdso_data]";
	return NULL;
}