/*
 * arch/xtensa/kernel/syscall.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 * Copyright (C) 2000 Silicon Graphics, Inc.
 * Copyright (C) 1995 - 2000 by Ralf Baechle
 *
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
 * Chris Zankel <chris@zankel.net>
 * Kevin Chea
 *
 */
2016-12-24 11:46:01 -08:00
# include <linux/uaccess.h>
2006-12-13 00:34:32 -08:00
# include <asm/syscall.h>
2006-12-10 02:18:52 -08:00
# include <linux/linkage.h>
# include <linux/stringify.h>
# include <linux/errno.h>
# include <linux/syscalls.h>
# include <linux/file.h>
# include <linux/fs.h>
# include <linux/mman.h>
2017-02-08 18:51:31 +01:00
# include <linux/sched/mm.h>
2006-12-10 02:18:52 -08:00
# include <linux/shm.h>
2021-03-02 00:36:54 +09:00
/*
 * The system call dispatch table, indexed by syscall number.
 *
 * Each entry of <asm/syscall_table.h> expands through __SYSCALL(nr, entry)
 * into a function pointer cast to the generic syscall_t type, so the table
 * is generated entirely by the preprocessor from the arch syscall list.
 */
syscall_t sys_call_table[] /* FIXME __cacheline_aligned */ = {
#define __SYSCALL(nr, entry)	(syscall_t)entry,
#include <asm/syscall_table.h>
};
2012-12-22 06:35:04 +04:00
# define COLOUR_ALIGN(addr, pgoff) \
( ( ( ( addr ) + SHMLBA - 1 ) & ~ ( SHMLBA - 1 ) ) + \
( ( ( pgoff ) < < PAGE_SHIFT ) & ( SHMLBA - 1 ) ) )
2006-12-10 02:18:52 -08:00
/*
 * shmat() entry point: attach a SysV shared memory segment.
 *
 * Delegates to the generic do_shmat() with SHMLBA as the required
 * alignment, then converts the returned mapping address (an unsigned
 * long out-parameter) into the long return value callers expect.
 * On failure the negative error code from do_shmat() is returned.
 */
asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg)
{
	unsigned long mapped_addr;
	long rc;

	rc = do_shmat(shmid, shmaddr, shmflg, &mapped_addr, SHMLBA);
	if (rc)
		return rc;

	return (long)mapped_addr;
}
2012-10-23 20:17:05 -07:00
/*
 * fadvise64_64() entry point with rearranged arguments.
 *
 * The generic syscall takes (fd, offset, len, advice); here 'advice'
 * comes second instead — presumably so the two 64-bit arguments start
 * on aligned argument-register pairs in the xtensa calling convention
 * (NOTE(review): confirm against the xtensa ABI).  This shim simply
 * restores the generic ordering and forwards to ksys_fadvise64_64().
 */
asmlinkage long xtensa_fadvise64_64(int fd, int advice,
		unsigned long long offset, unsigned long long len)
{
	return ksys_fadvise64_64(fd, offset, len, advice);
}
2012-12-22 06:35:04 +04:00
2014-09-22 07:21:48 +04:00
# ifdef CONFIG_MMU
2012-12-22 06:35:04 +04:00
/*
 * Pick an unmapped region of the user address space for a new mapping.
 *
 * Shared mappings must be cache-colour aligned (see COLOUR_ALIGN) so
 * that all user aliases of a page land on the same cache colour:
 *  - MAP_FIXED requests are honoured as-is, but a MAP_FIXED | MAP_SHARED
 *    address that breaks the colour constraint is rejected with -EINVAL;
 *  - otherwise the search starts at 'addr' (or TASK_UNMAPPED_BASE),
 *    colour- or page-aligned, and walks the VMAs upward until a gap of
 *    at least 'len' bytes is found.
 *
 * Returns the chosen address, or -ENOMEM if no suitable gap exists
 * below TASK_SIZE.
 */
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct vm_area_struct *vmm;
	struct vma_iterator vmi;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;
	if (!addr)
		addr = TASK_UNMAPPED_BASE;

	/* Align the starting hint before searching. */
	if (flags & MAP_SHARED)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

	/* Walk VMAs from 'addr' upward, looking for a gap of 'len' bytes.
	 * vm_start_gap() accounts for a stack guard gap below the VMA.
	 */
	vma_iter_init(&vmi, current->mm, addr);
	for_each_vma(vmi, vmm) {
		/* At this point: (addr < vmm->vm_end). */
		if (addr + len <= vm_start_gap(vmm))
			break;

		/* No room before this VMA; retry just past it, keeping
		 * the colour constraint for shared mappings.
		 */
		addr = vmm->vm_end;
		if (flags & MAP_SHARED)
			addr = COLOUR_ALIGN(addr, pgoff);
	}

	if (TASK_SIZE - len < addr)
		return -ENOMEM;
	return addr;
}
2014-09-22 07:21:48 +04:00
# endif