/*
 * arch/xtensa/kernel/syscall.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 * Copyright (C) 2000 Silicon Graphics, Inc.
 * Copyright (C) 1995 - 2000 by Ralf Baechle
 *
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
 * Chris Zankel <chris@zankel.net>
 * Kevin Chea
 *
 */
# include <asm/uaccess.h>
# include <asm/syscall.h>
# include <asm/unistd.h>
# include <linux/linkage.h>
# include <linux/stringify.h>
# include <linux/errno.h>
# include <linux/syscalls.h>
# include <linux/file.h>
# include <linux/fs.h>
# include <linux/mman.h>
# include <linux/shm.h>
/* Uniform signature used for every entry in the dispatch table below. */
typedef void (*syscall_t)(void);

/*
 * System-call dispatch table, indexed by syscall number.
 *
 * Every slot first defaults to sys_ni_syscall ("not implemented") via the
 * GNU range-designator initializer; the __SYSCALL() expansion of
 * <uapi/asm/unistd.h> then overrides the slots that have real handlers.
 * Later designated initializers for the same index legally replace the
 * earlier default.
 */
syscall_t sys_call_table[__NR_syscall_count] /* FIXME __cacheline_aligned */ = {
	[0 ... __NR_syscall_count - 1] = (syscall_t)&sys_ni_syscall,

#define __SYSCALL(nr, symbol, nargs) [nr] = (syscall_t)symbol,
#include <uapi/asm/unistd.h>
};
/*
 * Round ADDR up to the next SHMLBA boundary, then add the cache-colour
 * offset implied by PGOFF, so that shared mappings of the same file page
 * land on the same D-cache colour (SHMLBA alias constraint).
 */
#define COLOUR_ALIGN(addr, pgoff) \
	((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
	 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))
/*
 * xtensa wrapper for shmat(2): attach a SysV shared-memory segment.
 *
 * do_shmat() returns the mapped address through @ret; on success we fold
 * that address into the (long) syscall return value, otherwise we pass
 * the negative error code straight through.  SHMLBA is handed down so the
 * attach address honours the cache-aliasing alignment above.
 */
asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
	if (err)
		return err;
	return (long)ret;
}
/*
 * xtensa wrapper for fadvise64_64(2).
 *
 * The xtensa ABI passes @advice before the 64-bit @offset/@len pair
 * (to satisfy register-pair alignment for long long arguments), so this
 * shim just reorders the arguments for the generic implementation.
 */
asmlinkage long xtensa_fadvise64_64(int fd, int advice,
		unsigned long long offset, unsigned long long len)
{
	return sys_fadvise64_64(fd, offset, len, advice);
}
# ifdef CONFIG_MMU
unsigned long arch_get_unmapped_area ( struct file * filp , unsigned long addr ,
unsigned long len , unsigned long pgoff , unsigned long flags )
{
struct vm_area_struct * vmm ;
if ( flags & MAP_FIXED ) {
/* We do not accept a shared mapping if it would violate
* cache aliasing constraints .
*/
if ( ( flags & MAP_SHARED ) & &
( ( addr - ( pgoff < < PAGE_SHIFT ) ) & ( SHMLBA - 1 ) ) )
return - EINVAL ;
return addr ;
}
if ( len > TASK_SIZE )
return - ENOMEM ;
if ( ! addr )
addr = TASK_UNMAPPED_BASE ;
if ( flags & MAP_SHARED )
addr = COLOUR_ALIGN ( addr , pgoff ) ;
else
addr = PAGE_ALIGN ( addr ) ;
for ( vmm = find_vma ( current - > mm , addr ) ; ; vmm = vmm - > vm_next ) {
/* At this point: (!vmm || addr < vmm->vm_end). */
if ( TASK_SIZE - len < addr )
return - ENOMEM ;
if ( ! vmm | | addr + len < = vmm - > vm_start )
return addr ;
addr = vmm - > vm_end ;
if ( flags & MAP_SHARED )
addr = COLOUR_ALIGN ( addr , pgoff ) ;
}
}
# endif