/*
 * arch/sh/mm/mmap.c
 *
 * Copyright (C) 2008 - 2009  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <asm/page.h>
#include <asm/processor.h>

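/*
 * Alignment mask used to give shared mappings the same cache colour.
 * The PAGE_SIZE - 1 default means no extra alignment is required;
 * presumably the CPU setup code raises this on parts whose caches can
 * alias (e.g. to way size - 1), though that happens outside this file.
 */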
unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

#ifdef CONFIG_MMU
/*
 * To avoid cache aliases, we map the shared page with the same colour.
 */
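/*
 * Illustrative example (values assumed, not taken from this file): with
 * 4KiB pages and 16KiB cache ways, shm_align_mask == 0x3fff, so
 * COLOUR_ALIGN(0x10001234, 3) rounds the address up to 0x10004000 and
 * adds the offset colour (3 << 12) & 0x3fff == 0x3000, returning
 * 0x10007000, which has the same cache colour as page 3 of the file.
 */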
static inline unsigned long COLOUR_ALIGN(unsigned long addr,
                                         unsigned long pgoff)
{
        unsigned long base = (addr + shm_align_mask) & ~shm_align_mask;
        unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

        return base + off;
}
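
/*
 * Bottom-up search for an unmapped area. File-backed and shared
 * mappings are colour-aligned so that aliasing caches only ever see a
 * single colour per file page; with the 16KiB ways assumed above, the
 * align_mask below would work out to PAGE_MASK & 0x3fff == 0x3000.
 */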
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
        unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        int do_colour_align;
        struct vm_unmapped_area_info info;

        if (flags & MAP_FIXED) {
                /* We do not accept a shared mapping if it would violate
                 * cache aliasing constraints.
                 */
                if ((flags & MAP_SHARED) &&
                    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
                        return -EINVAL;
                return addr;
        }

        if (unlikely(len > TASK_SIZE))
                return -ENOMEM;

        do_colour_align = 0;
        if (filp || (flags & MAP_SHARED))
                do_colour_align = 1;

        if (addr) {
                if (do_colour_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

        info.flags = 0;
        info.length = len;
        info.low_limit = TASK_UNMAPPED_BASE;
        info.high_limit = TASK_SIZE;
        info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
        info.align_offset = pgoff << PAGE_SHIFT;
        return vm_unmapped_area(&info);
}
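
/*
 * Top-down variant: the same colour rules apply, but the search runs
 * from mm->mmap_base downwards, with a bottom-up retry if the top-down
 * window is exhausted.
 */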
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                               const unsigned long len, const unsigned long pgoff,
                               const unsigned long flags)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        int do_colour_align;
        struct vm_unmapped_area_info info;

        if (flags & MAP_FIXED) {
                /* We do not accept a shared mapping if it would violate
                 * cache aliasing constraints.
                 */
                if ((flags & MAP_SHARED) &&
                    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
                        return -EINVAL;
                return addr;
        }

        if (unlikely(len > TASK_SIZE))
                return -ENOMEM;

        do_colour_align = 0;
        if (filp || (flags & MAP_SHARED))
                do_colour_align = 1;

        /* requesting a specific address */
        if (addr) {
                if (do_colour_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = PAGE_SIZE;
        info.high_limit = mm->mmap_base;
        info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
        info.align_offset = pgoff << PAGE_SHIFT;
        addr = vm_unmapped_area(&info);

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        if (addr & ~PAGE_MASK) {
                VM_BUG_ON(addr != -ENOMEM);
                info.flags = 0;
                info.low_limit = TASK_UNMAPPED_BASE;
                info.high_limit = TASK_SIZE;
                addr = vm_unmapped_area(&info);
        }

        return addr;
}
#endif /* CONFIG_MMU */

/*
 * You really shouldn't be using read() or write() on /dev/mem. This
 * might go away in the future.
 */
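/*
 * Restrict read()/write() on /dev/mem to addresses backed by
 * kernel-managed RAM: anything below __MEMORY_START or extending past
 * the end of lowmem (__pa(high_memory)) is refused.
 */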
int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
        if (addr < __MEMORY_START)
                return 0;
        if (addr + count > __pa(high_memory))
                return 0;

        return 1;
}
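
/*
 * mmap() of /dev/mem, by contrast, is not restricted here: any pfn
 * range is considered valid.
 */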
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
        return 1;
}