/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2001, 2002 Ralf Baechle
 */
#include <linux/module.h>
#include <asm/addrspace.h>
#include <asm/byteorder.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
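
/*
 * Fill in the leaf PTEs covering [address, address + size) within a
 * single PMD: one PTE per PAGE_SIZE page, starting at the page frame of
 * phys_addr.  BUGs if any PTE in the range is already in use.
 */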
static inline void remap_area_pte(pte_t * pte, unsigned long address,
	phys_t size, phys_t phys_addr, unsigned long flags)
{
	phys_t end;
	unsigned long pfn;
	pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | __READABLE
	                           | __WRITEABLE | flags);

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	pfn = phys_addr >> PAGE_SHIFT;
	do {
		if (!pte_none(*pte)) {
			printk("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, pgprot));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}
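
/*
 * Populate the PMD entries covering [address, address + size) within a
 * single PGD: allocate a PTE page for each PMD slot as needed and hand
 * each per-PMD chunk of the range down to remap_area_pte().
 */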
static inline int remap_area_pmd(pmd_t * pmd, unsigned long address,
	phys_t size, phys_t phys_addr, unsigned long flags)
{
	phys_t end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pte_t *pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}
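
/*
 * Walk the kernel page tables top-down and map [address, address + size)
 * to phys_addr: pgd_offset() locates the top-level entry, pud_alloc()/
 * pmd_alloc() populate the middle levels on demand, and remap_area_pmd()
 * fills in the leaves.  Caches are flushed before the tables change and
 * the TLB afterwards.
 */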
static int remap_area_pages(unsigned long address, phys_t phys_addr,
	phys_t size, unsigned long flags)
{
	int error;
	pgd_t *dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	flush_cache_all();
	if (address >= end)
		BUG();
	do {
		pud_t *pud;
		pmd_t *pmd;

		error = -ENOMEM;
		pud = pud_alloc(&init_mm, dir, address);
		if (!pud)
			break;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));

	flush_tlb_all();
	return error;
}

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space.  Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */

#define IS_LOW512(addr) (!((phys_t)(addr) & (phys_t) ~0x1fffffffULL))

void __iomem *__ioremap(phys_t phys_addr, phys_t size, unsigned long flags)
{
	struct vm_struct *area;
	unsigned long offset;
	phys_t last_addr;
	void *addr;

	phys_addr = fixup_bigphys_addr(phys_addr, size);

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Map uncached objects in the low 512 MB of address space using KSEG1,
	 * otherwise map using page tables.
	 */
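	/*
	 * For instance (illustrative values): a request for physical
	 * address 0x1fc00000 with _CACHE_UNCACHED lies entirely below
	 * 512 MB, so on a 32-bit kernel CKSEG1ADDR() just ORs in the KSEG1
	 * base and the call returns 0xbfc00000 without touching any page
	 * tables.
	 */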
	if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
	    flags == _CACHE_UNCACHED)
		return (void __iomem *) CKSEG1ADDR(phys_addr);
	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;
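	/*
	 * Worked example (hypothetical request, 4 KB pages): phys_addr
	 * 0x1f000404, size 0x100.  Then last_addr = 0x1f000503, offset =
	 * 0x404, phys_addr rounds down to 0x1f000000, and size becomes
	 * PAGE_ALIGN(0x1f000504) - 0x1f000000 = 0x1000, i.e. one page.
	 */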

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		vunmap(addr);
		return NULL;
	}
	return (void __iomem *) (offset + (char *)addr);
}

#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)

void __iounmap(const volatile void __iomem *addr)
{
	struct vm_struct *p;

	if (IS_KSEG1(addr))
		return;

	p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
	if (!p)
		printk(KERN_ERR "iounmap: bad address %p\n", addr);

	kfree(p);
}

EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__iounmap);
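
/*
 * Usage sketch (illustrative only, not part of this file): a driver
 * mapping a hypothetical device register block through these exported
 * primitives.  The physical base, size and register offset below are
 * made-up values; real callers normally go through the ioremap() and
 * iounmap() wrappers rather than calling these directly.
 *
 *	void __iomem *regs;
 *
 *	regs = __ioremap(0x1f000400, 0x100, _CACHE_UNCACHED);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x08);	(hypothetical control register)
 *	__iounmap(regs);
 */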