/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

pte_t *lookup_address(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;
	pte = pte_offset_kernel(pmd, address);
	if (pte && !pte_present(*pte))
		pte = NULL;
	return pte;
}

static struct page *split_large_page(unsigned long address, pgprot_t prot,
				     pgprot_t ref_prot)
{
	int i;
	unsigned long addr;
	struct page *base = alloc_pages(GFP_KERNEL, 0);
	pte_t *pbase;

	if (!base)
		return NULL;
	/*
	 * page_private is used to track the number of entries in
	 * the page table page that have non-standard attributes.
	 */
	SetPagePrivate(base);
	page_private(base) = 0;
Revert "[PATCH] x86: __pa and __pa_symbol address space separation"
This was broken. It adds complexity, for no good reason. Rather than
separate __pa() and __pa_symbol(), we should deprecate __pa_symbol(),
and preferably __pa() too - and just use "virt_to_phys()" instead, which
is more readable and has nicer semantics.
However, right now, just undo the separation, and make __pa_symbol() be
the exact same as __pa(). That fixes the bugs this patch introduced,
and we can do the fairly obvious cleanups later.
Do the new __phys_addr() function (which is now the actual workhorse for
the unified __pa()/__pa_symbol()) as a real external function, that way
all the potential issues with compile/link-time optimizations of
constant symbol addresses go away, and we can also, if we choose to, add
more sanity-checking of the argument.
Cc: Eric W. Biederman <ebiederm@xmission.com>
Cc: Vivek Goyal <vgoyal@in.ibm.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-05-07 08:44:24 -07:00
	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
		pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
				   addr == address ? prot : ref_prot);
	}
	return base;
}

void clflush_cache_range(void *adr, int size)
{
	int i;

	for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
		clflush(adr + i);
}

static void flush_kernel_map(void *arg)
{
	struct list_head *l = (struct list_head *)arg;
	struct page *pg;

	/*
	 * When clflush is available always use it because it is
	 * much cheaper than WBINVD.
	 */
	/* clflush is still broken. Disable for now. */
	if (1 || !cpu_has_clflush)
		asm volatile("wbinvd" ::: "memory");
	else list_for_each_entry(pg, l, lru) {
		void *adr = page_address(pg);
		clflush_cache_range(adr, PAGE_SIZE);
	}
	__flush_tlb_all();
}

static inline void flush_map(struct list_head *l)
{
	on_each_cpu(flush_kernel_map, l, 1, 1);
}

static LIST_HEAD(deferred_pages); /* protected by init_mm.mmap_sem */

static inline void save_page(struct page *fpage)
{
	if (!test_and_set_bit(PG_arch_1, &fpage->flags))
		list_add(&fpage->lru, &deferred_pages);
}
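
/*
 * Descriptive note on the deferred-free flow, summarizing how the helpers in
 * this file fit together: save_page() only marks a page-table page
 * (PG_arch_1) and queues it on deferred_pages.  global_flush_tlb() later
 * takes the whole list under init_mm.mmap_sem, flushes caches/TLBs on all
 * CPUs, and frees any queued page whose page_private() count of
 * non-standard entries has dropped back to zero.
 */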

/*
 * No more special protections in this 2/4MB area - revert to a
 * large page again.
 */
static void revert_page(unsigned long address, pgprot_t ref_prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t large_pte;
	unsigned long pfn;

	pgd = pgd_offset_k(address);
	BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, address);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, address);
	BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
	pfn = (__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT;
	large_pte = pfn_pte(pfn, ref_prot);
	large_pte = pte_mkhuge(large_pte);
	set_pte((pte_t *)pmd, large_pte);
}

static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
		   pgprot_t ref_prot)
{
	pte_t *kpte;
	struct page *kpte_page;
	pgprot_t ref_prot2;

	kpte = lookup_address(address);
	if (!kpte) return 0;
	kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
	BUG_ON(PageLRU(kpte_page));
	BUG_ON(PageCompound(kpte_page));
	if (pgprot_val(prot) != pgprot_val(ref_prot)) {
		if (!pte_huge(*kpte)) {
			set_pte(kpte, pfn_pte(pfn, prot));
		} else {
			/*
			 * split_large_page will take the reference for this
			 * change_page_attr on the split page.
			 */
			struct page *split;

			ref_prot2 = pte_pgprot(pte_clrhuge(*kpte));
			split = split_large_page(address, prot, ref_prot2);
			if (!split)
				return -ENOMEM;
			pgprot_val(ref_prot2) &= ~_PAGE_NX;
			set_pte(kpte, mk_pte(split, ref_prot2));
			kpte_page = split;
		}
		page_private(kpte_page)++;
	} else if (!pte_huge(*kpte)) {
		set_pte(kpte, pfn_pte(pfn, ref_prot));
		BUG_ON(page_private(kpte_page) == 0);
		page_private(kpte_page)--;
	} else
		BUG();

	/* on x86-64 the direct mapping set at boot is not using 4k pages */
	BUG_ON(PageReserved(kpte_page));

	save_page(kpte_page);
	if (page_private(kpte_page) == 0)
		revert_page(address, ref_prot);
	return 0;
}

/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * in kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 * (An illustrative usage sketch follows change_page_attr() below.)
 */
int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
{
	int err = 0, kernel_map = 0;
	int i;

	if (address >= __START_KERNEL_map
	    && address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {
		address = (unsigned long)__va(__pa(address));
		kernel_map = 1;
	}

	down_write(&init_mm.mmap_sem);
	for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
		unsigned long pfn = __pa(address) >> PAGE_SHIFT;

		if (!kernel_map || pte_present(pfn_pte(0, prot))) {
			err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
			if (err)
				break;
		}
		/* Handle kernel mapping too which aliases part of
		 * the lowmem */
		if (__pa(address) < KERNEL_TEXT_SIZE) {
			unsigned long addr2;
			pgprot_t prot2;
			addr2 = __START_KERNEL_map + __pa(address);
			/* Make sure the kernel mappings stay executable */
			prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
			err = __change_page_attr(addr2, pfn, prot2,
						 PAGE_KERNEL_EXEC);
		}
	}
	up_write(&init_mm.mmap_sem);
	return err;
}

/* Don't call this for MMIO areas that may not have a mem_map entry */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	unsigned long addr = (unsigned long)page_address(page);
	return change_page_attr_addr(addr, numpages, prot);
}
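
/*
 * Illustrative usage sketch (not part of the original code): a caller that
 * wants an uncached view of a regular kernel page changes the linear mapping
 * and then flushes, per the contract documented above.  PAGE_KERNEL_NOCACHE,
 * alloc_page() and the surrounding error handling are assumptions made for
 * this example only.
 *
 *	struct page *page = alloc_page(GFP_KERNEL);
 *	int err;
 *
 *	if (!page)
 *		return -ENOMEM;
 *	err = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
 *	global_flush_tlb();			// must follow change_page_attr()
 *	if (!err) {
 *		// ... use the uncached mapping ...
 *		change_page_attr(page, 1, PAGE_KERNEL);	// restore write-back
 *		global_flush_tlb();
 *	}
 *	__free_page(page);
 */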

void global_flush_tlb(void)
{
	struct page *pg, *next;
	struct list_head l;

	/*
	 * Write-protect the semaphore, to exclude two contexts
	 * doing a list_replace_init() call in parallel and to
	 * exclude new additions to the deferred_pages list:
	 */
	down_write(&init_mm.mmap_sem);
	list_replace_init(&deferred_pages, &l);
	up_write(&init_mm.mmap_sem);

	flush_map(&l);

	list_for_each_entry_safe(pg, next, &l, lru) {
		list_del(&pg->lru);
		clear_bit(PG_arch_1, &pg->flags);
		if (page_private(pg) != 0)
			continue;
		ClearPagePrivate(pg);
		__free_page(pg);
	}
}

EXPORT_SYMBOL(change_page_attr);
EXPORT_SYMBOL(global_flush_tlb);