x86: clean up arch/x86/mm/pageattr.c

do some leftover cleanups in the now unified arch/x86/mm/pageattr.c
file.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
commit e4b71dcf54 (parent 4554ab95c2)
Author: Ingo Molnar <mingo@elte.hu>
Date:   2008-01-30 13:34:04 +01:00

2 changed files with 7 additions and 8 deletions

arch/x86/mm/pageattr.c

@@ -2,7 +2,6 @@
  * Copyright 2002 Andi Kleen, SuSE Labs.
  * Thanks to Ben LaHaise for precious feedback.
  */
-
 #include <linux/highmem.h>
 #include <linux/module.h>
 #include <linux/sched.h>
@@ -50,9 +49,7 @@ static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
 	/* change init_mm */
 	set_pte_atomic(kpte, pte);
 #ifdef CONFIG_X86_32
-	if (SHARED_KERNEL_PMD)
-		return;
-	{
+	if (!SHARED_KERNEL_PMD) {
 		struct page *page;
 
 		for (page = pgd_list; page; page = (struct page *)page->index) {
@@ -277,14 +274,14 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 		return;
 
 	/*
-	 * the return value is ignored - the calls cannot fail,
-	 * large pages are disabled at boot time.
+	 * The return value is ignored - the calls cannot fail,
+	 * large pages are disabled at boot time:
 	 */
 	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
 
 	/*
-	 * we should perform an IPI and flush all tlbs,
-	 * but that can deadlock->flush only current cpu.
+	 * We should perform an IPI and flush all tlbs,
+	 * but that can deadlock->flush only current cpu:
 	 */
 	__flush_tlb_all();
 }
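For reference, the __set_pmd_pte() hunk above is a pure control-flow simplification: an early return followed by a bare block becomes a single block guarded by the negated condition. Below is a minimal stand-alone C sketch of the same before/after pattern; the names and output are invented for illustration and are not kernel code.

	#include <stdio.h>

	/* Old shape: early return, then an unconditional bare block. */
	static void sync_old(int shared)
	{
		if (shared)
			return;
		{
			int i;

			for (i = 0; i < 2; i++)
				printf("old: fixing entry %d\n", i);
		}
	}

	/* New shape after the cleanup: one block guarded by the negated test. */
	static void sync_new(int shared)
	{
		if (!shared) {
			int i;

			for (i = 0; i < 2; i++)
				printf("new: fixing entry %d\n", i);
		}
	}

	int main(void)
	{
		/* Both shapes behave identically for either value of the flag. */
		sync_old(0);
		sync_new(0);
		sync_old(1);
		sync_new(1);
		return 0;
	}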

(second changed file; path not preserved in this view)

@@ -25,6 +25,8 @@ extern void clear_kernel_mapping(unsigned long addr, unsigned long size);
 
 #endif /* !__ASSEMBLY__ */
 
+#define SHARED_KERNEL_PMD	1
+
 /*
  * PGDIR_SHIFT determines what a top-level page table entry can map
  */
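With SHARED_KERNEL_PMD defined as the compile-time constant 1, a branch guarded by `if (!SHARED_KERNEL_PMD)` can never run and the compiler is free to discard it, which is what makes the branch-style test in the pageattr.c hunk cheap. A stand-alone sketch of that effect follows; the helper name and message are invented for the example and do not come from the kernel.

	#include <stdio.h>

	/* Stand-in for the header constant added above: configurations that
	 * share kernel pmds define SHARED_KERNEL_PMD to 1. */
	#define SHARED_KERNEL_PMD	1

	/* Hypothetical helper: the per-pagetable fixup only matters when
	 * kernel pmds are NOT shared, so with the constant set to 1 the body
	 * is never reached and can be optimized away entirely. */
	static void sync_kernel_mappings(void)
	{
		if (!SHARED_KERNEL_PMD) {
			puts("walking the pgd list to propagate the update");
		}
	}

	int main(void)
	{
		sync_kernel_mappings();	/* prints nothing when SHARED_KERNEL_PMD is 1 */
		return 0;
	}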