From 9f5a8d610dd38c9b8e4c227f0c92cfaebe0ef135 Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman
Date: Thu, 14 Dec 2017 21:25:00 +0100
Subject: [PATCH] Revert "x86/mm/pat: Ensure cpa->pfn only contains page frame
 numbers"

This reverts commit 87e2bd898d3a79a8c609f183180adac47879a2a4 which is
commit edc3b9129cecd0f0857112136f5b8b1bc1d45918 upstream.

It turns out there were too many other issues with this patch to make
it viable for the stable tree.

Reported-by: Ben Hutchings
Cc: Matt Fleming
Cc: Borislav Petkov
Cc: Andrew Morton
Cc: Andy Lutomirski
Cc: Andy Lutomirski
Cc: Ard Biesheuvel
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: Dave Jones
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Sai Praneeth Prakhya
Cc: Stephen Smalley
Cc: Thomas Gleixner
Cc: Toshi Kani
Cc: linux-efi@vger.kernel.org
Cc: Ingo Molnar
Cc: "Ghannam, Yazen"
Signed-off-by: Greg Kroah-Hartman
---
 arch/x86/mm/pageattr.c         | 17 +++++++++++------
 arch/x86/platform/efi/efi_64.c | 16 ++++++----------
 2 files changed, 17 insertions(+), 16 deletions(-)

diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index a0fe62e3f4a3..b599a780a5a9 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -911,10 +911,15 @@ static void populate_pte(struct cpa_data *cpa,
         pte = pte_offset_kernel(pmd, start);
 
         while (num_pages-- && start < end) {
-                set_pte(pte, pfn_pte(cpa->pfn, pgprot));
+
+                /* deal with the NX bit */
+                if (!(pgprot_val(pgprot) & _PAGE_NX))
+                        cpa->pfn &= ~_PAGE_NX;
+
+                set_pte(pte, pfn_pte(cpa->pfn >> PAGE_SHIFT, pgprot));
 
                 start    += PAGE_SIZE;
-                cpa->pfn++;
+                cpa->pfn += PAGE_SIZE;
                 pte++;
         }
 }
@@ -970,11 +975,11 @@ static int populate_pmd(struct cpa_data *cpa,
 
                 pmd = pmd_offset(pud, start);
 
-                set_pmd(pmd, __pmd(cpa->pfn << PAGE_SHIFT | _PAGE_PSE |
+                set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE |
                                    massage_pgprot(pmd_pgprot)));
 
                 start     += PMD_SIZE;
-                cpa->pfn  += PMD_SIZE >> PAGE_SHIFT;
+                cpa->pfn  += PMD_SIZE;
                 cur_pages += PMD_SIZE >> PAGE_SHIFT;
         }
 
@@ -1043,11 +1048,11 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
          * Map everything starting from the Gb boundary, possibly with 1G pages
          */
         while (end - start >= PUD_SIZE) {
-                set_pud(pud, __pud(cpa->pfn << PAGE_SHIFT | _PAGE_PSE |
+                set_pud(pud, __pud(cpa->pfn | _PAGE_PSE |
                                    massage_pgprot(pud_pgprot)));
 
                 start     += PUD_SIZE;
-                cpa->pfn  += PUD_SIZE >> PAGE_SHIFT;
+                cpa->pfn  += PUD_SIZE;
                 cur_pages += PUD_SIZE >> PAGE_SHIFT;
                 pud++;
         }
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 5aa186db59e3..a0ac0f9c307f 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -143,7 +143,7 @@ void efi_sync_low_kernel_mappings(void)
 
 int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
 {
-        unsigned long pfn, text;
+        unsigned long text;
         struct page *page;
         unsigned npages;
         pgd_t *pgd;
@@ -160,8 +160,7 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
          * and ident-map those pages containing the map before calling
          * phys_efi_set_virtual_address_map().
          */
-        pfn = pa_memmap >> PAGE_SHIFT;
-        if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, _PAGE_NX)) {
+        if (kernel_map_pages_in_pgd(pgd, pa_memmap, pa_memmap, num_pages, _PAGE_NX)) {
                 pr_err("Error ident-mapping new memmap (0x%lx)!\n", pa_memmap);
                 return 1;
         }
@@ -186,9 +185,8 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
 
         npages = (_end - _text) >> PAGE_SHIFT;
         text = __pa(_text);
-        pfn = text >> PAGE_SHIFT;
 
-        if (kernel_map_pages_in_pgd(pgd, pfn, text, npages, 0)) {
+        if (kernel_map_pages_in_pgd(pgd, text >> PAGE_SHIFT, text, npages, 0)) {
                 pr_err("Failed to map kernel text 1:1\n");
                 return 1;
         }
@@ -206,14 +204,12 @@ void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages
 static void __init __map_region(efi_memory_desc_t *md, u64 va)
 {
         pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
-        unsigned long flags = 0;
-        unsigned long pfn;
+        unsigned long pf = 0;
 
         if (!(md->attribute & EFI_MEMORY_WB))
-                flags |= _PAGE_PCD;
+                pf |= _PAGE_PCD;
 
-        pfn = md->phys_addr >> PAGE_SHIFT;
-        if (kernel_map_pages_in_pgd(pgd, pfn, va, md->num_pages, flags))
+        if (kernel_map_pages_in_pgd(pgd, md->phys_addr, va, md->num_pages, pf))
                 pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
                         md->phys_addr, va);
 }