This fixes an issue uncovered when a recent change to add the "page
table" flag was merged.  During bootup we see many errors like the
following:

  BUG: Bad page state in process mkdir  pfn:00bae
  page:c1ff15c0 count:0 mapcount:-1024 mapping:00000000 index:0x0
  flags: 0x0()
  raw: 00000000 00000000 00000000 fffffbff 00000000 00000100 00000200 00000000
  page dumped because: nonzero mapcount
  Modules linked in:
  CPU: 0 PID: 46 Comm: mkdir Tainted: G    B   4.17.0-simple-smp-07461-g1d40a5ea01d5-dirty #993
  Call trace:
  [<(ptrval)>] show_stack+0x44/0x54
  [<(ptrval)>] dump_stack+0xb0/0xe8
  [<(ptrval)>] bad_page+0x138/0x174
  [<(ptrval)>] ? cpumask_next+0x24/0x34
  [<(ptrval)>] free_pages_check_bad+0x6c/0xd0
  [<(ptrval)>] free_pcppages_bulk+0x174/0x42c
  [<(ptrval)>] free_unref_page_commit.isra.17+0xb8/0xc8
  [<(ptrval)>] free_unref_page_list+0x10c/0x190
  [<(ptrval)>] ? set_reset_devices+0x0/0x2c
  [<(ptrval)>] release_pages+0x3a0/0x414
  [<(ptrval)>] tlb_flush_mmu_free+0x5c/0x90
  [<(ptrval)>] tlb_flush_mmu+0x90/0xa4
  [<(ptrval)>] arch_tlb_finish_mmu+0x50/0x94
  [<(ptrval)>] tlb_finish_mmu+0x30/0x64
  [<(ptrval)>] exit_mmap+0x110/0x1e0
  [<(ptrval)>] mmput+0x50/0xf0
  [<(ptrval)>] do_exit+0x274/0xa94
  [<(ptrval)>] do_group_exit+0x50/0x110
  [<(ptrval)>] __wake_up_parent+0x0/0x38
  [<(ptrval)>] _syscall_return+0x0/0x4

During the __pte_free_tlb path openrisc fails to call the page
destructor, which would clear the new bits that were introduced.  To
fix this we call the destructor.  It seems openrisc was the only
architecture missing this; all other architectures either call the
destructor as done here or use pte_free.

Note: failing to call the destructor was also messing up the zone
stats (and would cause other problems if we were using
SPLIT_PTE_PTLOCKS, which we are not yet).

Fixes: 1d40a5ea01d53 ("mm: mark pages in use for page tables")
Acked-by: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Stafford Horne <shorne@gmail.com>
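For context, the change is confined to the __pte_free_tlb() definition in
this header (the full file follows below).  A minimal sketch of what the
fix amounts to, assuming the pre-fix macro simply forwarded to
tlb_remove_page() -- the "before" form here is a hypothetical
reconstruction, not quoted from the old tree:

/* Pre-fix (hypothetical reconstruction): the page-table page reaches the
 * page allocator with the PG_table page-type marking set by
 * pgtable_page_ctor() still in place, so free_pages_check_bad() reports
 * "nonzero mapcount" as in the dump above.
 */
#if 0
#define __pte_free_tlb(tlb, pte, addr)	tlb_remove_page((tlb), (pte))
#endif

/* Fixed form (as in the header below): run the destructor first so the
 * page-type marking and the NR_PAGETABLE zone accounting are undone
 * before the page is handed back to the allocator.
 */
#define __pte_free_tlb(tlb, pte, addr)	\
do {					\
	pgtable_page_dtor(pte);		\
	tlb_remove_page((tlb), (pte));	\
} while (0)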
/*
 * OpenRISC Linux
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * OpenRISC implementation:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 * et al.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#ifndef __ASM_OPENRISC_PGALLOC_H
#define __ASM_OPENRISC_PGALLOC_H

#include <asm/page.h>
#include <linux/threads.h>
#include <linux/mm.h>
#include <linux/memblock.h>

extern int mem_init_done;

#define pmd_populate_kernel(mm, pmd, pte) \
	set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte)))

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				struct page *pte)
{
	set_pmd(pmd, __pmd(_KERNPG_TABLE +
		     ((unsigned long)page_to_pfn(pte) <<
		     (unsigned long) PAGE_SHIFT)));
}

/*
 * Allocate and free page tables.
 */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);

	if (ret) {
		memset(ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy(ret + USER_PTRS_PER_PGD,
		       swapper_pg_dir + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}
	return ret;
}

#if 0
/* FIXME: This seems to be the preferred style, but we are using
 * current_pgd (from mm->pgd) to load kernel pages so we need it
 * initialized.  This needs to be looked into.
 */
extern inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return (pgd_t *)get_zeroed_page(GFP_KERNEL);
}
#endif

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}

extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address);

static inline struct page *pte_alloc_one(struct mm_struct *mm,
					 unsigned long address)
{
	struct page *pte;

	pte = alloc_pages(GFP_KERNEL, 0);
	if (!pte)
		return NULL;
	clear_page(page_address(pte));
	if (!pgtable_page_ctor(pte)) {
		__free_page(pte);
		return NULL;
	}
	return pte;
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
	pgtable_page_dtor(pte);
	__free_page(pte);
}

#define __pte_free_tlb(tlb, pte, addr)	\
do {					\
	pgtable_page_dtor(pte);		\
	tlb_remove_page((tlb), (pte));	\
} while (0)

#define pmd_pgtable(pmd) pmd_page(pmd)

#define check_pgt_cache()	do { } while (0)

#endif