12e4d53f3f
Patch series "Fixup page directory freeing", v4. This is a repost of patch series from Peter with the arch specific changes except ppc64 dropped. ppc64 changes are added here because we are redoing the patch series on top of ppc64 changes. This makes it easy to backport these changes. Only the first 2 patches need to be backported to stable. The thing is, on anything SMP, freeing page directories should observe the exact same order as normal page freeing: 1) unhook page/directory 2) TLB invalidate 3) free page/directory Without this, any concurrent page-table walk could end up with a Use-after-Free. This is esp. trivial for anything that has software page-table walkers (HAVE_FAST_GUP / software TLB fill) or the hardware caches partial page-walks (ie. caches page directories). Even on UP this might give issues since mmu_gather is preemptible these days. An interrupt or preempted task accessing user pages might stumble into the free page if the hardware caches page directories. This patch series fixes ppc64 and add generic MMU_GATHER changes to support the conversion of other architectures. I haven't added patches w.r.t other architecture because they are yet to be acked. This patch (of 9): A followup patch is going to make sure we correctly invalidate page walk cache before we free page table pages. In order to keep things simple enable RCU_TABLE_FREE even for !SMP so that we don't have to fixup the !SMP case differently in the followup patch !SMP case is right now broken for radix translation w.r.t page walk cache flush. We can get interrupted in between page table free and that would imply we have page walk cache entries pointing to tables which got freed already. Michael said "both our platforms that run on Power9 force SMP on in Kconfig, so the !SMP case is unlikely to be a problem for anyone in practice, unless they've hacked their kernel to build it !SMP." Link: http://lkml.kernel.org/r/20200116064531.483522-2-aneesh.kumar@linux.ibm.com Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com> Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org> Acked-by: Michael Ellerman <mpe@ellerman.id.au> Cc: <stable@vger.kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
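To make the unhook / invalidate / free ordering concrete, here is a minimal userspace sketch of the discipline the commit message describes. All names here (struct gather, queue_table, flush_and_free) are hypothetical stand-ins for struct mmu_gather and the kernel's batching machinery, not the kernel API, and the TLB invalidate is simulated by a printout.

#include <stdio.h>
#include <stdlib.h>

#define MAX_BATCH 8

struct gather {                     /* toy stand-in for struct mmu_gather */
        void *batch[MAX_BATCH];     /* directories queued for deferred freeing */
        int nr;
};

static void queue_table(struct gather *g, void *table)
{
        g->batch[g->nr++] = table;  /* caller has already done step 1: unhook */
}

static void flush_and_free(struct gather *g)
{
        /* step 2: invalidate BEFORE any queued table is freed */
        puts("TLB + page-walk-cache invalidate");
        for (int i = 0; i < g->nr; i++)
                free(g->batch[i]);  /* step 3: only now is reuse safe */
        g->nr = 0;
}

int main(void)
{
        struct gather g = { .nr = 0 };
        void **pud = calloc(512, sizeof(void *));
        void *pmd = calloc(512, sizeof(void *));

        pud[0] = pmd;
        pud[0] = NULL;              /* step 1: unhook the page directory */
        queue_table(&g, pmd);       /* defer the free ... */
        flush_and_free(&g);         /* ... until after the invalidate */
        free(pud);
        return 0;
}

Freeing pmd before the invalidate line would be exactly the use-after-free window the series closes: a concurrent (or interrupted) walker could still reach the table through a cached partial walk.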
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_PGALLOC_H
#define _ASM_POWERPC_NOHASH_PGALLOC_H

#include <linux/mm.h>
#include <linux/slab.h>

extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
#ifdef CONFIG_PPC64
extern void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address);
#else
/* 44x etc which is BOOKE not BOOK3E */
static inline void tlb_flush_pgtable(struct mmu_gather *tlb,
                                     unsigned long address)
{

}
#endif /* CONFIG_PPC64 */

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
        return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
                        pgtable_gfp_flags(mm, GFP_KERNEL));
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
}

#ifdef CONFIG_PPC64
#include <asm/nohash/64/pgalloc.h>
#else
#include <asm/nohash/32/pgalloc.h>
#endif

static inline void pgtable_free(void *table, int shift)
{
        if (!shift) {
                /* shift == 0 denotes a PTE fragment, not a slab-cached table */
                pte_fragment_free((unsigned long *)table, 0);
        } else {
                BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
                kmem_cache_free(PGT_CACHE(shift), table);
        }
}

#define get_hugepd_cache_index(x)       (x)

static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
        unsigned long pgf = (unsigned long)table;

        BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
        /*
         * Table addresses are aligned well past MAX_PGTABLE_INDEX_SIZE,
         * so the size class (shift) can ride in the low bits.
         */
        pgf |= shift;
        tlb_remove_table(tlb, (void *)pgf);
}

static inline void __tlb_remove_table(void *_table)
{
        /* Undo the packing done by pgtable_free_tlb() above */
        void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
        unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

        pgtable_free(table, shift);
}

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
                                  unsigned long address)
{
        /* Invalidate the page walk cache before the table can be freed */
        tlb_flush_pgtable(tlb, address);
        pgtable_free_tlb(tlb, table, 0);
}
#endif /* _ASM_POWERPC_NOHASH_PGALLOC_H */
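The shift-packing in pgtable_free_tlb() and __tlb_remove_table() works because page tables come from allocators whose alignment exceeds MAX_PGTABLE_INDEX_SIZE, leaving the low bits of every table address zero. Below is a self-contained userspace sketch of the same round trip; the 0xf mask value is assumed from ppc64 for illustration, and aligned_alloc() merely stands in for the kernel slab caches.

#include <stdio.h>
#include <stdlib.h>

/* Mask value assumed from ppc64 for illustration */
#define MAX_PGTABLE_INDEX_SIZE 0xf

int main(void)
{
        /* 64-byte alignment guarantees the low four bits are zero */
        void *table = aligned_alloc(64, 4096);
        unsigned long shift = 9;        /* example size class */

        /* pack, as pgtable_free_tlb() does */
        unsigned long pgf = (unsigned long)table | shift;

        /* unpack, as __tlb_remove_table() does */
        void *t = (void *)(pgf & ~(unsigned long)MAX_PGTABLE_INDEX_SIZE);
        unsigned long s = pgf & MAX_PGTABLE_INDEX_SIZE;

        printf("packed %p + shift %lu -> table %p, shift %lu\n",
               table, shift, t, s);
        free(t);
        return 0;
}

Packing the size class into the pointer lets a single deferred-free callback (tlb_remove_table) carry both the table address and enough information to route it back to the right cache, without widening the callback's void * payload.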