linux/arch/sparc/include/asm/pgalloc_32.h
Will Deacon 8e958839e4 sparc32: mm: Restructure sparc32 MMU page-table layout
The "SRMMU" supports 4k pages using a fixed three-level walk with a
256-entry PGD and 64-entry PMD/PTE levels. In order to fill a page
with 'pgtable_t's, the SRMMU code packs four native PTE tables
into a single PTE allocation, and does the same at the PMD level,
leading to an array of 16 physical pointers in a 'pmd_t'.

This breaks the generic code, which assumes READ_ONCE(*pmd) will be
word-sized.
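
As a sketch of the before/after 'pmd_t' shape (the 'pmdv' spelling
follows the pre-patch sparc32 sources and is illustrative rather than a
verbatim diff): each native table entry is a 32-bit descriptor, so a
64-entry table is 64 * 4 = 256 bytes, and sixteen such tables fit in a
single 4k page.

	/* Before: a software pmd_t bundled 16 native table pointers, so
	 * a single load of *pmd could not read the whole entry. */
	typedef struct { unsigned long pmdv[16]; } pmd_t;

	/* After: one word per entry; READ_ONCE(*pmd) is a single load. */
	typedef struct { unsigned long pmd; } pmd_t;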

In a manner similar to ef22d8abd876 ("m68k: mm: Restructure Motorola
MMU page-table layout"), this patch implements the native page-table
setup directly. This significantly increases the page-table memory
overhead, but this will be addressed in a subsequent patch.

Cc: "David S. Miller" <davem@davemloft.net>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2020-05-13 15:32:00 -07:00

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SPARC_PGALLOC_H
#define _SPARC_PGALLOC_H

#include <linux/kernel.h>
#include <linux/sched.h>

#include <asm/pgtsrmmu.h>
#include <asm/pgtable.h>
#include <asm/vaddrs.h>
#include <asm/page.h>

struct page;
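
/*
 * sparc32 keeps page tables in a dedicated region that is mapped
 * non-cacheable, so the MMU's table walker always sees current entries;
 * these helpers carve allocations out of that pool.
 */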
void *srmmu_get_nocache(int size, int align);
void srmmu_free_nocache(void *addr, int size);

extern struct resource sparc_iomap;
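
/* One 256-entry pgd per mm; it too lives in the no-cache pool. */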
pgd_t *get_pgd_fast(void);

static inline void free_pgd_fast(pgd_t *pgd)
{
	srmmu_free_nocache(pgd, SRMMU_PGD_TABLE_SIZE);
}

#define pgd_free(mm, pgd)	free_pgd_fast(pgd)
#define pgd_alloc(mm)		get_pgd_fast()
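
/*
 * An SRMMU table descriptor (PTD) holds the next-level table's physical
 * address in its pointer field (hence the "pa >> 4" shift), with the
 * descriptor type set to SRMMU_ET_PTD.
 */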
static inline void pud_set(pud_t * pudp, pmd_t * pmdp)
{
	unsigned long pa = __nocache_pa(pmdp);

	set_pte((pte_t *)pudp, __pte((SRMMU_ET_PTD | (pa >> 4))));
}

#define pud_populate(MM, PGD, PMD)	pud_set(PGD, PMD)
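
/* Allocate one native 64-entry pmd table, naturally aligned. */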
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
				   unsigned long address)
{
	return srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE,
				 SRMMU_PMD_TABLE_SIZE);
}

static inline void free_pmd_fast(pmd_t * pmd)
{
	srmmu_free_nocache(pmd, SRMMU_PMD_TABLE_SIZE);
}

#define pmd_free(mm, pmd)		free_pmd_fast(pmd)
#define __pmd_free_tlb(tlb, pmd, addr)	pmd_free((tlb)->mm, pmd)

void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep);
#define pmd_pgtable(pmd)	pmd_page(pmd)

void pmd_set(pmd_t *pmdp, pte_t *ptep);
#define pmd_populate_kernel(MM, PMD, PTE)	pmd_set(PMD, PTE)
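
/*
 * User PTE tables (pgtable_t) are allocated and freed out of line;
 * kernel PTE tables come straight from the no-cache pool below.
 */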
pgtable_t pte_alloc_one(struct mm_struct *mm);

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
	return srmmu_get_nocache(SRMMU_PTE_TABLE_SIZE,
				 SRMMU_PTE_TABLE_SIZE);
}

static inline void free_pte_fast(pte_t *pte)
{
	srmmu_free_nocache(pte, SRMMU_PTE_TABLE_SIZE);
}

#define pte_free_kernel(mm, pte)	free_pte_fast(pte)

void pte_free(struct mm_struct * mm, pgtable_t pte);
#define __pte_free_tlb(tlb, pte, addr)	pte_free((tlb)->mm, pte)

#endif /* _SPARC_PGALLOC_H */