linux/arch/arm64/mm/pgd.c
Ard Biesheuvel a6bbf5d4d9 arm64: mm: Add definitions to support 5 levels of paging
Add the required types and descriptor accessors to support 5 levels of
paging in the common code. This is one of the prerequisites for
supporting 52-bit virtual addressing with 4k pages.

Note that this does not cover the code that handles kernel mappings or
the fixmap.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20240214122845.2033971-76-ardb+git@google.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
2024-02-16 12:42:39 +00:00
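
The accessors this commit adds slot into the kernel's generic five-level layout (pgd → p4d → pud → pmd → pte), where the p4d level folds away at compile or run time when fewer levels are in use. Below is a minimal sketch of a walk through all five levels using the generic accessors; the helper name is hypothetical, error handling is reduced to presence checks, and huge mappings and pte locking are deliberately ignored:

#include <linux/mm.h>

/* Hypothetical helper: resolve a user VA down to its pte slot. */
static pte_t *walk_user_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);	/* top level (level -1 with 5 levels) */
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);		/* folds to the pgd when l5 is disabled */
	if (p4d_none(*p4d) || p4d_bad(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud) || pud_bad(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);	/* caller handles huge mappings/locking */
}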

// SPDX-License-Identifier: GPL-2.0-only
/*
 * PGD allocation/freeing
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 */

#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/tlbflush.h>

static struct kmem_cache *pgd_cache __ro_after_init;
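
/*
 * The top-level user page table occupies a full page either when
 * PGD_SIZE matches PAGE_SIZE, or when the kernel is built for five
 * levels of paging but the fifth level is not enabled at runtime, in
 * which case the pgd folds into a page sized table.
 */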
static bool pgdir_is_page_size(void)
{
	if (PGD_SIZE == PAGE_SIZE)
		return true;
	if (CONFIG_PGTABLE_LEVELS == 5)
		return !pgtable_l5_enabled();
	return false;
}
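
/* Allocate the top-level page table for a new user address space. */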
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	gfp_t gfp = GFP_PGTABLE_USER;

	if (pgdir_is_page_size())
		return (pgd_t *)__get_free_page(gfp);
	else
		return kmem_cache_alloc(pgd_cache, gfp);
}
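
/* Release a pgd back to whichever allocator it came from. */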
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	if (pgdir_is_page_size())
		free_page((unsigned long)pgd);
	else
		kmem_cache_free(pgd_cache, pgd);
}
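
/* Set up the slab cache used for sub-page pgds; called once at boot. */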
void __init pgtable_cache_init(void)
{
	if (pgdir_is_page_size())
		return;

#ifdef CONFIG_ARM64_PA_BITS_52
	/*
	 * With 52-bit physical addresses, the architecture requires the
	 * top-level table to be aligned to at least 64 bytes.
	 */
	BUILD_BUG_ON(PGD_SIZE < 64);
#endif

	/*
	 * Naturally aligned pgds required by the architecture.
	 */
	pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_SIZE,
				      SLAB_PANIC, NULL);
}
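
Passing PGD_SIZE as both the object size and the alignment of the cache gives each pgd natural alignment, as the architecture requires. As a worked example, assuming the 4k-page, 52-bit-VA configuration this series targets: the runtime-enabled fifth level resolves virtual address bits 48-51, so the top-level table holds 2^4 = 16 entries of 8 bytes each, i.e. PGD_SIZE = 128 bytes, which satisfies both natural alignment within the slab and the 64-byte floor checked by the BUILD_BUG_ON above.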