arm64: mm: Create gigabyte kernel logical mappings where possible
We have the capability to map 1GB level 1 blocks when using a 4K granule.

This patch adjusts the create_mapping logic such that, when mapping physical memory on boot, we attempt to use a 1GB block if both the VA and PA start and end are 1GB aligned. This reduces the number of lookup levels required to resolve a kernel logical address and also reduces TLB pressure on cores that support 1GB TLB entries.

Signed-off-by: Steve Capper <steve.capper@linaro.org>
Tested-by: Jungseok Lee <jays.lee@samsung.com>
[catalin.marinas@arm.com: s/prot_sect_kernel/PROT_SECT_NORMAL_EXEC/]
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
commit 206a2a73a6
parent ba6bf8c85c
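The whole change hinges on one alignment test: a level 1 (pud) entry is only turned into a 1GB block when the virtual start address, the end of the range that entry covers, and the physical address are all 1GB aligned. The sketch below is a minimal userspace rendering of that test, assuming a 4K granule where a level 1 entry spans 1GB; the helper name is invented for illustration and is not kernel API.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: with a 4K granule, a level 1 (pud) entry covers 1GB. */
#define PUD_SHIFT	30
#define PUD_SIZE	(1ULL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE - 1))

/*
 * Hypothetical helper mirroring the check in alloc_init_pud() below:
 * the block path is taken only if the VA start, the VA end and the PA
 * are all 1GB aligned, folded into a single mask test.
 */
static int is_1gb_block_candidate(uint64_t addr, uint64_t next, uint64_t phys)
{
	return ((addr | next | phys) & ~PUD_MASK) == 0;
}

int main(void)
{
	/* 1GB-aligned VA range backed by a 1GB-aligned PA: block mapping OK. */
	assert(is_1gb_block_candidate(0xffffffc000000000ULL,
				      0xffffffc040000000ULL,
				      0x80000000ULL));

	/* PA only 2MB aligned: would fall back to alloc_init_pmd(). */
	assert(!is_1gb_block_candidate(0xffffffc000000000ULL,
				       0xffffffc040000000ULL,
				       0x80200000ULL));

	printf("alignment checks behave as expected\n");
	return 0;
}

Folding the three operands together with OR means a single mask comparison catches a misalignment in any one of them.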
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -29,6 +29,8 @@
  */
 
 #define PUD_TABLE_BIT		(_AT(pgdval_t, 1) << 1)
+#define PUD_TYPE_MASK		(_AT(pgdval_t, 3) << 0)
+#define PUD_TYPE_SECT		(_AT(pgdval_t, 1) << 0)
 
 /*
  * Level 2 descriptor (PMD).
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -259,6 +259,7 @@ static inline pmd_t pte_pmd(pte_t pte)
 #define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)
 
 #define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
+#define pud_pfn(pud)		(((pud_val(pud) & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
 
 #define set_pmd_at(mm, addr, pmdp, pmd)	set_pmd(pmdp, pmd)
 
@@ -292,6 +293,12 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 #define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
 				 PMD_TYPE_SECT)
 
+#ifdef ARM64_64K_PAGES
+#define pud_sect(pud)		(0)
+#else
+#define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
+				 PUD_TYPE_SECT)
+#endif
 
 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
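pud_sect() works because bits [1:0] of a level 1 descriptor encode its type: 0b01 is a block (section) and 0b11 is a pointer to a next-level table. Below is a small standalone sketch of that distinction, reusing the bit values from the PUD_* definitions added above; it is plain C for illustration, not kernel code, and the descriptor values are made up.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Bit values mirrored from the PUD_* definitions above (illustrative only). */
#define PUD_TYPE_MASK	(3ULL << 0)	/* bits [1:0]: descriptor type       */
#define PUD_TYPE_SECT	(1ULL << 0)	/* 0b01: block (section) descriptor  */
#define PUD_TABLE_BIT	(1ULL << 1)	/* set in 0b11: table descriptor     */

/* Same shape as the pud_sect() macro added in this patch. */
static int pud_sect(uint64_t pud)
{
	return (pud & PUD_TYPE_MASK) == PUD_TYPE_SECT;
}

int main(void)
{
	/* 1GB block descriptor: output address plus the "section" type bits. */
	uint64_t block = 0x80000000ULL | PUD_TYPE_SECT;
	/* Table descriptor: next-level table address plus both type bits.    */
	uint64_t table = 0x7ffff000ULL | PUD_TYPE_SECT | PUD_TABLE_BIT;

	assert(pud_sect(block));	/* bits [1:0] == 0b01 -> block */
	assert(!pud_sect(table));	/* bits [1:0] == 0b11 -> table */
	printf("block and table descriptors are distinguished by bit 1\n");
	return 0;
}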
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -195,7 +195,30 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
 
 	do {
 		next = pud_addr_end(addr, end);
-		alloc_init_pmd(pud, addr, next, phys);
+
+		/*
+		 * For 4K granule only, attempt to put down a 1GB block
+		 */
+		if ((PAGE_SHIFT == 12) &&
+		    ((addr | next | phys) & ~PUD_MASK) == 0) {
+			pud_t old_pud = *pud;
+			set_pud(pud, __pud(phys | PROT_SECT_NORMAL_EXEC));
+
+			/*
+			 * If we have an old value for a pud, it will
+			 * be pointing to a pmd table that we no longer
+			 * need (from swapper_pg_dir).
+			 *
+			 * Look up the old pmd table and free it.
+			 */
+			if (!pud_none(old_pud)) {
+				phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
+				memblock_free(table, PAGE_SIZE);
+				flush_tlb_all();
+			}
+		} else {
+			alloc_init_pmd(pud, addr, next, phys);
+		}
 		phys += next - addr;
 	} while (pud++, addr = next, addr != end);
 }
@@ -338,6 +361,9 @@ int kern_addr_valid(unsigned long addr)
 	if (pud_none(*pud))
 		return 0;
 
+	if (pud_sect(*pud))
+		return pfn_valid(pud_pfn(*pud));
+
 	pmd = pmd_offset(pud, addr);
 	if (pmd_none(*pmd))
 		return 0;
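The kern_addr_valid() short-circuit relies on pud_pfn() being nothing more than mask-and-shift arithmetic on the block descriptor. A rough userspace version of the same calculation follows, assuming 4K pages (PAGE_SHIFT of 12), 1GB pud entries and a 48-bit physical mask; the constants mirror the kernel ones but are hard-coded here for illustration.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed values for illustration: 4K pages, 48-bit physical addresses,
 * 1GB pud entries. These mirror the kernel constants but are not them. */
#define PAGE_SHIFT	12
#define PUD_SHIFT	30
#define PUD_MASK	(~((1ULL << PUD_SHIFT) - 1))
#define PHYS_MASK	((1ULL << 48) - 1)

/* Same arithmetic as the pud_pfn() macro added in this patch. */
static uint64_t pud_pfn(uint64_t pud)
{
	return ((pud & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT;
}

int main(void)
{
	/* A 1GB block descriptor mapping physical 0x80000000 (2GB). */
	uint64_t pud = 0x80000000ULL | 0x1ULL; /* block type bits */

	/* Masking strips the low type/attribute bits, leaving the 1GB-aligned
	 * output address; shifting by PAGE_SHIFT then yields the pfn. */
	assert(pud_pfn(pud) == (0x80000000ULL >> PAGE_SHIFT)); /* 0x80000 */
	printf("pfn of the block base: 0x%llx\n",
	       (unsigned long long)pud_pfn(pud));
	return 0;
}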