2f0584f3f4
The x86 Shadow stack feature includes a new type of memory called shadow stack. This shadow stack memory has some unusual properties, which require some core mm changes to function properly.

One of these unusual properties is that shadow stack memory is writable, but only in limited ways. These limits are applied via a specific PTE bit combination. Nevertheless, the memory is writable, and core mm code will need to apply the writable permissions in the typical paths that call pte_mkwrite(). The goal is to make pte_mkwrite() take a VMA, so that the x86 implementation of it can know whether to create regular writable or shadow stack mappings.

But there are a couple of challenges to this. Modifying the signatures of each arch pte_mkwrite() implementation would be error prone because some are generated with macros and would need to be re-implemented. Also, some pte_mkwrite() callers operate on kernel memory without a VMA.

So this can be done in a three step process. First pte_mkwrite() can be renamed to pte_mkwrite_novma() in each arch, with a generic pte_mkwrite() added that just calls pte_mkwrite_novma(). Next, callers without a VMA can be moved to pte_mkwrite_novma(). And lastly, pte_mkwrite() and all callers can be changed to take/pass a VMA.

Start the process by renaming pte_mkwrite() to pte_mkwrite_novma() and adding the pte_mkwrite() wrapper in linux/pgtable.h. Apply the same pattern for pmd_mkwrite(). Since not all archs have a pmd_mkwrite_novma(), create a new arch config HAS_HUGE_PAGE that can be used to tell whether pmd_mkwrite() should be defined. Otherwise, in the !HAS_HUGE_PAGE cases the compiler would not be able to find pmd_mkwrite_novma().

No functional change.

Suggested-by: Linus Torvalds <torvalds@linuxfoundation.org>
Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>
Acked-by: David Hildenbrand <david@redhat.com>
Link: https://lore.kernel.org/lkml/CAHk-=wiZjSu7c9sFYZb3q04108stgHff2wfbokGCCgW7riz+8Q@mail.gmail.com/
Link: https://lore.kernel.org/all/20230613001108.3040476-2-rick.p.edgecombe%40intel.com
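For reference, a minimal sketch of the generic fallback wrappers this change describes for include/linux/pgtable.h. The exact guard spelling and placement (CONFIG_HAS_HUGE_PAGE, the #ifndef pte_mkwrite / #ifndef pmd_mkwrite checks) are assumptions based on the description above, not copied from the patch:

	#ifndef pte_mkwrite
	/* Generic pte_mkwrite(): for now simply forwards to the arch's renamed helper. */
	static inline pte_t pte_mkwrite(pte_t pte)
	{
		return pte_mkwrite_novma(pte);
	}
	#endif

	#ifdef CONFIG_HAS_HUGE_PAGE
	#ifndef pmd_mkwrite
	/*
	 * Only provide the pmd_mkwrite() fallback where the arch actually has
	 * huge page helpers; otherwise pmd_mkwrite_novma() would not exist and
	 * this definition would fail to compile.
	 */
	static inline pmd_t pmd_mkwrite(pmd_t pmd)
	{
		return pmd_mkwrite_novma(pmd);
	}
	#endif
	#endif

The header below (the ARC hugepage.h, judging by its include guard) shows the arch-side half of the rename: its PMD write helper is now spelled pmd_mkwrite_novma().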
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013-15 Synopsys, Inc. (www.synopsys.com)
 */

#ifndef _ASM_ARC_HUGEPAGE_H
#define _ASM_ARC_HUGEPAGE_H

#include <linux/types.h>
#include <asm-generic/pgtable-nopmd.h>

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkwrite_novma(pmd)	pte_pmd(pte_mkwrite_novma(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mkhuge(pmd)		pte_pmd(pte_mkhuge(pmd_pte(pmd)))
#define pmd_mkinvalid(pmd)	pte_pmd(pte_mknotpresent(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))

#define pmd_write(pmd)		pte_write(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))

#define mk_pmd(page, prot)	pte_pmd(mk_pte(page, prot))

#define pmd_trans_huge(pmd)	(pmd_val(pmd) & _PAGE_HW_SZ)

#define pfn_pmd(pfn, prot)	(__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	/*
	 * open-coded pte_modify() with additional retaining of HW_SZ bit
	 * so that pmd_trans_huge() remains true for this PMD
	 */
	return __pmd((pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HW_SZ)) | pgprot_val(newprot));
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}

extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
				 pmd_t *pmd);

#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
extern void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
				unsigned long end);

/* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine.*/
#define pmdp_establish generic_pmdp_establish

#endif