arm64 fixes for -rc3
- Avoid erroneously marking untagged pages with PG_mte_tagged

- Always reset KASAN tags for destination page in copy_page()

- Mark PMU header functions 'static inline'

- Fix some sparse warnings due to missing casts

-----BEGIN PGP SIGNATURE-----

iQFEBAABCgAuFiEEPxTL6PPUbjXGY88ct6xw3ITBYzQFAmRnVj4QHHdpbGxAa2Vy
bmVsLm9yZwAKCRC3rHDchMFjNJdMB/94LS7O5EVgx0qVg7g7D0LDs6f++eJ8Ya0V
32QSPal1KRFIBvHdyyQWdce0j+hs0mefsJ8fUTA16Jl8UrZ5U2XKzV3czvxZHjeX
AcBhw2nNVTH3vG1lXJzQLMGtgsVGXAw/DNwCgztNnlNCvyS+dpb7Gzrwv1safxni
eXs8nvyAlowzfvCWk4bNAE8ZLB4ckAAuPrzactzVJ6I397kk5i/PPUSWzG6syx3G
qPXE3XSYtlqQ/ZmGnoE/PWTGPNcieWfeQcHlIT+00tdH/FWd+rJdIb5VW0JKDtLo
Rqtpq0SfZTqg6+GdzQUbLVfOcAte3TiD7qOcA1xnT2z4IjEQGIav
=eZEU
-----END PGP SIGNATURE-----

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "A mixture of compiler/static checker resolutions and a couple of MTE
  fixes:

   - Avoid erroneously marking untagged pages with PG_mte_tagged

   - Always reset KASAN tags for destination page in copy_page()

   - Mark PMU header functions 'static inline'

   - Fix some sparse warnings due to missing casts"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: mte: Do not set PG_mte_tagged if tags were not initialized
  arm64: Also reset KASAN tag if page is not PG_mte_tagged
  arm64: perf: Mark all accessor functions inline
  ARM: perf: Mark all accessor functions inline
  arm64: vdso: Pass (void *) to virt_to_page()
  arm64/mm: mark private VM_FAULT_X defines as vm_fault_t
commit 4ffd96c962
arch/arm/include/asm/arm_pmuv3.h

@@ -92,7 +92,7 @@
 
 #define RETURN_READ_PMEVCNTRN(n) \
 	return read_sysreg(PMEVCNTR##n)
-static unsigned long read_pmevcntrn(int n)
+static inline unsigned long read_pmevcntrn(int n)
 {
 	PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
 	return 0;
@@ -100,14 +100,14 @@ static unsigned long read_pmevcntrn(int n)
 
 #define WRITE_PMEVCNTRN(n) \
 	write_sysreg(val, PMEVCNTR##n)
-static void write_pmevcntrn(int n, unsigned long val)
+static inline void write_pmevcntrn(int n, unsigned long val)
 {
 	PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
 }
 
 #define WRITE_PMEVTYPERN(n) \
 	write_sysreg(val, PMEVTYPER##n)
-static void write_pmevtypern(int n, unsigned long val)
+static inline void write_pmevtypern(int n, unsigned long val)
 {
 	PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
 }
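Why 'static inline' matters here: these accessors are defined in a header
that several C files include. A plain 'static' function defined in a
header gives every includer its own private copy, and any translation
unit that includes the header without calling the function draws a
-Wunused-function warning. Marking the definition 'static inline'
suppresses that warning and makes the header-definition intent explicit.
A minimal sketch (hypothetical names, not the kernel header):

	/* pmu_accessors.h -- hypothetical header, for illustration only */
	#ifndef PMU_ACCESSORS_H
	#define PMU_ACCESSORS_H

	/* Plain 'static' in a header: each includer that never calls this
	 * gets a "defined but not used" warning with -Wall. */
	static unsigned long read_counter_plain(void)
	{
		return 0;
	}

	/* 'static inline': same semantics for callers, no unused-function
	 * warning, and the compiler may inline the body. */
	static inline unsigned long read_counter_inline(void)
	{
		return 0;
	}

	#endif /* PMU_ACCESSORS_H */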
arch/arm64/include/asm/arm_pmuv3.h

@@ -13,7 +13,7 @@
 
 #define RETURN_READ_PMEVCNTRN(n) \
 	return read_sysreg(pmevcntr##n##_el0)
-static unsigned long read_pmevcntrn(int n)
+static inline unsigned long read_pmevcntrn(int n)
 {
 	PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
 	return 0;
@@ -21,14 +21,14 @@ static unsigned long read_pmevcntrn(int n)
 
 #define WRITE_PMEVCNTRN(n) \
 	write_sysreg(val, pmevcntr##n##_el0)
-static void write_pmevcntrn(int n, unsigned long val)
+static inline void write_pmevcntrn(int n, unsigned long val)
 {
 	PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
 }
 
 #define WRITE_PMEVTYPERN(n) \
 	write_sysreg(val, pmevtyper##n##_el0)
-static void write_pmevtypern(int n, unsigned long val)
+static inline void write_pmevtypern(int n, unsigned long val)
 {
 	PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
 }
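For context on the PMEVN_SWITCH(n, ...) pattern in both headers: the PMU
event-counter registers can only be named as compile-time constants, so
a runtime index n has to be dispatched through a switch whose cases each
touch one fixed register. A reduced, compilable sketch of that dispatch,
assuming just two counters and a plain array standing in for
read_sysreg() (the real headers cover counters 0..30 and read actual
system registers):

	#include <stdio.h>

	/* Stand-in storage; the real code reads pmevcntr<n>_el0. */
	static unsigned long fake_sysreg[2] = { 100, 200 };
	#define RETURN_READ_CTR(n)	return fake_sysreg[n]

	#define PMEVN_CASE(n, case_macro)	case n: case_macro(n); break

	#define PMEVN_SWITCH(x, case_macro)		\
		do {					\
			switch (x) {			\
			PMEVN_CASE(0, case_macro);	\
			PMEVN_CASE(1, case_macro);	\
			}				\
		} while (0)

	static inline unsigned long read_pmevcntrn(int n)
	{
		PMEVN_SWITCH(n, RETURN_READ_CTR);
		return 0;	/* unknown counter */
	}

	int main(void)
	{
		printf("%lu\n", read_pmevcntrn(1));	/* prints 200 */
		return 0;
	}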
arch/arm64/kernel/mte.c

@@ -66,13 +66,10 @@ void mte_sync_tags(pte_t old_pte, pte_t pte)
 		return;
 
 	/* if PG_mte_tagged is set, tags have already been initialised */
-	for (i = 0; i < nr_pages; i++, page++) {
-		if (!page_mte_tagged(page)) {
+	for (i = 0; i < nr_pages; i++, page++)
+		if (!page_mte_tagged(page))
 			mte_sync_page_tags(page, old_pte, check_swap,
 					   pte_is_tagged);
-			set_page_mte_tagged(page);
-		}
-	}
 
 	/* ensure the tags are visible before the PTE is set */
 	smp_wmb();
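The bug being fixed: the old loop called set_page_mte_tagged() after
mte_sync_page_tags() even on paths where that helper chose not to
initialise the tags, so a page could end up flagged PG_mte_tagged with
uninitialised tag storage. Dropping the call here leaves the flag to be
set where the tags are actually written. The underlying invariant is
"initialise first, publish the flag second". A compilable toy sketch of
that ordering, all names hypothetical:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <string.h>

	struct toy_page {
		unsigned char tags[16];	/* stand-in for MTE tag storage */
		atomic_bool tagged;	/* stand-in for PG_mte_tagged */
	};

	/* Correct: write the tags, then publish the flag with release
	 * ordering so a reader that sees tagged == true also sees tags. */
	static void sync_tags_then_publish(struct toy_page *p,
					   const unsigned char *src)
	{
		memcpy(p->tags, src, sizeof(p->tags));
		atomic_store_explicit(&p->tagged, true, memory_order_release);
	}

	/* Buggy shape the patch removes: the flag is published even when
	 * no tags were written, so readers may trust stale storage. */
	static void publish_unconditionally(struct toy_page *p,
					    const unsigned char *src)
	{
		if (src)
			memcpy(p->tags, src, sizeof(p->tags));
		atomic_store_explicit(&p->tagged, true, memory_order_release);
	}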
arch/arm64/kernel/vdso.c

@@ -288,7 +288,7 @@ static int aarch32_alloc_kuser_vdso_page(void)
 
 	memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
 	       kuser_sz);
-	aarch32_vectors_page = virt_to_page(vdso_page);
+	aarch32_vectors_page = virt_to_page((void *)vdso_page);
 	return 0;
 }
 
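The fix above is purely a typing issue: vdso_page holds a kernel virtual
address as an unsigned long, while virt_to_page() is used with a
pointer-typed argument, so the implicit integer-to-pointer conversion
trips sparse. The explicit (void *) cast states the intent. A small
illustration with a hypothetical stand-in function:

	#include <stdio.h>

	/* Hypothetical stand-in: takes a pointer, like virt_to_page(). */
	static const void *page_of(const void *vaddr)
	{
		/* mask to a 4 KiB boundary, purely for demonstration */
		return (const void *)((unsigned long)vaddr & ~0xfffUL);
	}

	int main(void)
	{
		unsigned long addr = 0x12345000UL;	/* address kept as integer */

		/* page_of(addr) would be an implicit int-to-pointer
		 * conversion that compilers and sparse warn about; the
		 * cast makes the conversion explicit. */
		printf("%p\n", page_of((void *)addr));
		return 0;
	}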
arch/arm64/mm/copypage.c

@@ -21,9 +21,10 @@ void copy_highpage(struct page *to, struct page *from)
 
 	copy_page(kto, kfrom);
 
+	if (kasan_hw_tags_enabled())
+		page_kasan_tag_reset(to);
+
 	if (system_supports_mte() && page_mte_tagged(from)) {
-		if (kasan_hw_tags_enabled())
-			page_kasan_tag_reset(to);
 		/* It's a new page, shouldn't have been tagged yet */
 		WARN_ON_ONCE(!try_page_mte_tagging(to));
 		mte_copy_page_tags(kto, kfrom);
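What moved and why: with KASAN_HW_TAGS, every page carries a small
software tag that is checked against pointer tags on access through the
linear map. The destination page of a copy needs its tag reset whether
or not the source page had MTE tags, so the reset is hoisted out of the
page_mte_tagged(from) branch. A conceptual sketch of the reset itself,
assuming a simplified page structure (not the kernel's):

	#include <stdint.h>

	#define TAG_KERNEL	0xffu	/* match-all tag: checks always pass */

	struct toy_page {
		uint8_t kasan_tag;	/* stand-in for the tag bits in page->flags */
	};

	/* Reset the page tag so accesses to the freshly copied page
	 * cannot trip a stale tag inherited from its previous life. */
	static void toy_page_kasan_tag_reset(struct toy_page *p)
	{
		p->kasan_tag = TAG_KERNEL;
	}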
arch/arm64/mm/fault.c

@@ -480,8 +480,8 @@ static void do_bad_area(unsigned long far, unsigned long esr,
 	}
 }
 
-#define VM_FAULT_BADMAP		0x010000
-#define VM_FAULT_BADACCESS	0x020000
+#define VM_FAULT_BADMAP		((__force vm_fault_t)0x010000)
+#define VM_FAULT_BADACCESS	((__force vm_fault_t)0x020000)
 
 static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
 				  unsigned int mm_flags, unsigned long vm_flags,
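The __force casts exist because vm_fault_t is declared __bitwise, which
sparse treats as a distinct restricted type: mixing in bare integer
constants draws a warning, and __force marks the conversion as
deliberate. Under a normal compiler both annotations expand to nothing.
A self-contained sketch of the pattern with a hypothetical fault_t:

	#ifdef __CHECKER__			/* defined when sparse runs */
	#define __bitwise	__attribute__((bitwise))
	#define __force		__attribute__((force))
	#else
	#define __bitwise
	#define __force
	#endif

	typedef unsigned int __bitwise fault_t;

	/* A bare 0x010000 here would make sparse warn about an implicit
	 * cast to the restricted type; __force blesses the conversion. */
	#define FAULT_BADMAP	((__force fault_t)0x010000)

	static int is_badmap(fault_t f)
	{
		return (__force unsigned int)(f & FAULT_BADMAP) != 0;
	}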