Fix the TLBI RANGE operand calculation causing live migration under
KVM/arm64 to miss dirty pages due to stale TLB entries.
-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEE5RElWfyWxS+3PLO2a9axLQDIXvEFAmYZdbAACgkQa9axLQDI
XvGKrQ/+IRsZCRPNSY8/NluQa6D8UxvE9qwv24NfMa1XJIVJSDd6gjS8mk3Fzjx5
6BdD1zCgTNv57y1tWtGjWqAmpXocXlKCr3FvCnRBuILwCg2+Ou/G9DY0TdlOgy7J
U5Azt3KNM9WyvR3+izR4ov2s0YP+Wygp2fHGlptvh2W3GIScouiyLmWaDd4Suk4Y
PM06hJ5T8ECL1K+RkDT2NJ0qaKjk4sL8NHy/e3rL9VxSmc6eKKXtAxqYL0xraidu
M41zNS/R6e73JpfIXAqj//RrBOoU9n9LNp1IS6x2GwgCm6Yt6eREtOf+AKxUkYeN
QLTULfuppnpe6YtJ2MWYpO7jOtTV/i0IH2oB0u2BdhilbKZmYq/LVGywsuK/zHqM
9dayH04PAxvvNjSMWqMPu31B0yTUk9dluYtM6qpGIcAECqQm24devC6Dv1gINlce
lIB/TyLz6BWLqIjAxyc0HWiaTKEKSsLNUTib/5IVPQqz0nMYacZLFG70fqH5I71w
VHjV7k9fdR+CCEl3R5rn1qhS+5wwSsUMoqSp2RkwV/TORcCc+w/v91uinAm7mmBS
EM7CM/twjia8RcdMWHxtraSNoBvZ8JrXxfeID7rZqu4MyJqBewTg9UxaJZAo5hPP
xai6ifa82RvsDwbKawvi6soJu15D6NaPze4n82PNRdwicGLhCb4=
=0ljm
-----END PGP SIGNATURE-----

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fix from Catalin Marinas:

 "Fix the TLBI RANGE operand calculation causing live migration under
  KVM/arm64 to miss dirty pages due to stale TLB entries"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: tlb: Fix TLBI RANGE operand
commit 8f2c057754
@@ -161,12 +161,18 @@ static inline unsigned long get_trans_granule(void)
 #define MAX_TLBI_RANGE_PAGES	__TLBI_RANGE_PAGES(31, 3)
 
 /*
- * Generate 'num' values from -1 to 30 with -1 rejected by the
- * __flush_tlb_range() loop below.
+ * Generate 'num' values from -1 to 31 with -1 rejected by the
+ * __flush_tlb_range() loop below. Its return value is only
+ * significant for a maximum of MAX_TLBI_RANGE_PAGES pages. If
+ * 'pages' is more than that, you must iterate over the overall
+ * range.
  */
 #define TLBI_RANGE_MASK		GENMASK_ULL(4, 0)
-#define __TLBI_RANGE_NUM(pages, scale)	\
-	((((pages) >> (5 * (scale) + 1)) & TLBI_RANGE_MASK) - 1)
+#define __TLBI_RANGE_NUM(pages, scale)					\
+	({								\
+		int __pages = min((pages),				\
+				  __TLBI_RANGE_PAGES(31, (scale)));	\
+		(__pages >> (5 * (scale) + 1)) - 1;			\
+	})
 
 /*
  * TLB Invalidation
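To see what the new clamp changes, here is a minimal user-space sketch, not kernel code: the OLD_/NEW_TLBI_RANGE_NUM names, the 0x1f literal standing in for GENMASK_ULL(4, 0), and the ternary standing in for min() are local to the example. It evaluates both versions of the macro for pages == MAX_TLBI_RANGE_PAGES, i.e. the num = 31 / scale = 3 encoding the old 5-bit mask could never produce.

/*
 * Hypothetical sketch (GCC/Clang, statement expressions); not the kernel's
 * code. OLD_/NEW_TLBI_RANGE_NUM and the ternary in place of min() are
 * local to this example.
 */
#include <stdio.h>

#define __TLBI_RANGE_PAGES(num, scale)	((unsigned long)((num) + 1) << (5 * (scale) + 1))
#define MAX_TLBI_RANGE_PAGES		__TLBI_RANGE_PAGES(31, 3)
#define TLBI_RANGE_MASK			0x1fUL	/* GENMASK_ULL(4, 0) */

/* Pre-fix formula: the 5-bit mask wraps 32 to 0, so the result becomes -1. */
#define OLD_TLBI_RANGE_NUM(pages, scale) \
	((int)((((pages) >> (5 * (scale) + 1)) & TLBI_RANGE_MASK) - 1))

/* Post-fix formula: clamp to the largest count one (scale, num) pair encodes. */
#define NEW_TLBI_RANGE_NUM(pages, scale)				\
	({								\
		unsigned long __max = __TLBI_RANGE_PAGES(31, (scale));	\
		unsigned long __pages = (pages) < __max ? (pages) : __max; \
		(int)((__pages >> (5 * (scale) + 1)) - 1);		\
	})

int main(void)
{
	unsigned long pages = MAX_TLBI_RANGE_PAGES;	/* 0x200000 pages */

	for (int scale = 3; scale >= 0; scale--)
		printf("scale=%d  old num=%2d  new num=%2d\n", scale,
		       OLD_TLBI_RANGE_NUM(pages, scale),
		       NEW_TLBI_RANGE_NUM(pages, scale));
	/*
	 * Old macro: -1 at every scale, so no valid range encoding exists
	 * for this request. New macro: scale 3 yields num = 31, which
	 * covers all 0x200000 pages in a single range operation.
	 */
	return 0;
}

With the old operand calculation every scale is rejected for this boundary request, which is consistent with the stale-TLB symptom described in the merge message; the clamp restores the num = 31 encoding.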
@@ -379,10 +385,6 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
  * 3. If there is 1 page remaining, flush it through non-range operations. Range
  *    operations can only span an even number of pages. We save this for last to
  *    ensure 64KB start alignment is maintained for the LPA2 case.
- *
- * Note that certain ranges can be represented by either num = 31 and
- * scale or num = 0 and scale + 1. The loop below favours the latter
- * since num is limited to 30 by the __TLBI_RANGE_NUM() macro.
  */
 #define __flush_tlb_range_op(op, start, pages, stride,			\
 				asid, tlb_level, tlbi_user, lpa2)	\
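For context, the sketch below models how a flush loop of this shape consumes the fixed __TLBI_RANGE_NUM(): it walks scale from 3 down to 0 and peels off one (scale, num) range per valid num. This is an illustrative user-space model under stated assumptions, not the kernel's __flush_tlb_range_op(): ASID, translation level, the TLBI instruction encoding and the LPA2 64KB alignment rule are omitted, and the 70001-page request is an arbitrary example value.

/*
 * Illustrative sketch only; simplified from the flush loop described in
 * the comment above.
 */
#include <stdio.h>

#define __TLBI_RANGE_PAGES(num, scale)	((unsigned long)((num) + 1) << (5 * (scale) + 1))

/* Fixed __TLBI_RANGE_NUM(): clamp, then derive num for this scale. */
static int tlbi_range_num(unsigned long pages, int scale)
{
	unsigned long max = __TLBI_RANGE_PAGES(31, scale);
	unsigned long clamped = pages < max ? pages : max;

	return (int)((clamped >> (5 * scale + 1)) - 1);
}

int main(void)
{
	unsigned long pages = 70001;	/* arbitrary example request, in pages */

	/* Walk scale from 3 down to 0, issuing at most one range op per scale. */
	for (int scale = 3; scale >= 0 && pages > 1; scale--) {
		int num = tlbi_range_num(pages, scale);

		if (num >= 0) {
			unsigned long chunk = __TLBI_RANGE_PAGES(num, scale);

			printf("range TLBI: scale=%d num=%2d -> %lu pages\n",
			       scale, num, chunk);
			pages -= chunk;
		}
	}
	/* A single leftover page falls back to a non-range flush (point 3 above). */
	if (pages)
		printf("non-range TLBI for the last %lu page(s)\n", pages);
	return 0;
}

For 70001 pages the model emits chunks of 65536, 4096, 320 and 48 pages at scales 3, 2, 1 and 0, leaving one page for the non-range fallback.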