This option increases the number of hash misses by limiting the number of kernel HPT entries, by keeping a per-CPU record of the last kernel HPTEs installed, and removing that from the hash table on the next hash insertion. A timer round-robins CPUs removing remaining kernel HPTEs and clearing the TLB (in the case of bare metal) to increase and slightly randomise kernel fault activity. Signed-off-by: Nicholas Piggin <npiggin@gmail.com> [mpe: Add comment about NR_CPUS usage, fixup whitespace] Signed-off-by: Michael Ellerman <mpe@ellerman.id.au> Link: https://lore.kernel.org/r/20221024030150.852517-1-npiggin@gmail.com
32 lines
714 B
C
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef ARCH_POWERPC_MM_BOOK3S64_INTERNAL_H
#define ARCH_POWERPC_MM_BOOK3S64_INTERNAL_H
#include <linux/jump_label.h>
extern bool stress_slb_enabled;
|
|
|
|
DECLARE_STATIC_KEY_FALSE(stress_slb_key);
|
|
|
|
static inline bool stress_slb(void)
|
|
{
|
|
return static_branch_unlikely(&stress_slb_key);
|
|
}
extern bool stress_hpt_enabled;
|
|
|
|
DECLARE_STATIC_KEY_FALSE(stress_hpt_key);
|
|
|
|
static inline bool stress_hpt(void)
|
|
{
|
|
return static_branch_unlikely(&stress_hpt_key);
|
|
}
/*
 * Evict previously-installed kernel HPTEs around a new hash insertion,
 * increasing kernel hash-fault activity for stress testing (per the
 * stress_hpt commit description; definition lives outside this header).
 */
void hpt_do_stress(unsigned long ea, unsigned long hpte_group);

/* SLB setup on exec of a new program — defined elsewhere in book3s64. */
void slb_setup_new_exec(void);

/* Flush lazy-TLB state for @mm; @always_flush forces the flush
 * unconditionally — TODO confirm against the definition site. */
void exit_lazy_flush_tlb(struct mm_struct *mm, bool always_flush);
#endif /* ARCH_POWERPC_MM_BOOK3S64_INTERNAL_H */