powerpc/mce: Avoid using irq_work_queue() in realmode
In the realmode MCE handler we use irq_work_queue() to defer the processing of MCE events. irq_work_queue() can only be called when translation is enabled because it touches memory outside the RMA, so we enable translation before calling irq_work_queue() and disable it on return — although doing that is not safe in realmode. To avoid this, program the decrementer and call the event processing functions from the timer handler. Signed-off-by: Ganesh Goudar <ganeshgr@linux.ibm.com> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au> Link: https://lore.kernel.org/r/20220120121931.517974-1-ganeshgr@linux.ibm.com
This commit is contained in:
parent
0a182611d1
commit
cc15ff3275
@ -94,6 +94,8 @@ struct machdep_calls {
|
||||
/* Called during machine check exception to retrieve fixup address. */
|
||||
bool (*mce_check_early_recovery)(struct pt_regs *regs);
|
||||
|
||||
void (*machine_check_log_err)(void);
|
||||
|
||||
/* Motherboard/chipset features. This is a kind of general purpose
|
||||
* hook used to control some machine specific features (like reset
|
||||
* lines, chip power control, etc...).
|
||||
|
@ -235,8 +235,21 @@ extern void machine_check_print_event_info(struct machine_check_event *evt,
|
||||
unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr);
|
||||
extern void mce_common_process_ue(struct pt_regs *regs,
|
||||
struct mce_error_info *mce_err);
|
||||
void mce_irq_work_queue(void);
|
||||
int mce_register_notifier(struct notifier_block *nb);
|
||||
int mce_unregister_notifier(struct notifier_block *nb);
|
||||
|
||||
#ifdef CONFIG_PPC_BOOK3S_64
|
||||
void mce_run_irq_context_handlers(void);
|
||||
#else
|
||||
static inline void mce_run_irq_context_handlers(void) { };
|
||||
#endif /* CONFIG_PPC_BOOK3S_64 */
|
||||
|
||||
#ifdef CONFIG_PPC_BOOK3S_64
|
||||
void set_mce_pending_irq_work(void);
|
||||
void clear_mce_pending_irq_work(void);
|
||||
#endif /* CONFIG_PPC_BOOK3S_64 */
|
||||
|
||||
#ifdef CONFIG_PPC_BOOK3S_64
|
||||
void flush_and_reload_slb(void);
|
||||
void flush_erat(void);
|
||||
|
@ -288,6 +288,7 @@ struct paca_struct {
|
||||
#endif
|
||||
#ifdef CONFIG_PPC_BOOK3S_64
|
||||
struct mce_info *mce_info;
|
||||
u8 mce_pending_irq_work;
|
||||
#endif /* CONFIG_PPC_BOOK3S_64 */
|
||||
} ____cacheline_aligned;
|
||||
|
||||
|
@ -28,19 +28,9 @@
|
||||
|
||||
#include "setup.h"
|
||||
|
||||
static void machine_check_process_queued_event(struct irq_work *work);
|
||||
static void machine_check_ue_irq_work(struct irq_work *work);
|
||||
static void machine_check_ue_event(struct machine_check_event *evt);
|
||||
static void machine_process_ue_event(struct work_struct *work);
|
||||
|
||||
static struct irq_work mce_event_process_work = {
|
||||
.func = machine_check_process_queued_event,
|
||||
};
|
||||
|
||||
static struct irq_work mce_ue_event_irq_work = {
|
||||
.func = machine_check_ue_irq_work,
|
||||
};
|
||||
|
||||
static DECLARE_WORK(mce_ue_event_work, machine_process_ue_event);
|
||||
|
||||
static BLOCKING_NOTIFIER_HEAD(mce_notifier_list);
|
||||
@ -89,6 +79,13 @@ static void mce_set_error_info(struct machine_check_event *mce,
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * Queue deferred MCE processing without touching memory outside the RMA.
 * Instead of irq_work_queue() (unsafe in realmode), raise the decrementer
 * and set a per-CPU flag; the timer interrupt then calls
 * mce_run_irq_context_handlers() to do the actual work.
 */
void mce_irq_work_queue(void)
|
||||
{
|
||||
/* Raise decrementer interrupt */
|
||||
arch_irq_work_raise();
|
||||
/* Mark MCE work pending so the timer handler knows to process it */
set_mce_pending_irq_work();
|
||||
}
|
||||
|
||||
/*
|
||||
* Decode and save high level MCE information into per cpu buffer which
|
||||
* is an array of machine_check_event structure.
|
||||
@ -217,7 +214,7 @@ void release_mce_event(void)
|
||||
get_mce_event(NULL, true);
|
||||
}
|
||||
|
||||
static void machine_check_ue_irq_work(struct irq_work *work)
|
||||
/*
 * Punt UE (uncorrectable error) event processing to process context
 * by scheduling the mce_ue_event_work workqueue item.
 * Called from mce_run_irq_context_handlers() in timer-interrupt context.
 */
static void machine_check_ue_work(void)
|
||||
{
|
||||
schedule_work(&mce_ue_event_work);
|
||||
}
|
||||
@ -239,7 +236,7 @@ static void machine_check_ue_event(struct machine_check_event *evt)
|
||||
evt, sizeof(*evt));
|
||||
|
||||
/* Queue work to process this event later. */
|
||||
irq_work_queue(&mce_ue_event_irq_work);
|
||||
mce_irq_work_queue();
|
||||
}
|
||||
|
||||
/*
|
||||
@ -249,7 +246,6 @@ void machine_check_queue_event(void)
|
||||
{
|
||||
int index;
|
||||
struct machine_check_event evt;
|
||||
unsigned long msr;
|
||||
|
||||
if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
|
||||
return;
|
||||
@ -263,20 +259,7 @@ void machine_check_queue_event(void)
|
||||
memcpy(&local_paca->mce_info->mce_event_queue[index],
|
||||
&evt, sizeof(evt));
|
||||
|
||||
/*
|
||||
* Queue irq work to process this event later. Before
|
||||
* queuing the work enable translation for non radix LPAR,
|
||||
* as irq_work_queue may try to access memory outside RMO
|
||||
* region.
|
||||
*/
|
||||
if (!radix_enabled() && firmware_has_feature(FW_FEATURE_LPAR)) {
|
||||
msr = mfmsr();
|
||||
mtmsr(msr | MSR_IR | MSR_DR);
|
||||
irq_work_queue(&mce_event_process_work);
|
||||
mtmsr(msr);
|
||||
} else {
|
||||
irq_work_queue(&mce_event_process_work);
|
||||
}
|
||||
mce_irq_work_queue();
|
||||
}
|
||||
|
||||
void mce_common_process_ue(struct pt_regs *regs,
|
||||
@ -338,7 +321,7 @@ static void machine_process_ue_event(struct work_struct *work)
|
||||
* process pending MCE event from the mce event queue. This function will be
|
||||
* called during syscall exit.
|
||||
*/
|
||||
static void machine_check_process_queued_event(struct irq_work *work)
|
||||
static void machine_check_process_queued_event(void)
|
||||
{
|
||||
int index;
|
||||
struct machine_check_event *evt;
|
||||
@ -363,6 +346,27 @@ static void machine_check_process_queued_event(struct irq_work *work)
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * Flag that this CPU has deferred MCE work pending; checked by
 * mce_run_irq_context_handlers() from the timer interrupt.
 */
void set_mce_pending_irq_work(void)
|
||||
{
|
||||
local_paca->mce_pending_irq_work = 1;
|
||||
}
|
||||
|
||||
/*
 * Clear this CPU's pending-MCE-work flag once the deferred
 * handlers have run (see mce_run_irq_context_handlers()).
 */
void clear_mce_pending_irq_work(void)
|
||||
{
|
||||
local_paca->mce_pending_irq_work = 0;
|
||||
}
|
||||
|
||||
/*
 * Run the deferred MCE handlers from interrupt (timer) context.
 * Invoked by timer_interrupt() when irq work is pending: logs the
 * platform error (if a machine_check_log_err hook exists), drains the
 * queued MCE events, schedules UE work, then clears the pending flag.
 */
void mce_run_irq_context_handlers(void)
|
||||
{
|
||||
if (unlikely(local_paca->mce_pending_irq_work)) {
|
||||
/* Platform hook is optional — e.g. pSeries_machine_check_log_err */
if (ppc_md.machine_check_log_err)
|
||||
ppc_md.machine_check_log_err();
|
||||
machine_check_process_queued_event();
|
||||
machine_check_ue_work();
|
||||
clear_mce_pending_irq_work();
|
||||
}
|
||||
}
|
||||
|
||||
void machine_check_print_event_info(struct machine_check_event *evt,
|
||||
bool user_mode, bool in_guest)
|
||||
{
|
||||
|
@ -70,6 +70,7 @@
|
||||
#include <asm/vdso_datapage.h>
|
||||
#include <asm/firmware.h>
|
||||
#include <asm/asm-prototypes.h>
|
||||
#include <asm/mce.h>
|
||||
|
||||
/* powerpc clocksource/clockevent code */
|
||||
|
||||
@ -638,6 +639,7 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
|
||||
|
||||
if (test_irq_work_pending()) {
|
||||
clear_irq_work_pending();
|
||||
mce_run_irq_context_handlers();
|
||||
irq_work_run();
|
||||
}
|
||||
|
||||
|
@ -21,6 +21,7 @@ struct pt_regs;
|
||||
extern int pSeries_system_reset_exception(struct pt_regs *regs);
|
||||
extern int pSeries_machine_check_exception(struct pt_regs *regs);
|
||||
extern long pseries_machine_check_realmode(struct pt_regs *regs);
|
||||
void pSeries_machine_check_log_err(void);
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
extern void smp_init_pseries(void);
|
||||
|
@ -23,11 +23,6 @@ static DEFINE_SPINLOCK(ras_log_buf_lock);
|
||||
|
||||
static int ras_check_exception_token;
|
||||
|
||||
static void mce_process_errlog_event(struct irq_work *work);
|
||||
static struct irq_work mce_errlog_process_work = {
|
||||
.func = mce_process_errlog_event,
|
||||
};
|
||||
|
||||
#define EPOW_SENSOR_TOKEN 9
|
||||
#define EPOW_SENSOR_INDEX 0
|
||||
|
||||
@ -745,7 +740,6 @@ static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
|
||||
struct pseries_errorlog *pseries_log;
|
||||
struct pseries_mc_errorlog *mce_log = NULL;
|
||||
int disposition = rtas_error_disposition(errp);
|
||||
unsigned long msr;
|
||||
u8 error_type;
|
||||
|
||||
if (!rtas_error_extended(errp))
|
||||
@ -759,40 +753,16 @@ static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
|
||||
error_type = mce_log->error_type;
|
||||
|
||||
disposition = mce_handle_err_realmode(disposition, error_type);
|
||||
|
||||
/*
|
||||
* Enable translation as we will be accessing per-cpu variables
|
||||
* in save_mce_event() which may fall outside RMO region, also
|
||||
* leave it enabled because subsequently we will be queuing work
|
||||
* to workqueues where again per-cpu variables accessed, besides
|
||||
* fwnmi_release_errinfo() crashes when called in realmode on
|
||||
* pseries.
|
||||
* Note: All the realmode handling like flushing SLB entries for
|
||||
* SLB multihit is done by now.
|
||||
*/
|
||||
out:
|
||||
msr = mfmsr();
|
||||
mtmsr(msr | MSR_IR | MSR_DR);
|
||||
|
||||
disposition = mce_handle_err_virtmode(regs, errp, mce_log,
|
||||
disposition);
|
||||
|
||||
/*
|
||||
* Queue irq work to log this rtas event later.
|
||||
* irq_work_queue uses per-cpu variables, so do this in virt
|
||||
* mode as well.
|
||||
*/
|
||||
irq_work_queue(&mce_errlog_process_work);
|
||||
|
||||
mtmsr(msr);
|
||||
|
||||
return disposition;
|
||||
}
|
||||
|
||||
/*
|
||||
* Process MCE rtas errlog event.
|
||||
*/
|
||||
static void mce_process_errlog_event(struct irq_work *work)
|
||||
void pSeries_machine_check_log_err(void)
|
||||
{
|
||||
struct rtas_error_log *err;
|
||||
|
||||
|
@ -1086,6 +1086,7 @@ define_machine(pseries) {
|
||||
.system_reset_exception = pSeries_system_reset_exception,
|
||||
.machine_check_early = pseries_machine_check_realmode,
|
||||
.machine_check_exception = pSeries_machine_check_exception,
|
||||
.machine_check_log_err = pSeries_machine_check_log_err,
|
||||
#ifdef CONFIG_KEXEC_CORE
|
||||
.machine_kexec = pSeries_machine_kexec,
|
||||
.kexec_cpu_down = pseries_kexec_cpu_down,
|
||||
|
Loading…
Reference in New Issue
Block a user