x86/delay: Refactor delay_mwaitx() for TPAUSE support
Refactor the code to make it easier to add a new model-specific function that delays for a number of cycles. No functional change.

Co-developed-by: Fenghua Yu <fenghua.yu@intel.com>
Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
Signed-off-by: Kyung Min Park <kyung.min.park@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Tony Luck <tony.luck@intel.com>
Link: https://lkml.kernel.org/r/1587757076-30337-3-git-send-email-kyung.min.park@intel.com
Commit: 46f90c7aad (this commit) — parent: e882489024
@ -34,6 +34,7 @@ static void delay_loop(u64 __loops);
|
||||
* during boot.
|
||||
*/
|
||||
static void (*delay_fn)(u64) __ro_after_init = delay_loop;
|
||||
static void (*delay_halt_fn)(u64 start, u64 cycles) __ro_after_init;
|
||||
|
||||
/* simple loop based delay: */
|
||||
static void delay_loop(u64 __loops)
|
||||
@ -100,9 +101,33 @@ static void delay_tsc(u64 cycles)
|
||||
* counts with TSC frequency. The input value is the number of TSC cycles
|
||||
* to wait. MWAITX will also exit when the timer expires.
|
||||
*/
|
||||
/*
 * Wait for a number of TSC cycles using MWAITX.
 *
 * @unused:  start timestamp; not needed here because MWAITX carries its
 *           own timer which counts with TSC frequency
 * @cycles:  number of TSC cycles to wait
 *
 * MWAITX may also exit early on a store to the monitored cache line, so
 * the caller (delay_halt()) re-checks the elapsed time and calls again
 * until the full delay has passed.
 */
static void delay_halt_mwaitx(u64 unused, u64 cycles)
{
	u64 delay;

	/* The MWAITX timer operand is limited; clamp the requested wait. */
	delay = min_t(u64, MWAITX_MAX_WAIT_CYCLES, cycles);

	/*
	 * Use cpu_tss_rw as a cacheline-aligned, seldomly accessed per-cpu
	 * variable as the monitor target.
	 */
	__monitorx(raw_cpu_ptr(&cpu_tss_rw), 0, 0);

	/*
	 * AMD, like Intel, supports the EAX hint and EAX=0xf means, do not
	 * enter any deep C-state and we use it here in delay() to minimize
	 * wakeup latency.
	 */
	__mwaitx(MWAITX_DISABLE_CSTATES, delay, MWAITX_ECX_TIMER_ENABLE);
}
|
||||
|
||||
/*
|
||||
* Call a vendor specific function to delay for a given amount of time. Because
|
||||
* these functions may return earlier than requested, check for actual elapsed
|
||||
* time and call again until done.
|
||||
*/
|
||||
static void delay_halt(u64 __cycles)
|
||||
{
|
||||
u64 start, end, cycles = __cycles;
|
||||
|
||||
/*
|
||||
* Timer value of 0 causes MWAITX to wait indefinitely, unless there
|
||||
@ -114,21 +139,7 @@ static void delay_mwaitx(u64 cycles)
|
||||
start = rdtsc_ordered();
|
||||
|
||||
for (;;) {
|
||||
delay = min_t(u64, MWAITX_MAX_WAIT_CYCLES, cycles);
|
||||
|
||||
/*
|
||||
* Use cpu_tss_rw as a cacheline-aligned, seldomly
|
||||
* accessed per-cpu variable as the monitor target.
|
||||
*/
|
||||
__monitorx(raw_cpu_ptr(&cpu_tss_rw), 0, 0);
|
||||
|
||||
/*
|
||||
* AMD, like Intel's MWAIT version, supports the EAX hint and
|
||||
* EAX=0xf0 means, do not enter any deep C-state and we use it
|
||||
* here in delay() to minimize wakeup latency.
|
||||
*/
|
||||
__mwaitx(MWAITX_DISABLE_CSTATES, delay, MWAITX_ECX_TIMER_ENABLE);
|
||||
|
||||
delay_halt_fn(start, cycles);
|
||||
end = rdtsc_ordered();
|
||||
|
||||
if (cycles <= end - start)
|
||||
@ -147,7 +158,8 @@ void __init use_tsc_delay(void)
|
||||
|
||||
void use_mwaitx_delay(void)
|
||||
{
|
||||
delay_fn = delay_mwaitx;
|
||||
delay_halt_fn = delay_halt_mwaitx;
|
||||
delay_fn = delay_halt;
|
||||
}
|
||||
|
||||
int read_current_timer(unsigned long *timer_val)
|
||||
|
Loading…
Reference in New Issue
Block a user