[S390] spinlock: check virtual cpu running status
This patch introduces a new function that checks the running status of a cpu in a hypervisor. This status is not virtualized, so the check is only correct if running in an LPAR. On acquiring a spinlock, if the cpu holding the lock is scheduled by the hypervisor, we do a busy wait on the lock. If it is not scheduled, we yield over to that cpu.

Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 8387c736fc
commit 59b6978745
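In outline, the patched wait loop behaves as follows. This is a condensed sketch of the code introduced below, merging the plain and irq-flags variants; it is not the literal kernel source:

```c
/*
 * Condensed sketch of the new contended-lock strategy (see the
 * spinlock.c hunks below for the real code, including the irq-flags
 * variant). owner_cpu holds ~cpu of the holder, 0 if unlocked.
 */
void arch_spin_lock_wait_sketch(arch_spinlock_t *lp)
{
	unsigned int cpu = ~smp_processor_id();
	unsigned int owner;
	int count;

	while (1) {
		owner = lp->owner_cpu;
		/* Lock free, or holder currently running: busy wait. */
		if (!owner || smp_vcpu_scheduled(~owner)) {
			for (count = spin_retry; count > 0; count--)
				if (!arch_spin_is_locked(lp) &&
				    _raw_compare_and_swap(&lp->owner_cpu,
							  0, cpu) == 0)
					return;
			/* On LPAR the running status is reliable: keep spinning. */
			if (MACHINE_IS_LPAR)
				continue;
		}
		/* Holder not scheduled (or status untrusted): yield to it. */
		owner = lp->owner_cpu;
		if (owner)
			_raw_yield_cpu(~owner);
		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
			return;
	}
}
```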
--- a/arch/s390/include/asm/sigp.h
+++ b/arch/s390/include/asm/sigp.h
@@ -25,29 +25,28 @@ static inline int cpu_logical_map(int cpu)
 }
 
 enum {
-	sigp_unassigned=0x0,
-	sigp_sense,
-	sigp_external_call,
-	sigp_emergency_signal,
-	sigp_start,
-	sigp_stop,
-	sigp_restart,
-	sigp_unassigned1,
-	sigp_unassigned2,
-	sigp_stop_and_store_status,
-	sigp_unassigned3,
-	sigp_initial_cpu_reset,
-	sigp_cpu_reset,
-	sigp_set_prefix,
-	sigp_store_status_at_address,
-	sigp_store_extended_status_at_address
+	sigp_sense = 1,
+	sigp_external_call = 2,
+	sigp_emergency_signal = 3,
+	sigp_start = 4,
+	sigp_stop = 5,
+	sigp_restart = 6,
+	sigp_stop_and_store_status = 9,
+	sigp_initial_cpu_reset = 11,
+	sigp_cpu_reset = 12,
+	sigp_set_prefix = 13,
+	sigp_store_status_at_address = 14,
+	sigp_store_extended_status_at_address = 15,
+	sigp_set_architecture = 18,
+	sigp_conditional_emergency_signal = 19,
+	sigp_sense_running = 21,
 };
 
 enum {
 	sigp_order_code_accepted = 0,
-	sigp_status_stored,
-	sigp_busy,
-	sigp_not_operational
+	sigp_status_stored = 1,
+	sigp_busy = 2,
+	sigp_not_operational = 3,
 };
 
 /*
@@ -57,7 +56,6 @@ enum {
 	ec_schedule = 0,
 	ec_call_function,
 	ec_call_function_single,
-	ec_bit_last
 };
 
 /*
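Note the effect of the renumbering above: the placeholder sigp_unassigned* entries are gone and every constant now carries its SIGP order code explicitly, which also makes room for the newly added sigp_set_architecture (18), sigp_conditional_emergency_signal (19) and sigp_sense_running (21). A hypothetical compile-time check, purely to illustrate the encoding and not part of the patch:

```c
#include <linux/kernel.h>

/* Hypothetical sanity check: the enum values are now the SIGP order
 * codes themselves, gaps (7-8, 10, 16-17, 20) included. */
static inline void sigp_order_codes_check(void)
{
	BUILD_BUG_ON(sigp_stop_and_store_status != 9);
	BUILD_BUG_ON(sigp_initial_cpu_reset != 11);
	BUILD_BUG_ON(sigp_sense_running != 21);
}
```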
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -36,6 +36,28 @@ extern void smp_switch_to_cpu(void (*)(void *), void *, unsigned long sp,
 			      int from, int to);
 extern void smp_restart_cpu(void);
 
+/*
+ * returns 1 if (virtual) cpu is scheduled
+ * returns 0 otherwise
+ */
+static inline int smp_vcpu_scheduled(int cpu)
+{
+	u32 status;
+
+	switch (sigp_ps(&status, 0, cpu, sigp_sense_running)) {
+	case sigp_status_stored:
+		/* Check for running status */
+		if (status & 0x400)
+			return 0;
+		break;
+	case sigp_not_operational:
+		return 0;
+	default:
+		break;
+	}
+	return 1;
+}
+
 #else /* CONFIG_SMP */
 
 static inline void smp_switch_to_ipl_cpu(void (*func)(void *), void *data)
@@ -43,6 +65,8 @@ static inline void smp_switch_to_ipl_cpu(void (*func)(void *), void *data)
 	func(data);
 }
 
+#define smp_vcpu_scheduled	(1)
+
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_HOTPLUG_CPU
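smp_vcpu_scheduled() drives the wait-strategy decision: a condition code of sigp_status_stored with bit 0x400 set in the stored status means the addressed cpu is currently not running. A minimal, hypothetical caller might look like this (sketch only; the real users are the spinlock paths below):

```c
/* Hypothetical sketch: pick a wait strategy for a lock holder.
 * owner holds ~cpu (as in lp->owner_cpu), so ~owner is the cpu number. */
static inline void holder_wait_sketch(unsigned int owner)
{
	if (smp_vcpu_scheduled(~owner))
		cpu_relax();		/* holder is running: spinning pays off */
	else
		_raw_yield_cpu(~owner);	/* holder is preempted: donate the slice */
}
```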
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -43,16 +43,24 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
 {
 	int count = spin_retry;
 	unsigned int cpu = ~smp_processor_id();
+	unsigned int owner;
 
 	while (1) {
-		if (count-- <= 0) {
-			unsigned int owner = lp->owner_cpu;
-			if (owner != 0)
-				_raw_yield_cpu(~owner);
-			count = spin_retry;
-		}
-		if (arch_spin_is_locked(lp))
-			continue;
+		owner = lp->owner_cpu;
+		if (!owner || smp_vcpu_scheduled(~owner)) {
+			for (count = spin_retry; count > 0; count--) {
+				if (arch_spin_is_locked(lp))
+					continue;
+				if (_raw_compare_and_swap(&lp->owner_cpu, 0,
+							  cpu) == 0)
+					return;
+			}
+			if (MACHINE_IS_LPAR)
+				continue;
+		}
+		owner = lp->owner_cpu;
+		if (owner)
+			_raw_yield_cpu(~owner);
 		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
 			return;
 	}
@@ -63,18 +71,28 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 {
 	int count = spin_retry;
 	unsigned int cpu = ~smp_processor_id();
+	unsigned int owner;
 
 	local_irq_restore(flags);
 	while (1) {
-		if (count-- <= 0) {
-			unsigned int owner = lp->owner_cpu;
-			if (owner != 0)
-				_raw_yield_cpu(~owner);
-			count = spin_retry;
-		}
-		if (arch_spin_is_locked(lp))
-			continue;
+		owner = lp->owner_cpu;
+		if (!owner || smp_vcpu_scheduled(~owner)) {
+			for (count = spin_retry; count > 0; count--) {
+				if (arch_spin_is_locked(lp))
+					continue;
+				local_irq_disable();
+				if (_raw_compare_and_swap(&lp->owner_cpu, 0,
+							  cpu) == 0)
+					return;
+				local_irq_restore(flags);
+			}
+			if (MACHINE_IS_LPAR)
+				continue;
+		}
+		owner = lp->owner_cpu;
+		if (owner)
+			_raw_yield_cpu(~owner);
 		local_irq_disable();
 		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
 			return;
 		local_irq_restore(flags);
@@ -100,9 +118,12 @@ EXPORT_SYMBOL(arch_spin_trylock_retry);
 void arch_spin_relax(arch_spinlock_t *lock)
 {
 	unsigned int cpu = lock->owner_cpu;
-	if (cpu != 0)
-		_raw_yield_cpu(~cpu);
+	if (cpu != 0) {
+		if (MACHINE_IS_VM || MACHINE_IS_KVM ||
+		    !smp_vcpu_scheduled(~cpu))
+			_raw_yield_cpu(~cpu);
+	}
 }
 EXPORT_SYMBOL(arch_spin_relax);
 
 void _raw_read_lock_wait(arch_rwlock_t *rw)
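For context, the slowpath patched above is only entered after the inline fastpath in arch/s390/include/asm/spinlock.h fails its first compare-and-swap. Roughly, as a reconstructed sketch that is not part of this patch:

```c
/* Rough sketch of the uncontended fastpath (reconstructed, not part
 * of this patch): one compare-and-swap, then fall into the wait loop. */
static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	if (likely(_raw_compare_and_swap(&lp->owner_cpu, 0,
					 ~smp_processor_id()) == 0))
		return;
	arch_spin_lock_wait(lp);
}
```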