[PARISC] Enable interrupts while spinning
Use the __raw_spin_lock_flags routine so we can take an interrupt while spinning. This re-fixes a bug jejb found on 2005-10-20:

 - CPU0 does a flush_tlb_all while holding the vmlist_lock for write.
 - CPU1 tries a cat of /proc/meminfo, which tries to acquire vmlist_lock for read.
 - CPU1 is now spinning with interrupts disabled.
 - CPU0 tries to execute a smp_call_function to flush the local TLB caches.

This is now a deadlock, because CPU1 is spinning with interrupts disabled and can never receive the IPI.

Signed-off-by: Matthew Wilcox <matthew@wil.cx>
Signed-off-by: Kyle McMartin <kyle@parisc-linux.org>
This commit is contained in:
parent
56f335c89e
commit
65ee8f0a7f
@ -57,35 +57,42 @@ static inline int __raw_spin_trylock(raw_spinlock_t *x)
|
|||||||
|
|
||||||
/*
|
/*
|
||||||
* Read-write spinlocks, allowing multiple readers but only one writer.
|
* Read-write spinlocks, allowing multiple readers but only one writer.
|
||||||
* The spinlock is held by the writer, preventing any readers or other
|
* Linux rwlocks are unfair to writers; they can be starved for an indefinite
|
||||||
* writers from grabbing the rwlock. Readers use the lock to serialise their
|
* time by readers. With care, they can also be taken in interrupt context.
|
||||||
* access to the counter (which records how many readers currently hold the
|
*
|
||||||
* lock). Linux rwlocks are unfair to writers; they can be starved for
|
* In the PA-RISC implementation, we have a spinlock and a counter.
|
||||||
* an indefinite time by readers. They can also be taken in interrupt context,
|
* Readers use the lock to serialise their access to the counter (which
|
||||||
* so we have to disable interrupts when acquiring the spin lock to be sure
|
* records how many readers currently hold the lock).
|
||||||
* that an interrupting reader doesn't get an inconsistent view of the lock.
|
* Writers hold the spinlock, preventing any readers or other writers from
|
||||||
|
* grabbing the rwlock.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
/* Note that we have to ensure interrupts are disabled in case we're
|
||||||
|
* interrupted by some other code that wants to grab the same read lock */
|
||||||
static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
|
static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
|
||||||
{
|
{
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
local_irq_save(flags);
|
local_irq_save(flags);
|
||||||
__raw_spin_lock(&rw->lock);
|
__raw_spin_lock_flags(&rw->lock, flags);
|
||||||
rw->counter++;
|
rw->counter++;
|
||||||
__raw_spin_unlock(&rw->lock);
|
__raw_spin_unlock(&rw->lock);
|
||||||
local_irq_restore(flags);
|
local_irq_restore(flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Note that we have to ensure interrupts are disabled in case we're
|
||||||
|
* interrupted by some other code that wants to grab the same read lock */
|
||||||
static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
|
static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
|
||||||
{
|
{
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
local_irq_save(flags);
|
local_irq_save(flags);
|
||||||
__raw_spin_lock(&rw->lock);
|
__raw_spin_lock_flags(&rw->lock, flags);
|
||||||
rw->counter--;
|
rw->counter--;
|
||||||
__raw_spin_unlock(&rw->lock);
|
__raw_spin_unlock(&rw->lock);
|
||||||
local_irq_restore(flags);
|
local_irq_restore(flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Note that we have to ensure interrupts are disabled in case we're
|
||||||
|
* interrupted by some other code that wants to grab the same read lock */
|
||||||
static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
|
static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
|
||||||
{
|
{
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
@ -110,12 +117,14 @@ static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
|
|||||||
goto retry;
|
goto retry;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Note that we have to ensure interrupts are disabled in case we're
|
||||||
|
* interrupted by some other code that wants to read_trylock() this lock */
|
||||||
static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
|
static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
|
||||||
{
|
{
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
retry:
|
retry:
|
||||||
local_irq_save(flags);
|
local_irq_save(flags);
|
||||||
__raw_spin_lock(&rw->lock);
|
__raw_spin_lock_flags(&rw->lock, flags);
|
||||||
|
|
||||||
if (rw->counter != 0) {
|
if (rw->counter != 0) {
|
||||||
__raw_spin_unlock(&rw->lock);
|
__raw_spin_unlock(&rw->lock);
|
||||||
@ -138,6 +147,8 @@ static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
|
|||||||
__raw_spin_unlock(&rw->lock);
|
__raw_spin_unlock(&rw->lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Note that we have to ensure interrupts are disabled in case we're
|
||||||
|
* interrupted by some other code that wants to read_trylock() this lock */
|
||||||
static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
|
static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
|
||||||
{
|
{
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
Loading…
Reference in New Issue
Block a user