mm/memory-failure.c: fix spinlock vs mutex order
We cannot take a mutex while holding a spinlock, so flip the order and fix the locking documentation.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 9b679320a5
parent aa2c96d6f3
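
The rule this patch enforces, made concrete: a mutex may sleep when contended, so it must never be acquired while a spinning lock such as the tasklist_lock rwlock is held; the sleeping lock has to be taken first. Below is a minimal kernel-style sketch of the broken and the corrected acquisition order. It is illustrative only, not part of the commit, and the demo_* lock instances are hypothetical.

#include <linux/mutex.h>
#include <linux/spinlock.h>

static DEFINE_RWLOCK(demo_tasklist_lock);     /* spinning reader/writer lock */
static DEFINE_MUTEX(demo_sleeping_mutex);     /* sleeping lock */

static void demo_wrong_order(void)
{
        read_lock(&demo_tasklist_lock);       /* spinning lock held... */
        mutex_lock(&demo_sleeping_mutex);     /* BUG: may sleep while the rwlock is held */
        /* ... */
        mutex_unlock(&demo_sleeping_mutex);
        read_unlock(&demo_tasklist_lock);
}

static void demo_fixed_order(void)
{
        mutex_lock(&demo_sleeping_mutex);     /* take the sleeping lock first */
        read_lock(&demo_tasklist_lock);       /* then the spinning lock */
        /* ... */
        read_unlock(&demo_tasklist_lock);     /* release in reverse order */
        mutex_unlock(&demo_sleeping_mutex);
}

This is exactly the reordering applied to collect_procs_anon() and collect_procs_file() in the hunks below.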
@@ -391,10 +391,11 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
 	struct task_struct *tsk;
 	struct anon_vma *av;
 
-	read_lock(&tasklist_lock);
 	av = page_lock_anon_vma(page);
 	if (av == NULL)	/* Not actually mapped anymore */
-		goto out;
+		return;
+
+	read_lock(&tasklist_lock);
 	for_each_process (tsk) {
 		struct anon_vma_chain *vmac;
 
@@ -408,9 +409,8 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
 				add_to_kill(tsk, page, vma, to_kill, tkc);
 		}
 	}
-	page_unlock_anon_vma(av);
-out:
 	read_unlock(&tasklist_lock);
+	page_unlock_anon_vma(av);
 }
 
 /*
@@ -424,17 +424,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
 	struct prio_tree_iter iter;
 	struct address_space *mapping = page->mapping;
 
-	/*
-	 * A note on the locking order between the two locks.
-	 * We don't rely on this particular order.
-	 * If you have some other code that needs a different order
-	 * feel free to switch them around. Or add a reverse link
-	 * from mm_struct to task_struct, then this could be all
-	 * done without taking tasklist_lock and looping over all tasks.
-	 */
-
-	read_lock(&tasklist_lock);
 	mutex_lock(&mapping->i_mmap_mutex);
+	read_lock(&tasklist_lock);
 	for_each_process(tsk) {
 		pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
 
@@ -454,8 +445,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
 				add_to_kill(tsk, page, vma, to_kill, tkc);
 		}
 	}
-	mutex_unlock(&mapping->i_mmap_mutex);
 	read_unlock(&tasklist_lock);
+	mutex_unlock(&mapping->i_mmap_mutex);
 }
 
 /*

@@ -38,9 +38,8 @@
  *                           in arch-dependent flush_dcache_mmap_lock,
  *                           within inode_wb_list_lock in __sync_single_inode)
  *
- * (code doesn't rely on that order so it could be switched around)
- * ->tasklist_lock
- *   anon_vma->mutex      (memory_failure, collect_procs_anon)
- *     pte map lock
+ * anon_vma->mutex,mapping->i_mutex      (memory_failure, collect_procs_anon)
+ *   ->tasklist_lock
+ *     pte map lock
  */
 
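For reference, the nesting that the rewritten documentation comment (the lock-ordering block that, in mainline, sits at the top of mm/rmap.c) records after this patch is: the sleeping anon_vma->mutex / mapping->i_mutex outermost, ->tasklist_lock inside it, and the pte map lock innermost. A small kernel-style sketch of that nesting, again with hypothetical demo_* lock instances rather than the real ones:

#include <linux/mutex.h>
#include <linux/spinlock.h>

static DEFINE_MUTEX(demo_outer_mutex);        /* stands in for anon_vma->mutex */
static DEFINE_RWLOCK(demo_tasklist_lock);     /* stands in for ->tasklist_lock */
static DEFINE_SPINLOCK(demo_pte_lock);        /* stands in for the pte map lock */

static void demo_documented_nesting(void)
{
        mutex_lock(&demo_outer_mutex);        /* outermost: the only level allowed to sleep */
        read_lock(&demo_tasklist_lock);       /* spinning lock nested inside the mutex */
        spin_lock(&demo_pte_lock);            /* innermost spinning lock */
        /* ... walk the mappings ... */
        spin_unlock(&demo_pte_lock);
        read_unlock(&demo_tasklist_lock);
        mutex_unlock(&demo_outer_mutex);
}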
|