@@ -111,10 +111,10 @@ void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
 	 */
 	if (eb->lock_nested && current->pid == eb->lock_owner)
 		return;
-	if (atomic_read(&eb->blocking_writers) == 0) {
+	if (eb->blocking_writers == 0) {
 		btrfs_assert_spinning_writers_put(eb);
 		btrfs_assert_tree_locked(eb);
-		atomic_inc(&eb->blocking_writers);
+		eb->blocking_writers++;
 		write_unlock(&eb->lock);
 	}
 }
@@ -148,12 +148,11 @@ void btrfs_clear_lock_blocking_write(struct extent_buffer *eb)
 	 */
 	if (eb->lock_nested && current->pid == eb->lock_owner)
 		return;
-	BUG_ON(atomic_read(&eb->blocking_writers) != 1);
 	write_lock(&eb->lock);
+	BUG_ON(eb->blocking_writers != 1);
 	btrfs_assert_spinning_writers_get(eb);
-	/* atomic_dec_and_test implies a barrier */
-	if (atomic_dec_and_test(&eb->blocking_writers))
-		cond_wake_up_nomb(&eb->write_lock_wq);
+	if (--eb->blocking_writers == 0)
+		cond_wake_up(&eb->write_lock_wq);
 }
 
 /*
@@ -167,12 +166,10 @@ void btrfs_tree_read_lock(struct extent_buffer *eb)
 	if (trace_btrfs_tree_read_lock_enabled())
 		start_ns = ktime_get_ns();
 again:
-	BUG_ON(!atomic_read(&eb->blocking_writers) &&
-	       current->pid == eb->lock_owner);
-
 	read_lock(&eb->lock);
-	if (atomic_read(&eb->blocking_writers) &&
-	    current->pid == eb->lock_owner) {
+	BUG_ON(eb->blocking_writers == 0 &&
+	       current->pid == eb->lock_owner);
+	if (eb->blocking_writers && current->pid == eb->lock_owner) {
 		/*
 		 * This extent is already write-locked by our thread. We allow
 		 * an additional read lock to be added because it's for the same
@@ -185,10 +182,10 @@ again:
 		trace_btrfs_tree_read_lock(eb, start_ns);
 		return;
 	}
-	if (atomic_read(&eb->blocking_writers)) {
+	if (eb->blocking_writers) {
 		read_unlock(&eb->lock);
 		wait_event(eb->write_lock_wq,
-			   atomic_read(&eb->blocking_writers) == 0);
+			   eb->blocking_writers == 0);
 		goto again;
 	}
 	btrfs_assert_tree_read_locks_get(eb);
@@ -203,11 +200,11 @@ again:
  */
 int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
 {
-	if (atomic_read(&eb->blocking_writers))
+	if (eb->blocking_writers)
 		return 0;
 
 	read_lock(&eb->lock);
-	if (atomic_read(&eb->blocking_writers)) {
+	if (eb->blocking_writers) {
 		read_unlock(&eb->lock);
 		return 0;
 	}
@@ -223,13 +220,13 @@ int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
  */
 int btrfs_try_tree_read_lock(struct extent_buffer *eb)
 {
-	if (atomic_read(&eb->blocking_writers))
+	if (eb->blocking_writers)
 		return 0;
 
 	if (!read_trylock(&eb->lock))
 		return 0;
 
-	if (atomic_read(&eb->blocking_writers)) {
+	if (eb->blocking_writers) {
 		read_unlock(&eb->lock);
 		return 0;
 	}
@@ -245,13 +242,11 @@ int btrfs_try_tree_read_lock(struct extent_buffer *eb)
  */
 int btrfs_try_tree_write_lock(struct extent_buffer *eb)
 {
-	if (atomic_read(&eb->blocking_writers) ||
-	    atomic_read(&eb->blocking_readers))
+	if (eb->blocking_writers || atomic_read(&eb->blocking_readers))
 		return 0;
 
 	write_lock(&eb->lock);
-	if (atomic_read(&eb->blocking_writers) ||
-	    atomic_read(&eb->blocking_readers)) {
+	if (eb->blocking_writers || atomic_read(&eb->blocking_readers)) {
 		write_unlock(&eb->lock);
 		return 0;
 	}
@@ -322,10 +317,9 @@ void btrfs_tree_lock(struct extent_buffer *eb)
 	WARN_ON(eb->lock_owner == current->pid);
 again:
 	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
-	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
+	wait_event(eb->write_lock_wq, eb->blocking_writers == 0);
 	write_lock(&eb->lock);
-	if (atomic_read(&eb->blocking_readers) ||
-	    atomic_read(&eb->blocking_writers)) {
+	if (atomic_read(&eb->blocking_readers) || eb->blocking_writers) {
 		write_unlock(&eb->lock);
 		goto again;
 	}
@@ -340,7 +334,7 @@ again:
  */
 void btrfs_tree_unlock(struct extent_buffer *eb)
 {
-	int blockers = atomic_read(&eb->blocking_writers);
+	int blockers = eb->blocking_writers;
 
 	BUG_ON(blockers > 1);
 
@@ -351,7 +345,7 @@ void btrfs_tree_unlock(struct extent_buffer *eb)
 
 	if (blockers) {
 		btrfs_assert_no_spinning_writers(eb);
-		atomic_dec(&eb->blocking_writers);
+		eb->blocking_writers--;
 		/* Use the lighter barrier after atomic */
 		smp_mb__after_atomic();
 		cond_wake_up_nomb(&eb->write_lock_wq);