mm/mmu_notifier: convert user range->blockable to helper function
Use the mmu_notifier_range_blockable() helper function instead of directly
dereferencing the range->blockable field.  This is done to make it easier
to change the mmu_notifier range field.

This patch is the outcome of the following coccinelle patch:

%<-------------------------------------------------------------------
@@
identifier I1, FN;
@@
FN(..., struct mmu_notifier_range *I1, ...) {
<...
-I1->blockable
+mmu_notifier_range_blockable(I1)
...>
}
------------------------------------------------------------------->%

spatch --in-place --sp-file blockable.spatch --dir .

Link: http://lkml.kernel.org/r/20190326164747.24405-3-jglisse@redhat.com
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Reviewed-by: Ralph Campbell <rcampbell@nvidia.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Felix Kuehling <Felix.Kuehling@amd.com>
Cc: Jason Gunthorpe <jgg@mellanox.com>
Cc: Ross Zwisler <zwisler@kernel.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krcmar <rkrcmar@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Christian Koenig <christian.koenig@amd.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit dfcd66604c
parent 4a83bfe916
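The helper all the call sites below are converted to is introduced by the parent
commit (4a83bfe916).  As a minimal sketch, assuming the field-based implementation
that exists at this point in the series (later work replaces the field with a
flags test), it amounts to:

        /* include/linux/mmu_notifier.h -- sketch of the field-based helper */
        static inline bool
        mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
        {
                return range->blockable;
        }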
@@ -256,14 +256,14 @@ static int amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
 	/* TODO we should be able to split locking for interval tree and
 	 * amdgpu_mn_invalidate_node
 	 */
-	if (amdgpu_mn_read_lock(amn, range->blockable))
+	if (amdgpu_mn_read_lock(amn, mmu_notifier_range_blockable(range)))
 		return -EAGAIN;
 
 	it = interval_tree_iter_first(&amn->objects, range->start, end);
 	while (it) {
 		struct amdgpu_mn_node *node;
 
-		if (!range->blockable) {
+		if (!mmu_notifier_range_blockable(range)) {
 			amdgpu_mn_read_unlock(amn);
 			return -EAGAIN;
 		}
@@ -299,7 +299,7 @@ static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
 	/* notification is exclusive, but interval is inclusive */
 	end = range->end - 1;
 
-	if (amdgpu_mn_read_lock(amn, range->blockable))
+	if (amdgpu_mn_read_lock(amn, mmu_notifier_range_blockable(range)))
 		return -EAGAIN;
 
 	it = interval_tree_iter_first(&amn->objects, range->start, end);
@@ -307,7 +307,7 @@ static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
 		struct amdgpu_mn_node *node;
 		struct amdgpu_bo *bo;
 
-		if (!range->blockable) {
+		if (!mmu_notifier_range_blockable(range)) {
 			amdgpu_mn_read_unlock(amn);
 			return -EAGAIN;
 		}
@@ -122,7 +122,7 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
 	while (it) {
 		struct drm_i915_gem_object *obj;
 
-		if (!range->blockable) {
+		if (!mmu_notifier_range_blockable(range)) {
 			ret = -EAGAIN;
 			break;
 		}
@@ -133,7 +133,7 @@ static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
 	/* TODO we should be able to split locking for interval tree and
 	 * the tear down.
 	 */
-	if (range->blockable)
+	if (mmu_notifier_range_blockable(range))
 		mutex_lock(&rmn->lock);
 	else if (!mutex_trylock(&rmn->lock))
 		return -EAGAIN;
@@ -144,7 +144,7 @@ static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
 		struct radeon_bo *bo;
 		long r;
 
-		if (!range->blockable) {
+		if (!mmu_notifier_range_blockable(range)) {
 			ret = -EAGAIN;
 			goto out_unlock;
 		}
@@ -152,7 +152,7 @@ static int ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
 	struct ib_ucontext_per_mm *per_mm =
 		container_of(mn, struct ib_ucontext_per_mm, mn);
 
-	if (range->blockable)
+	if (mmu_notifier_range_blockable(range))
 		down_read(&per_mm->umem_rwsem);
 	else if (!down_read_trylock(&per_mm->umem_rwsem))
 		return -EAGAIN;
@@ -170,7 +170,8 @@ static int ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
 	return rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, range->start,
 					     range->end,
 					     invalidate_range_start_trampoline,
-					     range->blockable, NULL);
+					     mmu_notifier_range_blockable(range),
+					     NULL);
 }
 
 static int invalidate_range_end_trampoline(struct ib_umem_odp *item, u64 start,
@@ -526,20 +526,20 @@ static int mn_invl_range_start(struct mmu_notifier *mn,
 	struct gntdev_grant_map *map;
 	int ret = 0;
 
-	if (range->blockable)
+	if (mmu_notifier_range_blockable(range))
 		mutex_lock(&priv->lock);
 	else if (!mutex_trylock(&priv->lock))
 		return -EAGAIN;
 
 	list_for_each_entry(map, &priv->maps, next) {
 		ret = unmap_if_in_range(map, range->start, range->end,
-					range->blockable);
+					mmu_notifier_range_blockable(range));
 		if (ret)
 			goto out_unlock;
 	}
 	list_for_each_entry(map, &priv->freeable_maps, next) {
 		ret = unmap_if_in_range(map, range->start, range->end,
-					range->blockable);
+					mmu_notifier_range_blockable(range));
 		if (ret)
 			goto out_unlock;
 	}
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -205,9 +205,9 @@ static int hmm_invalidate_range_start(struct mmu_notifier *mn,
 	update.start = nrange->start;
 	update.end = nrange->end;
 	update.event = HMM_UPDATE_INVALIDATE;
-	update.blockable = nrange->blockable;
+	update.blockable = mmu_notifier_range_blockable(nrange);
 
-	if (nrange->blockable)
+	if (mmu_notifier_range_blockable(nrange))
 		mutex_lock(&hmm->lock);
 	else if (!mutex_trylock(&hmm->lock)) {
 		ret = -EAGAIN;
@@ -222,7 +222,7 @@ static int hmm_invalidate_range_start(struct mmu_notifier *mn,
 	}
 	mutex_unlock(&hmm->lock);
 
-	if (nrange->blockable)
+	if (mmu_notifier_range_blockable(nrange))
 		down_read(&hmm->mirrors_sem);
 	else if (!down_read_trylock(&hmm->mirrors_sem)) {
 		ret = -EAGAIN;
@@ -180,7 +180,7 @@ int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
 		if (_ret) {
 			pr_info("%pS callback failed with %d in %sblockable context.\n",
 				mn->ops->invalidate_range_start, _ret,
-				!range->blockable ? "non-" : "");
+				!mmu_notifier_range_blockable(range) ? "non-" : "");
 			ret = _ret;
 		}
 	}
@@ -391,7 +391,8 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 	spin_unlock(&kvm->mmu_lock);
 
 	ret = kvm_arch_mmu_notifier_invalidate_range(kvm, range->start,
-					range->end, range->blockable);
+					range->end,
+					mmu_notifier_range_blockable(range));
 
 	srcu_read_unlock(&kvm->srcu, idx);
 
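Taken together, the converted callbacks all follow the same convention: a
blockable invalidation may sleep on the subsystem lock, a non-blockable one
must use a trylock and report -EAGAIN instead of sleeping.  A standalone
sketch of that pattern, with my_lock and my_teardown as placeholder names
rather than kernel symbols:

        static int my_invalidate_range_start(struct mmu_notifier *mn,
                                             const struct mmu_notifier_range *range)
        {
                if (mmu_notifier_range_blockable(range))
                        mutex_lock(&my_lock);           /* allowed to sleep */
                else if (!mutex_trylock(&my_lock))
                        return -EAGAIN;                 /* must not sleep; give up */

                my_teardown(range->start, range->end);  /* placeholder for driver work */
                mutex_unlock(&my_lock);
                return 0;
        }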