drm/msm/gem: Consolidate pin/unpin paths

Avoid having multiple spots where we increment/decrement pin_count (and
associated LRU updating)

Signed-off-by: Rob Clark <robdclark@chromium.org>
Patchwork: https://patchwork.freedesktop.org/patch/496130/
Link: https://lore.kernel.org/r/20220802155152.1727594-8-robdclark@gmail.com
Rob Clark 2022-08-02 08:51:40 -07:00
parent e7cd5ee9aa
commit 9fd5ff7f28


@@ -190,7 +190,7 @@ static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
 	p = get_pages(obj);
 	if (!IS_ERR(p)) {
-		msm_obj->pin_count++;
+		to_msm_bo(obj)->pin_count++;
 		update_lru(obj);
 	}
@@ -213,9 +213,7 @@ void msm_gem_unpin_pages(struct drm_gem_object *obj)
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
 	msm_gem_lock(obj);
-	msm_obj->pin_count--;
-	GEM_WARN_ON(msm_obj->pin_count < 0);
-	update_lru(obj);
+	msm_gem_unpin_locked(obj);
 	msm_gem_unlock(obj);
 }
@@ -436,14 +434,13 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
 	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
 		return -EBUSY;
 
-	pages = get_pages(obj);
+	pages = msm_gem_pin_pages_locked(obj);
 	if (IS_ERR(pages))
 		return PTR_ERR(pages);
 
 	ret = msm_gem_map_vma(vma->aspace, vma, prot, msm_obj->sgt, obj->size);
-	if (!ret)
-		msm_obj->pin_count++;
+	if (ret)
+		msm_gem_unpin_locked(obj);
 
 	return ret;
 }
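
For context, a rough sketch of the two consolidated helpers the hunks above converge on. The pin-side body is taken from the first hunk; msm_gem_unpin_locked() is not shown anywhere in this diff, so its body here is an assumption reconstructed from the open-coded lines the patch removes (pin_count decrement, underflow warning, LRU update), with locking asserts and madvise checks omitted.

/*
 * Sketch only: the unpin helper below is reconstructed from the lines
 * this patch removes; the real function may differ.
 */

/* Pin path: take a pin_count reference, object lock held. */
static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
{
	struct page **p;

	p = get_pages(obj);
	if (!IS_ERR(p)) {
		to_msm_bo(obj)->pin_count++;
		update_lru(obj);
	}

	return p;
}

/* Unpin path: drop the pin_count reference, object lock held. */
void msm_gem_unpin_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_obj->pin_count--;
	GEM_WARN_ON(msm_obj->pin_count < 0);
	update_lru(obj);
}

With these two helpers as the only places that touch pin_count and the LRU, callers such as msm_gem_unpin_pages() and msm_gem_pin_vma_locked() (second and third hunks) no longer open-code the bookkeeping.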