linux/drivers/gpu/drm/msm/msm_gem.c

// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*/
#include <linux/dma-map-ops.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>
#include <drm/drm_prime.h>
#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"
static void update_inactive(struct msm_gem_object *msm_obj);
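/*
 * For VRAM carveout objects (ie. no IOMMU), the "physical" address is the
 * object's page offset within the drm_mm carveout plus the carveout's base
 * physical address.
 */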
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_drm_private *priv = obj->dev->dev_private;
return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
priv->vram.paddr;
}
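/* Objects are backed by shmem pages unless allocated from the VRAM carveout. */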
static bool use_pages(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
return !msm_obj->vram_node;
}
/*
* Cache sync.. this is a bit over-complicated, to fit dma-mapping
* API. Really GPU cache is out of scope here (handled on cmdstream)
* and all we need to do is invalidate newly allocated pages before
* mapping to CPU as uncached/writecombine.
*
* On top of this, we have the added headache that, depending on
* display generation, the display's iommu may be wired up to either
* the toplevel drm device (mdss), or to the mdp sub-node, meaning
* that here we either have dma-direct or iommu ops.
*
* Let this be a cautionary tale of abstraction gone wrong.
*/
static void sync_for_device(struct msm_gem_object *msm_obj)
{
struct device *dev = msm_obj->base.dev->dev;
dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}
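/*
 * Note that dma_map_sgtable()/dma_unmap_sgtable() are used here purely for
 * their cache maintenance side effects, as described above; the actual
 * GPU/display mappings are managed by the driver's own IOMMU code.
 */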
static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
struct device *dev = msm_obj->base.dev->dev;
dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}
/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_drm_private *priv = obj->dev->dev_private;
dma_addr_t paddr;
struct page **p;
int ret, i;
p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
if (!p)
return ERR_PTR(-ENOMEM);
spin_lock(&priv->vram.lock);
ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
spin_unlock(&priv->vram.lock);
if (ret) {
kvfree(p);
return ERR_PTR(ret);
}
paddr = physaddr(obj);
for (i = 0; i < npages; i++) {
p[i] = phys_to_page(paddr);
paddr += PAGE_SIZE;
}
return p;
}
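/*
 * Lazily allocate the backing pages (shmem or VRAM carveout) and the
 * scatter/gather table on first use.  Caller must hold the object lock.
 */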
static struct page **get_pages(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
WARN_ON(!msm_gem_is_locked(obj));
if (!msm_obj->pages) {
struct drm_device *dev = obj->dev;
struct page **p;
int npages = obj->size >> PAGE_SHIFT;
if (use_pages(obj))
p = drm_gem_get_pages(obj);
else
p = get_pages_vram(obj, npages);
if (IS_ERR(p)) {
DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
PTR_ERR(p));
return p;
}
msm_obj->pages = p;
msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
if (IS_ERR(msm_obj->sgt)) {
void *ptr = ERR_CAST(msm_obj->sgt);
DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
msm_obj->sgt = NULL;
return ptr;
}
/* For non-cached buffers, ensure the new pages are clean
* because the display controller, GPU, etc. are not coherent:
*/
if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
sync_for_device(msm_obj);
}
return msm_obj->pages;
}
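/* Return a VRAM carveout allocation to the drm_mm pool and free the page array. */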
static void put_pages_vram(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_drm_private *priv = obj->dev->dev_private;
spin_lock(&priv->vram.lock);
drm_mm_remove_node(msm_obj->vram_node);
spin_unlock(&priv->vram.lock);
kvfree(msm_obj->pages);
}
static void put_pages(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
if (msm_obj->pages) {
if (msm_obj->sgt) {
/* For non-cached buffers, ensure the new
* pages are clean because the display controller,
* GPU, etc. are not coherent:
*/
if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
sync_for_cpu(msm_obj);
sg_free_table(msm_obj->sgt);
kfree(msm_obj->sgt);
}
if (use_pages(obj))
drm_gem_put_pages(obj, msm_obj->pages, true, false);
else
put_pages_vram(obj);
msm_obj->pages = NULL;
}
}
struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct page **p;
msm_gem_lock(obj);
if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
msm_gem_unlock(obj);
return ERR_PTR(-EBUSY);
}
p = get_pages(obj);
msm_gem_unlock(obj);
return p;
}
void msm_gem_put_pages(struct drm_gem_object *obj)
{
/* when we start tracking the pin count, then do something here */
}
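/*
 * Set up vm_flags and page protection for a userspace mmap.  Pages are
 * inserted lazily by the fault handler (msm_gem_fault()) rather than
 * mapped up front.
 */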
int msm_gem_mmap_obj(struct drm_gem_object *obj,
struct vm_area_struct *vma)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_flags |= VM_MIXEDMAP;
if (msm_obj->flags & MSM_BO_WC) {
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
} else if (msm_obj->flags & MSM_BO_UNCACHED) {
vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
} else {
/*
* Shunt off cached objs to shmem file so they have their own
* address_space (so unmap_mapping_range does what we want,
* in particular in the case of mmap'd dmabufs)
*/
vma->vm_pgoff = 0;
vma_set_file(vma, obj->filp);
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
}
return 0;
}
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
int ret;
ret = drm_gem_mmap(filp, vma);
if (ret) {
DBG("mmap failed: %d", ret);
return ret;
}
return msm_gem_mmap_obj(vma->vm_private_data, vma);
}
static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct page **pages;
unsigned long pfn;
pgoff_t pgoff;
int err;
vm_fault_t ret;
/*
* vm_ops.open/drm_gem_mmap_obj and close get and put
* a reference on obj. So, we don't need to hold one here.
*/
err = msm_gem_lock_interruptible(obj);
if (err) {
ret = VM_FAULT_NOPAGE;
goto out;
}
if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
msm_gem_unlock(obj);
return VM_FAULT_SIGBUS;
}
/* make sure we have pages attached now */
pages = get_pages(obj);
if (IS_ERR(pages)) {
ret = vmf_error(PTR_ERR(pages));
goto out_unlock;
}
/* We don't use vmf->pgoff since that has the fake offset: */
pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
pfn = page_to_pfn(pages[pgoff]);
VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
pfn, pfn << PAGE_SHIFT);
ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
msm_gem_unlock(obj);
out:
return ret;
}
/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
int ret;
WARN_ON(!msm_gem_is_locked(obj));
/* Make it mmapable */
ret = drm_gem_create_mmap_offset(obj);
if (ret) {
DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
return 0;
}
return drm_vma_node_offset_addr(&obj->vma_node);
}
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
uint64_t offset;
msm_gem_lock(obj);
offset = mmap_offset(obj);
msm_gem_unlock(obj);
return offset;
}
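/*
 * An object may be mapped into multiple address spaces (e.g. GPU and
 * display); each mapping is tracked as a msm_gem_vma on the object's
 * vmas list, keyed by aspace.
 */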
static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma;
WARN_ON(!msm_gem_is_locked(obj));
vma = kzalloc(sizeof(*vma), GFP_KERNEL);
if (!vma)
return ERR_PTR(-ENOMEM);
vma->aspace = aspace;
list_add_tail(&vma->list, &msm_obj->vmas);
return vma;
}
static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma;
WARN_ON(!msm_gem_is_locked(obj));
list_for_each_entry(vma, &msm_obj->vmas, list) {
if (vma->aspace == aspace)
return vma;
}
return NULL;
}
static void del_vma(struct msm_gem_vma *vma)
{
if (!vma)
return;
list_del(&vma->list);
kfree(vma);
}
/*
 * Unmap and close the object's mapping in each address space.  The vma
 * bookkeeping itself is freed later by put_iova_vmas().  Called with
 * msm_obj locked.
 */
static void
put_iova_spaces(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma;
WARN_ON(!msm_gem_is_locked(obj));
list_for_each_entry(vma, &msm_obj->vmas, list) {
if (vma->aspace) {
msm_gem_purge_vma(vma->aspace, vma);
msm_gem_close_vma(vma->aspace, vma);
}
}
}
/*
 * Free the vma bookkeeping; the mappings should already have been torn
 * down by put_iova_spaces().  Called with msm_obj locked.
 */
static void
put_iova_vmas(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma, *tmp;
WARN_ON(!msm_gem_is_locked(obj));
list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
del_vma(vma);
}
}
static int get_iova_locked(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova,
u64 range_start, u64 range_end)
{
struct msm_gem_vma *vma;
int ret = 0;
WARN_ON(!msm_gem_is_locked(obj));
vma = lookup_vma(obj, aspace);
if (!vma) {
vma = add_vma(obj, aspace);
if (IS_ERR(vma))
return PTR_ERR(vma);
ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT,
range_start, range_end);
if (ret) {
del_vma(vma);
return ret;
}
}
*iova = vma->iova;
return 0;
}
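/*
 * Map the object's pages into the vma previously created for this address
 * space, with IOMMU permissions derived from the BO flags (read-only,
 * privileged).  The object must be locked and WILLNEED.
 */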
static int msm_gem_pin_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma;
struct page **pages;
int prot = IOMMU_READ;
if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
prot |= IOMMU_WRITE;
if (msm_obj->flags & MSM_BO_MAP_PRIV)
prot |= IOMMU_PRIV;
WARN_ON(!msm_gem_is_locked(obj));
if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
return -EBUSY;
vma = lookup_vma(obj, aspace);
if (WARN_ON(!vma))
return -EINVAL;
pages = get_pages(obj);
if (IS_ERR(pages))
return PTR_ERR(pages);
return msm_gem_map_vma(aspace, vma, prot,
msm_obj->sgt, obj->size >> PAGE_SHIFT);
}
static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova,
u64 range_start, u64 range_end)
{
u64 local;
int ret;
WARN_ON(!msm_gem_is_locked(obj));
ret = get_iova_locked(obj, aspace, &local,
range_start, range_end);
if (!ret)
ret = msm_gem_pin_iova(obj, aspace);
if (!ret)
*iova = local;
return ret;
}
/*
* Get the iova and pin it.  Should have a matching put.
* Limits the iova to the specified range (in pages).
*/
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova,
u64 range_start, u64 range_end)
{
int ret;
msm_gem_lock(obj);
ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
msm_gem_unlock(obj);
return ret;
}
int msm_gem_get_and_pin_iova_locked(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova)
{
return get_and_pin_iova_range_locked(obj, aspace, iova, 0, U64_MAX);
}
/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova)
{
return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
}
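/*
 * Typical usage (sketch):
 *
 *	uint64_t iova;
 *	int ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *	if (ret)
 *		return ret;
 *	... hand 'iova' to the GPU/display ...
 *	msm_gem_unpin_iova(obj, aspace);
 */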
/*
* Get an iova but don't pin it. Doesn't need a put because iovas are currently
* valid for the life of the object
*/
int msm_gem_get_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova)
{
int ret;
msm_gem_lock(obj);
ret = get_iova_locked(obj, aspace, iova, 0, U64_MAX);
msm_gem_unlock(obj);
return ret;
}
/* get iova without taking a reference, used in places where you have
* already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
*/
uint64_t msm_gem_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace)
{
struct msm_gem_vma *vma;
msm_gem_lock(obj);
vma = lookup_vma(obj, aspace);
msm_gem_unlock(obj);
WARN_ON(!vma);
return vma ? vma->iova : 0;
}
/*
* Locked variant of msm_gem_unpin_iova()
*/
void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace)
{
struct msm_gem_vma *vma;
WARN_ON(!msm_gem_is_locked(obj));
vma = lookup_vma(obj, aspace);
if (!WARN_ON(!vma))
msm_gem_unmap_vma(aspace, vma);
}
/*
* Unpin an iova by updating the reference counts. The memory isn't actually
* purged until something else (shrinker, mm_notifier, destroy, etc) decides
* to get rid of it
*/
void msm_gem_unpin_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace)
{
msm_gem_lock(obj);
msm_gem_unpin_iova_locked(obj, aspace);
msm_gem_unlock(obj);
}
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
args->pitch = align_pitch(args->width, args->bpp);
args->size = PAGE_ALIGN(args->pitch * args->height);
return msm_gem_new_handle(dev, file, args->size,
MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
uint32_t handle, uint64_t *offset)
{
struct drm_gem_object *obj;
int ret = 0;
/* GEM does all our handle to object mapping */
obj = drm_gem_object_lookup(file, handle);
if (obj == NULL) {
ret = -ENOENT;
goto fail;
}
*offset = msm_gem_mmap_offset(obj);
drm_gem_object_put(obj);
fail:
return ret;
}
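/*
 * Return (creating on first use) a writecombine kernel mapping of the
 * object.  Imported dma-bufs are rejected, since their backing pages
 * belong to the exporter.  Caller must hold the object lock.
 */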
static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
int ret = 0;
WARN_ON(!msm_gem_is_locked(obj));
if (obj->import_attach)
return ERR_PTR(-ENODEV);
if (WARN_ON(msm_obj->madv > madv)) {
DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
msm_obj->madv, madv);
return ERR_PTR(-EBUSY);
}
/* increment vmap_count *before* vmap() call, so shrinker can
* check vmap_count (is_vunmapable()) outside of msm_obj lock.
* This guarantees that we won't try to msm_gem_vunmap() this
* same object from within the vmap() call (while we already
* hold msm_obj lock)
*/
msm_obj->vmap_count++;
if (!msm_obj->vaddr) {
struct page **pages = get_pages(obj);
if (IS_ERR(pages)) {
ret = PTR_ERR(pages);
goto fail;
}
msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
VM_MAP, pgprot_writecombine(PAGE_KERNEL));
if (msm_obj->vaddr == NULL) {
ret = -ENOMEM;
goto fail;
}
}
return msm_obj->vaddr;
fail:
msm_obj->vmap_count--;
return ERR_PTR(ret);
}
void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
return get_vaddr(obj, MSM_MADV_WILLNEED);
}
void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
void *ret;
msm_gem_lock(obj);
ret = msm_gem_get_vaddr_locked(obj);
msm_gem_unlock(obj);
return ret;
}
/*
* Don't use this! It is for the very special case of dumping
* submits from GPU hangs or faults, where the bo may already
* be MSM_MADV_DONTNEED, but we know the buffer is still on the
* active list.
*/
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
return get_vaddr(obj, __MSM_MADV_PURGED);
}
void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
WARN_ON(!msm_gem_is_locked(obj));
WARN_ON(msm_obj->vmap_count < 1);
msm_obj->vmap_count--;
}
void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
msm_gem_lock(obj);
msm_gem_put_vaddr_locked(obj);
msm_gem_unlock(obj);
}
/* Update madvise status; returns true if not purged, else
* false or -errno.
*/
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
msm_gem_lock(obj);
if (msm_obj->madv != __MSM_MADV_PURGED)
msm_obj->madv = madv;
madv = msm_obj->madv;
/* If the obj is inactive, we might need to move it
* between inactive lists
*/
if (msm_obj->active_count == 0)
update_inactive(msm_obj);
msm_gem_unlock(obj);
return (madv != __MSM_MADV_PURGED);
}
void msm_gem_purge(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
WARN_ON(!is_purgeable(msm_obj));
WARN_ON(obj->import_attach);
put_iova_spaces(obj);
msm_gem_vunmap(obj);
put_pages(obj);
put_iova_vmas(obj);
msm_obj->madv = __MSM_MADV_PURGED;
drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
drm_gem_free_mmap_offset(obj);
/* Our goal here is to return as much of the memory as
* is possible back to the system as we are called from OOM.
* To do this we must instruct the shmfs to drop all of its
* backing pages, *now*.
*/
shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
0, (loff_t)-1);
}
void msm_gem_vunmap(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
WARN_ON(!msm_gem_is_locked(obj));
if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
return;
vunmap(msm_obj->vaddr);
msm_obj->vaddr = NULL;
}
/*
 * Wait on fences from other contexts attached to the object's reservation,
 * to maintain cross-ring ordering.  Must be called before _move_to_active().
 */
int msm_gem_sync_object(struct drm_gem_object *obj,
struct msm_fence_context *fctx, bool exclusive)
{
struct dma_resv_list *fobj;
struct dma_fence *fence;
int i, ret;
fobj = dma_resv_get_list(obj->resv);
if (!fobj || (fobj->shared_count == 0)) {
fence = dma_resv_get_excl(obj->resv);
/* don't need to wait on our own fences, since ring is fifo */
if (fence && (fence->context != fctx->context)) {
ret = dma_fence_wait(fence, true);
if (ret)
return ret;
}
}
if (!exclusive || !fobj)
return 0;
for (i = 0; i < fobj->shared_count; i++) {
fence = rcu_dereference_protected(fobj->shared[i],
dma_resv_held(obj->resv));
if (fence->context != fctx->context) {
ret = dma_fence_wait(fence, true);
if (ret)
return ret;
}
}
return 0;
}
void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_drm_private *priv = obj->dev->dev_private;
might_sleep();
WARN_ON(!msm_gem_is_locked(obj));
WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
if (msm_obj->active_count++ == 0) {
mutex_lock(&priv->mm_lock);
list_del_init(&msm_obj->mm_list);
list_add_tail(&msm_obj->mm_list, &gpu->active_list);
mutex_unlock(&priv->mm_lock);
}
}
void msm_gem_active_put(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
might_sleep();
WARN_ON(!msm_gem_is_locked(obj));
if (--msm_obj->active_count == 0) {
update_inactive(msm_obj);
}
}
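/*
 * Move an idle object onto the inactive list that matches its madvise
 * state (willneed vs dontneed).
 */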
static void update_inactive(struct msm_gem_object *msm_obj)
{
struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
mutex_lock(&priv->mm_lock);
WARN_ON(msm_obj->active_count != 0);
list_del_init(&msm_obj->mm_list);
if (msm_obj->madv == MSM_MADV_WILLNEED)
list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
else
list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
mutex_unlock(&priv->mm_lock);
}
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
bool write = !!(op & MSM_PREP_WRITE);
unsigned long remain =
op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
long ret;
ret = dma_resv_wait_timeout_rcu(obj->resv, write,
true, remain);
if (ret == 0)
return remain == 0 ? -EBUSY : -ETIMEDOUT;
else if (ret < 0)
return ret;
/* TODO cache maintenance */
return 0;
}
int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
/* TODO cache maintenance */
return 0;
}
#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
struct seq_file *m)
{
if (!dma_fence_is_signaled(fence))
seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
fence->ops->get_driver_name(fence),
fence->ops->get_timeline_name(fence),
fence->seqno);
}
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct dma_resv *robj = obj->resv;
struct dma_resv_list *fobj;
struct dma_fence *fence;
struct msm_gem_vma *vma;
uint64_t off = drm_vma_node_start(&obj->vma_node);
const char *madv;
msm_gem_lock(obj);
switch (msm_obj->madv) {
case __MSM_MADV_PURGED:
madv = " purged";
break;
case MSM_MADV_DONTNEED:
madv = " purgeable";
break;
case MSM_MADV_WILLNEED:
default:
madv = "";
break;
}
seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
obj->name, kref_read(&obj->refcount),
off, msm_obj->vaddr);
seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
if (!list_empty(&msm_obj->vmas)) {
seq_puts(m, " vmas:");
list_for_each_entry(vma, &msm_obj->vmas, list) {
const char *name, *comm;
if (vma->aspace) {
struct msm_gem_address_space *aspace = vma->aspace;
struct task_struct *task =
get_pid_task(aspace->pid, PIDTYPE_PID);
if (task) {
comm = kstrdup(task->comm, GFP_KERNEL);
} else {
comm = NULL;
}
name = aspace->name;
} else {
name = comm = NULL;
}
seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
name, comm ? ":" : "", comm ? comm : "",
vma->aspace, vma->iova,
vma->mapped ? "mapped" : "unmapped",
vma->inuse);
kfree(comm);
}
seq_puts(m, "\n");
}
rcu_read_lock();
fobj = rcu_dereference(robj->fence);
if (fobj) {
unsigned int i, shared_count = fobj->shared_count;
for (i = 0; i < shared_count; i++) {
fence = rcu_dereference(fobj->shared[i]);
describe_fence(fence, "Shared", m);
}
}
fence = rcu_dereference(robj->fence_excl);
if (fence)
describe_fence(fence, "Exclusive", m);
rcu_read_unlock();
msm_gem_unlock(obj);
}
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
struct msm_gem_object *msm_obj;
int count = 0;
size_t size = 0;
seq_puts(m, " flags id ref offset kaddr size madv name\n");
list_for_each_entry(msm_obj, list, mm_list) {
struct drm_gem_object *obj = &msm_obj->base;
seq_puts(m, " ");
msm_gem_describe(obj, m);
count++;
size += obj->size;
}
seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif
/* don't call directly! Use drm_gem_object_put_locked() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct drm_device *dev = obj->dev;
struct msm_drm_private *priv = dev->dev_private;
mutex_lock(&priv->mm_lock);
list_del(&msm_obj->mm_list);
mutex_unlock(&priv->mm_lock);
msm_gem_lock(obj);
/* object should not be on active list: */
WARN_ON(is_active(msm_obj));
put_iova_spaces(obj);
if (obj->import_attach) {
WARN_ON(msm_obj->vaddr);
		/* Don't drop the pages for an imported dmabuf; they are
		 * not ours, so just free the array we allocated:
		 */
		kvfree(msm_obj->pages);
put_iova_vmas(obj);
/* dma_buf_detach() grabs resv lock, so we need to unlock
* prior to drm_prime_gem_destroy
*/
msm_gem_unlock(obj);
drm_prime_gem_destroy(obj, msm_obj->sgt);
} else {
msm_gem_vunmap(obj);
put_pages(obj);
put_iova_vmas(obj);
msm_gem_unlock(obj);
}
drm_gem_object_release(obj);
kfree(msm_obj);
}
/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
uint32_t size, uint32_t flags, uint32_t *handle,
char *name)
{
struct drm_gem_object *obj;
int ret;
obj = msm_gem_new(dev, size, flags);
if (IS_ERR(obj))
return PTR_ERR(obj);
if (name)
msm_gem_object_set_name(obj, "%s", name);
ret = drm_gem_handle_create(file, obj, handle);
/* drop reference from allocate - handle holds it now */
drm_gem_object_put(obj);
return ret;
}
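
/*
 * A sketch of how this is typically driven from userspace via the
 * GEM_NEW ioctl (the args struct is from the msm uapi; treat this as an
 * illustration, not a copy of the ioctl handler):
 *
 *	struct drm_msm_gem_new *args = data;
 *
 *	return msm_gem_new_handle(dev, file, args->size,
 *			args->flags, &args->handle, NULL);
 */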
static const struct vm_operations_struct vm_ops = {
.fault = msm_gem_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
};
static const struct drm_gem_object_funcs msm_gem_object_funcs = {
.free = msm_gem_free_object,
.pin = msm_gem_prime_pin,
.unpin = msm_gem_prime_unpin,
.get_sg_table = msm_gem_prime_get_sg_table,
.vmap = msm_gem_prime_vmap,
.vunmap = msm_gem_prime_vunmap,
.vm_ops = &vm_ops,
};
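
/*
 * Common initialization shared by the shmem, VRAM, and dma-buf import
 * paths: validate the cache flags and allocate/initialize the
 * msm_gem_object itself, without touching backing storage.
 */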
static int msm_gem_new_impl(struct drm_device *dev,
uint32_t size, uint32_t flags,
struct drm_gem_object **obj)
{
struct msm_gem_object *msm_obj;
switch (flags & MSM_BO_CACHE_MASK) {
case MSM_BO_UNCACHED:
case MSM_BO_CACHED:
case MSM_BO_WC:
break;
default:
DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
(flags & MSM_BO_CACHE_MASK));
return -EINVAL;
}
msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
if (!msm_obj)
return -ENOMEM;
msm_obj->flags = flags;
msm_obj->madv = MSM_MADV_WILLNEED;
INIT_LIST_HEAD(&msm_obj->submit_entry);
INIT_LIST_HEAD(&msm_obj->vmas);
*obj = &msm_obj->base;
(*obj)->funcs = &msm_gem_object_funcs;
return 0;
}
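
/*
 * Core constructor: buffers are normally shmem-backed, but fall back to
 * a contiguous VRAM carveout allocation when there is no MMU, or for
 * stolen/scanout buffers when a carveout is configured.
 */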
static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
struct drm_gem_object *obj = NULL;
bool use_vram = false;
int ret;
size = PAGE_ALIGN(size);
if (!msm_use_mmu(dev))
use_vram = true;
else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
use_vram = true;
if (WARN_ON(use_vram && !priv->vram.size))
return ERR_PTR(-EINVAL);
	/* Disallow zero-sized objects, as they make the underlying
	 * infrastructure grumpy
	 */
if (size == 0)
return ERR_PTR(-EINVAL);
ret = msm_gem_new_impl(dev, size, flags, &obj);
if (ret)
goto fail;
msm_obj = to_msm_bo(obj);
if (use_vram) {
struct msm_gem_vma *vma;
struct page **pages;
drm_gem_private_object_init(dev, obj, size);
msm_gem_lock(obj);
vma = add_vma(obj, NULL);
msm_gem_unlock(obj);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto fail;
}
to_msm_bo(obj)->vram_node = &vma->node;
msm_gem_lock(obj);
pages = get_pages(obj);
msm_gem_unlock(obj);
if (IS_ERR(pages)) {
ret = PTR_ERR(pages);
goto fail;
}
vma->iova = physaddr(obj);
} else {
ret = drm_gem_object_init(dev, obj, size);
if (ret)
goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See the comments above new_inode() for why this is required
		 * _and_ expected if you're going to pin these pages.
		 */
mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
}
mutex_lock(&priv->mm_lock);
/* Initially obj is idle, obj->madv == WILLNEED: */
list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
mutex_unlock(&priv->mm_lock);
return obj;
fail:
if (struct_mutex_locked) {
drm_gem_object_put_locked(obj);
} else {
drm_gem_object_put(obj);
}
return ERR_PTR(ret);
}
struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
uint32_t size, uint32_t flags)
{
return _msm_gem_new(dev, size, flags, true);
}
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
uint32_t size, uint32_t flags)
{
return _msm_gem_new(dev, size, flags, false);
}
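
/*
 * Wrap an imported dma-buf in a GEM object. The pages belong to the
 * exporter; we only build a page array view of @sgt for our own use,
 * which is why the free path treats imported objects specially.
 */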
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
struct dma_buf *dmabuf, struct sg_table *sgt)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
struct drm_gem_object *obj;
uint32_t size;
int ret, npages;
/* if we don't have IOMMU, don't bother pretending we can import: */
if (!msm_use_mmu(dev)) {
DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
return ERR_PTR(-EINVAL);
}
size = PAGE_ALIGN(dmabuf->size);
ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
if (ret)
goto fail;
drm_gem_private_object_init(dev, obj, size);
npages = size / PAGE_SIZE;
msm_obj = to_msm_bo(obj);
msm_gem_lock(obj);
msm_obj->sgt = sgt;
msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
if (!msm_obj->pages) {
msm_gem_unlock(obj);
ret = -ENOMEM;
goto fail;
}
ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
if (ret) {
msm_gem_unlock(obj);
goto fail;
}
msm_gem_unlock(obj);
mutex_lock(&priv->mm_lock);
list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
mutex_unlock(&priv->mm_lock);
return obj;
fail:
drm_gem_object_put(obj);
return ERR_PTR(ret);
}
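
/*
 * Convenience constructor for kernel-internal buffers (ringbuffer,
 * microcode, etc.): allocate the object, optionally pin it into
 * @aspace (returning the GPU address via @iova), and map it into the
 * kernel, returning the vaddr.
 */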
static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
uint32_t flags, struct msm_gem_address_space *aspace,
struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
void *vaddr;
struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
int ret;
if (IS_ERR(obj))
return ERR_CAST(obj);
if (iova) {
ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
if (ret)
goto err;
}
vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		/* only unpin if we actually pinned an iova above: */
		if (iova)
			msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}
if (bo)
*bo = obj;
return vaddr;
err:
if (locked)
drm_gem_object_put_locked(obj);
else
drm_gem_object_put(obj);
return ERR_PTR(ret);
}
void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
uint32_t flags, struct msm_gem_address_space *aspace,
struct drm_gem_object **bo, uint64_t *iova)
{
return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}
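
/*
 * Minimal usage sketch for msm_gem_kernel_new()/msm_gem_kernel_put()
 * ("gpu" and the surrounding error handling are illustrative
 * assumptions, not code from this file):
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *ptr;
 *
 *	ptr = msm_gem_kernel_new(dev, size, MSM_BO_WC,
 *			gpu->aspace, &bo, &iova);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	... use ptr / iova ...
 *	msm_gem_kernel_put(bo, gpu->aspace, false);
 */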
void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
uint32_t flags, struct msm_gem_address_space *aspace,
struct drm_gem_object **bo, uint64_t *iova)
{
return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}
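
/*
 * Undo msm_gem_kernel_new(): drop the kernel mapping, unpin the iova,
 * and drop the object reference.
 */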
void msm_gem_kernel_put(struct drm_gem_object *bo,
struct msm_gem_address_space *aspace, bool locked)
{
if (IS_ERR_OR_NULL(bo))
return;
msm_gem_put_vaddr(bo);
msm_gem_unpin_iova(bo, aspace);
if (locked)
drm_gem_object_put_locked(bo);
else
drm_gem_object_put(bo);
}
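
/*
 * Set a short debug name for the object (shown in debugfs output); the
 * name is silently truncated to fit msm_obj->name.
 */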
void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
struct msm_gem_object *msm_obj = to_msm_bo(bo);
va_list ap;
if (!fmt)
return;
va_start(ap, fmt);
vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
va_end(ap);
}