Merge tag 'drm-msm-fixes-2022-06-28' into msm-next-staging
Merge v5.19 fixes to avoid merge conflicts

Signed-off-by: Rob Clark <robdclark@chromium.org>
commit 1796c0255b
@@ -498,10 +498,15 @@ int adreno_hw_init(struct msm_gpu *gpu)

        ring->cur = ring->start;
        ring->next = ring->start;

        /* reset completed fence seqno: */
        ring->memptrs->fence = ring->fctx->completed_fence;
        ring->memptrs->rptr = 0;

        /* Detect and clean up an impossible fence, ie. if GPU managed
         * to scribble something invalid, we don't want that to confuse
         * us into mistakingly believing that submits have completed.
         */
        if (fence_before(ring->fctx->last_fence, ring->memptrs->fence)) {
            ring->memptrs->fence = ring->fctx->last_fence;
        }
    }

    return 0;
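For reference, the fence_before()/fence_after() comparison used above is wraparound-safe on 32-bit seqnos; a minimal sketch along the lines of the helpers in msm_gpu.h:

/* Signed difference keeps ordering meaningful as long as the two
 * seqnos are within 2^31 of each other, so a wrapped counter still
 * compares correctly.
 */
static inline bool fence_after(uint32_t a, uint32_t b)
{
    return (int32_t)(a - b) > 0;
}

static inline bool fence_before(uint32_t a, uint32_t b)
{
    return fence_after(b, a);
}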
@@ -1057,7 +1062,8 @@ void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)

    for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++)
        release_firmware(adreno_gpu->fw[i]);

    pm_runtime_disable(&priv->gpu_pdev->dev);
    if (pm_runtime_enabled(&priv->gpu_pdev->dev))
        pm_runtime_disable(&priv->gpu_pdev->dev);

    msm_gpu_cleanup(&adreno_gpu->base);
}
@@ -1251,12 +1251,13 @@ static void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,

    DPU_ATRACE_BEGIN("encoder_vblank_callback");
    dpu_enc = to_dpu_encoder_virt(drm_enc);

    atomic_inc(&phy_enc->vsync_cnt);

    spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
    if (dpu_enc->crtc)
        dpu_crtc_vblank_callback(dpu_enc->crtc);
    spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);

    atomic_inc(&phy_enc->vsync_cnt);
    DPU_ATRACE_END("encoder_vblank_callback");
}
@@ -252,11 +252,6 @@ static int dpu_encoder_phys_wb_atomic_check(

    DPU_DEBUG("[atomic_check:%d, \"%s\",%d,%d]\n",
            phys_enc->wb_idx, mode->name, mode->hdisplay, mode->vdisplay);

    if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
        return 0;

    fb = conn_state->writeback_job->fb;

    if (!conn_state || !conn_state->connector) {
        DPU_ERROR("invalid connector state\n");
        return -EINVAL;

@@ -267,6 +262,11 @@ static int dpu_encoder_phys_wb_atomic_check(
        return -EINVAL;
    }

    if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
        return 0;

    fb = conn_state->writeback_job->fb;

    DPU_DEBUG("[fb_id:%u][fb:%u,%u]\n", fb->base.id,
            fb->width, fb->height);
@@ -11,7 +11,14 @@ static int dpu_wb_conn_get_modes(struct drm_connector *connector)

    struct msm_drm_private *priv = dev->dev_private;
    struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);

    return drm_add_modes_noedid(connector, dpu_kms->catalog->caps->max_linewidth,
    /*
     * We should ideally be limiting the modes only to the maxlinewidth but
     * on some chipsets this will allow even 4k modes to be added which will
     * fail the per SSPP bandwidth checks. So, till we have dual-SSPP support
     * and source split support added lets limit the modes based on max_mixer_width
     * as 4K modes can then be supported.
     */
    return drm_add_modes_noedid(connector, dpu_kms->catalog->caps->max_mixer_width,
            dev->mode_config.max_height);
}
@@ -216,6 +216,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,

        encoder = mdp4_lcdc_encoder_init(dev, panel_node);
        if (IS_ERR(encoder)) {
            DRM_DEV_ERROR(dev->dev, "failed to construct LCDC encoder\n");
            of_node_put(panel_node);
            return PTR_ERR(encoder);
        }

@@ -225,6 +226,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,

        connector = mdp4_lvds_connector_init(dev, panel_node, encoder);
        if (IS_ERR(connector)) {
            DRM_DEV_ERROR(dev->dev, "failed to initialize LVDS connector\n");
            of_node_put(panel_node);
            return PTR_ERR(connector);
        }
@@ -1534,6 +1534,8 @@ end:
    return ret;
}

static int dp_ctrl_on_stream_phy_test_report(struct dp_ctrl *dp_ctrl);

static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
{
    int ret = 0;

@@ -1557,7 +1559,7 @@ static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)

    ret = dp_ctrl_on_link(&ctrl->dp_ctrl);
    if (!ret)
        ret = dp_ctrl_on_stream(&ctrl->dp_ctrl);
        ret = dp_ctrl_on_stream_phy_test_report(&ctrl->dp_ctrl);
    else
        DRM_ERROR("failed to enable DP link controller\n");

@@ -1813,7 +1815,27 @@ static int dp_ctrl_link_retrain(struct dp_ctrl_private *ctrl)
    return dp_ctrl_setup_main_link(ctrl, &training_step);
}

int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
static int dp_ctrl_on_stream_phy_test_report(struct dp_ctrl *dp_ctrl)
{
    int ret;
    struct dp_ctrl_private *ctrl;

    ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);

    ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;

    ret = dp_ctrl_enable_stream_clocks(ctrl);
    if (ret) {
        DRM_ERROR("Failed to start pixel clocks. ret=%d\n", ret);
        return ret;
    }

    dp_ctrl_send_phy_test_pattern(ctrl);

    return 0;
}

int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train)
{
    int ret = 0;
    bool mainlink_ready = false;

@@ -1849,12 +1871,7 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
        goto end;
    }

    if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
        dp_ctrl_send_phy_test_pattern(ctrl);
        return 0;
    }

    if (!dp_ctrl_channel_eq_ok(ctrl))
    if (force_link_train || !dp_ctrl_channel_eq_ok(ctrl))
        dp_ctrl_link_retrain(ctrl);

    /* stop txing train pattern to end link training */
@@ -21,7 +21,7 @@ struct dp_ctrl {
};

int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl);
int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl);
int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train);
int dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl);
int dp_ctrl_off_link(struct dp_ctrl *dp_ctrl);
int dp_ctrl_off(struct dp_ctrl *dp_ctrl);
@@ -309,12 +309,15 @@ static void dp_display_unbind(struct device *dev, struct device *master,

    struct msm_drm_private *priv = dev_get_drvdata(master);

    /* disable all HPD interrupts */
    dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false);
    if (dp->core_initialized)
        dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false);

    kthread_stop(dp->ev_tsk);

    dp_power_client_deinit(dp->power);
    dp_aux_unregister(dp->aux);
    dp->drm_dev = NULL;
    dp->aux->drm_dev = NULL;
    priv->dp[dp->id] = NULL;
}

@@ -872,7 +875,7 @@ static int dp_display_enable(struct dp_display_private *dp, u32 data)
        return 0;
    }

    rc = dp_ctrl_on_stream(dp->ctrl);
    rc = dp_ctrl_on_stream(dp->ctrl, data);
    if (!rc)
        dp_display->power_on = true;

@@ -1659,6 +1662,7 @@ void dp_bridge_enable(struct drm_bridge *drm_bridge)
    int rc = 0;
    struct dp_display_private *dp_display;
    u32 state;
    bool force_link_train = false;

    dp_display = container_of(dp, struct dp_display_private, dp_display);
    if (!dp_display->dp_mode.drm_mode.clock) {

@@ -1693,10 +1697,12 @@ void dp_bridge_enable(struct drm_bridge *drm_bridge)

    state = dp_display->hpd_state;

    if (state == ST_DISPLAY_OFF)
    if (state == ST_DISPLAY_OFF) {
        dp_display_host_phy_init(dp_display);
        force_link_train = true;
    }

    dp_display_enable(dp_display, 0);
    dp_display_enable(dp_display, force_link_train);

    rc = dp_display_post_enable(dp);
    if (rc) {

@@ -1705,10 +1711,6 @@ void dp_bridge_enable(struct drm_bridge *drm_bridge)
        dp_display_unprepare(dp);
    }

    /* manual kick off plug event to train link */
    if (state == ST_DISPLAY_OFF)
        dp_add_event(dp_display, EV_IRQ_HPD_INT, 0, 0);

    /* completed connection */
    dp_display->hpd_state = ST_CONNECTED;
@@ -964,7 +964,7 @@ static const struct drm_driver msm_driver = {
    .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
    .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
    .gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
    .gem_prime_mmap = drm_gem_prime_mmap,
    .gem_prime_mmap = msm_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
    .debugfs_init = msm_debugfs_init,
#endif
@@ -246,6 +246,7 @@ unsigned long msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_t
void msm_gem_shrinker_init(struct drm_device *dev);
void msm_gem_shrinker_cleanup(struct drm_device *dev);

int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map);
void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
@@ -46,12 +46,14 @@ bool msm_fence_completed(struct msm_fence_context *fctx, uint32_t fence)
        (int32_t)(*fctx->fenceptr - fence) >= 0;
}

/* called from workqueue */
/* called from irq handler and workqueue (in recover path) */
void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
{
    spin_lock(&fctx->spinlock);
    unsigned long flags;

    spin_lock_irqsave(&fctx->spinlock, flags);
    fctx->completed_fence = max(fence, fctx->completed_fence);
    spin_unlock(&fctx->spinlock);
    spin_unlock_irqrestore(&fctx->spinlock, flags);
}

struct msm_fence {
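The locking change above matters because msm_update_fence() is now reachable from the GPU IRQ handler as well as from process context (see the msm_gpu.c hunks below); a plain spin_lock() taken in process context could deadlock if the IRQ handler interrupts the lock holder on the same CPU. A minimal generic sketch of the irqsave pattern, not taken from the msm tree:

#include <linux/spinlock.h>
#include <linux/minmax.h>

static DEFINE_SPINLOCK(example_lock);
static u32 example_completed;

/* Safe from both process and hard-IRQ context: local interrupts are
 * disabled while the lock is held, so the IRQ path cannot spin on a
 * lock already held by the interrupted task on this CPU.
 */
static void example_update(u32 seqno)
{
    unsigned long flags;

    spin_lock_irqsave(&example_lock, flags);
    example_completed = max(seqno, example_completed);
    spin_unlock_irqrestore(&example_lock, flags);
}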
@@ -439,14 +439,12 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
    return ret;
}

void msm_gem_unpin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
void msm_gem_unpin_locked(struct drm_gem_object *obj)
{
    struct msm_gem_object *msm_obj = to_msm_bo(obj);

    GEM_WARN_ON(!msm_gem_is_locked(obj));

    msm_gem_unpin_vma(vma);

    msm_obj->pin_count--;
    GEM_WARN_ON(msm_obj->pin_count < 0);

@@ -586,7 +584,8 @@ void msm_gem_unpin_iova(struct drm_gem_object *obj,
    msm_gem_lock(obj);
    vma = lookup_vma(obj, aspace);
    if (!GEM_WARN_ON(!vma)) {
        msm_gem_unpin_vma_locked(obj, vma);
        msm_gem_unpin_vma(vma);
        msm_gem_unpin_locked(obj);
    }
    msm_gem_unlock(obj);
}
@@ -145,7 +145,7 @@ struct msm_gem_object {

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma);
void msm_gem_unpin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma);
void msm_gem_unpin_locked(struct drm_gem_object *obj);
struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
        struct msm_gem_address_space *aspace);
int msm_gem_get_iova(struct drm_gem_object *obj,

@@ -377,10 +377,11 @@ struct msm_gem_submit {
    } *cmd; /* array of size nr_cmds */
    struct {
/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
#define BO_VALID 0x8000 /* is current addr in cmdstream correct/valid? */
#define BO_LOCKED 0x4000 /* obj lock is held */
#define BO_ACTIVE 0x2000 /* active refcnt is held */
#define BO_PINNED 0x1000 /* obj is pinned and on active list */
#define BO_VALID 0x8000 /* is current addr in cmdstream correct/valid? */
#define BO_LOCKED 0x4000 /* obj lock is held */
#define BO_ACTIVE 0x2000 /* active refcnt is held */
#define BO_OBJ_PINNED 0x1000 /* obj (pages) is pinned and on active list */
#define BO_VMA_PINNED 0x0800 /* vma (virtual address) is pinned */
        uint32_t flags;
        union {
            struct msm_gem_object *obj;
@@ -11,6 +11,21 @@

#include "msm_drv.h"
#include "msm_gem.h"

int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
    int ret;

    /* Ensure the mmap offset is initialized. We lazily initialize it,
     * so if it has not been first mmap'd directly as a GEM object, the
     * mmap offset will not be already initialized.
     */
    ret = drm_gem_create_mmap_offset(obj);
    if (ret)
        return ret;

    return drm_gem_prime_mmap(obj, vma);
}

struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
    struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -232,8 +232,11 @@ static void submit_cleanup_bo(struct msm_gem_submit *submit, int i,
     */
    submit->bos[i].flags &= ~cleanup_flags;

    if (flags & BO_PINNED)
        msm_gem_unpin_vma_locked(obj, submit->bos[i].vma);
    if (flags & BO_VMA_PINNED)
        msm_gem_unpin_vma(submit->bos[i].vma);

    if (flags & BO_OBJ_PINNED)
        msm_gem_unpin_locked(obj);

    if (flags & BO_ACTIVE)
        msm_gem_active_put(obj);

@@ -244,7 +247,9 @@ static void submit_cleanup_bo(struct msm_gem_submit *submit, int i,

static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
{
    submit_cleanup_bo(submit, i, BO_PINNED | BO_ACTIVE | BO_LOCKED);
    unsigned cleanup_flags = BO_VMA_PINNED | BO_OBJ_PINNED |
            BO_ACTIVE | BO_LOCKED;
    submit_cleanup_bo(submit, i, cleanup_flags);

    if (!(submit->bos[i].flags & BO_VALID))
        submit->bos[i].iova = 0;

@@ -375,7 +380,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
        if (ret)
            break;

        submit->bos[i].flags |= BO_PINNED;
        submit->bos[i].flags |= BO_OBJ_PINNED | BO_VMA_PINNED;
        submit->bos[i].vma = vma;

        if (vma->iova == submit->bos[i].iova) {

@@ -511,7 +516,7 @@ static void submit_cleanup(struct msm_gem_submit *submit, bool error)
    unsigned i;

    if (error)
        cleanup_flags |= BO_PINNED | BO_ACTIVE;
        cleanup_flags |= BO_VMA_PINNED | BO_OBJ_PINNED | BO_ACTIVE;

    for (i = 0; i < submit->nr_bos; i++) {
        struct msm_gem_object *msm_obj = submit->bos[i].obj;

@@ -529,7 +534,8 @@ void msm_submit_retire(struct msm_gem_submit *submit)
        struct drm_gem_object *obj = &submit->bos[i].obj->base;

        msm_gem_lock(obj);
        submit_cleanup_bo(submit, i, BO_PINNED | BO_ACTIVE);
        /* Note, VMA already fence-unpinned before submit: */
        submit_cleanup_bo(submit, i, BO_OBJ_PINNED | BO_ACTIVE);
        msm_gem_unlock(obj);
        drm_gem_object_put(obj);
    }

@@ -922,7 +928,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
                INT_MAX, GFP_KERNEL);
    }
    if (submit->fence_id < 0) {
        ret = submit->fence_id = 0;
        ret = submit->fence_id;
        submit->fence_id = 0;
    }
@@ -62,8 +62,7 @@ void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
    unsigned size = vma->node.size;

    /* Print a message if we try to purge a vma in use */
    if (GEM_WARN_ON(msm_gem_vma_inuse(vma)))
        return;
    GEM_WARN_ON(msm_gem_vma_inuse(vma));

    /* Don't do anything if the memory isn't mapped */
    if (!vma->mapped)

@@ -128,8 +127,7 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
void msm_gem_close_vma(struct msm_gem_address_space *aspace,
        struct msm_gem_vma *vma)
{
    if (GEM_WARN_ON(msm_gem_vma_inuse(vma) || vma->mapped))
        return;
    GEM_WARN_ON(msm_gem_vma_inuse(vma) || vma->mapped);

    spin_lock(&aspace->lock);
    if (vma->iova)
@@ -164,24 +164,6 @@ int msm_gpu_hw_init(struct msm_gpu *gpu)
    return ret;
}

static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
        uint32_t fence)
{
    struct msm_gem_submit *submit;
    unsigned long flags;

    spin_lock_irqsave(&ring->submit_lock, flags);
    list_for_each_entry(submit, &ring->submits, node) {
        if (fence_after(submit->seqno, fence))
            break;

        msm_update_fence(submit->ring->fctx,
            submit->hw_fence->seqno);
        dma_fence_signal(submit->hw_fence);
    }
    spin_unlock_irqrestore(&ring->submit_lock, flags);
}

#ifdef CONFIG_DEV_COREDUMP
static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
        size_t count, void *data, size_t datalen)

@@ -436,9 +418,9 @@ static void recover_worker(struct kthread_work *work)
         * one more to clear the faulting submit
         */
        if (ring == cur_ring)
            fence++;
            ring->memptrs->fence = ++fence;

        update_fences(gpu, ring, fence);
        msm_update_fence(ring->fctx, fence);
    }

    if (msm_gpu_active(gpu)) {

@@ -672,7 +654,6 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
    msm_submit_retire(submit);

    pm_runtime_mark_last_busy(&gpu->pdev->dev);
    pm_runtime_put_autosuspend(&gpu->pdev->dev);

    spin_lock_irqsave(&ring->submit_lock, flags);
    list_del(&submit->node);

@@ -686,6 +667,8 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
    msm_devfreq_idle(gpu);
    mutex_unlock(&gpu->active_lock);

    pm_runtime_put_autosuspend(&gpu->pdev->dev);

    msm_gem_submit_put(submit);
}

@@ -735,7 +718,7 @@ void msm_gpu_retire(struct msm_gpu *gpu)
    int i;

    for (i = 0; i < gpu->nr_rings; i++)
        update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence);
        msm_update_fence(gpu->rb[i]->fctx, gpu->rb[i]->memptrs->fence);

    kthread_queue_work(gpu->worker, &gpu->retire_work);
    update_sw_cntrs(gpu);
@@ -58,7 +58,7 @@ static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
    u64 addr = iova;
    unsigned int i;

    for_each_sg(sgt->sgl, sg, sgt->nents, i) {
    for_each_sgtable_sg(sgt, sg, i) {
        size_t size = sg->length;
        phys_addr_t phys = sg_phys(sg);
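For context, for_each_sgtable_sg() walks the CPU-side scatterlist entries of the sg_table (orig_nents); sgt->nents is the number of DMA-mapped entries and can be smaller, so iterating CPU pages with sg_phys() wants orig_nents. Roughly, as defined in include/linux/scatterlist.h:

/* Iterate every CPU-side scatterlist entry of an sg_table,
 * independent of how many entries were coalesced by DMA mapping.
 */
#define for_each_sgtable_sg(sgt, sg, i) \
    for_each_sg((sgt)->sgl, sg, (sgt)->orig_nents, i)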
@@ -25,7 +25,7 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)

        msm_gem_lock(obj);
        msm_gem_unpin_vma_fenced(submit->bos[i].vma, fctx);
        submit->bos[i].flags &= ~BO_PINNED;
        submit->bos[i].flags &= ~BO_VMA_PINNED;
        msm_gem_unlock(obj);
    }