Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "This is the final set of fixes for -rc8, just a few i915 and one
  vmwgfx ones.

  I'm off on holidays for a week, so if anything shows up for fixes I've
  asked Daniel or Sean Paul to herd it in the right direction"

[ The additional etnaviv fixes were already herded towards me as seen in
  my previous pull - Linus ]

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux:
  drm/vmwgfx: Free hash table allocated by cmdbuf managed res mgr
  drm/i915: Disable EXEC_OBJECT_ASYNC when doing relocations
  drm/i915: Hold struct_mutex for per-file stats in debugfs/i915_gem_object
  drm/i915: Retire the VMA's fence tracker before unbinding
Linus Torvalds 2017-06-28 13:22:26 -07:00
commit 5a37be4b51
4 changed files with 25 additions and 4 deletions

drivers/gpu/drm/i915/i915_debugfs.c

@@ -292,6 +292,8 @@ static int per_file_stats(int id, void *ptr, void *data)
         struct file_stats *stats = data;
         struct i915_vma *vma;
 
+        lockdep_assert_held(&obj->base.dev->struct_mutex);
+
         stats->count++;
         stats->total += obj->base.size;
         if (!obj->bind_count)
@@ -476,6 +478,8 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
                 struct drm_i915_gem_request *request;
                 struct task_struct *task;
 
+                mutex_lock(&dev->struct_mutex);
+
                 memset(&stats, 0, sizeof(stats));
                 stats.file_priv = file->driver_priv;
                 spin_lock(&file->table_lock);
@@ -487,7 +491,6 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
                  * still alive (e.g. get_pid(current) => fork() => exit()).
                  * Therefore, we need to protect this ->comm access using RCU.
                  */
-                mutex_lock(&dev->struct_mutex);
                 request = list_first_entry_or_null(&file_priv->mm.request_list,
                                                    struct drm_i915_gem_request,
                                                    client_link);
@@ -497,6 +500,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
                                 PIDTYPE_PID);
                 print_file_stats(m, task ? task->comm : "<unknown>", stats);
                 rcu_read_unlock();
+
                 mutex_unlock(&dev->struct_mutex);
         }
         mutex_unlock(&dev->filelist_mutex);
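
The debugfs fix above widens the locking: the caller now takes dev->struct_mutex before walking the per-file object idr, and per_file_stats() asserts that with lockdep_assert_held(), instead of the mutex being taken only around the later request lookup. The snippet below is a minimal userspace sketch of the same pattern (lock held around the whole iteration, assertion inside the per-item callback); the names per_item_stats and table_lock and the pthread-based "lock held" check are illustrative, not taken from the kernel code. Build with -pthread.

#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

struct stats {
        unsigned long count;
        unsigned long total;
};

/* Analogue of per_file_stats(): only valid with table_lock held. */
static void per_item_stats(size_t size, struct stats *stats)
{
        /* Crude stand-in for lockdep_assert_held(): a held (non-recursive)
         * mutex reports EBUSY to trylock. */
        assert(pthread_mutex_trylock(&table_lock) == EBUSY);
        stats->count++;
        stats->total += size;
}

int main(void)
{
        const size_t sizes[] = { 4096, 8192, 16384 };
        struct stats stats = { 0, 0 };

        /* Analogue of taking struct_mutex before the idr walk. */
        pthread_mutex_lock(&table_lock);
        for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                per_item_stats(sizes[i], &stats);
        pthread_mutex_unlock(&table_lock);

        printf("%lu objects, %lu bytes\n", stats.count, stats.total);
        return 0;
}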

drivers/gpu/drm/i915/i915_gem_execbuffer.c

@@ -546,11 +546,12 @@ repeat:
 }
 
 static int
-i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
+i915_gem_execbuffer_relocate_entry(struct i915_vma *vma,
                                    struct eb_vmas *eb,
                                    struct drm_i915_gem_relocation_entry *reloc,
                                    struct reloc_cache *cache)
 {
+        struct drm_i915_gem_object *obj = vma->obj;
         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
         struct drm_gem_object *target_obj;
         struct drm_i915_gem_object *target_i915_obj;
@@ -628,6 +629,16 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
                 return -EINVAL;
         }
 
+        /*
+         * If we write into the object, we need to force the synchronisation
+         * barrier, either with an asynchronous clflush or if we executed the
+         * patching using the GPU (though that should be serialised by the
+         * timeline). To be completely sure, and since we are required to
+         * do relocations we are already stalling, disable the user's opt
+         * out of our synchronisation.
+         */
+        vma->exec_entry->flags &= ~EXEC_OBJECT_ASYNC;
+
         ret = relocate_entry(obj, reloc, cache, target_offset);
         if (ret)
                 return ret;
@@ -678,7 +689,7 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
                 do {
                         u64 offset = r->presumed_offset;
 
-                        ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r, &cache);
+                        ret = i915_gem_execbuffer_relocate_entry(vma, eb, r, &cache);
                         if (ret)
                                 goto out;
 
@@ -726,7 +737,7 @@ i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
 
         reloc_cache_init(&cache, eb->i915);
         for (i = 0; i < entry->relocation_count; i++) {
-                ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i], &cache);
+                ret = i915_gem_execbuffer_relocate_entry(vma, eb, &relocs[i], &cache);
                 if (ret)
                         break;
         }
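
The execbuffer change threads the whole i915_vma into the relocation helper so it can reach vma->exec_entry and, as the new comment in the hunk above explains, strip the caller's EXEC_OBJECT_ASYNC opt-out while the kernel itself patches relocations. For context, this is the per-object flags word as userspace submits it; the sketch below assumes the kernel's i915 uapi header is installed, and the helper name fill_exec_object is made up for illustration.

#include <stdint.h>
#include <string.h>
#include <drm/i915_drm.h>

/* Fill one execbuffer object that still carries relocations but asks to
 * skip implicit sync; after the fix above the kernel ignores the ASYNC bit
 * for such an object while it performs the relocation writes. */
static void fill_exec_object(struct drm_i915_gem_exec_object2 *obj,
                             uint32_t handle,
                             const struct drm_i915_gem_relocation_entry *relocs,
                             uint32_t nrelocs)
{
        memset(obj, 0, sizeof(*obj));
        obj->handle = handle;
        obj->relocs_ptr = (uint64_t)(uintptr_t)relocs;
        obj->relocation_count = nrelocs;
        obj->flags = EXEC_OBJECT_ASYNC;   /* the opt-out the kernel now overrides */
}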

drivers/gpu/drm/i915/i915_vma.c

@@ -650,6 +650,11 @@ int i915_vma_unbind(struct i915_vma *vma)
                                 break;
                 }
 
+                if (!ret) {
+                        ret = i915_gem_active_retire(&vma->last_fence,
+                                                     &vma->vm->i915->drm.struct_mutex);
+                }
+
                 __i915_vma_unpin(vma);
                 if (ret)
                         return ret;

drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c

@@ -321,6 +321,7 @@ void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man)
         list_for_each_entry_safe(entry, next, &man->list, head)
                 vmw_cmdbuf_res_free(man, entry);
 
+        drm_ht_remove(&man->resources);
         kfree(man);
 }
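
The vmwgfx one-liner fixes a leak on teardown: the hash table the manager allocated at creation was never released, so the destroy path now calls drm_ht_remove() before freeing the manager itself. Below is a generic sketch of that create/destroy symmetry in plain C with made-up names (res_manager, res_man_create, res_man_destroy), not the vmwgfx code.

#include <stdlib.h>

/* A manager that owns an internal bucket array, standing in for the
 * open hash table owned by the command-buffer resource manager. */
struct res_manager {
        void **buckets;
        size_t nbuckets;
};

struct res_manager *res_man_create(unsigned int order)
{
        struct res_manager *man = calloc(1, sizeof(*man));

        if (!man)
                return NULL;
        man->nbuckets = (size_t)1 << order;
        man->buckets = calloc(man->nbuckets, sizeof(*man->buckets));
        if (!man->buckets) {
                free(man);
                return NULL;
        }
        return man;
}

void res_man_destroy(struct res_manager *man)
{
        free(man->buckets);     /* the step the patch adds via drm_ht_remove() */
        free(man);              /* freeing only this leaked the bucket array */
}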