/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */
#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_trace.h"

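/*
 * The GGTT is idle only when no engine has a request outstanding on the
 * global GTT timeline; while any such request is in flight, entries such as
 * scanouts, ringbuffers and contexts may still be pinned into the aperture.
 */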
static bool ggtt_is_idle(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id) {
		struct intel_timeline *tl;

		tl = &ggtt->base.timeline.engine[engine->id];
		if (i915_gem_active_isset(&tl->last_request))
			return false;
	}

	return true;
}
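
/*
 * mark_free() feeds candidate vmas into the drm_mm eviction scan: pinned
 * vmas, vmas already on an unwind list, and (for PIN_NONFAULT) vmas whose
 * object still has a userspace fault mapping are skipped; everything else
 * is queued on @unwind via its exec_list so that the scan can later be
 * unwound or committed.
 */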
static bool
mark_free(struct drm_mm_scan *scan,
	  struct i915_vma *vma,
	  unsigned int flags,
	  struct list_head *unwind)
{
	if (i915_vma_is_pinned(vma))
		return false;

	if (WARN_ON(!list_empty(&vma->exec_list)))
		return false;

	if (flags & PIN_NONFAULT && !list_empty(&vma->obj->userfault_link))
		return false;

	list_add(&vma->exec_list, unwind);
	return drm_mm_scan_add_block(scan, &vma->node);
}

/**
 * i915_gem_evict_something - Evict vmas to make room for binding a new one
 * @vm: address space to evict from
 * @min_size: size of the desired free space
 * @alignment: alignment constraint of the desired free space
 * @cache_level: cache_level for the desired space
 * @start: start (inclusive) of the range from which to evict objects
 * @end: end (exclusive) of the range from which to evict objects
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas until a free space satisfying the
 * requirements is found. Callers must check first whether any such hole exists
 * already before calling this function.
 *
 * This function is used by the object/vma binding code.
 *
 * Since this function is only used to free up virtual address space it only
 * ignores pinned vmas, and not objects where the backing storage itself is
 * pinned. Hence obj->pages_pin_count does not protect against eviction.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int
i915_gem_evict_something(struct i915_address_space *vm,
			 u64 min_size, u64 alignment,
			 unsigned cache_level,
			 u64 start, u64 end,
			 unsigned flags)
{
	struct drm_i915_private *dev_priv = vm->i915;
	struct drm_mm_scan scan;
	struct list_head eviction_list;
	struct list_head *phases[] = {
		&vm->inactive_list,
		&vm->active_list,
		NULL,
	}, **phase;
	struct i915_vma *vma, *next;
	struct drm_mm_node *node;
	int ret;

	lockdep_assert_held(&vm->i915->drm.struct_mutex);
	trace_i915_gem_evict(vm, min_size, alignment, flags);

	/*
	 * The goal is to evict objects and amalgamate space in LRU order.
	 * The oldest idle objects reside on the inactive list, which is in
	 * retirement order. The next objects to retire are those in flight,
	 * on the active list, again in retirement order.
	 *
	 * The retirement sequence is thus:
	 *   1. Inactive objects (already retired)
	 *   2. Active objects (will stall on unbinding)
	 *
	 * On each list, the oldest objects lie at the HEAD with the freshest
	 * object on the TAIL.
	 */
	drm_mm_scan_init_with_range(&scan, &vm->mm,
				    min_size, alignment, cache_level,
				    start, end,
				    flags & PIN_HIGH ? DRM_MM_CREATE_TOP : 0);

	/* Retire before we search the active list. Although we have
	 * reasonable accuracy in our retirement lists, we may have
	 * a stray pin (preventing eviction) that can only be resolved by
	 * retiring.
	 */
	if (!(flags & PIN_NONBLOCK))
		i915_gem_retire_requests(dev_priv);
	else
		phases[1] = NULL;

search_again:
	INIT_LIST_HEAD(&eviction_list);
	phase = phases;
	do {
		list_for_each_entry(vma, *phase, vm_link)
			if (mark_free(&scan, vma, flags, &eviction_list))
				goto found;
	} while (*++phase);

	/* Nothing found, clean up and bail out! */
	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
		ret = drm_mm_scan_remove_block(&scan, &vma->node);
		BUG_ON(ret);

		INIT_LIST_HEAD(&vma->exec_list);
	}

	/* Can we unpin some objects such as idle hw contexts,
	 * or pending flips? But since only the GGTT has global entries
	 * such as scanouts, ringbuffers and contexts, we can skip the
	 * purge when inspecting per-process local address spaces.
	 */
	if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
		return -ENOSPC;

	if (ggtt_is_idle(dev_priv)) {
		/* If we still have pending pageflip completions, drop
		 * back to userspace to give our workqueues time to
		 * acquire our locks and unpin the old scanouts.
		 */
		return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
	}

	/* Not everything in the GGTT is tracked via vma (otherwise we
	 * could evict as required with minimal stalling) so we are forced
	 * to idle the GPU and explicitly retire outstanding requests in
	 * the hopes that we can then remove contexts and the like only
	 * bound by their active reference.
	 */
	ret = i915_gem_switch_to_kernel_context(dev_priv);
	if (ret)
		return ret;

	ret = i915_gem_wait_for_idle(dev_priv,
				     I915_WAIT_INTERRUPTIBLE |
				     I915_WAIT_LOCKED);
	if (ret)
		return ret;

	i915_gem_retire_requests(dev_priv);
	goto search_again;

found:
	/* drm_mm doesn't allow any other operations while
	 * scanning, therefore store to-be-evicted objects on a
	 * temporary list and take a reference for all before
	 * calling unbind (which may remove the active reference
	 * of any of our objects, thus corrupting the list).
	 */
	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
		if (drm_mm_scan_remove_block(&scan, &vma->node))
			__i915_vma_pin(vma);
		else
			list_del_init(&vma->exec_list);
	}

	/* Unbinding will emit any required flushes */
	ret = 0;
	while (!list_empty(&eviction_list)) {
		vma = list_first_entry(&eviction_list,
				       struct i915_vma,
				       exec_list);

		list_del_init(&vma->exec_list);
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = i915_vma_unbind(vma);
	}

	while (ret == 0 && (node = drm_mm_scan_color_evict(&scan))) {
		vma = container_of(node, struct i915_vma, node);
		ret = i915_vma_unbind(vma);
	}

	return ret;
}
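
/*
 * Usage sketch (illustrative only, not part of the flow above): callers such
 * as the vma insertion code are expected to try a normal drm_mm allocation
 * first and only fall back to eviction on -ENOSPC, then retry the allocation.
 * 'insert_node()' below is a stand-in for whatever drm_mm insertion helper
 * the caller uses; only the retry shape is being shown.
 *
 *	err = insert_node(vm, &vma->node, size, alignment, cache_level,
 *			  start, end, flags);
 *	if (err == -ENOSPC) {
 *		err = i915_gem_evict_something(vm, size, alignment,
 *					       cache_level, start, end, flags);
 *		if (err == 0)
 *			err = insert_node(vm, &vma->node, size, alignment,
 *					  cache_level, start, end, flags);
 *	}
 */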

/**
 * i915_gem_evict_for_vma - Evict vmas to make room for binding a new one
 * @target: address space and range to evict for
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas that overlap the target node.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_for_vma(struct i915_vma *target, unsigned int flags)
{
	LIST_HEAD(eviction_list);
	struct drm_mm_node *node;
	u64 start = target->node.start;
	u64 end = start + target->node.size;
	struct i915_vma *vma, *next;
	bool check_color;
	int ret = 0;

	lockdep_assert_held(&target->vm->i915->drm.struct_mutex);
	trace_i915_gem_evict_vma(target, flags);

	/* Retire before we search the active list. Although we have
	 * reasonable accuracy in our retirement lists, we may have
	 * a stray pin (preventing eviction) that can only be resolved by
	 * retiring.
	 */
	if (!(flags & PIN_NONBLOCK))
		i915_gem_retire_requests(target->vm->i915);

	check_color = target->vm->mm.color_adjust;
	if (check_color) {
		/* Expand search to cover neighbouring guard pages (or lack!) */
		if (start > target->vm->start)
			start -= 4096;
		if (end < target->vm->start + target->vm->total)
			end += 4096;
	}

	drm_mm_for_each_node_in_range(node, &target->vm->mm, start, end) {
		/* If we find any non-objects (!vma), we cannot evict them */
		if (node->color == I915_COLOR_UNEVICTABLE) {
			ret = -ENOSPC;
			break;
		}

		vma = container_of(node, typeof(*vma), node);

		/* If we are using coloring to insert guard pages between
		 * different cache domains within the address space, we have
		 * to check whether the objects on either side of our range
		 * abut and conflict. If they are in conflict, then we evict
		 * those as well to make room for our guard pages.
		 */
		if (check_color) {
			if (vma->node.start + vma->node.size == target->node.start) {
				if (vma->node.color == target->node.color)
					continue;
			}
			if (vma->node.start == target->node.start + target->node.size) {
				if (vma->node.color == target->node.color)
					continue;
			}
		}

		if (flags & PIN_NONBLOCK &&
		    (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))) {
			ret = -ENOSPC;
			break;
		}

		/* Overlap of objects in the same batch? */
		if (i915_vma_is_pinned(vma)) {
			ret = -ENOSPC;
			if (vma->exec_entry &&
			    vma->exec_entry->flags & EXEC_OBJECT_PINNED)
				ret = -EINVAL;
			break;
		}

		/* Never show fear in the face of dragons!
		 *
		 * We cannot directly remove this node from within this
		 * iterator and as with i915_gem_evict_something() we employ
		 * the vma pin_count in order to prevent the action of
		 * unbinding one vma from freeing (by dropping its active
		 * reference) another in our eviction list.
		 */
		__i915_vma_pin(vma);
		list_add(&vma->exec_list, &eviction_list);
	}

	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
		list_del_init(&vma->exec_list);
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = i915_vma_unbind(vma);
	}

	return ret;
}
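
/*
 * Usage sketch (illustrative only): when a caller needs a fixed GTT offset
 * (e.g. EXEC_OBJECT_PINNED), it typically reserves the node first and, on
 * failure, evicts whatever overlaps the target range before retrying.
 * 'reserve_node()' is a stand-in for the caller's drm_mm_reserve_node() path.
 *
 *	err = reserve_node(vm, &vma->node);
 *	if (err) {
 *		err = i915_gem_evict_for_vma(vma, flags);
 *		if (err == 0)
 *			err = reserve_node(vm, &vma->node);
 *	}
 */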

/**
 * i915_gem_evict_vm - Evict all idle vmas from a vm
 * @vm: Address space to cleanse
 * @do_idle: Boolean directing whether to idle first.
 *
 * This function evicts all idle vmas from a vm. If all unpinned vmas should be
 * evicted, @do_idle needs to be set to true.
 *
 * This is used by the execbuf code as a last-ditch effort to defragment the
 * address space.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
{
	struct i915_vma *vma, *next;
	int ret;

	lockdep_assert_held(&vm->i915->drm.struct_mutex);
	trace_i915_gem_evict_vm(vm);

	if (do_idle) {
		struct drm_i915_private *dev_priv = vm->i915;

		if (i915_is_ggtt(vm)) {
			ret = i915_gem_switch_to_kernel_context(dev_priv);
			if (ret)
				return ret;
		}

		ret = i915_gem_wait_for_idle(dev_priv,
					     I915_WAIT_INTERRUPTIBLE |
					     I915_WAIT_LOCKED);
		if (ret)
			return ret;

		i915_gem_retire_requests(dev_priv);
		WARN_ON(!list_empty(&vm->active_list));
	}

	list_for_each_entry_safe(vma, next, &vm->inactive_list, vm_link)
		if (!i915_vma_is_pinned(vma))
			WARN_ON(i915_vma_unbind(vma));

	return 0;
}
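
/*
 * Usage sketch (illustrative only): execbuf falls back to this as a last
 * resort when it cannot fit all objects for a batch, wiping the address
 * space clean before one final reservation attempt, e.g.:
 *
 *	err = i915_gem_evict_vm(vm, true);
 *	if (err)
 *		return err;
 *	... retry binding the execbuffer objects ...
 */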