/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include "i915_drv.h"
#include "i915_gem_batch_pool.h"

/**
 * DOC: batch pool
 *
 * In order to submit batch buffers as 'secure', the software command parser
 * must ensure that a batch buffer cannot be modified after parsing. It does
 * this by copying the user provided batch buffer contents to a kernel owned
 * buffer from which the hardware will actually execute, and by carefully
 * managing the address space bindings for such buffers.
 *
 * The batch pool framework provides a mechanism for the driver to manage a
 * set of scratch buffers to use for this purpose. The framework can be
 * extended to support other use cases should they arise.
 */
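
/*
 * Usage sketch (illustrative only; how the pool is embedded, the
 * surrounding locking, and copy_batch() are assumptions, not code from
 * this driver, and error handling is trimmed):
 *
 *	struct i915_gem_batch_pool pool;
 *	struct drm_i915_gem_object *shadow;
 *
 *	i915_gem_batch_pool_init(dev, &pool);
 *
 *	shadow = i915_gem_batch_pool_get(&pool, batch_len);
 *	if (IS_ERR(shadow))
 *		return PTR_ERR(shadow);
 *
 *	copy_batch(shadow, user_batch, batch_len);   <-- hypothetical helper
 *	... submit the shadow buffer to the hardware ...
 *	i915_gem_object_unpin_pages(shadow);
 *
 *	i915_gem_batch_pool_fini(&pool);
 */
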
/**
 * i915_gem_batch_pool_init() - initialize a batch buffer pool
 * @dev: the drm device
 * @pool: the batch buffer pool
 */
void i915_gem_batch_pool_init(struct drm_device *dev,
			      struct i915_gem_batch_pool *pool)
{
	int n;

	pool->dev = dev;

	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
		INIT_LIST_HEAD(&pool->cache_list[n]);
}

/**
 * i915_gem_batch_pool_fini() - clean up a batch buffer pool
 * @pool: the pool to clean up
 *
 * Note: Callers must hold the struct_mutex.
 */
void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
{
	int n;

	WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));

	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
		while (!list_empty(&pool->cache_list[n])) {
			struct drm_i915_gem_object *obj =
				list_first_entry(&pool->cache_list[n],
						 struct drm_i915_gem_object,
						 batch_pool_link);

			list_del(&obj->batch_pool_link);
			drm_gem_object_unreference(&obj->base);
		}
	}
}

/**
 * i915_gem_batch_pool_get() - allocate a buffer from the pool
 * @pool: the batch buffer pool
 * @size: the minimum desired size of the returned buffer
 *
 * Returns an inactive buffer from @pool with at least @size bytes,
 * with the pages pinned. The caller must i915_gem_object_unpin_pages()
 * on the returned object.
 *
 * Note: Callers must hold the struct_mutex.
 *
 * Return: the buffer object or an error pointer
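 *
 * Example (a minimal caller-side sketch; the surrounding context is an
 * assumption, not code from this file):
 *
 *	obj = i915_gem_batch_pool_get(pool, size);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *	... copy and execute the user batch from obj ...
 *	i915_gem_object_unpin_pages(obj);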
 */
struct drm_i915_gem_object *
i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
			size_t size)
{
	struct drm_i915_gem_object *obj = NULL;
	struct drm_i915_gem_object *tmp, *next;
	struct list_head *list;
	int n;

	WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));

	/* Compute a power-of-two bucket, but throw everything greater than
	 * 16KiB into the same bucket: i.e. the buckets hold objects of
	 * (1 page, 2 pages, 4 pages, 8+ pages).
	 */
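	/* Worked example, assuming 4KiB pages: a 4KiB batch gives
	 * fls(1) - 1 == 0, the one-page bucket; a 12KiB batch gives
	 * fls(3) - 1 == 1, the two-page bucket (entries that are still too
	 * small are filtered out by the size check in the loop below); a
	 * 64KiB batch gives fls(16) - 1 == 4, which the clamp below reduces
	 * to the last bucket.
	 */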
	n = fls(size >> PAGE_SHIFT) - 1;
	if (n >= ARRAY_SIZE(pool->cache_list))
		n = ARRAY_SIZE(pool->cache_list) - 1;
	list = &pool->cache_list[n];

	list_for_each_entry_safe(tmp, next, list, batch_pool_link) {
		/* The batches are strictly LRU ordered */
		if (tmp->active)
			break;

		/* While we're looping, do some clean up */
		if (tmp->madv == __I915_MADV_PURGED) {
			list_del(&tmp->batch_pool_link);
			drm_gem_object_unreference(&tmp->base);
			continue;
		}

		if (tmp->base.size >= size) {
			obj = tmp;
			break;
		}
	}

	if (obj == NULL) {
		int ret;

		obj = i915_gem_alloc_object(pool->dev, size);
		if (obj == NULL)
			return ERR_PTR(-ENOMEM);

		ret = i915_gem_object_get_pages(obj);
		if (ret)
			return ERR_PTR(ret);
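
		/* Mark the buffer as purgeable: when its pages are not
		 * pinned, the shrinker may discard them under memory
		 * pressure, after which the object shows up as
		 * __I915_MADV_PURGED in the cleanup pass above.
		 */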
		obj->madv = I915_MADV_DONTNEED;
	}
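
	/* Keep the bucket in LRU order: the buffer being handed out becomes
	 * the most-recently-used, so it moves to the tail of its list.
	 */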
	list_move_tail(&obj->batch_pool_link, list);
	i915_gem_object_pin_pages(obj);
	return obj;
}