// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2018 Intel Corporation
 */

#include "gem/i915_gem_object.h"

#include "i915_drv.h"
#include "intel_engine_pm.h"
#include "intel_gt_buffer_pool.h"

static struct intel_gt *to_gt(struct intel_gt_buffer_pool *pool)
{
	return container_of(pool, struct intel_gt, buffer_pool);
}

static struct list_head *
bucket_for_size(struct intel_gt_buffer_pool *pool, size_t sz)
{
	int n;

	/*
	 * Compute a power-of-two bucket, but throw everything greater than
	 * 16KiB into the same bucket: i.e. the buckets hold objects of
	 * (1 page, 2 pages, 4 pages, 8+ pages).
	 */
	n = fls(sz >> PAGE_SHIFT) - 1;
	if (n >= ARRAY_SIZE(pool->cache_list))
		n = ARRAY_SIZE(pool->cache_list) - 1;

	return &pool->cache_list[n];
}

static void node_free(struct intel_gt_buffer_pool_node *node)
{
	i915_gem_object_put(node->obj);
	i915_active_fini(&node->active);
	kfree_rcu(node, rcu);
}
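
/*
 * Walk every bucket from its oldest (tail) entry and reap nodes that have
 * been idle for longer than @keep jiffies. A node is claimed by zeroing its
 * age with xchg() so a concurrent lookup cannot also take it; the claimed
 * nodes are chained together and freed outside the spinlock.
 * Returns true if any bucket still holds entries afterwards.
 */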
static bool pool_free_older_than(struct intel_gt_buffer_pool *pool, long keep)
{
	struct intel_gt_buffer_pool_node *node, *stale = NULL;
	bool active = false;
	int n;

	/* Free buffers that have not been used in the past second */
	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
		struct list_head *list = &pool->cache_list[n];

		if (list_empty(list))
			continue;

		if (spin_trylock_irq(&pool->lock)) {
			struct list_head *pos;

			/* Most recent at head; oldest at tail */
			list_for_each_prev(pos, list) {
				unsigned long age;

				node = list_entry(pos, typeof(*node), link);

				age = READ_ONCE(node->age);
				if (!age || jiffies - age < keep)
					break;

				/* Check we are the first to claim this node */
				if (!xchg(&node->age, 0))
					break;

				node->free = stale;
				stale = node;
			}
			if (!list_is_last(pos, list))
				__list_del_many(pos, list);

			spin_unlock_irq(&pool->lock);
		}

		active |= !list_empty(list);
	}

	while ((node = stale)) {
		stale = stale->free;
		node_free(node);
	}

	return active;
}
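
/*
 * Delayed worker that reaps nodes idle for at least HZ jiffies; it re-arms
 * itself for another pass while any bucket remains populated.
 */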
static void pool_free_work(struct work_struct *wrk)
{
	struct intel_gt_buffer_pool *pool =
		container_of(wrk, typeof(*pool), work.work);

	if (pool_free_older_than(pool, HZ))
		schedule_delayed_work(&pool->work,
				      round_jiffies_up_relative(HZ));
}
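
/*
 * First-acquire callback for the node's i915_active: opportunistically clear
 * stale fences left on the object, pin its backing pages and hide it from
 * the shrinker until the node is retired again.
 */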
static int pool_active(struct i915_active *ref)
{
	struct intel_gt_buffer_pool_node *node =
		container_of(ref, typeof(*node), active);
	struct dma_resv *resv = node->obj->base.resv;
	int err;

	if (dma_resv_trylock(resv)) {
		dma_resv_add_excl_fence(resv, NULL);
		dma_resv_unlock(resv);
	}

	err = i915_gem_object_pin_pages(node->obj);
	if (err)
		return err;

	/* Hide this pinned object from the shrinker until retired */
	i915_gem_object_make_unshrinkable(node->obj);

	return 0;
}
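
/*
 * Last-release callback for the node's i915_active: unpin the pages, let the
 * shrinker purge the object again, return the node to its size bucket with a
 * fresh age stamp and kick the delayed reaper.
 */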
__i915_active_call
static void pool_retire(struct i915_active *ref)
{
	struct intel_gt_buffer_pool_node *node =
		container_of(ref, typeof(*node), active);
	struct intel_gt_buffer_pool *pool = node->pool;
	struct list_head *list = bucket_for_size(pool, node->obj->base.size);
	unsigned long flags;

	i915_gem_object_unpin_pages(node->obj);

	/* Return this object to the shrinker pool */
	i915_gem_object_make_purgeable(node->obj);

	spin_lock_irqsave(&pool->lock, flags);
	list_add_rcu(&node->link, list);
	WRITE_ONCE(node->age, jiffies ?: 1); /* 0 reserved for active nodes */
	spin_unlock_irqrestore(&pool->lock, flags);

	schedule_delayed_work(&pool->work,
			      round_jiffies_up_relative(HZ));
}
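
/*
 * Allocate a new pool node backed by a read-only internal object of @sz
 * bytes; the node is returned unpinned and not yet linked into any bucket.
 */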
static struct intel_gt_buffer_pool_node *
node_create(struct intel_gt_buffer_pool *pool, size_t sz)
{
	struct intel_gt *gt = to_gt(pool);
	struct intel_gt_buffer_pool_node *node;
	struct drm_i915_gem_object *obj;

	node = kmalloc(sizeof(*node),
		       GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	if (!node)
		return ERR_PTR(-ENOMEM);

	node->pool = pool;
	i915_active_init(&node->active, pool_active, pool_retire);

	obj = i915_gem_object_create_internal(gt->i915, sz);
	if (IS_ERR(obj)) {
		i915_active_fini(&node->active);
		kfree(node);
		return ERR_CAST(obj);
	}

	i915_gem_object_set_readonly(obj);

	node->obj = obj;
	return node;
}
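
/*
 * Return a pool node whose object has at least @size bytes of backing store,
 * reusing an idle node from the matching bucket when one is available and
 * creating a fresh one otherwise. The node comes back with its i915_active
 * acquired, so the caller owns a reference until it releases the node again.
 *
 * A typical caller (sketch only; the helpers referenced here live in
 * intel_gt_buffer_pool.h and i915_active.h, not in this file):
 *
 *	pool = intel_gt_get_buffer_pool(gt, PAGE_SIZE);
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 *
 *	... emit commands using pool->obj ...
 *
 *	err = intel_gt_buffer_pool_mark_active(pool, rq);
 *	intel_gt_buffer_pool_put(pool);
 */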
struct intel_gt_buffer_pool_node *
intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
	struct intel_gt_buffer_pool_node *node;
	struct list_head *list;
	int ret;

	size = PAGE_ALIGN(size);
	list = bucket_for_size(pool, size);

	rcu_read_lock();
	list_for_each_entry_rcu(node, list, link) {
		unsigned long age;

		if (node->obj->base.size < size)
			continue;

		age = READ_ONCE(node->age);
		if (!age)
			continue;

		if (cmpxchg(&node->age, age, 0) == age) {
			spin_lock_irq(&pool->lock);
			list_del_rcu(&node->link);
			spin_unlock_irq(&pool->lock);
			break;
		}
	}
	rcu_read_unlock();

	if (&node->link == list) {
		node = node_create(pool, size);
		if (IS_ERR(node))
			return node;
	}

	ret = i915_active_acquire(&node->active);
	if (ret) {
		node_free(node);
		return ERR_PTR(ret);
	}

	return node;
}

void intel_gt_init_buffer_pool(struct intel_gt *gt)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
	int n;

	spin_lock_init(&pool->lock);
	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
		INIT_LIST_HEAD(&pool->cache_list[n]);
	INIT_DELAYED_WORK(&pool->work, pool_free_work);
}
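
/*
 * Drain the pool completely: keep reaping with keep == 0 (every idle node
 * qualifies) and cancelling the delayed worker until a cancel finds no work
 * pending, at which point nothing can re-arm it with stale nodes.
 */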
void intel_gt_flush_buffer_pool(struct intel_gt *gt)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;

	do {
		while (pool_free_older_than(pool, 0))
			;
	} while (cancel_delayed_work_sync(&pool->work));
}

void intel_gt_fini_buffer_pool(struct intel_gt *gt)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
	int n;

	intel_gt_flush_buffer_pool(gt);

	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
		GEM_BUG_ON(!list_empty(&pool->cache_list[n]));
}