// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_range_manager.h>

#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "i915_ttm_buddy_manager.h"

#include "intel_region_ttm.h"

#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h" /* For the funcs/ops export only */
/**
 * DOC: TTM support structure
 *
 * The code in this file deals with setting up memory managers for TTM
 * LMEM and MOCK regions and converting the output from the managers to
 * struct sg_table, basically providing the mapping from i915 GEM regions
 * to TTM memory types and resource managers.
 */
/**
* intel_region_ttm_device_init - Initialize a TTM device
* @ dev_priv : Pointer to an i915 device private structure .
*
* Return : 0 on success , negative error code on failure .
*/
int intel_region_ttm_device_init ( struct drm_i915_private * dev_priv )
{
struct drm_device * drm = & dev_priv - > drm ;
2021-06-10 09:01:49 +02:00
return ttm_device_init ( & dev_priv - > bdev , i915_ttm_driver ( ) ,
2021-06-02 10:38:08 +02:00
drm - > dev , drm - > anon_inode - > i_mapping ,
drm - > vma_offset_manager , false , false ) ;
}
/**
* intel_region_ttm_device_fini - Finalize a TTM device
* @ dev_priv : Pointer to an i915 device private structure .
*/
void intel_region_ttm_device_fini ( struct drm_i915_private * dev_priv )
{
ttm_device_fini ( & dev_priv - > bdev ) ;
}
/*
* Map the i915 memory regions to TTM memory types . We use the
* driver - private types for now , reserving TTM_PL_VRAM for stolen
* memory and TTM_PL_TT for GGTT use if decided to implement this .
*/
2021-06-16 16:24:57 +01:00
int intel_region_to_ttm_type ( const struct intel_memory_region * mem )
2021-06-02 10:38:08 +02:00
{
int type ;
GEM_BUG_ON ( mem - > type ! = INTEL_MEMORY_LOCAL & &
2021-06-16 16:24:57 +01:00
mem - > type ! = INTEL_MEMORY_MOCK & &
mem - > type ! = INTEL_MEMORY_SYSTEM ) ;
if ( mem - > type = = INTEL_MEMORY_SYSTEM )
return TTM_PL_SYSTEM ;
2021-06-02 10:38:08 +02:00
type = mem - > instance + TTM_PL_PRIV ;
GEM_BUG_ON ( type > = TTM_NUM_MEM_TYPES ) ;
return type ;
}
/**
2021-06-16 16:25:00 +01:00
* intel_region_ttm_init - Initialize a memory region for TTM .
* @ mem : The region to initialize .
*
* This function initializes a suitable TTM resource manager for the
* region , and if it ' s a LMEM region type , attaches it to the TTM
* device . MOCK regions are NOT attached to the TTM device , since we don ' t
* have one for the mock selftests .
*
* Return : 0 on success , negative error code on failure .
2021-06-02 10:38:08 +02:00
*/
int intel_region_ttm_init ( struct intel_memory_region * mem )
{
struct ttm_device * bdev = & mem - > i915 - > bdev ;
int mem_type = intel_region_to_ttm_type ( mem ) ;
int ret ;
2021-06-16 16:25:00 +01:00
ret = i915_ttm_buddy_man_init ( bdev , mem_type , false ,
2021-06-25 11:38:23 +01:00
resource_size ( & mem - > region ) ,
2022-02-25 14:54:59 +00:00
mem - > io_size ,
2021-06-25 11:38:23 +01:00
mem - > min_page_size , PAGE_SIZE ) ;
2021-06-02 10:38:08 +02:00
if ( ret )
return ret ;
mem - > region_private = ttm_manager_type ( bdev , mem_type ) ;
return 0 ;
}
/**
* intel_region_ttm_fini - Finalize a TTM region .
* @ mem : The memory region
*
* This functions takes down the TTM resource manager associated with the
* memory region , and if it was registered with the TTM device ,
* removes that registration .
*/
2021-11-22 22:45:51 +01:00
int intel_region_ttm_fini ( struct intel_memory_region * mem )
2021-06-02 10:38:08 +02:00
{
2021-11-22 22:45:51 +01:00
struct ttm_resource_manager * man = mem - > region_private ;
int ret = - EBUSY ;
int count ;
/*
* Put the region ' s move fences . This releases requests that
* may hold on to contexts and vms that may hold on to buffer
* objects placed in this region .
*/
if ( man )
ttm_resource_manager_cleanup ( man ) ;
/* Flush objects from region. */
for ( count = 0 ; count < 10 ; + + count ) {
i915_gem_flush_free_objects ( mem - > i915 ) ;
mutex_lock ( & mem - > objects . lock ) ;
if ( list_empty ( & mem - > objects . list ) )
ret = 0 ;
mutex_unlock ( & mem - > objects . lock ) ;
if ( ! ret )
break ;
msleep ( 20 ) ;
flush_delayed_work ( & mem - > i915 - > bdev . wq ) ;
}
/* If we leaked objects, Don't free the region causing use after free */
if ( ret | | ! man )
return ret ;
2021-06-02 10:38:08 +02:00
2021-06-16 16:25:00 +01:00
ret = i915_ttm_buddy_man_fini ( & mem - > i915 - > bdev ,
intel_region_to_ttm_type ( mem ) ) ;
2021-06-02 10:38:08 +02:00
GEM_WARN_ON ( ret ) ;
mem - > region_private = NULL ;
2021-11-22 22:45:51 +01:00
return ret ;
2021-06-02 10:38:08 +02:00
}
/**
2021-11-01 13:24:44 +01:00
* intel_region_ttm_resource_to_rsgt -
* Convert an opaque TTM resource manager resource to a refcounted sg_table .
2021-06-02 10:38:08 +02:00
* @ mem : The memory region .
2021-06-16 16:24:59 +01:00
* @ res : The resource manager resource obtained from the TTM resource manager .
2021-06-02 10:38:08 +02:00
*
* The gem backends typically use sg - tables for operations on the underlying
* io_memory . So provide a way for the backends to translate the
* nodes they are handed from TTM to sg - tables .
*
* Return : A malloced sg_table on success , an error pointer on failure .
*/
2021-11-01 13:24:44 +01:00
struct i915_refct_sgt *
intel_region_ttm_resource_to_rsgt ( struct intel_memory_region * mem ,
struct ttm_resource * res )
2021-06-02 10:38:08 +02:00
{
2021-06-16 16:25:00 +01:00
if ( mem - > is_range_manager ) {
struct ttm_range_mgr_node * range_node =
to_ttm_range_mgr_node ( res ) ;
2021-06-11 13:34:42 +10:00
2021-11-01 13:24:44 +01:00
return i915_rsgt_from_mm_node ( & range_node - > mm_nodes [ 0 ] ,
mem - > region . start ) ;
2021-06-16 16:25:00 +01:00
} else {
2021-11-01 13:24:44 +01:00
return i915_rsgt_from_buddy_resource ( res , mem - > region . start ) ;
2021-06-16 16:25:00 +01:00
}
2021-06-02 10:38:08 +02:00
}
#ifdef CONFIG_DRM_I915_SELFTEST
/**
2021-06-17 09:37:19 +01:00
* intel_region_ttm_resource_alloc - Allocate memory resources from a region
2021-06-02 10:38:08 +02:00
* @ mem : The memory region ,
* @ size : The requested size in bytes
* @ flags : Allocation flags
*
* This functionality is provided only for callers that need to allocate
* memory from standalone TTM range managers , without the TTM eviction
* functionality . Don ' t use if you are not completely sure that ' s the
* case . The returned opaque node can be converted to an sg_table using
2021-06-17 09:37:19 +01:00
* intel_region_ttm_resource_to_st ( ) , and can be freed using
* intel_region_ttm_resource_free ( ) .
2021-06-02 10:38:08 +02:00
*
* Return : A valid pointer on success , an error pointer on failure .
*/
2021-06-11 13:34:42 +10:00
struct ttm_resource *
2021-06-16 16:24:59 +01:00
intel_region_ttm_resource_alloc ( struct intel_memory_region * mem ,
2022-03-15 18:14:23 +00:00
resource_size_t offset ,
2021-06-16 16:24:59 +01:00
resource_size_t size ,
unsigned int flags )
2021-06-02 10:38:08 +02:00
{
struct ttm_resource_manager * man = mem - > region_private ;
struct ttm_place place = { } ;
struct ttm_buffer_object mock_bo = { } ;
2021-06-11 13:34:42 +10:00
struct ttm_resource * res ;
2021-06-02 10:38:08 +02:00
int ret ;
2022-02-25 14:54:58 +00:00
if ( flags & I915_BO_ALLOC_CONTIGUOUS )
place . flags | = TTM_PL_FLAG_CONTIGUOUS ;
2022-03-15 18:14:23 +00:00
if ( offset ! = I915_BO_INVALID_OFFSET ) {
place . fpfn = offset > > PAGE_SHIFT ;
place . lpfn = place . fpfn + ( size > > PAGE_SHIFT ) ;
} else if ( mem - > io_size & & mem - > io_size < mem - > total ) {
2022-02-25 14:54:58 +00:00
if ( flags & I915_BO_ALLOC_GPU_ONLY ) {
place . flags | = TTM_PL_FLAG_TOPDOWN ;
} else {
place . fpfn = 0 ;
place . lpfn = mem - > io_size > > PAGE_SHIFT ;
}
2022-02-25 14:54:57 +00:00
}
2021-06-11 13:34:42 +10:00
mock_bo . base . size = size ;
2022-02-21 12:11:03 +00:00
mock_bo . bdev = & mem - > i915 - > bdev ;
2021-06-02 10:38:08 +02:00
ret = man - > func - > alloc ( man , & mock_bo , & place , & res ) ;
if ( ret = = - ENOSPC )
ret = - ENXIO ;
2022-02-21 12:11:03 +00:00
if ( ! ret )
res - > bo = NULL ; /* Rather blow up, then some uaf */
2021-06-11 13:34:42 +10:00
return ret ? ERR_PTR ( ret ) : res ;
2021-06-02 10:38:08 +02:00
}
#endif

/**
* intel_region_ttm_resource_free - Free a resource allocated from a resource manager
* @ mem : The region the resource was allocated from .
* @ res : The opaque resource representing an allocation .
*/
void intel_region_ttm_resource_free ( struct intel_memory_region * mem ,
struct ttm_resource * res )
{
struct ttm_resource_manager * man = mem - > region_private ;
2022-02-21 12:11:03 +00:00
struct ttm_buffer_object mock_bo = { } ;
mock_bo . base . size = res - > num_pages < < PAGE_SHIFT ;
mock_bo . bdev = & mem - > i915 - > bdev ;
res - > bo = & mock_bo ;
2021-06-16 16:25:00 +01:00
man - > func - > free ( man , res ) ;
}