/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"

/**
 * DOC: amdgpu_object
 *
 * This defines the interfaces to operate on an &amdgpu_bo buffer object which
 * represents memory used by the driver (VRAM, system memory, etc.). The driver
 * provides DRM/GEM APIs to userspace. DRM/GEM APIs then use these interfaces
 * to create/destroy/set buffer objects which are then managed by the kernel
 * TTM memory manager.
 * The interfaces are also used internally by kernel clients, including gfx,
 * uvd, etc. for kernel managed allocations used by the GPU.
 *
 */
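
/*
 * A minimal usage sketch for kernel clients (illustrative only; the buffer
 * name, size and domain below are placeholders, not taken from this file):
 *
 *	struct amdgpu_bo *bo = NULL;
 *	u64 gpu_addr;
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_VRAM, &bo,
 *				    &gpu_addr, &cpu_ptr);
 *	if (r)
 *		return r;
 *	// ... use cpu_ptr and gpu_addr ...
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);
 */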

static bool amdgpu_bo_need_backup(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return false;

	if (amdgpu_gpu_recovery == 0 ||
	    (amdgpu_gpu_recovery == -1 && !amdgpu_sriov_vf(adev)))
		return false;

	return true;
}

/**
 * amdgpu_bo_subtract_pin_size - Remove BO from pin_size accounting
 *
 * @bo: &amdgpu_bo buffer object
 *
 * This function is called when a BO stops being pinned, and updates the
 * &amdgpu_device pin_size values accordingly.
 */
static void amdgpu_bo_subtract_pin_size(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}
}

static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);

	if (bo->pin_count > 0)
		amdgpu_bo_subtract_pin_size(bo);

	if (bo->kfd_bo)
		amdgpu_amdkfd_unreserve_system_memory_limit(bo);

	amdgpu_bo_kunmap(bo);

	if (bo->gem_base.import_attach)
		drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
	drm_gem_object_release(&bo->gem_base);
	amdgpu_bo_unref(&bo->parent);
	if (!list_empty(&bo->shadow_list)) {
		mutex_lock(&adev->shadow_list_lock);
		list_del_init(&bo->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}
	kfree(bo->metadata);
	kfree(bo);
}

/**
 * amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
 * @bo: buffer object to be checked
 *
 * Uses destroy function associated with the object to determine if this is
 * an &amdgpu_bo.
 *
 * Returns:
 * true if the object belongs to &amdgpu_bo, false if not.
 */
bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &amdgpu_bo_destroy)
		return true;
	return false;
}

/**
 * amdgpu_bo_placement_from_domain - set buffer's placement
 * @abo: &amdgpu_bo buffer object whose placement is to be set
 * @domain: requested domain
 *
 * Sets buffer's placement according to requested domain and the buffer's
 * flags.
 */
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct ttm_placement *placement = &abo->placement;
	struct ttm_place *places = abo->placements;
	u64 flags = abo->flags;
	u32 c = 0;

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		unsigned visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;

		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;

		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			places[c].lpfn = visible_pfn;
		else
			places[c].flags |= TTM_PL_FLAG_TOPDOWN;

		if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
			places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		places[c].fpfn = 0;
		if (flags & AMDGPU_GEM_CREATE_SHADOW)
			places[c].lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
		else
			places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_TT;
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
			places[c].flags |= TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
		else
			places[c].flags |= TTM_PL_FLAG_CACHED;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_SYSTEM;
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
			places[c].flags |= TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
		else
			places[c].flags |= TTM_PL_FLAG_CACHED;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA;
		c++;
	}

	if (!c) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
		c++;
	}

	BUG_ON(c >= AMDGPU_BO_MAX_PLACEMENTS);

	placement->num_placement = c;
	placement->placement = places;

	placement->num_busy_placement = c;
	placement->busy_placement = places;
}

/**
 * amdgpu_bo_create_reserved - create reserved BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use, and returns it still
 * reserved.
 *
 * Note: a new BO is only created if *bo_ptr points to NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr)
{
	struct amdgpu_bo_param bp;
	bool free = false;
	int r;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = align;
	bp.domain = domain;
	bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;

	if (!*bo_ptr) {
		r = amdgpu_bo_create(adev, &bp, bo_ptr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
				r);
			return r;
		}
		free = true;
	}

	r = amdgpu_bo_reserve(*bo_ptr, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
		goto error_free;
	}

	r = amdgpu_bo_pin(*bo_ptr, domain);
	if (r) {
		dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
		goto error_unreserve;
	}

	r = amdgpu_ttm_alloc_gart(&(*bo_ptr)->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", *bo_ptr);
		goto error_unpin;
	}

	if (gpu_addr)
		*gpu_addr = amdgpu_bo_gpu_offset(*bo_ptr);

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
			goto error_unpin;
		}
	}

	return 0;

error_unpin:
	amdgpu_bo_unpin(*bo_ptr);
error_unreserve:
	amdgpu_bo_unreserve(*bo_ptr);

error_free:
	if (free)
		amdgpu_bo_unref(bo_ptr);

	return r;
}

/**
 * amdgpu_bo_create_kernel - create BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use.
 *
 * Note: a new BO is only created if *bo_ptr points to NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr)
{
	int r;

	r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
				      gpu_addr, cpu_addr);
	if (r)
		return r;

	amdgpu_bo_unreserve(*bo_ptr);

	return 0;
}

/**
 * amdgpu_bo_free_kernel - free BO for kernel use
 *
 * @bo: amdgpu BO to free
 * @gpu_addr: pointer to where the BO's GPU memory space address was stored
 * @cpu_addr: pointer to where the BO's CPU memory space address was stored
 *
 * Unmaps and unpins a BO for kernel internal use.
 */
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr)
{
	if (*bo == NULL)
		return;

	if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
		if (cpu_addr)
			amdgpu_bo_kunmap(*bo);

		amdgpu_bo_unpin(*bo);
		amdgpu_bo_unreserve(*bo);
	}
	amdgpu_bo_unref(bo);

	if (gpu_addr)
		*gpu_addr = 0;

	if (cpu_addr)
		*cpu_addr = NULL;
}

/* Validate that the requested BO size fits within the total memory of the
 * requested domain.
 */
static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
				    unsigned long size, u32 domain)
{
	struct ttm_mem_type_manager *man = NULL;

	/*
	 * If GTT is part of requested domains the check must succeed to
	 * allow fall back to GTT.
	 */
	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		man = &adev->mman.bdev.man[TTM_PL_TT];

		if (size < (man->size << PAGE_SHIFT))
			return true;
		else
			goto fail;
	}

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		man = &adev->mman.bdev.man[TTM_PL_VRAM];

		if (size < (man->size << PAGE_SHIFT))
			return true;
		else
			goto fail;
	}

	/* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU */
	return true;

fail:
	DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
		  man->size << PAGE_SHIFT);
	return false;
}

static int amdgpu_bo_do_create(struct amdgpu_device *adev,
			       struct amdgpu_bo_param *bp,
			       struct amdgpu_bo **bo_ptr)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = (bp->type != ttm_bo_type_kernel),
		.no_wait_gpu = false,
		.resv = bp->resv,
		.flags = TTM_OPT_FLAG_ALLOW_RES_EVICT
	};
	struct amdgpu_bo *bo;
	unsigned long page_align, size = bp->size;
	size_t acc_size;
	int r;

	page_align = roundup(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size = ALIGN(size, PAGE_SIZE);

	if (!amdgpu_bo_validate_size(adev, size, bp->domain))
		return -ENOMEM;

	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
	INIT_LIST_HEAD(&bo->shadow_list);
	INIT_LIST_HEAD(&bo->va);
	bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
		bp->domain;
	bo->allowed_domains = bo->preferred_domains;
	if (bp->type != ttm_bo_type_kernel &&
	    bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

	bo->flags = bp->flags;

#ifdef CONFIG_X86_32
	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
	 */
	bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/* Don't try to enable write-combining when it can't work, or things
	 * may be slow
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
	 */
#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining
#endif

	if (bo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
			      "better performance thanks to write-combining\n");
	bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#else
	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#endif

	bo->tbo.bdev = &adev->mman.bdev;
	amdgpu_bo_placement_from_domain(bo, bp->domain);
	if (bp->type == ttm_bo_type_kernel)
		bo->tbo.priority = 1;

	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
				 &bo->placement, page_align, &ctx, acc_size,
				 NULL, bp->resv, &amdgpu_bo_destroy);
	if (unlikely(r != 0))
		return r;

	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    bo->tbo.mem.mem_type == TTM_PL_VRAM &&
	    bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
					     ctx.bytes_moved);
	else
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);

	if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
	    bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
		struct dma_fence *fence;

		r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
		if (unlikely(r))
			goto fail_unreserve;

		amdgpu_bo_fence(bo, fence, false);
		dma_fence_put(bo->tbo.moving);
		bo->tbo.moving = dma_fence_get(fence);
		dma_fence_put(fence);
	}
	if (!bp->resv)
		amdgpu_bo_unreserve(bo);
	*bo_ptr = bo;

	trace_amdgpu_bo_create(bo);

	/* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
	if (bp->type == ttm_bo_type_device)
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	return 0;

fail_unreserve:
	if (!bp->resv)
		ww_mutex_unlock(&bo->tbo.resv->lock);
	amdgpu_bo_unref(&bo);
	return r;
}

static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
				   unsigned long size, int byte_align,
				   struct amdgpu_bo *bo)
{
	struct amdgpu_bo_param bp;
	int r;

	if (bo->shadow)
		return 0;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = byte_align;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		AMDGPU_GEM_CREATE_SHADOW;
	bp.type = ttm_bo_type_kernel;
	bp.resv = bo->tbo.resv;

	r = amdgpu_bo_do_create(adev, &bp, &bo->shadow);
	if (!r) {
		bo->shadow->parent = amdgpu_bo_ref(bo);
		mutex_lock(&adev->shadow_list_lock);
		list_add_tail(&bo->shadow_list, &adev->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}

	return r;
}

/**
 * amdgpu_bo_create - create an &amdgpu_bo buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @bo_ptr: pointer to the buffer object pointer
 *
 * Creates an &amdgpu_bo buffer object; and if requested, also creates a
 * shadow object.
 * The shadow object is used to back up the original buffer object, and is
 * always placed in GTT.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr)
{
	u64 flags = bp->flags;
	int r;

	bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW;
	r = amdgpu_bo_do_create(adev, bp, bo_ptr);
	if (r)
		return r;

	if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_bo_need_backup(adev)) {
		if (!bp->resv)
			WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv,
							NULL));

		r = amdgpu_bo_create_shadow(adev, bp->size, bp->byte_align, (*bo_ptr));

		if (!bp->resv)
			reservation_object_unlock((*bo_ptr)->tbo.resv);

		if (r)
			amdgpu_bo_unref(bo_ptr);
	}

	return r;
}
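
/*
 * Illustrative sketch of a caller filling &amdgpu_bo_param for a buffer that
 * will be exposed to userspace (size, alignment and flags below are
 * placeholders, not taken from this file):
 *
 *	struct amdgpu_bo_param bp;
 *	struct amdgpu_bo *bo;
 *	int r;
 *
 *	memset(&bp, 0, sizeof(bp));
 *	bp.size = size;
 *	bp.byte_align = PAGE_SIZE;
 *	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
 *	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
 *	bp.type = ttm_bo_type_device;
 *	bp.resv = NULL;
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 */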

/**
 * amdgpu_bo_backup_to_shadow - Backs up an &amdgpu_bo buffer object
 * @adev: amdgpu device object
 * @ring: amdgpu_ring for the engine handling the buffer operations
 * @bo: &amdgpu_bo buffer to be backed up
 * @resv: reservation object with embedded fence
 * @fence: dma_fence associated with the operation
 * @direct: whether to submit the job directly
 *
 * Copies an &amdgpu_bo buffer object to its shadow object.
 * Not used for now.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_bo *bo,
			       struct reservation_object *resv,
			       struct dma_fence **fence,
			       bool direct)
{
	struct amdgpu_bo *shadow = bo->shadow;
	uint64_t bo_addr, shadow_addr;
	int r;

	if (!shadow)
		return -EINVAL;

	bo_addr = amdgpu_bo_gpu_offset(bo);
	shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);

	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
		goto err;

	r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
			       amdgpu_bo_size(bo), resv, fence,
			       direct, false);
	if (!r)
		amdgpu_bo_fence(bo, *fence, true);

err:
	return r;
}

/**
 * amdgpu_bo_validate - validate an &amdgpu_bo buffer object
 * @bo: pointer to the buffer object
 *
 * Sets placement according to domain; and changes placement and caching
 * policy of the buffer object according to the placement.
 * This is used for validating shadow BOs. It calls ttm_bo_validate() to
 * make sure the buffer is resident where it needs to be.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_validate(struct amdgpu_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	uint32_t domain;
	int r;

	if (bo->pin_count)
		return 0;

	domain = bo->preferred_domains;

retry:
	amdgpu_bo_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}

/**
 * amdgpu_bo_restore_from_shadow - restore an &amdgpu_bo buffer object
 * @adev: amdgpu device object
 * @ring: amdgpu_ring for the engine handling the buffer operations
 * @bo: &amdgpu_bo buffer to be restored
 * @resv: reservation object with embedded fence
 * @fence: dma_fence associated with the operation
 * @direct: whether to submit the job directly
 *
 * Copies a buffer object's shadow content back to the object.
 * This is used for recovering a buffer from its shadow in case of a GPU
 * reset where VRAM context may be lost.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  struct amdgpu_bo *bo,
				  struct reservation_object *resv,
				  struct dma_fence **fence,
				  bool direct)
{
	struct amdgpu_bo *shadow = bo->shadow;
	uint64_t bo_addr, shadow_addr;
	int r;

	if (!shadow)
		return -EINVAL;

	bo_addr = amdgpu_bo_gpu_offset(bo);
	shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);

	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
		goto err;

	r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr,
			       amdgpu_bo_size(bo), resv, fence,
			       direct, false);
	if (!r)
		amdgpu_bo_fence(bo, *fence, true);

err:
	return r;
}

/**
 * amdgpu_bo_kmap - map an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be mapped
 * @ptr: kernel virtual address to be returned
 *
 * Calls ttm_bo_kmap() to set up the kernel virtual mapping; calls
 * amdgpu_bo_kptr() to get the kernel virtual address.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
	void *kptr;
	long r;

	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	kptr = amdgpu_bo_kptr(bo);
	if (kptr) {
		if (ptr)
			*ptr = kptr;
		return 0;
	}

	r = reservation_object_wait_timeout_rcu(bo->tbo.resv, false, false,
						MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;

	if (ptr)
		*ptr = amdgpu_bo_kptr(bo);

	return 0;
}
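
/*
 * Illustrative sketch of temporary CPU access to a buffer ("bo" is assumed
 * to be a valid, reserved &amdgpu_bo; "data" and "size" are placeholders and
 * error handling is trimmed):
 *
 *	void *cpu_ptr;
 *
 *	r = amdgpu_bo_kmap(bo, &cpu_ptr);
 *	if (!r) {
 *		memcpy(cpu_ptr, data, size);	// access the BO through cpu_ptr
 *		amdgpu_bo_kunmap(bo);
 *	}
 */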

/**
 * amdgpu_bo_kptr - returns a kernel virtual address of the buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Calls ttm_kmap_obj_virtual() to get the kernel virtual address
 *
 * Returns:
 * the virtual address of a buffer object area.
 */
void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
{
	bool is_iomem;

	return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
}

/**
 * amdgpu_bo_kunmap - unmap an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unmapped
 *
 * Unmaps a kernel map set up by amdgpu_bo_kmap().
 */
void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
	if (bo->kmap.bo)
		ttm_bo_kunmap(&bo->kmap);
}

/**
 * amdgpu_bo_ref - reference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * References the contained &ttm_buffer_object.
 *
 * Returns:
 * a refcounted pointer to the &amdgpu_bo buffer object.
 */
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_get(&bo->tbo);
	return bo;
}

/**
 * amdgpu_bo_unref - unreference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Unreferences the contained &ttm_buffer_object and clears the pointer.
 */
void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->tbo);
	ttm_bo_put(tbo);
	*bo = NULL;
}

/**
 * amdgpu_bo_pin_restricted - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 * @min_offset: the start of requested address range
 * @max_offset: the end of requested address range
 *
 * Pins the buffer object according to requested domain and address range. If
 * the memory is unbound gart memory, binds the pages into gart table. Adjusts
 * pin_count and pin_size accordingly.
 *
 * Pinning means to lock pages in memory along with keeping them at a fixed
 * offset. It is required when a buffer can not be moved, for example, when
 * a display buffer is being scanned out.
 *
 * Compared with amdgpu_bo_pin(), this function gives more flexibility on
 * where to pin a buffer if there are specific restrictions on where a buffer
 * must be located.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;

	if (WARN_ON_ONCE(min_offset > max_offset))
		return -EINVAL;

	/* A shared bo cannot be migrated to VRAM */
	if (bo->prime_shared_count) {
		if (domain & AMDGPU_GEM_DOMAIN_GTT)
			domain = AMDGPU_GEM_DOMAIN_GTT;
		else
			return -EINVAL;
	}

	/* This assumes only APU display buffers are pinned with (VRAM|GTT).
	 * See function amdgpu_display_supported_domains()
	 */
	domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);

	if (bo->pin_count) {
		uint32_t mem_type = bo->tbo.mem.mem_type;

		if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
			return -EINVAL;

		bo->pin_count++;

		if (max_offset != 0) {
			u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;
			WARN_ON_ONCE(max_offset <
				     (amdgpu_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}

	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	/* force to pin into visible video ram */
	if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
		bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	amdgpu_bo_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		unsigned fpfn, lpfn;

		fpfn = min_offset >> PAGE_SHIFT;
		lpfn = max_offset >> PAGE_SHIFT;

		if (fpfn > bo->placements[i].fpfn)
			bo->placements[i].fpfn = fpfn;
		if (!bo->placements[i].lpfn ||
		    (lpfn && lpfn < bo->placements[i].lpfn))
			bo->placements[i].lpfn = lpfn;
		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p pin failed\n", bo);
		goto error;
	}

	bo->pin_count = 1;

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}

error:
	return r;
}

/**
 * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 *
 * A simple wrapper to amdgpu_bo_pin_restricted().
 * Provides a simpler API for buffers that do not have any strict restrictions
 * on where a buffer must be located.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
{
	return amdgpu_bo_pin_restricted(bo, domain, 0, 0);
}
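
/*
 * Illustrative sketch of the usual pin/unpin pattern (the BO must be
 * reserved around both calls; error handling trimmed):
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
 *	if (!r)
 *		gpu_addr = amdgpu_bo_gpu_offset(bo);
 *	amdgpu_bo_unreserve(bo);
 *
 *	// ... later, once the fixed GPU address is no longer needed ...
 *	amdgpu_bo_reserve(bo, true);
 *	amdgpu_bo_unpin(bo);
 *	amdgpu_bo_unreserve(bo);
 */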

/**
 * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unpinned
 *
 * Decreases the pin_count, and clears the flags if pin_count reaches 0.
 * Changes placement and pin size accordingly.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (!bo->pin_count) {
		dev_warn(adev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;

	amdgpu_bo_subtract_pin_size(bo);

	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r))
		dev_err(adev->dev, "%p validate failed for unpin\n", bo);

	return r;
}

/**
 * amdgpu_bo_evict_vram - evict VRAM buffers
 * @adev: amdgpu device object
 *
 * Evicts all VRAM buffers on the lru list of the memory type.
 * Mainly used for evicting vram at suspend time.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
#ifndef CONFIG_HIBERNATION
	if (adev->flags & AMD_IS_APU) {
		/* Useless to evict on IGP chips */
		return 0;
	}
#endif
	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
}

static const char *amdgpu_vram_names[] = {
	"UNKNOWN",
	"GDDR1",
	"DDR2",
	"GDDR3",
	"GDDR4",
	"GDDR5",
	"HBM",
	"DDR3",
	"DDR4",
};

/**
 * amdgpu_bo_init - initialize memory manager
 * @adev: amdgpu device object
 *
 * Calls amdgpu_ttm_init() to initialize amdgpu memory manager.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_init(struct amdgpu_device *adev)
{
	/* reserve PAT memory space to WC for VRAM */
	arch_io_reserve_memtype_wc(adev->gmc.aper_base,
				   adev->gmc.aper_size);

	/* Add an MTRR for the VRAM */
	adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
					       adev->gmc.aper_size);
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 adev->gmc.mc_vram_size >> 20,
		 (unsigned long long)adev->gmc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %s\n",
		 adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
	return amdgpu_ttm_init(adev);
}

/**
 * amdgpu_bo_late_init - late init
 * @adev: amdgpu device object
 *
 * Calls amdgpu_ttm_late_init() to free resources used earlier during
 * initialization.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_late_init(struct amdgpu_device *adev)
{
	amdgpu_ttm_late_init(adev);

	return 0;
}

/**
 * amdgpu_bo_fini - tear down memory manager
 * @adev: amdgpu device object
 *
 * Reverses amdgpu_bo_init() to tear down memory manager.
 */
void amdgpu_bo_fini(struct amdgpu_device *adev)
{
	amdgpu_ttm_fini(adev);
	arch_phys_wc_del(adev->gmc.vram_mtrr);
	arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
}

/**
 * amdgpu_bo_fbdev_mmap - mmap fbdev memory
 * @bo: &amdgpu_bo buffer object
 * @vma: vma as input from the fbdev mmap method
 *
 * Calls ttm_fbdev_mmap() to mmap fbdev memory if it is backed by a bo.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			 struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

/**
 * amdgpu_bo_set_tiling_flags - set tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: new flags
 *
 * Sets buffer object's tiling flags with the new one. Used by GEM ioctl or
 * kernel driver to set the tiling flags on a buffer.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (adev->family <= AMDGPU_FAMILY_CZ &&
	    AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
		return -EINVAL;

	bo->tiling_flags = tiling_flags;
	return 0;
}

/**
 * amdgpu_bo_get_tiling_flags - get tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: returned flags
 *
 * Gets buffer object's tiling flags. Used by GEM ioctl or kernel driver to
 * query the tiling flags on a buffer.
 */
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
}

/**
 * amdgpu_bo_set_metadata - set metadata
 * @bo: &amdgpu_bo buffer object
 * @metadata: new metadata
 * @metadata_size: size of the new metadata
 * @flags: flags of the new metadata
 *
 * Sets buffer object's metadata, its size and flags.
 * Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags)
{
	void *buffer;

	if (!metadata_size) {
		if (bo->metadata_size) {
			kfree(bo->metadata);
			bo->metadata = NULL;
			bo->metadata_size = 0;
		}
		return 0;
	}

	if (metadata == NULL)
		return -EINVAL;

	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	kfree(bo->metadata);
	bo->metadata_flags = flags;
	bo->metadata = buffer;
	bo->metadata_size = metadata_size;

	return 0;
}

/**
 * amdgpu_bo_get_metadata - get metadata
 * @bo: &amdgpu_bo buffer object
 * @buffer: returned metadata
 * @buffer_size: size of the buffer
 * @metadata_size: size of the returned metadata
 * @flags: flags of the returned metadata
 *
 * Gets buffer object's metadata, its size and flags. buffer_size shall not be
 * less than metadata_size.
 * Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags)
{
	if (!buffer && !metadata_size)
		return -EINVAL;

	if (buffer) {
		if (buffer_size < bo->metadata_size)
			return -EINVAL;

		if (bo->metadata_size)
			memcpy(buffer, bo->metadata, bo->metadata_size);
	}

	if (metadata_size)
		*metadata_size = bo->metadata_size;
	if (flags)
		*flags = bo->metadata_flags;

	return 0;
}

/**
 * amdgpu_bo_move_notify - notification about a memory move
 * @bo: pointer to a buffer object
 * @evict: if this move is evicting the buffer from the graphics address space
 * @new_mem: new information of the buffer object
 *
 * Marks the corresponding &amdgpu_bo buffer object as invalid, also performs
 * bookkeeping.
 * TTM driver callback which is called when ttm moves a buffer.
 */
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	struct ttm_mem_reg *old_mem = &bo->mem;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return;

	abo = ttm_to_amdgpu_bo(bo);
	amdgpu_vm_bo_invalidate(adev, abo, evict);

	amdgpu_bo_kunmap(abo);

	/* remember the eviction */
	if (evict)
		atomic64_inc(&adev->num_evictions);

	/* update statistics */
	if (!new_mem)
		return;

	/* move_notify is called before move happens */
	trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}

/**
 * amdgpu_bo_fault_reserve_notify - notification about a memory fault
 * @bo: pointer to a buffer object
 *
 * Notifies the driver we are taking a fault on this BO and have reserved it,
 * also performs bookkeeping.
 * TTM driver callback for dealing with vm faults.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo *abo;
	unsigned long offset, size;
	int r;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return 0;

	abo = ttm_to_amdgpu_bo(bo);

	/* Remember that this BO was accessed by the CPU */
	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= adev->gmc.visible_vram_size)
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (abo->pin_count > 0)
		return -EINVAL;

	/* hurrah the memory is not visible ! */
	atomic64_inc(&adev->num_vram_cpu_page_faults);
	amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
					AMDGPU_GEM_DOMAIN_GTT);

	/* Avoid costly evictions; only set GTT as a busy placement */
	abo->placement.num_busy_placement = 1;
	abo->placement.busy_placement = &abo->placements[1];

	r = ttm_bo_validate(bo, &abo->placement, &ctx);
	if (unlikely(r != 0))
		return r;

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    (offset + size) > adev->gmc.visible_vram_size)
		return -EINVAL;

	return 0;
}

/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared)
{
	struct reservation_object *resv = bo->tbo.resv;

	if (shared)
		reservation_object_add_shared_fence(resv, fence);
	else
		reservation_object_add_excl_fence(resv, fence);
}

/**
 * amdgpu_bo_gpu_offset - return GPU offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Note: object should either be pinned or reserved when calling this
 * function, it might be useful to add check for this for debugging.
 *
 * Returns:
 * current GPU offset of the object.
 */
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
	WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
		     !bo->pin_count);
	WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));

	return amdgpu_gmc_sign_extend(bo->tbo.offset);
}

/**
 * amdgpu_bo_get_preferred_pin_domain - get preferred domain for scanout
 * @adev: amdgpu device object
 * @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
 *
 * Returns:
 * Which of the allowed domains is preferred for pinning the BO for scanout.
 */
uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
					    uint32_t domain)
{
	if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
			domain = AMDGPU_GEM_DOMAIN_GTT;
	}
	return domain;
}