/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include "amdgpu.h"

/*
 * GART
 * The GART (Graphics Aperture Remapping Table) is an aperture
 * in the GPU's address space.  System pages can be mapped into
 * the aperture and look like contiguous pages from the GPU's
 * perspective.  A page table maps the pages in the aperture
 * to the actual backing pages in system memory.
 *
 * Radeon GPUs support both an internal GART, as described above,
 * and AGP.  AGP works similarly, but the GART table is configured
 * and maintained by the northbridge rather than the driver.
 * Radeon hw has a separate AGP aperture that is programmed to
 * point to the AGP aperture provided by the northbridge and the
 * requests are passed through to the northbridge aperture.
 * Both AGP and internal GART can be used at the same time, however
 * that is not currently supported by the driver.
 *
 * This file handles the common internal GART management.
 */

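/*
 * Note on page sizes: the GART table maps memory in units of
 * AMDGPU_GPU_PAGE_SIZE (4KB) entries, while the CPU may use larger
 * pages.  Each system page therefore covers
 * PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE consecutive GART entries, which is
 * why amdgpu_gart_bind() and amdgpu_gart_unbind() below walk an inner
 * loop of that many entries per page.  As an illustration, on a kernel
 * built with 64KB pages each dma_addr_t fills 16 GART entries.
 */
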
/*
 * Common GART table functions.
 */

/**
 * amdgpu_gart_table_ram_alloc - allocate system ram for gart page table
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate system memory for GART page table
 * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
 * gart table to be in system memory.
 * Returns 0 for success, -ENOMEM for failure.
 */
int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev)
{
	void *ptr;

	ptr = pci_alloc_consistent(adev->pdev, adev->gart.table_size,
				   &adev->gart.table_addr);
	if (ptr == NULL) {
		return -ENOMEM;
	}
#ifdef CONFIG_X86
	/*
	 * Disabled branch carried over from radeon: only old ASICs with a
	 * system-memory GART table needed the table mapped uncached, and
	 * amdgpu does not support any of them.
	 */
	if (0) {
		set_memory_uc((unsigned long)ptr,
			      adev->gart.table_size >> PAGE_SHIFT);
	}
#endif
	adev->gart.ptr = ptr;
	memset((void *)adev->gart.ptr, 0, adev->gart.table_size);
	return 0;
}

/**
 * amdgpu_gart_table_ram_free - free system ram for gart page table
 *
 * @adev: amdgpu_device pointer
 *
 * Free system memory for GART page table
 * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
 * gart table to be in system memory.
 */
void amdgpu_gart_table_ram_free(struct amdgpu_device *adev)
{
	if (adev->gart.ptr == NULL) {
		return;
	}
#ifdef CONFIG_X86
	if (0) {
		set_memory_wb((unsigned long)adev->gart.ptr,
			      adev->gart.table_size >> PAGE_SHIFT);
	}
#endif
	pci_free_consistent(adev->pdev, adev->gart.table_size,
			    (void *)adev->gart.ptr,
			    adev->gart.table_addr);
	adev->gart.ptr = NULL;
	adev->gart.table_addr = 0;
}

/**
 * amdgpu_gart_table_vram_alloc - allocate vram for gart page table
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate video memory for GART page table
 * (pcie r4xx, r5xx+).  These asics require the
 * gart table to be in video memory.
 * Returns 0 for success, error for failure.
 */
int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj == NULL) {
		/*
		 * The table must be physically contiguous in VRAM and
		 * CPU accessible, since the driver writes the PTEs
		 * through the kernel mapping set up by
		 * amdgpu_gart_table_vram_pin().
		 */
		r = amdgpu_bo_create(adev, adev->gart.table_size,
				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
				     NULL, NULL, &adev->gart.robj);
		if (r) {
			return r;
		}
	}
	return 0;
}

/**
 * amdgpu_gart_table_vram_pin - pin gart page table in vram
 *
 * @adev: amdgpu_device pointer
 *
 * Pin the GART page table in vram so it will not be moved
 * by the memory manager (pcie r4xx, r5xx+).  These asics require the
 * gart table to be in video memory.
 * Returns 0 for success, error for failure.
 */
int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
{
	uint64_t gpu_addr;
	int r;

	r = amdgpu_bo_reserve(adev->gart.robj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_pin(adev->gart.robj,
			  AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->gart.robj);
		return r;
	}
	r = amdgpu_bo_kmap(adev->gart.robj, &adev->gart.ptr);
	if (r)
		amdgpu_bo_unpin(adev->gart.robj);
	amdgpu_bo_unreserve(adev->gart.robj);
	adev->gart.table_addr = gpu_addr;
	return r;
}

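/*
 * Typical use of the helpers above (sketch, based on the per-ASIC GMC
 * code such as gmc_v8_0.c): amdgpu_gart_init() and
 * amdgpu_gart_table_vram_alloc() are called when the GART is set up,
 * then amdgpu_gart_table_vram_pin() when it is enabled, before the
 * GART registers are programmed and adev->gart.ready is set.
 */
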
/**
 * amdgpu_gart_table_vram_unpin - unpin gart page table in vram
 *
 * @adev: amdgpu_device pointer
 *
 * Unpin the GART page table in vram (pcie r4xx, r5xx+).
 * These asics require the gart table to be in video memory.
 */
void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj == NULL) {
		return;
	}
	r = amdgpu_bo_reserve(adev->gart.robj, true);
	if (likely(r == 0)) {
		amdgpu_bo_kunmap(adev->gart.robj);
		amdgpu_bo_unpin(adev->gart.robj);
		amdgpu_bo_unreserve(adev->gart.robj);
		adev->gart.ptr = NULL;
	}
}

/**
 * amdgpu_gart_table_vram_free - free gart page table vram
 *
 * @adev: amdgpu_device pointer
 *
 * Free the video memory used for the GART page table
 * (pcie r4xx, r5xx+).  These asics require the gart table to
 * be in video memory.
 */
void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
{
	if (adev->gart.robj == NULL) {
		return;
	}
	amdgpu_bo_unref(&adev->gart.robj);
}

/*
 * Common gart functions.
 */

/**
 * amdgpu_gart_unbind - unbind pages from the gart page table
 *
 * @adev: amdgpu_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to unbind
 *
 * Unbinds the requested pages from the gart page table and
 * replaces them with the dummy page (all asics).
 * Returns 0 for success, -EINVAL for failure.
 */
int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
			int pages)
{
	unsigned t;
	unsigned p;
	int i, j;
	u64 page_base;
	/* Starting from VEGA10, system bit must be 0 to mean invalid. */
	uint64_t flags = 0;

	if (!adev->gart.ready) {
		WARN(1, "trying to unbind memory from uninitialized GART !\n");
		return -EINVAL;
	}

	t = offset / AMDGPU_GPU_PAGE_SIZE;
	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
	for (i = 0; i < pages; i++, p++) {
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
		adev->gart.pages[p] = NULL;
#endif
		page_base = adev->dummy_page.addr;
		if (!adev->gart.ptr)
			continue;

		for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
			amdgpu_gart_set_pte_pde(adev, adev->gart.ptr,
						t, page_base, flags);
			page_base += AMDGPU_GPU_PAGE_SIZE;
		}
	}
	mb();
	amdgpu_gart_flush_gpu_tlb(adev, 0);
	return 0;
}

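/*
 * Note: unbound entries are pointed at adev->dummy_page rather than
 * simply cleared, so that a stray GPU access through the GART lands on
 * a harmless scratch page instead of arbitrary system memory.
 */
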
/**
 * amdgpu_gart_bind - bind pages into the gart page table
 *
 * @adev: amdgpu_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to bind
 * @pagelist: pages to bind
 * @dma_addr: DMA addresses of pages
 * @flags: page entry flags
 *
 * Binds the requested pages to the gart page table
 * (all asics).
 * Returns 0 for success, -EINVAL for failure.
 */
int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
		     int pages, struct page **pagelist, dma_addr_t *dma_addr,
		     uint64_t flags)
{
	unsigned t;
	unsigned p;
	uint64_t page_base;
	int i, j;

	if (!adev->gart.ready) {
		WARN(1, "trying to bind memory to uninitialized GART !\n");
		return -EINVAL;
	}

	t = offset / AMDGPU_GPU_PAGE_SIZE;
	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);

	for (i = 0; i < pages; i++, p++) {
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
		adev->gart.pages[p] = pagelist[i];
#endif
		if (adev->gart.ptr) {
			page_base = dma_addr[i];
			for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
				amdgpu_gart_set_pte_pde(adev, adev->gart.ptr, t, page_base, flags);
				page_base += AMDGPU_GPU_PAGE_SIZE;
			}
		}
	}
	mb();
	amdgpu_gart_flush_gpu_tlb(adev, 0);
	return 0;
}

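/*
 * amdgpu_gart_bind() and amdgpu_gart_unbind() are driven primarily by
 * the TTM GTT backend in amdgpu_ttm.c, which binds a buffer's pages
 * when it is placed in the GTT domain and unbinds them when it is
 * evicted or destroyed.
 */
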
/**
 * amdgpu_gart_init - init the driver info for managing the gart
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the dummy page and init the gart driver info (all asics).
 * Returns 0 for success, error for failure.
 */
int amdgpu_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->dummy_page.page)
		return 0;

	/* We need PAGE_SIZE >= AMDGPU_GPU_PAGE_SIZE */
	if (PAGE_SIZE < AMDGPU_GPU_PAGE_SIZE) {
		DRM_ERROR("Page size is smaller than GPU page size!\n");
		return -EINVAL;
	}
	r = amdgpu_dummy_page_init(adev);
	if (r)
		return r;
	/* Compute table size */
	adev->gart.num_cpu_pages = adev->mc.gtt_size / PAGE_SIZE;
	adev->gart.num_gpu_pages = adev->mc.gtt_size / AMDGPU_GPU_PAGE_SIZE;
	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
		 adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);

#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	/* Allocate pages table */
	adev->gart.pages = vzalloc(sizeof(void *) * adev->gart.num_cpu_pages);
	if (adev->gart.pages == NULL) {
		amdgpu_gart_fini(adev);
		return -ENOMEM;
	}
#endif
	return 0;
}

/**
 * amdgpu_gart_fini - tear down the driver info for managing the gart
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the gart driver info and free the dummy page (all asics).
 */
void amdgpu_gart_fini(struct amdgpu_device *adev)
{
	if (adev->gart.ready) {
		/* unbind pages */
		amdgpu_gart_unbind(adev, 0, adev->gart.num_cpu_pages);
	}
	adev->gart.ready = false;
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	vfree(adev->gart.pages);
	adev->gart.pages = NULL;
#endif
	amdgpu_dummy_page_fini(adev);
}