// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

static const struct ttm_place vram_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_VRAM,
	.flags = 0
};

static const struct ttm_place sys_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_SYSTEM,
	.flags = 0
};

static const struct ttm_place gmr_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = VMW_PL_GMR,
	.flags = 0
};

static const struct ttm_place mob_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = VMW_PL_MOB,
	.flags = 0
};

struct ttm_placement vmw_vram_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_placement_flags
};

static const struct ttm_place vram_gmr_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_VRAM,
		.flags = 0
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = 0
	}
};

static const struct ttm_place gmr_vram_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = 0
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_VRAM,
		.flags = 0
	}
};

struct ttm_placement vmw_vram_gmr_placement = {
	.num_placement = 2,
	.placement = vram_gmr_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_placement_flags
};

struct ttm_placement vmw_vram_sys_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_sys_placement = {
	.num_placement = 1,
	.placement = &sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

static const struct ttm_place evictable_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_SYSTEM,
		.flags = 0
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_VRAM,
		.flags = 0
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = 0
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_MOB,
		.flags = 0
	}
};

static const struct ttm_place nonfixed_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_SYSTEM,
		.flags = 0
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = 0
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_MOB,
		.flags = 0
	}
};

struct ttm_placement vmw_evictable_placement = {
	.num_placement = 4,
	.placement = evictable_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_srf_placement = {
	.num_placement = 1,
	.num_busy_placement = 2,
	.placement = &gmr_placement_flags,
	.busy_placement = gmr_vram_placement_flags
};

struct ttm_placement vmw_mob_placement = {
	.num_placement = 1,
	.num_busy_placement = 1,
	.placement = &mob_placement_flags,
	.busy_placement = &mob_placement_flags
};

struct ttm_placement vmw_nonfixed_placement = {
	.num_placement = 3,
	.placement = nonfixed_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};
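
/*
 * Illustrative sketch (not part of the original file): the ttm_placement
 * objects above are consumed by TTM validation, roughly along the lines of
 *
 *	struct ttm_operation_ctx ctx = { .interruptible = true,
 *					 .no_wait_gpu = false };
 *	int ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
 *
 * where bo stands in for a reserved struct ttm_buffer_object.  The actual
 * call sites live elsewhere in the driver; this only shows how a placement
 * list is typically used.
 */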

struct vmw_ttm_tt {
	struct ttm_tt dma_ttm;
	struct vmw_private *dev_priv;
	int gmr_id;
	struct vmw_mob *mob;
	int mem_type;
	struct sg_table sgt;
	struct vmw_sg_table vsgt;
	uint64_t sg_alloc_size;
	bool mapped;
	bool bound;
};

const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);

/**
 * __vmw_piter_non_sg_next: Helper functions to advance
 * a struct vmw_piter iterator.
 *
 * @viter: Pointer to the iterator.
 *
 * These functions return false if past the end of the list,
 * true otherwise. Functions are selected depending on the current
 * DMA mapping mode.
 */
static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
{
	return ++(viter->i) < viter->num_pages;
}

static bool __vmw_piter_sg_next(struct vmw_piter *viter)
{
	bool ret = __vmw_piter_non_sg_next(viter);

	return __sg_page_iter_dma_next(&viter->iter) && ret;
}


/**
 * __vmw_piter_non_sg_page: Helper functions to return a pointer
 * to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return a pointer to the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
{
	return viter->pages[viter->i];
}

/**
 * __vmw_piter_phys_addr: Helper functions to return the DMA
 * address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return the DMA address of the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
{
	return page_to_phys(viter->pages[viter->i]);
}

static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->addrs[viter->i];
}

static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
{
	return sg_page_iter_dma_address(&viter->iter);
}

/**
 * vmw_piter_start - Initialize a struct vmw_piter.
 *
 * @viter: Pointer to the iterator to initialize
 * @vsgt: Pointer to a struct vmw_sg_table to initialize from
 * @p_offset: Pointer offset used to update current array position
 *
 * Note that we're following the convention of __sg_page_iter_start, so that
 * the iterator doesn't point to a valid page after initialization; it has
 * to be advanced one step first.
 */
void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
		     unsigned long p_offset)
{
	viter->i = p_offset - 1;
	viter->num_pages = vsgt->num_pages;
	viter->page = &__vmw_piter_non_sg_page;
	viter->pages = vsgt->pages;
	switch (vsgt->mode) {
	case vmw_dma_phys:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_phys_addr;
		break;
	case vmw_dma_alloc_coherent:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_dma_addr;
		viter->addrs = vsgt->addrs;
		break;
	case vmw_dma_map_populate:
	case vmw_dma_map_bind:
		viter->next = &__vmw_piter_sg_next;
		viter->dma_address = &__vmw_piter_sg_addr;
		__sg_page_iter_start(&viter->iter.base, vsgt->sgt->sgl,
				     vsgt->sgt->orig_nents, p_offset);
		break;
	default:
		BUG();
	}
}
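
/*
 * Illustrative sketch (not part of the original file): the iterator is
 * consumed with the same pattern used by vmw_ttm_map_dma() further down:
 *
 *	struct vmw_piter iter;
 *
 *	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&iter);
 *		// hand addr to the device / binding code
 *	}
 *
 * where vsgt stands in for an already mapped struct vmw_sg_table.
 */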

/**
 * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
 * TTM pages
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_backend
 *
 * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
 */
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->drm.dev;

	dma_unmap_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
	vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
}

/**
 * vmw_ttm_map_for_dma - map TTM pages to get device addresses
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_backend
 *
 * This function is used to get device addresses from the kernel DMA layer.
 * However, it's violating the DMA API in that when this operation has been
 * performed, it's illegal for the CPU to write to the pages without first
 * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
 * therefore only legal to call this function if we know that the function
 * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
 * a CPU write buffer flush.
 */
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->drm.dev;

	return dma_map_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
}

/**
 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Select the correct function for and make sure the TTM pages are
 * visible to the device. Allocate storage for the device mappings.
 * If a mapping has already been performed, indicated by the storage
 * pointer being non NULL, the function returns success.
 */
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false
	};
	struct vmw_piter iter;
	dma_addr_t old;
	int ret = 0;
	static size_t sgl_size;
	static size_t sgt_size;
	struct scatterlist *sg;

	if (vmw_tt->mapped)
		return 0;

	vsgt->mode = dev_priv->map_mode;
	vsgt->pages = vmw_tt->dma_ttm.pages;
	vsgt->num_pages = vmw_tt->dma_ttm.num_pages;
	vsgt->addrs = vmw_tt->dma_ttm.dma_address;
	vsgt->sgt = &vmw_tt->sgt;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		if (unlikely(!sgl_size)) {
			sgl_size = ttm_round_pot(sizeof(struct scatterlist));
			sgt_size = ttm_round_pot(sizeof(struct sg_table));
		}
		vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
		ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, &ctx);
		if (unlikely(ret != 0))
			return ret;

		sg = __sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
				vsgt->num_pages, 0,
				(unsigned long) vsgt->num_pages << PAGE_SHIFT,
				dma_get_max_seg_size(dev_priv->drm.dev),
				NULL, 0, GFP_KERNEL);
		if (IS_ERR(sg)) {
			ret = PTR_ERR(sg);
			goto out_sg_alloc_fail;
		}

		if (vsgt->num_pages > vmw_tt->sgt.orig_nents) {
			uint64_t over_alloc =
				sgl_size * (vsgt->num_pages -
					    vmw_tt->sgt.orig_nents);

			ttm_mem_global_free(glob, over_alloc);
			vmw_tt->sg_alloc_size -= over_alloc;
		}

		ret = vmw_ttm_map_for_dma(vmw_tt);
		if (unlikely(ret != 0))
			goto out_map_fail;

		break;
	default:
		break;
	}

	old = ~((dma_addr_t) 0);
	vmw_tt->vsgt.num_regions = 0;
	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
		dma_addr_t cur = vmw_piter_dma_addr(&iter);

		if (cur != old + PAGE_SIZE)
			vmw_tt->vsgt.num_regions++;
		old = cur;
	}

	vmw_tt->mapped = true;
	return 0;

out_map_fail:
	sg_free_table(vmw_tt->vsgt.sgt);
	vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
	ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
	return ret;
}

/**
 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Tear down any previously set up device DMA mappings and free
 * any storage space allocated for them. If there are no mappings set up,
 * this function is a NOP.
 */
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;

	if (!vmw_tt->vsgt.sgt)
		return;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		vmw_ttm_unmap_from_dma(vmw_tt);
		sg_free_table(vmw_tt->vsgt.sgt);
		vmw_tt->vsgt.sgt = NULL;
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_tt->sg_alloc_size);
		break;
	default:
		break;
	}
	vmw_tt->mapped = false;
}

/**
 * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
 * TTM buffer object
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Returns a pointer to a struct vmw_sg_table object. The object should
 * not be freed after use.
 * Note that for the device addresses to be valid, the buffer object must
 * either be reserved or pinned.
 */
const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);

	return &vmw_tt->vsgt;
}
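
/*
 * Illustrative sketch (not part of the original file): a caller that needs
 * the device addresses of a reserved or pinned buffer object might do
 *
 *	const struct vmw_sg_table *vsgt = vmw_bo_sg_table(bo);
 *	struct vmw_piter iter;
 *
 *	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);)
 *		use(vmw_piter_dma_addr(&iter));
 *
 * with bo and use() being placeholders for the caller's context.
 */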

static int vmw_ttm_bind(struct ttm_device *bdev,
			struct ttm_tt *ttm, struct ttm_resource *bo_mem)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm);
	int ret = 0;

	if (!bo_mem)
		return -EINVAL;

	if (vmw_be->bound)
		return 0;

	ret = vmw_ttm_map_dma(vmw_be);
	if (unlikely(ret != 0))
		return ret;

	vmw_be->gmr_id = bo_mem->start;
	vmw_be->mem_type = bo_mem->mem_type;

	switch (bo_mem->mem_type) {
	case VMW_PL_GMR:
		ret = vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
				   ttm->num_pages, vmw_be->gmr_id);
		break;
	case VMW_PL_MOB:
		if (unlikely(vmw_be->mob == NULL)) {
			vmw_be->mob =
				vmw_mob_create(ttm->num_pages);
			if (unlikely(vmw_be->mob == NULL))
				return -ENOMEM;
		}

		ret = vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
				   &vmw_be->vsgt, ttm->num_pages,
				   vmw_be->gmr_id);
		break;
	default:
		BUG();
	}
	vmw_be->bound = true;
	return ret;
}

static void vmw_ttm_unbind(struct ttm_device *bdev,
			   struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm);

	if (!vmw_be->bound)
		return;

	switch (vmw_be->mem_type) {
	case VMW_PL_GMR:
		vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
		break;
	case VMW_PL_MOB:
		vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
		break;
	default:
		BUG();
	}

	if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
		vmw_ttm_unmap_dma(vmw_be);
	vmw_be->bound = false;
}

static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm);

	vmw_ttm_unbind(bdev, ttm);
	ttm_tt_destroy_common(bdev, ttm);
	vmw_ttm_unmap_dma(vmw_be);
	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ttm_tt_fini(&vmw_be->dma_ttm);
	else
		ttm_tt_fini(ttm);

	if (vmw_be->mob)
		vmw_mob_destroy(vmw_be->mob);

	kfree(vmw_be);
}

static int vmw_ttm_populate(struct ttm_device *bdev,
			    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	unsigned int i;
	int ret;

	/* TODO: maybe completely drop this ? */
	if (ttm_tt_is_populated(ttm))
		return 0;

	ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
	if (ret)
		return ret;

	for (i = 0; i < ttm->num_pages; ++i) {
		ret = ttm_mem_global_alloc_page(&ttm_mem_glob, ttm->pages[i],
						PAGE_SIZE, ctx);
		if (ret)
			goto error;
	}
	return 0;

error:
	while (i--)
		ttm_mem_global_free_page(&ttm_mem_glob, ttm->pages[i],
					 PAGE_SIZE);
	ttm_pool_free(&bdev->pool, ttm);
	return ret;
}

static void vmw_ttm_unpopulate(struct ttm_device *bdev,
			       struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
						 dma_ttm);
	unsigned int i;

	if (vmw_tt->mob) {
		vmw_mob_destroy(vmw_tt->mob);
		vmw_tt->mob = NULL;
	}

	vmw_ttm_unmap_dma(vmw_tt);

	for (i = 0; i < ttm->num_pages; ++i)
		ttm_mem_global_free_page(&ttm_mem_glob, ttm->pages[i],
					 PAGE_SIZE);

	ttm_pool_free(&bdev->pool, ttm);
}

static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
					uint32_t page_flags)
{
	struct vmw_ttm_tt *vmw_be;
	int ret;

	vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
	if (!vmw_be)
		return NULL;

	vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev);
	vmw_be->mob = NULL;

	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ret = ttm_sg_tt_init(&vmw_be->dma_ttm, bo, page_flags,
				     ttm_cached);
	else
		ret = ttm_tt_init(&vmw_be->dma_ttm, bo, page_flags,
				  ttm_cached);
	if (unlikely(ret != 0))
		goto out_no_init;

	return &vmw_be->dma_ttm;
out_no_init:
	kfree(vmw_be);
	return NULL;
}

static void vmw_evict_flags(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement)
{
	*placement = vmw_sys_placement;
}

static int vmw_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = (mem->start << PAGE_SHIFT) +
			dev_priv->vram_start;
		mem->bus.is_iomem = true;
		mem->bus.caching = ttm_cached;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/**
 * vmw_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @old_mem: The old memory where we move from
 * @new_mem: The struct ttm_resource indicating to what memory
 *       region the move is taking place.
 *
 * Calls move_notify for all subsystems needing it.
 * (currently only resources).
 */
static void vmw_move_notify(struct ttm_buffer_object *bo,
			    struct ttm_resource *old_mem,
			    struct ttm_resource *new_mem)
{
	vmw_bo_move_notify(bo, new_mem);
	vmw_query_move_notify(bo, old_mem, new_mem);
}


/**
 * vmw_swap_notify - TTM swap_notify callback
 *
 * @bo: The TTM buffer object about to be swapped out.
 */
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
	vmw_bo_swap_notify(bo);
	(void) ttm_bo_wait(bo, false, false);
}

static int vmw_move(struct ttm_buffer_object *bo,
		    bool evict,
		    struct ttm_operation_ctx *ctx,
		    struct ttm_resource *new_mem,
		    struct ttm_place *hop)
{
	struct ttm_resource_manager *old_man = ttm_manager_type(bo->bdev, bo->resource->mem_type);
	struct ttm_resource_manager *new_man = ttm_manager_type(bo->bdev, new_mem->mem_type);
	int ret;

	if (new_man->use_tt && new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = vmw_ttm_bind(bo->bdev, bo->ttm, new_mem);
		if (ret)
			return ret;
	}

	vmw_move_notify(bo, bo->resource, new_mem);

	if (old_man->use_tt && new_man->use_tt) {
		if (bo->resource->mem_type == TTM_PL_SYSTEM) {
			ttm_bo_move_null(bo, new_mem);
			return 0;
		}
		ret = ttm_bo_wait_ctx(bo, ctx);
		if (ret)
			goto fail;

		vmw_ttm_unbind(bo->bdev, bo->ttm);
		ttm_resource_free(bo, &bo->resource);
		ttm_bo_assign_mem(bo, new_mem);
		return 0;
	} else {
		ret = ttm_bo_move_memcpy(bo, ctx, new_mem);
		if (ret)
			goto fail;
	}
	return 0;
fail:
	vmw_move_notify(bo, new_mem, bo->resource);
	return ret;
}

struct ttm_device_funcs vmw_bo_driver = {
	.ttm_tt_create = &vmw_ttm_tt_create,
	.ttm_tt_populate = &vmw_ttm_populate,
	.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
	.ttm_tt_destroy = &vmw_ttm_destroy,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = vmw_evict_flags,
	.move = vmw_move,
	.swap_notify = vmw_swap_notify,
	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
};

int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
			       unsigned long bo_size,
			       struct ttm_buffer_object **bo_p)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_buffer_object *bo;
	int ret;

	ret = vmw_bo_create_kernel(dev_priv, bo_size,
				   &vmw_sys_placement,
				   &bo);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, false, true, NULL);
	BUG_ON(ret != 0);
	ret = vmw_ttm_populate(bo->bdev, bo->ttm, &ctx);
	if (likely(ret == 0)) {
		struct vmw_ttm_tt *vmw_tt =
			container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);
		ret = vmw_ttm_map_dma(vmw_tt);
	}

	ttm_bo_unreserve(bo);

	if (likely(ret == 0))
		*bo_p = bo;

	return ret;
}
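
/*
 * Illustrative sketch (not part of the original file): a typical caller of
 * the helper above would look roughly like
 *
 *	struct ttm_buffer_object *bo;
 *	int ret = vmw_bo_create_and_populate(dev_priv, PAGE_SIZE, &bo);
 *
 *	if (ret)
 *		return ret;
 *	// ... use the populated, DMA-mapped bo, then drop the reference
 *	// (e.g. with ttm_bo_put()) when it is no longer needed.
 *
 * dev_priv and the PAGE_SIZE-sized allocation are placeholders for the
 * caller's context.
 */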