/**************************************************************************
 *
 * Copyright © 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>

static struct ttm_place vram_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
};

static struct ttm_place vram_ne_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static struct ttm_place sys_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
};

static struct ttm_place sys_ne_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static struct ttm_place gmr_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};

static struct ttm_place gmr_ne_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static struct ttm_place mob_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
};

static struct ttm_place mob_ne_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

struct ttm_placement vmw_vram_placement = {
        .num_placement = 1,
        .placement = &vram_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &vram_placement_flags
};

static struct ttm_place vram_gmr_placement_flags[] = {
        {
                .fpfn = 0,
                .lpfn = 0,
                .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
        }
};

static struct ttm_place gmr_vram_placement_flags[] = {
        {
                .fpfn = 0,
                .lpfn = 0,
                .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
        }
};

struct ttm_placement vmw_vram_gmr_placement = {
        .num_placement = 2,
        .placement = vram_gmr_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &gmr_placement_flags
};

static struct ttm_place vram_gmr_ne_placement_flags[] = {
        {
                .fpfn = 0,
                .lpfn = 0,
                .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED |
                         TTM_PL_FLAG_NO_EVICT
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED |
                         TTM_PL_FLAG_NO_EVICT
        }
};

struct ttm_placement vmw_vram_gmr_ne_placement = {
        .num_placement = 2,
        .placement = vram_gmr_ne_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &gmr_ne_placement_flags
};

struct ttm_placement vmw_vram_sys_placement = {
        .num_placement = 1,
        .placement = &vram_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_vram_ne_placement = {
        .num_placement = 1,
        .placement = &vram_ne_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &vram_ne_placement_flags
};

struct ttm_placement vmw_sys_placement = {
        .num_placement = 1,
        .placement = &sys_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_sys_ne_placement = {
        .num_placement = 1,
        .placement = &sys_ne_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_ne_placement_flags
};

static struct ttm_place evictable_placement_flags[] = {
        {
                .fpfn = 0,
                .lpfn = 0,
                .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
        }
};

struct ttm_placement vmw_evictable_placement = {
        .num_placement = 4,
        .placement = evictable_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_srf_placement = {
        .num_placement = 1,
        .num_busy_placement = 2,
        .placement = &gmr_placement_flags,
        .busy_placement = gmr_vram_placement_flags
};

struct ttm_placement vmw_mob_placement = {
        .num_placement = 1,
        .num_busy_placement = 1,
        .placement = &mob_placement_flags,
        .busy_placement = &mob_placement_flags
};

struct ttm_placement vmw_mob_ne_placement = {
        .num_placement = 1,
        .num_busy_placement = 1,
        .placement = &mob_ne_placement_flags,
        .busy_placement = &mob_ne_placement_flags
};
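
/*
 * struct vmw_ttm_tt - per-TTM backend state for vmwgfx. Field summary below
 * is inferred from how the fields are used in this file:
 *
 * @dma_ttm: Base TTM, wrapped as a struct ttm_dma_tt so coherent DMA
 * allocations can be used when map_mode is vmw_dma_alloc_coherent.
 * @dev_priv: Pointer to the device private structure.
 * @gmr_id: Id of the GMR or MOB slot the TTM is currently bound to.
 * @mob: MOB metadata when bound to the VMW_PL_MOB type, NULL otherwise.
 * @mem_type: Memory type the TTM is currently bound to.
 * @sgt: Scatter-gather table backing the DMA mappings.
 * @vsgt: The vmwgfx view of the backing pages and their DMA addresses.
 * @sg_alloc_size: Size accounted to the TTM memory global for @sgt.
 * @mapped: Whether DMA mappings have been set up.
 */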
struct vmw_ttm_tt {
        struct ttm_dma_tt dma_ttm;
        struct vmw_private *dev_priv;
        int gmr_id;
        struct vmw_mob *mob;
        int mem_type;
        struct sg_table sgt;
        struct vmw_sg_table vsgt;
        uint64_t sg_alloc_size;
        bool mapped;
};

const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);

/**
 * Helper functions to advance a struct vmw_piter iterator.
 *
 * @viter: Pointer to the iterator.
 *
 * These functions return false if past the end of the list,
 * true otherwise. Functions are selected depending on the current
 * DMA mapping mode.
 */
static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
{
        return ++(viter->i) < viter->num_pages;
}

static bool __vmw_piter_sg_next(struct vmw_piter *viter)
{
        return __sg_page_iter_next(&viter->iter);
}

/**
 * Helper functions to return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return a pointer to the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
{
        return viter->pages[viter->i];
}

static struct page *__vmw_piter_sg_page(struct vmw_piter *viter)
{
        return sg_page_iter_page(&viter->iter);
}

/**
 * Helper functions to return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return the DMA address of the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
{
        return page_to_phys(viter->pages[viter->i]);
}

static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
{
        return viter->addrs[viter->i];
}

static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
{
        return sg_page_iter_dma_address(&viter->iter);
}

/**
 * vmw_piter_start - Initialize a struct vmw_piter.
 *
 * @viter: Pointer to the iterator to initialize
 * @vsgt: Pointer to a struct vmw_sg_table to initialize from
 * @p_offset: Page offset into @vsgt at which iteration should start
 *
 * Note that we're following the convention of __sg_page_iter_start, so that
 * the iterator doesn't point to a valid page after initialization; it has
 * to be advanced one step first.
 */
void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
                     unsigned long p_offset)
{
        viter->i = p_offset - 1;
        viter->num_pages = vsgt->num_pages;
        switch (vsgt->mode) {
        case vmw_dma_phys:
                viter->next = &__vmw_piter_non_sg_next;
                viter->dma_address = &__vmw_piter_phys_addr;
                viter->page = &__vmw_piter_non_sg_page;
                viter->pages = vsgt->pages;
                break;
        case vmw_dma_alloc_coherent:
                viter->next = &__vmw_piter_non_sg_next;
                viter->dma_address = &__vmw_piter_dma_addr;
                viter->page = &__vmw_piter_non_sg_page;
                viter->addrs = vsgt->addrs;
                viter->pages = vsgt->pages;
                break;
        case vmw_dma_map_populate:
        case vmw_dma_map_bind:
                viter->next = &__vmw_piter_sg_next;
                viter->dma_address = &__vmw_piter_sg_addr;
                viter->page = &__vmw_piter_sg_page;
                __sg_page_iter_start(&viter->iter, vsgt->sgt->sgl,
                                     vsgt->sgt->orig_nents, p_offset);
                break;
        default:
                BUG();
        }
}
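
/*
 * Typical use of the iterator (a sketch; it mirrors the region-counting
 * loop in vmw_ttm_map_dma() below):
 *
 *	struct vmw_piter iter;
 *
 *	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&iter);
 *
 *		(use addr for the current page here)
 *	}
 */
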
/**
 * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
 * TTM pages
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
 */
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{
        struct device *dev = vmw_tt->dev_priv->dev->dev;

        dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
                     DMA_BIDIRECTIONAL);
        vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
}

/**
 * vmw_ttm_map_for_dma - map TTM pages to get device addresses
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * This function is used to get device addresses from the kernel DMA layer.
 * However, it's violating the DMA API in that when this operation has been
 * performed, it's illegal for the CPU to write to the pages without first
 * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
 * therefore only legal to call this function if we know that the function
 * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
 * a CPU write buffer flush.
 */
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{
        struct device *dev = vmw_tt->dev_priv->dev->dev;
        int ret;

        ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
                         DMA_BIDIRECTIONAL);
        if (unlikely(ret == 0))
                return -ENOMEM;

        vmw_tt->sgt.nents = ret;

        return 0;
}

/**
 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Select the correct mapping function and make sure the TTM pages are
 * visible to the device. Allocate storage for the device mappings.
 * If a mapping has already been performed, indicated by the storage
 * pointer being non NULL, the function returns success.
 */
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
        struct vmw_private *dev_priv = vmw_tt->dev_priv;
        struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
        struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
        struct vmw_piter iter;
        dma_addr_t old;
        int ret = 0;
        static size_t sgl_size;
        static size_t sgt_size;

        if (vmw_tt->mapped)
                return 0;

        vsgt->mode = dev_priv->map_mode;
        vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
        vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
        vsgt->addrs = vmw_tt->dma_ttm.dma_address;
        vsgt->sgt = &vmw_tt->sgt;

        switch (dev_priv->map_mode) {
        case vmw_dma_map_bind:
        case vmw_dma_map_populate:
                if (unlikely(!sgl_size)) {
                        sgl_size = ttm_round_pot(sizeof(struct scatterlist));
                        sgt_size = ttm_round_pot(sizeof(struct sg_table));
                }
                vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
                ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, false,
                                           true);
                if (unlikely(ret != 0))
                        return ret;

                ret = sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
                                                vsgt->num_pages, 0,
                                                (unsigned long)
                                                vsgt->num_pages << PAGE_SHIFT,
                                                GFP_KERNEL);
                if (unlikely(ret != 0))
                        goto out_sg_alloc_fail;

                if (vsgt->num_pages > vmw_tt->sgt.nents) {
                        uint64_t over_alloc =
                                sgl_size * (vsgt->num_pages -
                                            vmw_tt->sgt.nents);

                        ttm_mem_global_free(glob, over_alloc);
                        vmw_tt->sg_alloc_size -= over_alloc;
                }

                ret = vmw_ttm_map_for_dma(vmw_tt);
                if (unlikely(ret != 0))
                        goto out_map_fail;

                break;
        default:
                break;
        }

        /* Count the number of contiguous DMA regions. */
        old = ~((dma_addr_t) 0);
        vmw_tt->vsgt.num_regions = 0;
        for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
                dma_addr_t cur = vmw_piter_dma_addr(&iter);

                if (cur != old + PAGE_SIZE)
                        vmw_tt->vsgt.num_regions++;
                old = cur;
        }

        vmw_tt->mapped = true;
        return 0;

out_map_fail:
        sg_free_table(vmw_tt->vsgt.sgt);
        vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
        ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
        return ret;
}

/**
 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Tear down any previously set up device DMA mappings and free
 * any storage space allocated for them. If there are no mappings set up,
 * this function is a NOP.
 */
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
{
        struct vmw_private *dev_priv = vmw_tt->dev_priv;

        if (!vmw_tt->vsgt.sgt)
                return;

        switch (dev_priv->map_mode) {
        case vmw_dma_map_bind:
        case vmw_dma_map_populate:
                vmw_ttm_unmap_from_dma(vmw_tt);
                sg_free_table(vmw_tt->vsgt.sgt);
                vmw_tt->vsgt.sgt = NULL;
                ttm_mem_global_free(vmw_mem_glob(dev_priv),
                                    vmw_tt->sg_alloc_size);
                break;
        default:
                break;
        }
        vmw_tt->mapped = false;
}

/**
 * vmw_bo_map_dma - Make sure buffer object pages are visible to the device
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Wrapper around vmw_ttm_map_dma that takes a TTM buffer object pointer
 * instead of a pointer to a struct vmw_ttm_tt as argument.
 * Note that the buffer object must be either pinned or reserved before
 * calling this function.
 */
int vmw_bo_map_dma(struct ttm_buffer_object *bo)
{
        struct vmw_ttm_tt *vmw_tt =
                container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

        return vmw_ttm_map_dma(vmw_tt);
}

/**
 * vmw_bo_unmap_dma - Tear down the device DMA mappings of buffer object pages
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Wrapper around vmw_ttm_unmap_dma that takes a TTM buffer object pointer
 * instead of a pointer to a struct vmw_ttm_tt as argument.
 */
void vmw_bo_unmap_dma(struct ttm_buffer_object *bo)
{
        struct vmw_ttm_tt *vmw_tt =
                container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

        vmw_ttm_unmap_dma(vmw_tt);
}

/**
 * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
 * TTM buffer object
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Returns a pointer to a struct vmw_sg_table object. The object should
 * not be freed after use.
 * Note that for the device addresses to be valid, the buffer object must
 * either be reserved or pinned.
 */
const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
{
        struct vmw_ttm_tt *vmw_tt =
                container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

        return &vmw_tt->vsgt;
}

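/**
 * vmw_ttm_bind - Bind the backing pages of a TTM to the device
 *
 * @ttm: Pointer to the struct ttm_tt being bound.
 * @bo_mem: The memory region to bind to.
 *
 * Makes sure the DMA mappings are set up, then binds the pages either as a
 * GMR or as a MOB depending on the target memory type. A MOB is created on
 * the first bind if one does not exist yet.
 */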
static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
        struct vmw_ttm_tt *vmw_be =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
        int ret;

        ret = vmw_ttm_map_dma(vmw_be);
        if (unlikely(ret != 0))
                return ret;

        vmw_be->gmr_id = bo_mem->start;
        vmw_be->mem_type = bo_mem->mem_type;

        switch (bo_mem->mem_type) {
        case VMW_PL_GMR:
                return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
                                    ttm->num_pages, vmw_be->gmr_id);
        case VMW_PL_MOB:
                if (unlikely(vmw_be->mob == NULL)) {
                        vmw_be->mob =
                                vmw_mob_create(ttm->num_pages);
                        if (unlikely(vmw_be->mob == NULL))
                                return -ENOMEM;
                }

                return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
                                    &vmw_be->vsgt, ttm->num_pages,
                                    vmw_be->gmr_id);
        default:
                BUG();
        }
        return 0;
}

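/**
 * vmw_ttm_unbind - Unbind a TTM from the device
 *
 * @ttm: Pointer to the struct ttm_tt being unbound.
 *
 * Unbinds the GMR or MOB the TTM is currently bound as, and tears down the
 * DMA mappings when the device is set up for bind-time mapping only.
 */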
2011-11-01 20:46:13 -04:00
static int vmw_ttm_unbind(struct ttm_tt *ttm)
{
        struct vmw_ttm_tt *vmw_be =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

        switch (vmw_be->mem_type) {
        case VMW_PL_GMR:
                vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
                break;
        case VMW_PL_MOB:
                vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
                break;
        default:
                BUG();
        }

        if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
                vmw_ttm_unmap_dma(vmw_be);

        return 0;
}

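/**
 * vmw_ttm_destroy - Free a struct vmw_ttm_tt
 *
 * @ttm: Pointer to the struct ttm_tt being destroyed.
 *
 * Tears down any remaining DMA mappings, finalizes the underlying TTM and
 * destroys the MOB, if any, before freeing the backend structure itself.
 */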
static void vmw_ttm_destroy(struct ttm_tt *ttm)
{
        struct vmw_ttm_tt *vmw_be =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

        vmw_ttm_unmap_dma(vmw_be);
        if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
                ttm_dma_tt_fini(&vmw_be->dma_ttm);
        else
                ttm_tt_fini(ttm);

        if (vmw_be->mob)
                vmw_mob_destroy(vmw_be->mob);

        kfree(vmw_be);
}

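/**
 * vmw_ttm_populate - Allocate backing pages for a TTM
 *
 * @ttm: Pointer to the struct ttm_tt to populate.
 *
 * Uses the coherent DMA pool, and accounts the DMA address array to the TTM
 * memory global, when map_mode is vmw_dma_alloc_coherent; otherwise falls
 * back to the ordinary TTM page pool.
 */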
static int vmw_ttm_populate(struct ttm_tt *ttm)
{
        struct vmw_ttm_tt *vmw_tt =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
        struct vmw_private *dev_priv = vmw_tt->dev_priv;
        struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
        int ret;

        if (ttm->state != tt_unpopulated)
                return 0;

        if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
                size_t size =
                        ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
                ret = ttm_mem_global_alloc(glob, size, false, true);
                if (unlikely(ret != 0))
                        return ret;

                ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
                if (unlikely(ret != 0))
                        ttm_mem_global_free(glob, size);
        } else
                ret = ttm_pool_populate(ttm);

        return ret;
}

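/**
 * vmw_ttm_unpopulate - Free the backing pages of a TTM
 *
 * @ttm: Pointer to the struct ttm_tt to unpopulate.
 *
 * Destroys the MOB, if any, tears down the DMA mappings and then releases
 * the pages back to the pool they were allocated from.
 */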
static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
{
        struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
                                                 dma_ttm.ttm);
        struct vmw_private *dev_priv = vmw_tt->dev_priv;
        struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);

        if (vmw_tt->mob) {
                vmw_mob_destroy(vmw_tt->mob);
                vmw_tt->mob = NULL;
        }

        vmw_ttm_unmap_dma(vmw_tt);
        if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
                size_t size =
                        ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));

                ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
                ttm_mem_global_free(glob, size);
        } else
                ttm_pool_unpopulate(ttm);
}

static struct ttm_backend_func vmw_ttm_func = {
        .bind = vmw_ttm_bind,
        .unbind = vmw_ttm_unbind,
        .destroy = vmw_ttm_destroy,
};

static struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
                                        unsigned long size, uint32_t page_flags,
                                        struct page *dummy_read_page)
{
        struct vmw_ttm_tt *vmw_be;
        int ret;

        vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
        if (!vmw_be)
                return NULL;

        vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
        vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
        vmw_be->mob = NULL;

        if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
                ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bdev, size, page_flags,
                                      dummy_read_page);
        else
                ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bdev, size, page_flags,
                                  dummy_read_page);
        if (unlikely(ret != 0))
                goto out_no_init;

        return &vmw_be->dma_ttm.ttm;
out_no_init:
        kfree(vmw_be);
        return NULL;
}

static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
        return 0;
}

static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                             struct ttm_mem_type_manager *man)
{
        switch (type) {
        case TTM_PL_SYSTEM:
                /* System memory */
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_CACHED;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                /* "On-card" video ram */
                man->func = &ttm_bo_manager_func;
                man->gpu_offset = 0;
                man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_CACHED;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case VMW_PL_GMR:
        case VMW_PL_MOB:
                /*
                 * "Guest Memory Regions" is an aperture-like feature with
                 * one slot per bo. There is an upper limit on the number of
                 * slots as well as on the bo size.
                 */
                man->func = &vmw_gmrid_manager_func;
                man->gpu_offset = 0;
                man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_CACHED;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        default:
                DRM_ERROR("Unsupported memory type %u\n", (unsigned) type);
                return -EINVAL;
        }
        return 0;
}

static void vmw_evict_flags(struct ttm_buffer_object *bo,
                            struct ttm_placement *placement)
{
        *placement = vmw_sys_placement;
}

static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
        struct ttm_object_file *tfile =
                vmw_fpriv((struct drm_file *)filp->private_data)->tfile;

        return vmw_user_dmabuf_verify_access(bo, tfile);
}

static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

        mem->bus.addr = NULL;
        mem->bus.is_iomem = false;
        mem->bus.offset = 0;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
        case VMW_PL_GMR:
        case VMW_PL_MOB:
                return 0;
        case TTM_PL_VRAM:
                mem->bus.offset = mem->start << PAGE_SHIFT;
                mem->bus.base = dev_priv->vram_start;
                mem->bus.is_iomem = true;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        return 0;
}

/**
 * vmw_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @evict: Whether the move is an eviction. Unused by this implementation.
 * @mem: The struct ttm_mem_reg indicating to what memory
 *       region the move is taking place.
 *
 * Calls move_notify for all subsystems needing it
 * (currently resources and the query bo).
 */
static void vmw_move_notify(struct ttm_buffer_object *bo,
                            bool evict,
                            struct ttm_mem_reg *mem)
{
        vmw_resource_move_notify(bo, mem);
        vmw_query_move_notify(bo, mem);
}

/**
 * vmw_swap_notify - TTM swap_notify callback
 *
 * @bo: The TTM buffer object about to be swapped out.
 */
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
        (void) ttm_bo_wait(bo, false, false);
}

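/*
 * TTM driver callbacks for vmwgfx buffer objects. Note that .move is left
 * NULL, so buffer moves fall back to TTM's default copy path.
 */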
struct ttm_bo_driver vmw_bo_driver = {
        .ttm_tt_create = &vmw_ttm_tt_create,
        .ttm_tt_populate = &vmw_ttm_populate,
        .ttm_tt_unpopulate = &vmw_ttm_unpopulate,
        .invalidate_caches = vmw_invalidate_caches,
        .init_mem_type = vmw_init_mem_type,
        .eviction_valuable = ttm_bo_eviction_valuable,
        .evict_flags = vmw_evict_flags,
        .move = NULL,
        .verify_access = vmw_verify_access,
        .move_notify = vmw_move_notify,
        .swap_notify = vmw_swap_notify,
        .fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
        .io_mem_reserve = &vmw_ttm_io_mem_reserve,
        .io_mem_free = &vmw_ttm_io_mem_free,
        .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};