// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_mmu.h"
2017-03-07 10:02:52 -07:00
static void
msm_gem_address_space_destroy ( struct kref * kref )
{
struct msm_gem_address_space * aspace = container_of ( kref ,
struct msm_gem_address_space , kref ) ;
drm_mm_takedown ( & aspace - > mm ) ;
if ( aspace - > mmu )
aspace - > mmu - > funcs - > destroy ( aspace - > mmu ) ;
kfree ( aspace ) ;
}
void msm_gem_address_space_put ( struct msm_gem_address_space * aspace )
{
if ( aspace )
kref_put ( & aspace - > kref , msm_gem_address_space_destroy ) ;
}
2018-11-07 15:35:51 -07:00
/* Actually unmap memory for the vma */
void msm_gem_purge_vma ( struct msm_gem_address_space * aspace ,
2018-11-07 15:35:47 -07:00
struct msm_gem_vma * vma )
2016-09-28 19:58:32 -04:00
{
2018-11-07 15:35:51 -07:00
unsigned size = vma - > node . size < < PAGE_SHIFT ;
/* Print a message if we try to purge a vma in use */
if ( WARN_ON ( vma - > inuse > 0 ) )
2016-09-28 19:58:32 -04:00
return ;
2018-11-07 15:35:51 -07:00
/* Don't do anything if the memory isn't mapped */
if ( ! vma - > mapped )
return ;
2016-09-28 19:58:32 -04:00
2018-11-07 15:35:51 -07:00
if ( aspace - > mmu )
aspace - > mmu - > funcs - > unmap ( aspace - > mmu , vma - > iova , size ) ;
2016-09-28 19:58:32 -04:00
2018-11-07 15:35:48 -07:00
vma - > mapped = false ;
2018-11-07 15:35:51 -07:00
}
2017-03-07 10:02:52 -07:00
2018-11-07 15:35:51 -07:00
/* Remove reference counts for the mapping */
void msm_gem_unmap_vma ( struct msm_gem_address_space * aspace ,
struct msm_gem_vma * vma )
{
if ( ! WARN_ON ( ! vma - > iova ) )
vma - > inuse - - ;
2016-09-28 19:58:32 -04:00
}
int
msm_gem_map_vma ( struct msm_gem_address_space * aspace ,
2019-01-09 14:25:05 -05:00
struct msm_gem_vma * vma , int prot ,
struct sg_table * sgt , int npages )
2016-09-28 19:58:32 -04:00
{
2018-11-07 15:35:48 -07:00
unsigned size = npages < < PAGE_SHIFT ;
int ret = 0 ;
2016-09-28 19:58:32 -04:00
2018-11-07 15:35:48 -07:00
if ( WARN_ON ( ! vma - > iova ) )
return - EINVAL ;
2018-11-07 15:35:51 -07:00
/* Increase the usage counter */
vma - > inuse + + ;
2018-11-07 15:35:48 -07:00
if ( vma - > mapped )
2016-09-28 19:58:32 -04:00
return 0 ;
2018-11-07 15:35:48 -07:00
vma - > mapped = true ;
2019-03-02 13:35:29 +01:00
if ( aspace & & aspace - > mmu )
2018-11-07 15:35:48 -07:00
ret = aspace - > mmu - > funcs - > map ( aspace - > mmu , vma - > iova , sgt ,
2019-01-09 14:25:05 -05:00
size , prot ) ;
2018-11-07 15:35:48 -07:00
if ( ret )
vma - > mapped = false ;
return ret ;
}
2018-11-07 15:35:51 -07:00
/* Close an iova. Warn if it is still in use */
void msm_gem_close_vma ( struct msm_gem_address_space * aspace ,
struct msm_gem_vma * vma )
{
if ( WARN_ON ( vma - > inuse > 0 | | vma - > mapped ) )
return ;
spin_lock ( & aspace - > lock ) ;
if ( vma - > iova )
drm_mm_remove_node ( & vma - > node ) ;
spin_unlock ( & aspace - > lock ) ;
vma - > iova = 0 ;
msm_gem_address_space_put ( aspace ) ;
}
2018-11-07 15:35:48 -07:00
/* Initialize a new vma and allocate an iova for it */
int msm_gem_init_vma ( struct msm_gem_address_space * aspace ,
struct msm_gem_vma * vma , int npages )
{
int ret ;
if ( WARN_ON ( vma - > iova ) )
return - EBUSY ;
spin_lock ( & aspace - > lock ) ;
2017-02-02 21:04:38 +00:00
ret = drm_mm_insert_node ( & aspace - > mm , & vma - > node , npages ) ;
2017-06-13 16:52:54 -06:00
spin_unlock ( & aspace - > lock ) ;
2016-09-28 19:58:32 -04:00
if ( ret )
return ret ;
vma - > iova = vma - > node . start < < PAGE_SHIFT ;
2018-11-07 15:35:48 -07:00
vma - > mapped = false ;
2016-09-28 19:58:32 -04:00
2017-03-07 10:02:52 -07:00
kref_get ( & aspace - > kref ) ;
2016-09-28 19:58:32 -04:00
2018-11-07 15:35:48 -07:00
return 0 ;
2016-09-28 19:58:32 -04:00
}
2018-11-07 15:35:51 -07:00
2016-09-28 19:58:32 -04:00
struct msm_gem_address_space *
msm_gem_address_space_create ( struct device * dev , struct iommu_domain * domain ,
const char * name )
{
struct msm_gem_address_space * aspace ;
2018-01-22 11:10:46 -07:00
u64 size = domain - > geometry . aperture_end -
domain - > geometry . aperture_start ;
2016-09-28 19:58:32 -04:00
aspace = kzalloc ( sizeof ( * aspace ) , GFP_KERNEL ) ;
if ( ! aspace )
return ERR_PTR ( - ENOMEM ) ;
2017-06-13 16:52:54 -06:00
spin_lock_init ( & aspace - > lock ) ;
2016-09-28 19:58:32 -04:00
aspace - > name = name ;
aspace - > mmu = msm_iommu_new ( dev , domain ) ;
drm_mm_init ( & aspace - > mm , ( domain - > geometry . aperture_start > > PAGE_SHIFT ) ,
2018-01-22 11:10:46 -07:00
size > > PAGE_SHIFT ) ;
2016-09-28 19:58:32 -04:00
2017-03-07 10:02:52 -07:00
kref_init ( & aspace - > kref ) ;
2016-09-28 19:58:32 -04:00
return aspace ;
}
2018-11-14 17:08:04 -05:00
struct msm_gem_address_space *
msm_gem_address_space_create_a2xx ( struct device * dev , struct msm_gpu * gpu ,
const char * name , uint64_t va_start , uint64_t va_end )
{
struct msm_gem_address_space * aspace ;
u64 size = va_end - va_start ;
aspace = kzalloc ( sizeof ( * aspace ) , GFP_KERNEL ) ;
if ( ! aspace )
return ERR_PTR ( - ENOMEM ) ;
spin_lock_init ( & aspace - > lock ) ;
aspace - > name = name ;
aspace - > mmu = msm_gpummu_new ( dev , gpu ) ;
drm_mm_init ( & aspace - > mm , ( va_start > > PAGE_SHIFT ) ,
size > > PAGE_SHIFT ) ;
kref_init ( & aspace - > kref ) ;
return aspace ;
}