// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for Graphics Address Relocation Table on Tegra20
 *
 * Copyright (c) 2010-2012, NVIDIA CORPORATION. All rights reserved.
 *
 * Author: Hiroshi DOYU <hdoyu@nvidia.com>
 */
2018-12-12 23:39:04 +03:00
# define dev_fmt(fmt) "gart: " fmt
2018-12-12 23:38:44 +03:00
# include <linux/io.h>
# include <linux/iommu.h>
2018-12-01 22:19:15 +03:00
# include <linux/moduleparam.h>
2018-12-12 23:38:56 +03:00
# include <linux/platform_device.h>
2011-11-16 19:36:37 +04:00
# include <linux/slab.h>
2018-12-12 23:38:44 +03:00
# include <linux/spinlock.h>
2011-11-16 19:36:37 +04:00
# include <linux/vmalloc.h>
2018-12-12 23:38:56 +03:00
# include <soc/tegra/mc.h>
2012-05-10 11:45:32 +04:00
/* Offset of the GART register window inside the memory controller. */
#define GART_REG_BASE		0x24
/* Register offsets below are relative to GART_REG_BASE. */
#define GART_CONFIG		(0x24 - GART_REG_BASE)
#define GART_ENTRY_ADDR		(0x28 - GART_REG_BASE)
#define GART_ENTRY_DATA		(0x2c - GART_REG_BASE)

/* PTE bit 31 flags the entry as holding a valid physical address. */
#define GART_ENTRY_PHYS_ADDR_VALID	BIT(31)

#define GART_PAGE_SHIFT		12
#define GART_PAGE_SIZE		(1 << GART_PAGE_SHIFT)
/* Bits [30:12] of a PTE carry the page-frame address. */
#define GART_PAGE_MASK		GENMASK(30, GART_PAGE_SHIFT)

/* bitmap of the page sizes currently supported */
#define GART_IOMMU_PGSIZES	(GART_PAGE_SIZE)
2011-11-16 19:36:37 +04:00
/*
 * struct gart_device - per-instance state of the Tegra20 GART
 *
 * There is exactly one GART in the system; the singleton instance is
 * published through @gart_handle below.
 */
struct gart_device {
	void __iomem		*regs;		/* GART register window */
	u32			*savedata;	/* PTE snapshot for suspend/resume */
	unsigned long		iovmm_base;	/* offset to vmm_area start */
	unsigned long		iovmm_end;	/* offset to vmm_area end */
	spinlock_t		pte_lock;	/* for pagetable */
	spinlock_t		dom_lock;	/* for active domain */
	unsigned int		active_devices;	/* number of active devices */
	struct iommu_domain	*active_domain;	/* current active domain */
	struct iommu_device	iommu;		/* IOMMU Core handle */
	struct device		*dev;		/* GART provider device */
};

static struct gart_device *gart_handle; /* unique for a system */
/* When set (via the gart_debug module param), map/unmap verify PTE state. */
static bool gart_debug;
2011-11-16 19:36:37 +04:00
/*
 * Any interaction between any block on PPSB and a block on APB or AHB
 * must have these read-back to ensure the APB/AHB bus transaction is
 * complete before initiating activity on the PPSB block.
 */
#define FLUSH_GART_REGS(gart)	readl_relaxed((gart)->regs + GART_CONFIG)
2011-11-16 19:36:37 +04:00
/* Iterate @iova over every page-sized slot of the GART aperture. */
#define for_each_gart_pte(gart, iova)					\
	for (iova = gart->iovmm_base;					\
	     iova < gart->iovmm_end;					\
	     iova += GART_PAGE_SIZE)
/*
 * Program one GART page-table entry: latch the target IOVA into the
 * address register, then write the PTE value into the data register.
 * The ADDR-then-DATA write order follows the hardware's indirect-access
 * register pair.
 */
static inline void gart_set_pte(struct gart_device *gart,
				unsigned long iova, unsigned long pte)
{
	writel_relaxed(iova, gart->regs + GART_ENTRY_ADDR);
	writel_relaxed(pte, gart->regs + GART_ENTRY_DATA);
}
static inline unsigned long gart_read_pte ( struct gart_device * gart ,
2018-12-12 23:39:07 +03:00
unsigned long iova )
2011-11-16 19:36:37 +04:00
{
unsigned long pte ;
2018-12-12 23:39:07 +03:00
writel_relaxed ( iova , gart - > regs + GART_ENTRY_ADDR ) ;
pte = readl_relaxed ( gart - > regs + GART_ENTRY_DATA ) ;
2011-11-16 19:36:37 +04:00
return pte ;
}
/*
 * (Re)initialize the whole GART: fill every PTE either from the @data
 * snapshot (resume path) or with zeroes (probe path, @data == NULL),
 * then enable address translation and flush the posted writes.
 */
static void do_gart_setup(struct gart_device *gart, const u32 *data)
{
	unsigned long iova;

	for_each_gart_pte(gart, iova)
		gart_set_pte(gart, iova, data ? *(data++) : 0);

	writel_relaxed(1, gart->regs + GART_CONFIG);
	FLUSH_GART_REGS(gart);
}
2018-12-12 23:39:07 +03:00
/*
 * Check whether [iova, iova + bytes) is a valid single-page request
 * inside the GART aperture. The GART only handles exactly one
 * GART_PAGE_SIZE chunk at a time.
 */
static inline bool gart_iova_range_invalid(struct gart_device *gart,
					   unsigned long iova, size_t bytes)
{
	if (likely(bytes == GART_PAGE_SIZE &&
		   iova >= gart->iovmm_base &&
		   iova + bytes <= gart->iovmm_end))
		return false;

	return true;
}
2018-12-12 23:39:07 +03:00
static inline bool gart_pte_valid ( struct gart_device * gart , unsigned long iova )
2011-11-16 19:36:37 +04:00
{
2018-12-12 23:39:07 +03:00
return ! ! ( gart_read_pte ( gart , iova ) & GART_ENTRY_PHYS_ADDR_VALID ) ;
2011-11-16 19:36:37 +04:00
}
/*
 * Attach @dev to @domain. The GART hardware supports a single
 * translation context, so only one domain may be active at a time:
 * attaching while a different domain is active fails with -EBUSY.
 * The first device attached activates the domain; further devices
 * only bump the active-device count. Both the per-device link
 * (dev->archdata.iommu) and the active-domain bookkeeping are
 * protected by dom_lock.
 */
static int gart_iommu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct gart_device *gart = gart_handle;
	int ret = 0;

	spin_lock(&gart->dom_lock);

	if (gart->active_domain && gart->active_domain != domain) {
		ret = -EBUSY;
	} else if (dev->archdata.iommu != domain) {
		dev->archdata.iommu = domain;
		gart->active_domain = domain;
		gart->active_devices++;
	}

	spin_unlock(&gart->dom_lock);

	return ret;
}
/*
 * Detach @dev from @domain: clear the per-device link and, once the
 * last attached device is gone, deactivate the domain so another one
 * may be attached. A detach for a domain the device is not attached
 * to is silently ignored.
 */
static void gart_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct gart_device *gart = gart_handle;

	spin_lock(&gart->dom_lock);

	if (dev->archdata.iommu == domain) {
		dev->archdata.iommu = NULL;

		if (--gart->active_devices == 0)
			gart->active_domain = NULL;
	}

	spin_unlock(&gart->dom_lock);
}
2015-03-26 15:43:13 +03:00
static struct iommu_domain * gart_iommu_domain_alloc ( unsigned type )
2011-11-16 19:36:37 +04:00
{
2018-12-12 23:39:06 +03:00
struct iommu_domain * domain ;
2011-11-16 19:36:37 +04:00
2015-03-26 15:43:13 +03:00
if ( type ! = IOMMU_DOMAIN_UNMANAGED )
return NULL ;
2011-11-16 19:36:37 +04:00
2018-12-12 23:39:06 +03:00
domain = kzalloc ( sizeof ( * domain ) , GFP_KERNEL ) ;
if ( domain ) {
2018-12-12 23:39:07 +03:00
domain - > geometry . aperture_start = gart_handle - > iovmm_base ;
domain - > geometry . aperture_end = gart_handle - > iovmm_end - 1 ;
2018-12-12 23:39:06 +03:00
domain - > geometry . force_aperture = true ;
}
2015-03-27 13:07:26 +03:00
2018-12-12 23:39:06 +03:00
return domain ;
2011-11-16 19:36:37 +04:00
}
2015-03-26 15:43:13 +03:00
/*
 * Free a domain previously returned by gart_iommu_domain_alloc().
 * Freeing the currently active domain indicates a caller bug, hence
 * the WARN_ON.
 */
static void gart_iommu_domain_free(struct iommu_domain *domain)
{
	WARN_ON(gart_handle->active_domain == domain);
	kfree(domain);
}
2018-12-12 23:39:07 +03:00
static inline int __gart_iommu_map ( struct gart_device * gart , unsigned long iova ,
unsigned long pa )
{
if ( unlikely ( gart_debug & & gart_pte_valid ( gart , iova ) ) ) {
dev_err ( gart - > dev , " Page entry is in-use \n " ) ;
return - EINVAL ;
}
gart_set_pte ( gart , iova , GART_ENTRY_PHYS_ADDR_VALID | pa ) ;
return 0 ;
}
2011-11-16 19:36:37 +04:00
/*
 * iommu_ops::map callback: map one GART page at @iova to physical
 * address @pa. @prot and @gfp are ignored by this hardware. Returns
 * -EINVAL when the request lies outside the aperture, is not exactly
 * one page, or (with gart_debug) the entry is already in use.
 */
static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t pa, size_t bytes, int prot, gfp_t gfp)
{
	struct gart_device *gart = gart_handle;
	int ret;

	if (gart_iova_range_invalid(gart, iova, bytes))
		return -EINVAL;

	spin_lock(&gart->pte_lock);
	ret = __gart_iommu_map(gart, iova, (unsigned long)pa);
	spin_unlock(&gart->pte_lock);

	return ret;
}
static inline int __gart_iommu_unmap ( struct gart_device * gart ,
unsigned long iova )
{
if ( unlikely ( gart_debug & & ! gart_pte_valid ( gart , iova ) ) ) {
dev_err ( gart - > dev , " Page entry is invalid \n " ) ;
2011-11-16 19:36:37 +04:00
return - EINVAL ;
}
2018-12-12 23:39:07 +03:00
gart_set_pte ( gart , iova , 0 ) ;
2011-11-16 19:36:37 +04:00
return 0 ;
}
/*
 * iommu_ops::unmap callback: unmap one GART page at @iova. Returns the
 * number of bytes unmapped (GART_PAGE_SIZE) on success, or 0 when the
 * range is invalid or the debug check in __gart_iommu_unmap() fails.
 */
static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t bytes, struct iommu_iotlb_gather *gather)
{
	struct gart_device *gart = gart_handle;
	int err;

	if (gart_iova_range_invalid(gart, iova, bytes))
		return 0;

	spin_lock(&gart->pte_lock);
	err = __gart_iommu_unmap(gart, iova);
	spin_unlock(&gart->pte_lock);

	return err ? 0 : bytes;
}
/*
 * iommu_ops::iova_to_phys callback: translate @iova to the physical
 * address stored in its PTE (valid bit and high bits masked off by
 * GART_PAGE_MASK).
 *
 * NOTE(review): an out-of-range @iova returns -EINVAL implicitly
 * converted to phys_addr_t rather than 0 — callers that only test for
 * zero will not see this as a failure; matches the historical driver
 * behavior, so left as-is.
 */
static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct gart_device *gart = gart_handle;
	unsigned long pte;

	if (gart_iova_range_invalid(gart, iova, GART_PAGE_SIZE))
		return -EINVAL;

	spin_lock(&gart->pte_lock);
	pte = gart_read_pte(gart, iova);
	spin_unlock(&gart->pte_lock);

	return pte & GART_PAGE_MASK;
}
2014-09-05 12:51:37 +04:00
/* iommu_ops::capable callback: the GART advertises no optional capability. */
static bool gart_iommu_capable(enum iommu_cap cap)
{
	return false;
}
2017-07-21 15:12:37 +03:00
/*
 * iommu_ops::add_device callback: admit only devices that were routed
 * to this IOMMU via firmware (iommu_fwspec set by of_xlate), place the
 * device in an IOMMU group and link it to the IOMMU core handle for
 * sysfs.
 */
static int gart_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;

	if (!dev->iommu_fwspec)
		return -ENODEV;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);

	iommu_device_link(&gart_handle->iommu, dev);

	return 0;
}
/*
 * iommu_ops::remove_device callback: undo gart_iommu_add_device() —
 * drop the device from its group and unlink it from the IOMMU handle.
 */
static void gart_iommu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
	iommu_device_unlink(&gart_handle->iommu, dev);
}
2018-12-12 23:38:46 +03:00
static int gart_iommu_of_xlate ( struct device * dev ,
struct of_phandle_args * args )
{
return 0 ;
}
2019-07-02 18:44:06 +03:00
/*
 * iommu_ops::iotlb_sync_map callback: read back GART_CONFIG so that
 * posted PTE writes are guaranteed to have reached the hardware
 * (see the FLUSH_GART_REGS comment about PPSB vs. APB/AHB ordering).
 */
static void gart_iommu_sync_map(struct iommu_domain *domain)
{
	FLUSH_GART_REGS(gart_handle);
}
2019-07-02 18:44:06 +03:00
static void gart_iommu_sync ( struct iommu_domain * domain ,
struct iommu_iotlb_gather * gather )
{
gart_iommu_sync_map ( domain ) ;
}
2014-06-27 11:03:12 +04:00
/* IOMMU core callback table for the Tegra20 GART. */
static const struct iommu_ops gart_iommu_ops = {
	.capable	= gart_iommu_capable,
	.domain_alloc	= gart_iommu_domain_alloc,
	.domain_free	= gart_iommu_domain_free,
	.attach_dev	= gart_iommu_attach_dev,
	.detach_dev	= gart_iommu_detach_dev,
	.add_device	= gart_iommu_add_device,
	.remove_device	= gart_iommu_remove_device,
	.device_group	= generic_device_group,
	.map		= gart_iommu_map,
	.unmap		= gart_iommu_unmap,
	.iova_to_phys	= gart_iommu_iova_to_phys,
	.pgsize_bitmap	= GART_IOMMU_PGSIZES,
	.of_xlate	= gart_iommu_of_xlate,
	.iotlb_sync_map	= gart_iommu_sync_map,
	.iotlb_sync	= gart_iommu_sync,
};
2018-12-12 23:38:56 +03:00
/*
 * Save all GART PTEs into the preallocated savedata snapshot and turn
 * translation off for the duration of system suspend. Called by the
 * Tegra memory-controller driver; always returns 0.
 */
int tegra_gart_suspend(struct gart_device *gart)
{
	u32 *data = gart->savedata;
	unsigned long iova;

	/*
	 * All GART users shall be suspended at this point. Disable
	 * address translation to trap all GART accesses as invalid
	 * memory accesses.
	 */
	writel_relaxed(0, gart->regs + GART_CONFIG);
	FLUSH_GART_REGS(gart);

	for_each_gart_pte(gart, iova)
		*(data++) = gart_read_pte(gart, iova);

	return 0;
}
2018-12-12 23:38:56 +03:00
/*
 * Restore the PTE snapshot taken by tegra_gart_suspend() and re-enable
 * translation. Called by the Tegra memory-controller driver; always
 * returns 0.
 */
int tegra_gart_resume(struct gart_device *gart)
{
	do_gart_setup(gart, gart->savedata);

	return 0;
}
2018-12-12 23:38:56 +03:00
/*
 * Instantiate the single GART: locate the IOVA aperture resource,
 * allocate and initialize the device state, zero the pagetable,
 * register with the IOMMU core (sysfs entry, ops, fwnode) and allocate
 * the suspend/resume PTE snapshot buffer.
 *
 * Returns the new gart_device on success or an ERR_PTR; the error path
 * unwinds in reverse order of the setup steps via the labels below.
 */
struct gart_device *tegra_gart_probe(struct device *dev, struct tegra_mc *mc)
{
	struct gart_device *gart;
	struct resource *res;
	int err;

	/* the driver assumes CPU pages and GART pages are the same size */
	BUILD_BUG_ON(PAGE_SHIFT != GART_PAGE_SHIFT);

	/* the GART memory aperture is required */
	res = platform_get_resource(to_platform_device(dev), IORESOURCE_MEM, 1);
	if (!res) {
		dev_err(dev, "Memory aperture resource unavailable\n");
		return ERR_PTR(-ENXIO);
	}

	gart = kzalloc(sizeof(*gart), GFP_KERNEL);
	if (!gart)
		return ERR_PTR(-ENOMEM);

	/* publish the singleton used by the iommu_ops callbacks */
	gart_handle = gart;

	gart->dev = dev;
	gart->regs = mc->regs + GART_REG_BASE;
	gart->iovmm_base = res->start;
	gart->iovmm_end = res->end + 1;
	spin_lock_init(&gart->pte_lock);
	spin_lock_init(&gart->dom_lock);

	/* start from a clean pagetable with translation enabled */
	do_gart_setup(gart, NULL);

	err = iommu_device_sysfs_add(&gart->iommu, dev, NULL, "gart");
	if (err)
		goto free_gart;

	iommu_device_set_ops(&gart->iommu, &gart_iommu_ops);
	iommu_device_set_fwnode(&gart->iommu, dev->fwnode);

	err = iommu_device_register(&gart->iommu);
	if (err)
		goto remove_sysfs;

	/* one u32 snapshot slot per GART page in the aperture */
	gart->savedata = vmalloc(resource_size(res) / GART_PAGE_SIZE *
				 sizeof(u32));
	if (!gart->savedata) {
		err = -ENOMEM;
		goto unregister_iommu;
	}

	return gart;

unregister_iommu:
	iommu_device_unregister(&gart->iommu);
remove_sysfs:
	iommu_device_sysfs_remove(&gart->iommu);
free_gart:
	kfree(gart);

	return ERR_PTR(err);
}
2018-12-01 22:19:15 +03:00
/* Expose gart_debug as a writable module parameter (see gart_debug above). */
module_param(gart_debug, bool, 0644);
MODULE_PARM_DESC(gart_debug, "Enable GART debugging");