// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011-2014 NVIDIA CORPORATION.  All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <soc/tegra/ahb.h>
#include <soc/tegra/mc.h>

struct tegra_smmu_group {
	struct list_head list;
	const struct tegra_smmu_group_soc *soc;
	struct iommu_group *group;
};

struct tegra_smmu {
	void __iomem *regs;
	struct device *dev;

	struct tegra_mc *mc;
	const struct tegra_smmu_soc *soc;

	struct list_head groups;

	unsigned long pfn_mask;
	unsigned long tlb_mask;

	unsigned long *asids;
	struct mutex lock;

	struct list_head list;

	struct dentry *debugfs;

	struct iommu_device iommu;	/* IOMMU Core code handle */
};

struct tegra_smmu_as {
	struct iommu_domain domain;
	struct tegra_smmu *smmu;
	unsigned int use_count;
	u32 *count;
	struct page **pts;
	struct page *pd;
	dma_addr_t pd_dma;
	unsigned id;
	u32 attr;
};

static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom)
{
	return container_of(dom, struct tegra_smmu_as, domain);
}

static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
			       unsigned long offset)
{
	writel(value, smmu->regs + offset);
}

static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
{
	return readl(smmu->regs + offset);
}

#define SMMU_CONFIG 0x010
#define SMMU_CONFIG_ENABLE (1 << 0)

#define SMMU_TLB_CONFIG 0x14
#define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
#define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
#define SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
	((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)

#define SMMU_PTC_CONFIG 0x18
#define SMMU_PTC_CONFIG_ENABLE (1 << 29)
#define SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24)
#define SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f)

#define SMMU_PTB_ASID 0x01c
#define SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)

#define SMMU_PTB_DATA 0x020
#define SMMU_PTB_DATA_VALUE(dma, attr) ((dma) >> 12 | (attr))

#define SMMU_MK_PDE(dma, attr) ((dma) >> SMMU_PTE_SHIFT | (attr))

#define SMMU_TLB_FLUSH 0x030
#define SMMU_TLB_FLUSH_VA_MATCH_ALL     (0 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_GROUP   (3 << 0)
#define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
					 SMMU_TLB_FLUSH_VA_MATCH_SECTION)
#define SMMU_TLB_FLUSH_VA_GROUP(addr)   ((((addr) & 0xffffc000) >> 12) | \
					 SMMU_TLB_FLUSH_VA_MATCH_GROUP)
#define SMMU_TLB_FLUSH_ASID_MATCH       (1 << 31)

#define SMMU_PTC_FLUSH 0x034
#define SMMU_PTC_FLUSH_TYPE_ALL (0 << 0)
#define SMMU_PTC_FLUSH_TYPE_ADR (1 << 0)

#define SMMU_PTC_FLUSH_HI 0x9b8
#define SMMU_PTC_FLUSH_HI_MASK 0x3

/* per-SWGROUP SMMU_*_ASID register */
#define SMMU_ASID_ENABLE (1 << 31)
#define SMMU_ASID_MASK 0x7f
#define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK)

/* page table definitions */
#define SMMU_NUM_PDE 1024
#define SMMU_NUM_PTE 1024

#define SMMU_SIZE_PD (SMMU_NUM_PDE * 4)
#define SMMU_SIZE_PT (SMMU_NUM_PTE * 4)

#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12

#define SMMU_PD_READABLE (1 << 31)
#define SMMU_PD_WRITABLE (1 << 30)
#define SMMU_PD_NONSECURE (1 << 29)

#define SMMU_PDE_READABLE (1 << 31)
#define SMMU_PDE_WRITABLE (1 << 30)
#define SMMU_PDE_NONSECURE (1 << 29)
#define SMMU_PDE_NEXT (1 << 28)

#define SMMU_PTE_READABLE (1 << 31)
#define SMMU_PTE_WRITABLE (1 << 30)
#define SMMU_PTE_NONSECURE (1 << 29)

#define SMMU_PDE_ATTR (SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
		       SMMU_PDE_NONSECURE)

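/*
 * The SMMU translates 32-bit I/O virtual addresses through a two-level
 * page table: bits [31:22] of an IOVA select one of the 1024 page
 * directory entries, and bits [21:12] select one of the 1024 page table
 * entries within the 4 MiB section that the PDE points at, so each PTE
 * maps a single 4 KiB page. For example, IOVA 0x12345678 decodes to PDE
 * index 0x048 (0x12345678 >> 22) and PTE index 0x345
 * ((0x12345678 >> 12) & 0x3ff).
 */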
static unsigned int iova_pd_index(unsigned long iova)
{
	return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
}

static unsigned int iova_pt_index(unsigned long iova)
{
	return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1);
}

static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
{
	addr >>= 12;
	return (addr & smmu->pfn_mask) == addr;
}

static dma_addr_t smmu_pde_to_dma(struct tegra_smmu *smmu, u32 pde)
{
	return (dma_addr_t)(pde & smmu->pfn_mask) << 12;
}

static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
}

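/*
 * Flush the page table cache (PTC) line holding the page table or page
 * directory entry at the given DMA address. The offset is aligned down
 * to the memory controller's atom size, and on SoCs with more than 32
 * address bits the high bits of the address are written to the
 * SMMU_PTC_FLUSH_HI register before the flush is triggered.
 */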
static inline void smmu_flush_ptc(struct tegra_smmu *smmu, dma_addr_t dma,
				  unsigned long offset)
{
	u32 value;

	offset &= ~(smmu->mc->soc->atom_size - 1);

	if (smmu->mc->soc->num_address_bits > 32) {
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		value = (dma >> 32) & SMMU_PTC_FLUSH_HI_MASK;
#else
		value = 0;
#endif
		smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
	}

	value = (dma + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
	smmu_writel(smmu, value, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_tlb(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
				       unsigned long asid)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_MATCH_ALL;
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
					  unsigned long asid,
					  unsigned long iova)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_SECTION(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
					unsigned long asid,
					unsigned long iova)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_GROUP(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

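/*
 * Reading back an SMMU register ensures that all register writes issued
 * so far have actually been posted to the hardware.
 */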
static inline void smmu_flush(struct tegra_smmu *smmu)
{
	smmu_readl(smmu, SMMU_PTB_ASID);
}

static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
{
	unsigned long id;

	mutex_lock(&smmu->lock);

	id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
	if (id >= smmu->soc->num_asids) {
		mutex_unlock(&smmu->lock);
		return -ENOSPC;
	}

	set_bit(id, smmu->asids);
	*idp = id;

	mutex_unlock(&smmu->lock);
	return 0;
}

static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
{
	mutex_lock(&smmu->lock);
	clear_bit(id, smmu->asids);
	mutex_unlock(&smmu->lock);
}

static bool tegra_smmu_capable(enum iommu_cap cap)
{
	return false;
}

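/*
 * Allocate an address space: a one-page page directory plus bookkeeping
 * arrays for the lazily allocated page tables and their per-section use
 * counts. The aperture spans the full 32-bit IOVA range.
 */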
static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
{
	struct tegra_smmu_as *as;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	as = kzalloc(sizeof(*as), GFP_KERNEL);
	if (!as)
		return NULL;

	as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;

	as->pd = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
	if (!as->pd) {
		kfree(as);
		return NULL;
	}

	as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
	if (!as->count) {
		__free_page(as->pd);
		kfree(as);
		return NULL;
	}

	as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
	if (!as->pts) {
		kfree(as->count);
		__free_page(as->pd);
		kfree(as);
		return NULL;
	}

	/* setup aperture */
	as->domain.geometry.aperture_start = 0;
	as->domain.geometry.aperture_end = 0xffffffff;
	as->domain.geometry.force_aperture = true;

	return &as->domain;
}

static void tegra_smmu_domain_free(struct iommu_domain *domain)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);

	/* TODO: free page directory and page tables */

	WARN_ON_ONCE(as->use_count);
	kfree(as->count);
	kfree(as->pts);
	kfree(as);
}

static const struct tegra_smmu_swgroup *
tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup)
{
	const struct tegra_smmu_swgroup *group = NULL;
	unsigned int i;

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		if (smmu->soc->swgroups[i].swgroup == swgroup) {
			group = &smmu->soc->swgroups[i];
			break;
		}
	}

	return group;
}

static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
			      unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value |= SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	} else {
		pr_warn("%s group from swgroup %u not found\n", __func__,
			swgroup);
		/* No point moving ahead if group was not found */
		return;
	}

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value |= BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}
}

static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
			       unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value &= ~SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	}

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value &= ~BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}
}

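/*
 * Prepare an address space for use on this SMMU: map its page directory
 * for DMA, allocate an ASID and program the page directory base through
 * the per-ASID SMMU_PTB_ASID/SMMU_PTB_DATA registers. Repeated calls
 * for an already-prepared address space only bump its reference count.
 */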
static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
				 struct tegra_smmu_as *as)
{
	u32 value;
	int err;

	if (as->use_count > 0) {
		as->use_count++;
		return 0;
	}

	as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(smmu->dev, as->pd_dma))
		return -ENOMEM;

	/* We can't handle 64-bit DMA addresses */
	if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
		err = -ENOMEM;
		goto err_unmap;
	}

	err = tegra_smmu_alloc_asid(smmu, &as->id);
	if (err < 0)
		goto err_unmap;

	smmu_flush_ptc(smmu, as->pd_dma, 0);
	smmu_flush_tlb_asid(smmu, as->id);

	smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
	value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr);
	smmu_writel(smmu, value, SMMU_PTB_DATA);
	smmu_flush(smmu);

	as->smmu = smmu;
	as->use_count++;

	return 0;

err_unmap:
	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
	return err;
}

static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
				    struct tegra_smmu_as *as)
{
	if (--as->use_count > 0)
		return;

	tegra_smmu_free_asid(smmu, as->id);

	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);

	as->smmu = NULL;
}

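/*
 * Devices refer to the SMMU via "iommus" phandles in the device tree,
 * where the single specifier cell carries the SoC-specific swgroup ID.
 * A hypothetical client node might look like this:
 *
 *	dc@54200000 {
 *		...
 *		iommus = <&mc TEGRA_SWGROUP_DC>;
 *	};
 *
 * Attaching walks all specifiers that target this SMMU, prepares the
 * address space once and enables translation for each swgroup found.
 */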
static int tegra_smmu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct tegra_smmu *smmu = dev->archdata.iommu;
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	unsigned int index = 0;
	int err = 0;

	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					   &args)) {
		unsigned int swgroup = args.args[0];

		if (args.np != smmu->dev->of_node) {
			of_node_put(args.np);
			continue;
		}

		of_node_put(args.np);

		err = tegra_smmu_as_prepare(smmu, as);
		if (err < 0)
			return err;

		tegra_smmu_enable(smmu, swgroup, as->id);
		index++;
	}

	if (index == 0)
		return -ENODEV;

	return 0;
}

static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct device_node *np = dev->of_node;
	struct tegra_smmu *smmu = as->smmu;
	struct of_phandle_args args;
	unsigned int index = 0;

	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					   &args)) {
		unsigned int swgroup = args.args[0];

		if (args.np != smmu->dev->of_node) {
			of_node_put(args.np);
			continue;
		}

		of_node_put(args.np);

		tegra_smmu_disable(smmu, swgroup, as->id);
		tegra_smmu_as_unprepare(smmu, as);
		index++;
	}
}

static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
			       u32 value)
{
	unsigned int pd_index = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;
	u32 *pd = page_address(as->pd);
	unsigned long offset = pd_index * sizeof(*pd);

	/* Set the page directory entry first */
	pd[pd_index] = value;

	/* Then flush the page directory entry from the CPU caches */
	dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
					 sizeof(*pd), DMA_TO_DEVICE);

	/* And flush the IOMMU's caches: the PTC line and the TLB section */
	smmu_flush_ptc(smmu, as->pd_dma, offset);
	smmu_flush_tlb_section(smmu, as->id, iova);
	smmu_flush(smmu);
}

static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
{
	u32 *pt = page_address(pt_page);

	return pt + iova_pt_index(iova);
}

static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
				  dma_addr_t *dmap)
{
	unsigned int pd_index = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;
	struct page *pt_page;
	u32 *pd;

	pt_page = as->pts[pd_index];
	if (!pt_page)
		return NULL;

	pd = page_address(as->pd);
	*dmap = smmu_pde_to_dma(smmu, pd[pd_index]);

	return tegra_smmu_pte_offset(pt_page, iova);
}

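/*
 * Look up the PTE for an IOVA, allocating and installing a new page
 * table on demand if none covers the section yet. The page table's DMA
 * address is returned through "dmap" so that callers can flush modified
 * PTEs from the CPU caches.
 */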
static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
		       dma_addr_t *dmap)
{
	unsigned int pde = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;

	if (!as->pts[pde]) {
		struct page *page;
		dma_addr_t dma;

		page = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
		if (!page)
			return NULL;

		dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
				   DMA_TO_DEVICE);
		if (dma_mapping_error(smmu->dev, dma)) {
			__free_page(page);
			return NULL;
		}

		if (!smmu_dma_addr_valid(smmu, dma)) {
			dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT,
				       DMA_TO_DEVICE);
			__free_page(page);
			return NULL;
		}

		as->pts[pde] = page;

		tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR |
							      SMMU_PDE_NEXT));

		*dmap = dma;
	} else {
		u32 *pd = page_address(as->pd);

		*dmap = smmu_pde_to_dma(smmu, pd[pde]);
	}

	return tegra_smmu_pte_offset(as->pts[pde], iova);
}

static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova)
{
	unsigned int pd_index = iova_pd_index(iova);

	as->count[pd_index]++;
}

static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
{
	unsigned int pde = iova_pd_index(iova);
	struct page *page = as->pts[pde];

	/*
	 * When no entries in this page table are used anymore, return the
	 * memory page to the system.
	 */
	if (--as->count[pde] == 0) {
		struct tegra_smmu *smmu = as->smmu;
		u32 *pd = page_address(as->pd);
		dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]);

		tegra_smmu_set_pde(as, iova, 0);

		dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE);
		__free_page(page);
		as->pts[pde] = NULL;
	}
}

static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
			       u32 *pte, dma_addr_t pte_dma, u32 val)
{
	struct tegra_smmu *smmu = as->smmu;
	unsigned long offset = offset_in_page(pte);

	*pte = val;

	dma_sync_single_range_for_device(smmu->dev, pte_dma, offset,
					 4, DMA_TO_DEVICE);
	smmu_flush_ptc(smmu, pte_dma, offset);
	smmu_flush_tlb_group(smmu, as->id, iova);
	smmu_flush(smmu);
}

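/*
 * Map a single 4 KiB page: fetch (or create) the PTE slot for the IOVA,
 * account for the new entry in the section's use count and write a PTE
 * containing the physical page frame number plus attributes derived
 * from the IOMMU protection flags.
 */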
static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	dma_addr_t pte_dma;
	u32 pte_attrs;
	u32 *pte;

	pte = as_get_pte(as, iova, &pte_dma);
	if (!pte)
		return -ENOMEM;

	/* If we aren't overwriting a pre-existing entry, increment use */
	if (*pte == 0)
		tegra_smmu_pte_get_use(as, iova);

	pte_attrs = SMMU_PTE_NONSECURE;

	if (prot & IOMMU_READ)
		pte_attrs |= SMMU_PTE_READABLE;

	if (prot & IOMMU_WRITE)
		pte_attrs |= SMMU_PTE_WRITABLE;

	tegra_smmu_set_pte(as, iova, pte, pte_dma,
			   __phys_to_pfn(paddr) | pte_attrs);

	return 0;
}

static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t size, struct iommu_iotlb_gather *gather)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	dma_addr_t pte_dma;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
	if (!pte || !*pte)
		return 0;

	tegra_smmu_set_pte(as, iova, pte, pte_dma, 0);
	tegra_smmu_pte_put_use(as, iova);

	return size;
}

static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned long pfn;
	dma_addr_t pte_dma;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
	if (!pte || !*pte)
		return 0;

	pfn = *pte & as->smmu->pfn_mask;

	return PFN_PHYS(pfn);
}

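/*
 * The SMMU is part of the Tegra memory controller, so resolving an
 * "iommus" phandle means finding the platform device behind the
 * referenced node and fetching the SMMU handle from its driver data.
 */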
static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
{
	struct platform_device *pdev;
	struct tegra_mc *mc;

	pdev = of_find_device_by_node(np);
	if (!pdev)
		return NULL;

	mc = platform_get_drvdata(pdev);
	if (!mc)
		return NULL;

	return mc->smmu;
}

static int tegra_smmu_configure(struct tegra_smmu *smmu, struct device *dev,
				struct of_phandle_args *args)
{
	const struct iommu_ops *ops = smmu->iommu.ops;
	int err;

	err = iommu_fwspec_init(dev, &dev->of_node->fwnode, ops);
	if (err < 0) {
		dev_err(dev, "failed to initialize fwspec: %d\n", err);
		return err;
	}

	err = ops->of_xlate(dev, args);
	if (err < 0) {
		dev_err(dev, "failed to parse SW group ID: %d\n", err);
		iommu_fwspec_free(dev);
		return err;
	}

	return 0;
}

static struct iommu_device *tegra_smmu_probe_device(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct tegra_smmu *smmu = NULL;
	struct of_phandle_args args;
	unsigned int index = 0;
	int err;

	while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					  &args) == 0) {
		smmu = tegra_smmu_find(args.np);
		if (smmu) {
			err = tegra_smmu_configure(smmu, dev, &args);
			of_node_put(args.np);

			if (err < 0)
				return ERR_PTR(err);

			/*
			 * Only a single IOMMU master interface is currently
			 * supported by the Linux kernel, so abort after the
			 * first match.
			 */
			dev->archdata.iommu = smmu;

			break;
		}

		of_node_put(args.np);
		index++;
	}

	if (!smmu)
		return ERR_PTR(-ENODEV);

	return &smmu->iommu;
}

static void tegra_smmu_release_device(struct device *dev)
{
	dev->archdata.iommu = NULL;
}

static const struct tegra_smmu_group_soc *
tegra_smmu_find_group(struct tegra_smmu *smmu, unsigned int swgroup)
{
	unsigned int i, j;

	for (i = 0; i < smmu->soc->num_groups; i++)
		for (j = 0; j < smmu->soc->groups[i].num_swgroups; j++)
			if (smmu->soc->groups[i].swgroups[j] == swgroup)
				return &smmu->soc->groups[i];

	return NULL;
}

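/*
 * Clients that share a group definition in the SoC data must end up in
 * the same iommu_group. The first lookup for a given group allocates an
 * iommu_group and caches it on the SMMU's group list; later lookups
 * return the cached group.
 */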
static struct iommu_group *tegra_smmu_group_get(struct tegra_smmu *smmu,
						unsigned int swgroup)
{
	const struct tegra_smmu_group_soc *soc;
	struct tegra_smmu_group *group;

	soc = tegra_smmu_find_group(smmu, swgroup);
	if (!soc)
		return NULL;

	mutex_lock(&smmu->lock);

	list_for_each_entry(group, &smmu->groups, list)
		if (group->soc == soc) {
			mutex_unlock(&smmu->lock);
			return group->group;
		}

	group = devm_kzalloc(smmu->dev, sizeof(*group), GFP_KERNEL);
	if (!group) {
		mutex_unlock(&smmu->lock);
		return NULL;
	}

	INIT_LIST_HEAD(&group->list);
	group->soc = soc;

	group->group = iommu_group_alloc();
	if (IS_ERR(group->group)) {
		devm_kfree(smmu->dev, group);
		mutex_unlock(&smmu->lock);
		return NULL;
	}

	list_add_tail(&group->list, &smmu->groups);
	mutex_unlock(&smmu->lock);

	return group->group;
}

static struct iommu_group *tegra_smmu_device_group(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct tegra_smmu *smmu = dev->archdata.iommu;
	struct iommu_group *group;

	group = tegra_smmu_group_get(smmu, fwspec->ids[0]);
	if (!group)
		group = generic_device_group(dev);

	return group;
}

static int tegra_smmu_of_xlate(struct device *dev,
			       struct of_phandle_args *args)
{
	u32 id = args->args[0];

	return iommu_fwspec_add_ids(dev, &id, 1);
}

static const struct iommu_ops tegra_smmu_ops = {
	.capable = tegra_smmu_capable,
	.domain_alloc = tegra_smmu_domain_alloc,
	.domain_free = tegra_smmu_domain_free,
	.attach_dev = tegra_smmu_attach_dev,
	.detach_dev = tegra_smmu_detach_dev,
	.probe_device = tegra_smmu_probe_device,
	.release_device = tegra_smmu_release_device,
	.device_group = tegra_smmu_device_group,
	.map = tegra_smmu_map,
	.unmap = tegra_smmu_unmap,
	.iova_to_phys = tegra_smmu_iova_to_phys,
	.of_xlate = tegra_smmu_of_xlate,
	.pgsize_bitmap = SZ_4K,
};

static void tegra_smmu_ahb_enable(void)
{
	static const struct of_device_id ahb_match[] = {
		{ .compatible = "nvidia,tegra30-ahb", },
		{ }
	};
	struct device_node *ahb;

	ahb = of_find_matching_node(NULL, ahb_match);
	if (ahb) {
		tegra_ahb_enable_smmu(ahb);
		of_node_put(ahb);
	}
}

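/*
 * debugfs interface: "swgroups" lists the ASID enable state for every
 * software group, "clients" lists which memory clients currently have
 * SMMU translation enabled.
 */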
static int tegra_smmu_swgroups_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_printf(s, "swgroup    enabled  ASID\n");
	seq_printf(s, "------------------------\n");

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		const struct tegra_smmu_swgroup *group = &smmu->soc->swgroups[i];
		const char *status;
		unsigned int asid;

		value = smmu_readl(smmu, group->reg);

		if (value & SMMU_ASID_ENABLE)
			status = "yes";
		else
			status = "no";

		asid = value & SMMU_ASID_MASK;

		seq_printf(s, "%-9s  %-7s  %#04x\n", group->name, status,
			   asid);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(tegra_smmu_swgroups);

static int tegra_smmu_clients_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_printf(s, "client       enabled\n");
	seq_printf(s, "--------------------\n");

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];
		const char *status;

		value = smmu_readl(smmu, client->smmu.reg);

		if (value & BIT(client->smmu.bit))
			status = "yes";
		else
			status = "no";

		seq_printf(s, "%-12s %s\n", client->name, status);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(tegra_smmu_clients);

static void tegra_smmu_debugfs_init(struct tegra_smmu *smmu)
{
	smmu->debugfs = debugfs_create_dir("smmu", NULL);
	if (!smmu->debugfs)
		return;

	debugfs_create_file("swgroups", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_swgroups_fops);
	debugfs_create_file("clients", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_clients_fops);
}

static void tegra_smmu_debugfs_exit(struct tegra_smmu *smmu)
{
	debugfs_remove_recursive(smmu->debugfs);
}

struct tegra_smmu *tegra_smmu_probe(struct device *dev,
				    const struct tegra_smmu_soc *soc,
				    struct tegra_mc *mc)
{
	struct tegra_smmu *smmu;
	size_t size;
	u32 value;
	int err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu)
		return ERR_PTR(-ENOMEM);

	/*
	 * This is a bit of a hack. Ideally we'd want to simply return this
	 * value. However the IOMMU registration process will attempt to add
	 * all devices to the IOMMU when bus_set_iommu() is called. In order
	 * not to rely on global variables to track the IOMMU instance, we
	 * set it here so that it can be looked up from the .probe_device()
	 * callback via the IOMMU device's .drvdata field.
	 */
	mc->smmu = smmu;

	size = BITS_TO_LONGS(soc->num_asids) * sizeof(long);

	smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL);
	if (!smmu->asids)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&smmu->groups);
	mutex_init(&smmu->lock);

	smmu->regs = mc->regs;
	smmu->soc = soc;
	smmu->dev = dev;
	smmu->mc = mc;

	smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
	dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
		mc->soc->num_address_bits, smmu->pfn_mask);
	smmu->tlb_mask = (smmu->soc->num_tlb_lines << 1) - 1;
	dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
		smmu->tlb_mask);

	value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);

	if (soc->supports_request_limit)
		value |= SMMU_PTC_CONFIG_REQ_LIMIT(8);

	smmu_writel(smmu, value, SMMU_PTC_CONFIG);

	value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
		SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);

	if (soc->supports_round_robin_arbitration)
		value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;

	smmu_writel(smmu, value, SMMU_TLB_CONFIG);

	smmu_flush_ptc_all(smmu);
	smmu_flush_tlb(smmu);
	smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
	smmu_flush(smmu);

	tegra_smmu_ahb_enable();

	err = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, dev_name(dev));
	if (err)
		return ERR_PTR(err);

	iommu_device_set_ops(&smmu->iommu, &tegra_smmu_ops);
	iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);

	err = iommu_device_register(&smmu->iommu);
	if (err) {
		iommu_device_sysfs_remove(&smmu->iommu);
		return ERR_PTR(err);
	}

	err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops);
	if (err < 0) {
		iommu_device_unregister(&smmu->iommu);
		iommu_device_sysfs_remove(&smmu->iommu);
		return ERR_PTR(err);
	}

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_init(smmu);

	return smmu;
}

void tegra_smmu_remove(struct tegra_smmu *smmu)
{
	iommu_device_unregister(&smmu->iommu);
	iommu_device_sysfs_remove(&smmu->iommu);

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_exit(smmu);
}