// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011-2014 NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>

#include <soc/tegra/ahb.h>
#include <soc/tegra/mc.h>

struct tegra_smmu_group {
	struct list_head list;
	struct tegra_smmu *smmu;
	const struct tegra_smmu_group_soc *soc;
	struct iommu_group *group;
	unsigned int swgroup;
};

struct tegra_smmu {
	void __iomem *regs;
	struct device *dev;

	struct tegra_mc *mc;
	const struct tegra_smmu_soc *soc;

	struct list_head groups;

	unsigned long pfn_mask;
	unsigned long tlb_mask;

	unsigned long *asids;
	struct mutex lock;

	struct list_head list;

	struct dentry *debugfs;

	struct iommu_device iommu;	/* IOMMU Core code handle */
};

struct tegra_smmu_as {
	struct iommu_domain domain;
	struct tegra_smmu *smmu;
	unsigned int use_count;
	spinlock_t lock;
	u32 *count;
	struct page **pts;
	struct page *pd;
	dma_addr_t pd_dma;
	unsigned id;
	u32 attr;
};

static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom)
{
	return container_of(dom, struct tegra_smmu_as, domain);
}

static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
			       unsigned long offset)
{
	writel(value, smmu->regs + offset);
}

static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
{
	return readl(smmu->regs + offset);
}

#define SMMU_CONFIG 0x010
#define SMMU_CONFIG_ENABLE (1 << 0)

#define SMMU_TLB_CONFIG 0x14
#define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
#define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
#define SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
	((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)

#define SMMU_PTC_CONFIG 0x18
#define SMMU_PTC_CONFIG_ENABLE (1 << 29)
#define SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24)
#define SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f)

#define SMMU_PTB_ASID 0x01c
#define SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)

#define SMMU_PTB_DATA 0x020
#define SMMU_PTB_DATA_VALUE(dma, attr) ((dma) >> 12 | (attr))

#define SMMU_MK_PDE(dma, attr) ((dma) >> SMMU_PTE_SHIFT | (attr))

#define SMMU_TLB_FLUSH 0x030
#define SMMU_TLB_FLUSH_VA_MATCH_ALL     (0 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_GROUP   (3 << 0)
#define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
					 SMMU_TLB_FLUSH_VA_MATCH_SECTION)
#define SMMU_TLB_FLUSH_VA_GROUP(addr)   ((((addr) & 0xffffc000) >> 12) | \
					 SMMU_TLB_FLUSH_VA_MATCH_GROUP)
#define SMMU_TLB_FLUSH_ASID_MATCH       (1 << 31)

#define SMMU_PTC_FLUSH 0x034
#define SMMU_PTC_FLUSH_TYPE_ALL (0 << 0)
#define SMMU_PTC_FLUSH_TYPE_ADR (1 << 0)

#define SMMU_PTC_FLUSH_HI 0x9b8
#define SMMU_PTC_FLUSH_HI_MASK 0x3

/* per-SWGROUP SMMU_*_ASID register */
#define SMMU_ASID_ENABLE (1 << 31)
#define SMMU_ASID_MASK 0x7f
#define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK)

/* page table definitions */
#define SMMU_NUM_PDE 1024
#define SMMU_NUM_PTE 1024

#define SMMU_SIZE_PD (SMMU_NUM_PDE * 4)
#define SMMU_SIZE_PT (SMMU_NUM_PTE * 4)

#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12

#define SMMU_PAGE_MASK		(~(SMMU_SIZE_PT-1))
#define SMMU_OFFSET_IN_PAGE(x)	((unsigned long)(x) & ~SMMU_PAGE_MASK)
#define SMMU_PFN_PHYS(x)	((phys_addr_t)(x) << SMMU_PTE_SHIFT)
#define SMMU_PHYS_PFN(x)	((unsigned long)((x) >> SMMU_PTE_SHIFT))

#define SMMU_PD_READABLE	(1 << 31)
#define SMMU_PD_WRITABLE	(1 << 30)
#define SMMU_PD_NONSECURE	(1 << 29)

#define SMMU_PDE_READABLE	(1 << 31)
#define SMMU_PDE_WRITABLE	(1 << 30)
#define SMMU_PDE_NONSECURE	(1 << 29)
#define SMMU_PDE_NEXT		(1 << 28)

#define SMMU_PTE_READABLE	(1 << 31)
#define SMMU_PTE_WRITABLE	(1 << 30)
#define SMMU_PTE_NONSECURE	(1 << 29)

#define SMMU_PDE_ATTR		(SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
				 SMMU_PDE_NONSECURE)

static unsigned int iova_pd_index(unsigned long iova)
{
	return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
}

static unsigned int iova_pt_index(unsigned long iova)
{
	return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1);
}

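/*
 * Illustration (not part of the driver logic): with SMMU_PDE_SHIFT = 22 and
 * SMMU_PTE_SHIFT = 12, a 32-bit IOVA splits into a 10-bit page directory
 * index, a 10-bit page table index and a 12-bit page offset. For a
 * hypothetical IOVA of 0x12345678:
 *
 *	iova_pd_index(0x12345678) = 0x12345678 >> 22           = 0x048
 *	iova_pt_index(0x12345678) = (0x12345678 >> 12) & 0x3ff = 0x345
 *	page offset               = 0x12345678 & 0xfff         = 0x678
 *
 * so each PDE covers 4 MiB of IOVA space and each PTE maps one 4 KiB page.
 */
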
static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
{
	addr >>= 12;

	return (addr & smmu->pfn_mask) == addr;
}

static dma_addr_t smmu_pde_to_dma(struct tegra_smmu *smmu, u32 pde)
{
	return (dma_addr_t)(pde & smmu->pfn_mask) << 12;
}

static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_ptc(struct tegra_smmu *smmu, dma_addr_t dma,
				  unsigned long offset)
{
	u32 value;

	offset &= ~(smmu->mc->soc->atom_size - 1);

	if (smmu->mc->soc->num_address_bits > 32) {
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		value = (dma >> 32) & SMMU_PTC_FLUSH_HI_MASK;
#else
		value = 0;
#endif
		smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
	}

	value = (dma + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
	smmu_writel(smmu, value, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_tlb(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
				       unsigned long asid)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_MATCH_ALL;
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
					  unsigned long asid,
					  unsigned long iova)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_SECTION(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
					unsigned long asid,
					unsigned long iova)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_GROUP(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush(struct tegra_smmu *smmu)
{
	smmu_readl(smmu, SMMU_PTB_ASID);
}

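/*
 * ASIDs are tracked in the smmu->asids bitmap. Callers are expected to hold
 * smmu->lock (as tegra_smmu_as_prepare()/unprepare() below do) so that
 * allocation and release of an ASID cannot race.
 */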
static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
{
	unsigned long id;

	id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
	if (id >= smmu->soc->num_asids)
		return -ENOSPC;

	set_bit(id, smmu->asids);
	*idp = id;

	return 0;
}

static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
{
	clear_bit(id, smmu->asids);
}

static bool tegra_smmu_capable(enum iommu_cap cap)
{
	return false;
}

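/*
 * An address space ("as") is one 4 KiB page directory page plus bookkeeping:
 * a per-PDE use count and an array of page table page pointers. Only
 * IOMMU_DOMAIN_UNMANAGED domains are supported, each covering a 32-bit
 * aperture.
 */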
static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
{
	struct tegra_smmu_as *as;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	as = kzalloc(sizeof(*as), GFP_KERNEL);
	if (!as)
		return NULL;

	as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;

	as->pd = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
	if (!as->pd) {
		kfree(as);
		return NULL;
	}

	as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
	if (!as->count) {
		__free_page(as->pd);
		kfree(as);
		return NULL;
	}

	as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
	if (!as->pts) {
		kfree(as->count);
		__free_page(as->pd);
		kfree(as);
		return NULL;
	}

	spin_lock_init(&as->lock);

	/* setup aperture */
	as->domain.geometry.aperture_start = 0;
	as->domain.geometry.aperture_end = 0xffffffff;
	as->domain.geometry.force_aperture = true;

	return &as->domain;
}

static void tegra_smmu_domain_free(struct iommu_domain *domain)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);

	/* TODO: free page directory and page tables */

	WARN_ON_ONCE(as->use_count);
	kfree(as->count);
	kfree(as->pts);
	kfree(as);
}

static const struct tegra_smmu_swgroup *
tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup)
{
	const struct tegra_smmu_swgroup *group = NULL;
	unsigned int i;

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		if (smmu->soc->swgroups[i].swgroup == swgroup) {
			group = &smmu->soc->swgroups[i];
			break;
		}
	}

	return group;
}

static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
			      unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value |= SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	} else {
		pr_warn("%s group from swgroup %u not found\n", __func__,
			swgroup);
		/* No point moving ahead if group was not found */
		return;
	}

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value |= BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}
}

static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
			       unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value &= ~SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	}

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value &= ~BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}
}

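/*
 * First attachment of an address space to this SMMU: map the page directory
 * for device access, allocate an ASID, flush the page table cache and the
 * per-ASID TLB entries, then program SMMU_PTB_ASID/SMMU_PTB_DATA to point
 * the ASID at the page directory. Subsequent attachments only bump the use
 * count. All of this runs under smmu->lock.
 */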
static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
				 struct tegra_smmu_as *as)
{
	u32 value;
	int err = 0;

	mutex_lock(&smmu->lock);

	if (as->use_count > 0) {
		as->use_count++;
		goto unlock;
	}

	as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(smmu->dev, as->pd_dma)) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We can't handle 64-bit DMA addresses */
	if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
		err = -ENOMEM;
		goto err_unmap;
	}

	err = tegra_smmu_alloc_asid(smmu, &as->id);
	if (err < 0)
		goto err_unmap;

	smmu_flush_ptc(smmu, as->pd_dma, 0);
	smmu_flush_tlb_asid(smmu, as->id);

	smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
	value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr);
	smmu_writel(smmu, value, SMMU_PTB_DATA);
	smmu_flush(smmu);

	as->smmu = smmu;
	as->use_count++;

	mutex_unlock(&smmu->lock);

	return 0;

err_unmap:
	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
unlock:
	mutex_unlock(&smmu->lock);

	return err;
}

static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
				    struct tegra_smmu_as *as)
{
	mutex_lock(&smmu->lock);

	if (--as->use_count > 0) {
		mutex_unlock(&smmu->lock);
		return;
	}

	tegra_smmu_free_asid(smmu, as->id);

	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);

	as->smmu = NULL;

	mutex_unlock(&smmu->lock);
}

static int tegra_smmu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned int index;
	int err;

	if (!fwspec)
		return -ENOENT;

	for (index = 0; index < fwspec->num_ids; index++) {
		err = tegra_smmu_as_prepare(smmu, as);
		if (err)
			goto disable;

		tegra_smmu_enable(smmu, fwspec->ids[index], as->id);
	}

	if (index == 0)
		return -ENODEV;

	return 0;

disable:
	while (index--) {
		tegra_smmu_disable(smmu, fwspec->ids[index], as->id);
		tegra_smmu_as_unprepare(smmu, as);
	}

	return err;
}

static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct tegra_smmu *smmu = as->smmu;
	unsigned int index;

	if (!fwspec)
		return;

	for (index = 0; index < fwspec->num_ids; index++) {
		tegra_smmu_disable(smmu, fwspec->ids[index], as->id);
		tegra_smmu_as_unprepare(smmu, as);
	}
}

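/*
 * Page directory and page table updates follow the same order: write the
 * entry in memory, sync that cache line to the device, flush the matching
 * page table cache line, invalidate the affected TLB entries (section or
 * group), and finally read back a register (smmu_flush()) so the writes are
 * known to have reached the hardware.
 */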
static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
			       u32 value)
{
	unsigned int pd_index = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;
	u32 *pd = page_address(as->pd);
	unsigned long offset = pd_index * sizeof(*pd);

	/* Set the page directory entry first */
	pd[pd_index] = value;

	/* Then flush the page directory entry from caches */
	dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
					 sizeof(*pd), DMA_TO_DEVICE);

	/* And flush the iommu */
	smmu_flush_ptc(smmu, as->pd_dma, offset);
	smmu_flush_tlb_section(smmu, as->id, iova);
	smmu_flush(smmu);
}

static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
{
	u32 *pt = page_address(pt_page);

	return pt + iova_pt_index(iova);
}

static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
				  dma_addr_t *dmap)
{
	unsigned int pd_index = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;
	struct page *pt_page;
	u32 *pd;

	pt_page = as->pts[pd_index];
	if (!pt_page)
		return NULL;

	pd = page_address(as->pd);
	*dmap = smmu_pde_to_dma(smmu, pd[pd_index]);

	return tegra_smmu_pte_offset(pt_page, iova);
}

static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
		       dma_addr_t *dmap, struct page *page)
{
	unsigned int pde = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;

	if (!as->pts[pde]) {
		dma_addr_t dma;

		dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
				   DMA_TO_DEVICE);
		if (dma_mapping_error(smmu->dev, dma)) {
			__free_page(page);
			return NULL;
		}

		if (!smmu_dma_addr_valid(smmu, dma)) {
			dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT,
				       DMA_TO_DEVICE);
			__free_page(page);
			return NULL;
		}

		as->pts[pde] = page;

		tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR |
							      SMMU_PDE_NEXT));

		*dmap = dma;
	} else {
		u32 *pd = page_address(as->pd);

		*dmap = smmu_pde_to_dma(smmu, pd[pde]);
	}

	return tegra_smmu_pte_offset(as->pts[pde], iova);
}

static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova)
{
	unsigned int pd_index = iova_pd_index(iova);

	as->count[pd_index]++;
}

static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
{
	unsigned int pde = iova_pd_index(iova);
	struct page *page = as->pts[pde];

	/*
	 * When no entries in this page table are used anymore, return the
	 * memory page to the system.
	 */
	if (--as->count[pde] == 0) {
		struct tegra_smmu *smmu = as->smmu;
		u32 *pd = page_address(as->pd);
		dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]);

		tegra_smmu_set_pde(as, iova, 0);

		dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE);
		__free_page(page);
		as->pts[pde] = NULL;
	}
}

static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
			       u32 *pte, dma_addr_t pte_dma, u32 val)
{
	struct tegra_smmu *smmu = as->smmu;
	unsigned long offset = SMMU_OFFSET_IN_PAGE(pte);

	*pte = val;

	dma_sync_single_range_for_device(smmu->dev, pte_dma, offset,
					 4, DMA_TO_DEVICE);
	smmu_flush_ptc(smmu, pte_dma, offset);
	smmu_flush_tlb_group(smmu, as->id, iova);
	smmu_flush(smmu);
}

static struct page *as_get_pde_page(struct tegra_smmu_as *as,
				    unsigned long iova, gfp_t gfp,
				    unsigned long *flags)
{
	unsigned int pde = iova_pd_index(iova);
	struct page *page = as->pts[pde];

	/* at first check whether allocation needs to be done at all */
	if (page)
		return page;

	/*
	 * In order to prevent exhaustion of the atomic memory pool, we
	 * allocate the page in a sleeping context if the GFP flags permit.
	 * Hence the spinlock needs to be unlocked and re-locked around the
	 * allocation.
	 */
	if (!(gfp & __GFP_ATOMIC))
		spin_unlock_irqrestore(&as->lock, *flags);

	page = alloc_page(gfp | __GFP_DMA | __GFP_ZERO);

	if (!(gfp & __GFP_ATOMIC))
		spin_lock_irqsave(&as->lock, *flags);

	/*
	 * In the case of a blocking allocation, a concurrent mapping may have
	 * won the PDE allocation while the lock was dropped. In that case the
	 * page we allocated isn't needed and can be freed, and an allocation
	 * failure isn't fatal because the winner's page can be used instead.
	 */
	if (as->pts[pde]) {
		if (page)
			__free_page(page);

		page = as->pts[pde];
	}

	return page;
}

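/*
 * __tegra_smmu_map()/__tegra_smmu_unmap() do the actual PTE manipulation and
 * are called by the tegra_smmu_map()/tegra_smmu_unmap() wrappers further down
 * with as->lock held (IRQ-safe spinlock); the gfp flags and saved IRQ flags
 * are passed down so that as_get_pde_page() can drop the lock around a
 * sleeping allocation.
 */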
static int
__tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
		 phys_addr_t paddr, size_t size, int prot, gfp_t gfp,
		 unsigned long *flags)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	dma_addr_t pte_dma;
	struct page *page;
	u32 pte_attrs;
	u32 *pte;

	page = as_get_pde_page(as, iova, gfp, flags);
	if (!page)
		return -ENOMEM;

	pte = as_get_pte(as, iova, &pte_dma, page);
	if (!pte)
		return -ENOMEM;

	/* If we aren't overwriting a pre-existing entry, increment use */
	if (*pte == 0)
		tegra_smmu_pte_get_use(as, iova);

	pte_attrs = SMMU_PTE_NONSECURE;

	if (prot & IOMMU_READ)
		pte_attrs |= SMMU_PTE_READABLE;

	if (prot & IOMMU_WRITE)
		pte_attrs |= SMMU_PTE_WRITABLE;

	tegra_smmu_set_pte(as, iova, pte, pte_dma,
			   SMMU_PHYS_PFN(paddr) | pte_attrs);

	return 0;
}

static size_t
__tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
		   size_t size, struct iommu_iotlb_gather *gather)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	dma_addr_t pte_dma;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
	if (!pte || !*pte)
		return 0;

	tegra_smmu_set_pte(as, iova, pte, pte_dma, 0);
	tegra_smmu_pte_put_use(as, iova);

	return size;
}

static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&as->lock, flags);
	ret = __tegra_smmu_map(domain, iova, paddr, size, prot, gfp, &flags);
	spin_unlock_irqrestore(&as->lock, flags);

	return ret;
}

static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t size, struct iommu_iotlb_gather *gather)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned long flags;

	spin_lock_irqsave(&as->lock, flags);
	size = __tegra_smmu_unmap(domain, iova, size, gather);
	spin_unlock_irqrestore(&as->lock, flags);

	return size;
}

static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned long pfn;
	dma_addr_t pte_dma;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
	if (!pte || !*pte)
		return 0;

	pfn = *pte & as->smmu->pfn_mask;

	return SMMU_PFN_PHYS(pfn) + SMMU_OFFSET_IN_PAGE(iova);
}

static struct iommu_device *tegra_smmu_probe_device(struct device *dev)
{
	struct tegra_smmu *smmu = dev_iommu_priv_get(dev);

	if (!smmu)
		return ERR_PTR(-ENODEV);

	return &smmu->iommu;
}

static void tegra_smmu_release_device(struct device *dev) {}

static const struct tegra_smmu_group_soc *
tegra_smmu_find_group(struct tegra_smmu *smmu, unsigned int swgroup)
{
	unsigned int i, j;

	for (i = 0; i < smmu->soc->num_groups; i++)
		for (j = 0; j < smmu->soc->groups[i].num_swgroups; j++)
			if (smmu->soc->groups[i].swgroups[j] == swgroup)
				return &smmu->soc->groups[i];

	return NULL;
}

static void tegra_smmu_group_release(void *iommu_data)
{
	struct tegra_smmu_group *group = iommu_data;
	struct tegra_smmu *smmu = group->smmu;

	mutex_lock(&smmu->lock);
	list_del(&group->list);
	mutex_unlock(&smmu->lock);
}

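/*
 * Clients whose swgroups belong to the same SoC-defined group (or that share
 * a swgroup) are placed in one iommu_group. Groups are kept on smmu->groups
 * and are taken off that list again in tegra_smmu_group_release() once the
 * iommu_group's last reference is dropped.
 */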
static struct iommu_group *tegra_smmu_device_group(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
	const struct tegra_smmu_group_soc *soc;
	unsigned int swgroup = fwspec->ids[0];
	struct tegra_smmu_group *group;
	struct iommu_group *grp;

	/* Find the group_soc associated with the swgroup */
	soc = tegra_smmu_find_group(smmu, swgroup);

	mutex_lock(&smmu->lock);

	/* Find an existing iommu_group associated with the swgroup or group_soc */
	list_for_each_entry(group, &smmu->groups, list)
		if ((group->swgroup == swgroup) || (soc && group->soc == soc)) {
			grp = iommu_group_ref_get(group->group);
			mutex_unlock(&smmu->lock);
			return grp;
		}

	group = devm_kzalloc(smmu->dev, sizeof(*group), GFP_KERNEL);
	if (!group) {
		mutex_unlock(&smmu->lock);
		return NULL;
	}

	INIT_LIST_HEAD(&group->list);
	group->swgroup = swgroup;
	group->smmu = smmu;
	group->soc = soc;

	group->group = iommu_group_alloc();
	if (IS_ERR(group->group)) {
		devm_kfree(smmu->dev, group);
		mutex_unlock(&smmu->lock);
		return NULL;
	}

	iommu_group_set_iommudata(group->group, group, tegra_smmu_group_release);
	if (soc)
		iommu_group_set_name(group->group, soc->name);
	list_add_tail(&group->list, &smmu->groups);
	mutex_unlock(&smmu->lock);

	return group->group;
}

static int tegra_smmu_of_xlate(struct device *dev,
			       struct of_phandle_args *args)
{
	struct platform_device *iommu_pdev = of_find_device_by_node(args->np);
	struct tegra_mc *mc = platform_get_drvdata(iommu_pdev);
	u32 id = args->args[0];

	/*
	 * Note: we are releasing the reference to &iommu_pdev->dev here, which
	 * is mc->dev. Although some functions in tegra_smmu_ops may keep using
	 * its private data beyond this point, it's still safe to do so because
	 * the SMMU parent device is the same as the MC, so the reference count
	 * isn't strictly necessary.
	 */
	put_device(&iommu_pdev->dev);

	dev_iommu_priv_set(dev, mc->smmu);

	return iommu_fwspec_add_ids(dev, &id, 1);
}

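/*
 * Illustration (not taken from this file): tegra_smmu_of_xlate() consumes the
 * single cell after the memory controller phandle in a client's "iommus"
 * property as the swgroup ID, e.g. a device tree node along the lines of:
 *
 *	dc@54200000 {
 *		...
 *		iommus = <&mc TEGRA_SWGROUP_DC>;
 *	};
 *
 * The node name and the TEGRA_SWGROUP_DC constant are only an example; see
 * the Tegra memory controller device tree bindings for the exact format.
 */
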
static const struct iommu_ops tegra_smmu_ops = {
	.capable = tegra_smmu_capable,
	.domain_alloc = tegra_smmu_domain_alloc,
	.domain_free = tegra_smmu_domain_free,
	.attach_dev = tegra_smmu_attach_dev,
	.detach_dev = tegra_smmu_detach_dev,
	.probe_device = tegra_smmu_probe_device,
	.release_device = tegra_smmu_release_device,
	.device_group = tegra_smmu_device_group,
	.map = tegra_smmu_map,
	.unmap = tegra_smmu_unmap,
	.iova_to_phys = tegra_smmu_iova_to_phys,
	.of_xlate = tegra_smmu_of_xlate,
	.pgsize_bitmap = SZ_4K,
};

static void tegra_smmu_ahb_enable(void)
{
	static const struct of_device_id ahb_match[] = {
		{ .compatible = "nvidia,tegra30-ahb", },
		{ }
	};
	struct device_node *ahb;

	ahb = of_find_matching_node(NULL, ahb_match);
	if (ahb) {
		tegra_ahb_enable_smmu(ahb);
		of_node_put(ahb);
	}
}

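/*
 * The debugfs files below dump the per-swgroup ASID registers and the
 * per-client enable bits. Hypothetical example of the "swgroups" output
 * (the names and values depend entirely on the running system):
 *
 *	swgroup    enabled  ASID
 *	------------------------
 *	dc         yes      0x01
 *	vi         no       0x02
 */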
static int tegra_smmu_swgroups_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_printf(s, "swgroup    enabled  ASID\n");
	seq_printf(s, "------------------------\n");

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		const struct tegra_smmu_swgroup *group = &smmu->soc->swgroups[i];
		const char *status;
		unsigned int asid;

		value = smmu_readl(smmu, group->reg);

		if (value & SMMU_ASID_ENABLE)
			status = "yes";
		else
			status = "no";

		asid = value & SMMU_ASID_MASK;

		seq_printf(s, "%-9s  %-7s  %#04x\n", group->name, status,
			   asid);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(tegra_smmu_swgroups);

static int tegra_smmu_clients_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_printf(s, "client       enabled\n");
	seq_printf(s, "--------------------\n");

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];
		const char *status;

		value = smmu_readl(smmu, client->smmu.reg);

		if (value & BIT(client->smmu.bit))
			status = "yes";
		else
			status = "no";

		seq_printf(s, "%-12s %s\n", client->name, status);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(tegra_smmu_clients);

static void tegra_smmu_debugfs_init(struct tegra_smmu *smmu)
{
	smmu->debugfs = debugfs_create_dir("smmu", NULL);
	if (!smmu->debugfs)
		return;

	debugfs_create_file("swgroups", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_swgroups_fops);
	debugfs_create_file("clients", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_clients_fops);
}

static void tegra_smmu_debugfs_exit(struct tegra_smmu *smmu)
{
	debugfs_remove_recursive(smmu->debugfs);
}

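/*
 * This SMMU is not probed as a standalone platform driver:
 * tegra_smmu_probe()/tegra_smmu_remove() are called by the Tegra memory
 * controller driver, and the SMMU shares the MC's register space
 * (smmu->regs = mc->regs below).
 */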
struct tegra_smmu *tegra_smmu_probe(struct device *dev,
				    const struct tegra_smmu_soc *soc,
				    struct tegra_mc *mc)
{
	struct tegra_smmu *smmu;
	size_t size;
	u32 value;
	int err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu)
		return ERR_PTR(-ENOMEM);

	size = BITS_TO_LONGS(soc->num_asids) * sizeof(long);

	smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL);
	if (!smmu->asids)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&smmu->groups);
	mutex_init(&smmu->lock);

	smmu->regs = mc->regs;
	smmu->soc = soc;
	smmu->dev = dev;
	smmu->mc = mc;

	smmu->pfn_mask =
		BIT_MASK(mc->soc->num_address_bits - SMMU_PTE_SHIFT) - 1;
	dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
		mc->soc->num_address_bits, smmu->pfn_mask);
	smmu->tlb_mask = (1 << fls(smmu->soc->num_tlb_lines)) - 1;
	dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
		smmu->tlb_mask);

	value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);

	if (soc->supports_request_limit)
		value |= SMMU_PTC_CONFIG_REQ_LIMIT(8);

	smmu_writel(smmu, value, SMMU_PTC_CONFIG);

	value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
		SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);

	if (soc->supports_round_robin_arbitration)
		value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;

	smmu_writel(smmu, value, SMMU_TLB_CONFIG);

	smmu_flush_ptc_all(smmu);
	smmu_flush_tlb(smmu);
	smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
	smmu_flush(smmu);

	tegra_smmu_ahb_enable();

	err = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, dev_name(dev));
	if (err)
		return ERR_PTR(err);

	iommu_device_set_ops(&smmu->iommu, &tegra_smmu_ops);
	iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);

	err = iommu_device_register(&smmu->iommu);
	if (err) {
		iommu_device_sysfs_remove(&smmu->iommu);
		return ERR_PTR(err);
	}

	err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops);
	if (err < 0) {
		iommu_device_unregister(&smmu->iommu);
		iommu_device_sysfs_remove(&smmu->iommu);
		return ERR_PTR(err);
	}

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_init(smmu);

	return smmu;
}

void tegra_smmu_remove(struct tegra_smmu *smmu)
{
	iommu_device_unregister(&smmu->iommu);
	iommu_device_sysfs_remove(&smmu->iommu);

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_exit(smmu);
}