// SPDX-License-Identifier: GPL-2.0
/*
 * IOMMU API for Renesas VMSA-compatible IPMMU
 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *
 * Copyright (C) 2014 Renesas Electronics Corporation
 */

#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>

#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
#include <asm/dma-iommu.h>
#include <asm/pgalloc.h>
#else
#define arm_iommu_create_mapping(...)	NULL
#define arm_iommu_attach_device(...)	-ENODEV
#define arm_iommu_release_mapping(...)	do {} while (0)
#define arm_iommu_detach_device(...)	do {} while (0)
#endif
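
/*
 * Note: the fallback macros above only keep ipmmu_init_arm_mapping() and the
 * release/detach calls below compiling. At runtime those paths are taken only
 * when CONFIG_ARM is set and CONFIG_IOMMU_DMA is not, in which case the real
 * helpers from <asm/dma-iommu.h> are available.
 */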

#define IPMMU_CTX_MAX		8U
#define IPMMU_CTX_INVALID	-1

#define IPMMU_UTLB_MAX		48U

struct ipmmu_features {
	bool use_ns_alias_offset;
	bool has_cache_leaf_nodes;
	unsigned int number_of_contexts;
	unsigned int num_utlbs;
	bool setup_imbuscr;
	bool twobit_imttbcr_sl0;
	bool reserved_context;
	bool cache_snoop;
	unsigned int ctx_offset_base;
	unsigned int ctx_offset_stride;
	unsigned int utlb_offset_base;
};

struct ipmmu_vmsa_device {
	struct device *dev;
	void __iomem *base;
	struct iommu_device iommu;
	struct ipmmu_vmsa_device *root;
	const struct ipmmu_features *features;
	unsigned int num_ctx;
	spinlock_t lock;			/* Protects ctx and domains[] */
	DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
	struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];
	s8 utlb_ctx[IPMMU_UTLB_MAX];

	struct iommu_group *group;
	struct dma_iommu_mapping *mapping;
};

struct ipmmu_vmsa_domain {
	struct ipmmu_vmsa_device *mmu;
	struct iommu_domain io_domain;

	struct io_pgtable_cfg cfg;
	struct io_pgtable_ops *iop;

	unsigned int context_id;
	struct mutex mutex;			/* Protects mappings */
};

static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct ipmmu_vmsa_domain, io_domain);
}

static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	return fwspec ? fwspec->iommu_priv : NULL;
}

#define TLB_LOOP_TIMEOUT		100	/* 100us */

/* -----------------------------------------------------------------------------
 * Registers Definition
 */

#define IM_NS_ALIAS_OFFSET		0x800

/* MMU "context" registers */
#define IMCTR				0x0000		/* R-Car Gen2/3 */
#define IMCTR_INTEN			(1 << 2)	/* R-Car Gen2/3 */
#define IMCTR_FLUSH			(1 << 1)	/* R-Car Gen2/3 */
#define IMCTR_MMUEN			(1 << 0)	/* R-Car Gen2/3 */

#define IMTTBCR				0x0008		/* R-Car Gen2/3 */
#define IMTTBCR_EAE			(1 << 31)	/* R-Car Gen2/3 */
#define IMTTBCR_SH0_INNER_SHAREABLE	(3 << 12)	/* R-Car Gen2 only */
#define IMTTBCR_ORGN0_WB_WA		(1 << 10)	/* R-Car Gen2 only */
#define IMTTBCR_IRGN0_WB_WA		(1 << 8)	/* R-Car Gen2 only */
#define IMTTBCR_SL0_TWOBIT_LVL_1	(2 << 6)	/* R-Car Gen3 only */
#define IMTTBCR_SL0_LVL_1		(1 << 4)	/* R-Car Gen2 only */

#define IMBUSCR				0x000c		/* R-Car Gen2 only */
#define IMBUSCR_DVM			(1 << 2)	/* R-Car Gen2 only */
#define IMBUSCR_BUSSEL_MASK		(3 << 0)	/* R-Car Gen2 only */

#define IMTTLBR0			0x0010		/* R-Car Gen2/3 */
#define IMTTUBR0			0x0014		/* R-Car Gen2/3 */

#define IMSTR				0x0020		/* R-Car Gen2/3 */
#define IMSTR_MHIT			(1 << 4)	/* R-Car Gen2/3 */
#define IMSTR_ABORT			(1 << 2)	/* R-Car Gen2/3 */
#define IMSTR_PF			(1 << 1)	/* R-Car Gen2/3 */
#define IMSTR_TF			(1 << 0)	/* R-Car Gen2/3 */

#define IMMAIR0				0x0028		/* R-Car Gen2/3 */

#define IMELAR				0x0030		/* R-Car Gen2/3, IMEAR on R-Car Gen2 */
#define IMEUAR				0x0034		/* R-Car Gen3 only */

/* uTLB registers */
#define IMUCTR(n)			((n) < 32 ? IMUCTR0(n) : IMUCTR32(n))
#define IMUCTR0(n)			(0x0300 + ((n) * 16))		/* R-Car Gen2/3 */
#define IMUCTR32(n)			(0x0600 + (((n) - 32) * 16))	/* R-Car Gen3 only */
#define IMUCTR_TTSEL_MMU(n)		((n) << 4)	/* R-Car Gen2/3 */
#define IMUCTR_FLUSH			(1 << 1)	/* R-Car Gen2/3 */
#define IMUCTR_MMUEN			(1 << 0)	/* R-Car Gen2/3 */

#define IMUASID(n)			((n) < 32 ? IMUASID0(n) : IMUASID32(n))
#define IMUASID0(n)			(0x0308 + ((n) * 16))		/* R-Car Gen2/3 */
#define IMUASID32(n)			(0x0608 + (((n) - 32) * 16))	/* R-Car Gen3 only */
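
/*
 * uTLBs 0-31 live in the bank starting at 0x0300 and uTLBs 32-47 (R-Car Gen3
 * only) in the bank starting at 0x0600, both with a 16-byte stride. For
 * example, IMUCTR(5) = 0x0300 + 5 * 16 = 0x0350, while IMUCTR(33) =
 * 0x0600 + (33 - 32) * 16 = 0x0610.
 */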

/* -----------------------------------------------------------------------------
 * Root device handling
 */

static struct platform_driver ipmmu_driver;

static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu)
{
	return mmu->root == mmu;
}

static int __ipmmu_check_device(struct device *dev, void *data)
{
	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
	struct ipmmu_vmsa_device **rootp = data;

	if (ipmmu_is_root(mmu))
		*rootp = mmu;

	return 0;
}

static struct ipmmu_vmsa_device *ipmmu_find_root(void)
{
	struct ipmmu_vmsa_device *root = NULL;

	return driver_for_each_device(&ipmmu_driver.driver, NULL, &root,
				      __ipmmu_check_device) == 0 ? root : NULL;
}

/* -----------------------------------------------------------------------------
 * Read/Write Access
 */

static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
{
	return ioread32(mmu->base + offset);
}

static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
			u32 data)
{
	iowrite32(data, mmu->base + offset);
}

static unsigned int ipmmu_ctx_reg(struct ipmmu_vmsa_device *mmu,
				  unsigned int context_id, unsigned int reg)
{
	return mmu->features->ctx_offset_base +
	       context_id * mmu->features->ctx_offset_stride + reg;
}
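
/*
 * For example, with the feature sets defined below (ctx_offset_base = 0,
 * ctx_offset_stride = 0x40), the IMTTBCR register (0x0008) of context 2
 * resolves to 2 * 0x40 + 0x0008 = 0x0088.
 */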

static u32 ipmmu_ctx_read(struct ipmmu_vmsa_device *mmu,
			  unsigned int context_id, unsigned int reg)
{
	return ipmmu_read(mmu, ipmmu_ctx_reg(mmu, context_id, reg));
}

static void ipmmu_ctx_write(struct ipmmu_vmsa_device *mmu,
			    unsigned int context_id, unsigned int reg,
			    u32 data)
{
	ipmmu_write(mmu, ipmmu_ctx_reg(mmu, context_id, reg), data);
}

static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain,
			       unsigned int reg)
{
	return ipmmu_ctx_read(domain->mmu->root, domain->context_id, reg);
}

static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain,
				 unsigned int reg, u32 data)
{
	ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
}

static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain,
				unsigned int reg, u32 data)
{
	if (domain->mmu != domain->mmu->root)
		ipmmu_ctx_write(domain->mmu, domain->context_id, reg, data);

	ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
}

static u32 ipmmu_utlb_reg(struct ipmmu_vmsa_device *mmu, unsigned int reg)
{
	return mmu->features->utlb_offset_base + reg;
}

static void ipmmu_imuasid_write(struct ipmmu_vmsa_device *mmu,
				unsigned int utlb, u32 data)
{
	ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUASID(utlb)), data);
}

static void ipmmu_imuctr_write(struct ipmmu_vmsa_device *mmu,
			       unsigned int utlb, u32 data)
{
	ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUCTR(utlb)), data);
}

/* -----------------------------------------------------------------------------
 * TLB and microTLB Management
 */

/* Wait for any pending TLB invalidations to complete */
static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
{
	unsigned int count = 0;

	while (ipmmu_ctx_read_root(domain, IMCTR) & IMCTR_FLUSH) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(domain->mmu->dev,
					    "TLB sync timed out -- MMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}
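
/*
 * With TLB_LOOP_TIMEOUT == 100 and a 1us delay per iteration, the sync above
 * gives up after roughly 100us, matching the comment on the macro.
 */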

static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
{
	u32 reg;

	reg = ipmmu_ctx_read_root(domain, IMCTR);
	reg |= IMCTR_FLUSH;
	ipmmu_ctx_write_all(domain, IMCTR, reg);

	ipmmu_tlb_sync(domain);
}

/*
 * Enable MMU translation for the microTLB.
 */
static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
			      unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	/*
	 * TODO: Reference-count the microTLB as several bus masters can be
	 * connected to the same microTLB.
	 */

	/* TODO: What should we set the ASID to ? */
	ipmmu_imuasid_write(mmu, utlb, 0);
	/* TODO: Do we need to flush the microTLB ? */
	ipmmu_imuctr_write(mmu, utlb, IMUCTR_TTSEL_MMU(domain->context_id) |
				      IMUCTR_FLUSH | IMUCTR_MMUEN);
	mmu->utlb_ctx[utlb] = domain->context_id;
}

/*
 * Disable MMU translation for the microTLB.
 */
static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
			       unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	ipmmu_imuctr_write(mmu, utlb, 0);
	mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID;
}

static void ipmmu_tlb_flush_all(void *cookie)
{
	struct ipmmu_vmsa_domain *domain = cookie;

	ipmmu_tlb_invalidate(domain);
}

static void ipmmu_tlb_flush(unsigned long iova, size_t size,
			    size_t granule, void *cookie)
{
	ipmmu_tlb_flush_all(cookie);
}

static const struct iommu_flush_ops ipmmu_flush_ops = {
	.tlb_flush_all = ipmmu_tlb_flush_all,
	.tlb_flush_walk = ipmmu_tlb_flush,
	.tlb_flush_leaf = ipmmu_tlb_flush,
};

/* -----------------------------------------------------------------------------
 * Domain/Context Management
 */

static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu,
					 struct ipmmu_vmsa_domain *domain)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mmu->lock, flags);

	ret = find_first_zero_bit(mmu->ctx, mmu->num_ctx);
	if (ret != mmu->num_ctx) {
		mmu->domains[ret] = domain;
		set_bit(ret, mmu->ctx);
	} else
		ret = -EBUSY;

	spin_unlock_irqrestore(&mmu->lock, flags);

	return ret;
}

static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
				      unsigned int context_id)
{
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	clear_bit(context_id, mmu->ctx);
	mmu->domains[context_id] = NULL;

	spin_unlock_irqrestore(&mmu->lock, flags);
}

static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain)
{
	u64 ttbr;
	u32 tmp;

	/* TTBR0 */
	ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
	ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr);
	ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32);

	/*
	 * TTBCR
	 * We use long descriptors and allocate the whole 32-bit VA space to
	 * TTBR0.
	 */
	if (domain->mmu->features->twobit_imttbcr_sl0)
		tmp = IMTTBCR_SL0_TWOBIT_LVL_1;
	else
		tmp = IMTTBCR_SL0_LVL_1;

	if (domain->mmu->features->cache_snoop)
		tmp |= IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
		       IMTTBCR_IRGN0_WB_WA;

	ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE | tmp);

	/* MAIR0 */
	ipmmu_ctx_write_root(domain, IMMAIR0,
			     domain->cfg.arm_lpae_s1_cfg.mair);

	/* IMBUSCR */
	if (domain->mmu->features->setup_imbuscr)
		ipmmu_ctx_write_root(domain, IMBUSCR,
				     ipmmu_ctx_read_root(domain, IMBUSCR) &
				     ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));

	/*
	 * IMSTR
	 * Clear all interrupt flags.
	 */
	ipmmu_ctx_write_root(domain, IMSTR, ipmmu_ctx_read_root(domain, IMSTR));

	/*
	 * IMCTR
	 * Enable the MMU and interrupt generation. The long-descriptor
	 * translation table format doesn't use TEX remapping. Don't enable AF
	 * software management as we have no use for it. Flush the TLB as
	 * required when modifying the context registers.
	 */
	ipmmu_ctx_write_all(domain, IMCTR,
			    IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);
}

static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
{
	int ret;

	/*
	 * Allocate the page table operations.
	 *
	 * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
	 * access, Long-descriptor format" that the NStable bit being set in a
	 * table descriptor will result in the NStable and NS bits of all child
	 * entries being ignored and considered as being set. The IPMMU seems
	 * not to comply with this, as it generates a secure access page fault
	 * if any of the NStable and NS bits isn't set when running in
	 * non-secure mode.
	 */
	domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
	domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
	domain->cfg.ias = 32;
	domain->cfg.oas = 40;
	domain->cfg.tlb = &ipmmu_flush_ops;
	domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
	domain->io_domain.geometry.force_aperture = true;
	/*
	 * TODO: Add support for coherent walk through CCI with DVM and remove
	 * cache handling. For now, delegate it to the io-pgtable code.
	 */
	domain->cfg.coherent_walk = false;
	domain->cfg.iommu_dev = domain->mmu->root->dev;

	/*
	 * Find an unused context.
	 */
	ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
	if (ret < 0)
		return ret;

	domain->context_id = ret;

	domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
					   domain);
	if (!domain->iop) {
		ipmmu_domain_free_context(domain->mmu->root,
					  domain->context_id);
		return -EINVAL;
	}

	ipmmu_domain_setup_context(domain);
	return 0;
}

static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
{
	if (!domain->mmu)
		return;

	/*
	 * Disable the context. Flush the TLB as required when modifying the
	 * context registers.
	 *
	 * TODO: Is TLB flush really needed ?
	 */
	ipmmu_ctx_write_all(domain, IMCTR, IMCTR_FLUSH);
	ipmmu_tlb_sync(domain);
	ipmmu_domain_free_context(domain->mmu->root, domain->context_id);
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
{
	const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
	struct ipmmu_vmsa_device *mmu = domain->mmu;
	unsigned long iova;
	u32 status;

	status = ipmmu_ctx_read_root(domain, IMSTR);
	if (!(status & err_mask))
		return IRQ_NONE;

	iova = ipmmu_ctx_read_root(domain, IMELAR);
	if (IS_ENABLED(CONFIG_64BIT))
		iova |= (u64)ipmmu_ctx_read_root(domain, IMEUAR) << 32;

	/*
	 * Clear the error status flags. Unlike traditional interrupt flag
	 * registers that must be cleared by writing 1, this status register
	 * seems to require 0. The error address register must be read before,
	 * otherwise its value will be 0.
	 */
	ipmmu_ctx_write_root(domain, IMSTR, 0);

	/* Log fatal errors. */
	if (status & IMSTR_MHIT)
		dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%lx\n",
				    iova);
	if (status & IMSTR_ABORT)
		dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%lx\n",
				    iova);

	if (!(status & (IMSTR_PF | IMSTR_TF)))
		return IRQ_NONE;

	/*
	 * Try to handle page faults and translation faults.
	 *
	 * TODO: We need to look up the faulty device based on the I/O VA. Use
	 * the IOMMU device for now.
	 */
	if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
		return IRQ_HANDLED;

	dev_err_ratelimited(mmu->dev,
			    "Unhandled fault: status 0x%08x iova 0x%lx\n",
			    status, iova);

	return IRQ_HANDLED;
}

static irqreturn_t ipmmu_irq(int irq, void *dev)
{
	struct ipmmu_vmsa_device *mmu = dev;
	irqreturn_t status = IRQ_NONE;
	unsigned int i;
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	/*
	 * Check interrupts for all active contexts.
	 */
	for (i = 0; i < mmu->num_ctx; i++) {
		if (!mmu->domains[i])
			continue;
		if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED)
			status = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&mmu->lock, flags);

	return status;
}

/* -----------------------------------------------------------------------------
 * IOMMU Operations
 */

static struct iommu_domain *__ipmmu_domain_alloc(unsigned type)
{
	struct ipmmu_vmsa_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	mutex_init(&domain->mutex);

	return &domain->io_domain;
}

static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
{
	struct iommu_domain *io_domain = NULL;

	switch (type) {
	case IOMMU_DOMAIN_UNMANAGED:
		io_domain = __ipmmu_domain_alloc(type);
		break;

	case IOMMU_DOMAIN_DMA:
		io_domain = __ipmmu_domain_alloc(type);
		if (io_domain && iommu_get_dma_cookie(io_domain)) {
			kfree(io_domain);
			io_domain = NULL;
		}
		break;
	}

	return io_domain;
}

static void ipmmu_domain_free(struct iommu_domain *io_domain)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/*
	 * Free the domain resources. We assume that all devices have already
	 * been detached.
	 */
	iommu_put_dma_cookie(io_domain);
	ipmmu_domain_destroy_context(domain);
	free_io_pgtable_ops(domain->iop);
	kfree(domain);
}

static int ipmmu_attach_device(struct iommu_domain *io_domain,
			       struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned int i;
	int ret = 0;

	if (!mmu) {
		dev_err(dev, "Cannot attach to IPMMU\n");
		return -ENXIO;
	}

	mutex_lock(&domain->mutex);

	if (!domain->mmu) {
		/* The domain hasn't been used yet, initialize it. */
		domain->mmu = mmu;
		ret = ipmmu_domain_init_context(domain);
		if (ret < 0) {
			dev_err(dev, "Unable to initialize IPMMU context\n");
			domain->mmu = NULL;
		} else {
			dev_info(dev, "Using IPMMU context %u\n",
				 domain->context_id);
		}
	} else if (domain->mmu != mmu) {
		/*
		 * Something is wrong, we can't attach two devices using
		 * different IOMMUs to the same domain.
		 */
		dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n",
			dev_name(mmu->dev), dev_name(domain->mmu->dev));
		ret = -EINVAL;
	} else
		dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);

	mutex_unlock(&domain->mutex);

	if (ret < 0)
		return ret;

	for (i = 0; i < fwspec->num_ids; ++i)
		ipmmu_utlb_enable(domain, fwspec->ids[i]);

	return 0;
}

static void ipmmu_detach_device(struct iommu_domain *io_domain,
				struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned int i;

	for (i = 0; i < fwspec->num_ids; ++i)
		ipmmu_utlb_disable(domain, fwspec->ids[i]);

	/*
	 * TODO: Optimize by disabling the context when no device is attached.
	 */
}

static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	if (!domain)
		return -ENODEV;

	return domain->iop->map(domain->iop, iova, paddr, size, prot);
}

static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
			  size_t size, struct iommu_iotlb_gather *gather)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	return domain->iop->unmap(domain->iop, iova, size, gather);
}

static void ipmmu_flush_iotlb_all(struct iommu_domain *io_domain)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	if (domain->mmu)
		ipmmu_tlb_flush_all(domain);
}

static void ipmmu_iotlb_sync(struct iommu_domain *io_domain,
			     struct iommu_iotlb_gather *gather)
{
	ipmmu_flush_iotlb_all(io_domain);
}

static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
				      dma_addr_t iova)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/* TODO: Is locking needed ? */

	return domain->iop->iova_to_phys(domain->iop, iova);
}

static int ipmmu_init_platform_device(struct device *dev,
				      struct of_phandle_args *args)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct platform_device *ipmmu_pdev;

	ipmmu_pdev = of_find_device_by_node(args->np);
	if (!ipmmu_pdev)
		return -ENODEV;

	fwspec->iommu_priv = platform_get_drvdata(ipmmu_pdev);

	return 0;
}

static const struct soc_device_attribute soc_rcar_gen3[] = {
	{ .soc_id = "r8a774a1", },
	{ .soc_id = "r8a774b1", },
	{ .soc_id = "r8a774c0", },
	{ .soc_id = "r8a7795", },
	{ .soc_id = "r8a7796", },
	{ .soc_id = "r8a77965", },
	{ .soc_id = "r8a77970", },
	{ .soc_id = "r8a77990", },
	{ .soc_id = "r8a77995", },
	{ /* sentinel */ }
};

static const struct soc_device_attribute soc_rcar_gen3_whitelist[] = {
	{ .soc_id = "r8a774b1", },
	{ .soc_id = "r8a774c0", },
	{ .soc_id = "r8a7795", .revision = "ES3.*" },
	{ .soc_id = "r8a77965", },
	{ .soc_id = "r8a77990", },
	{ .soc_id = "r8a77995", },
	{ /* sentinel */ }
};

static const char * const rcar_gen3_slave_whitelist[] = {
};
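
/*
 * Note: as long as the slave whitelist above stays empty,
 * ipmmu_slave_whitelist() below rejects every slave device on the SoCs
 * matched by soc_rcar_gen3_whitelist[].
 */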

static bool ipmmu_slave_whitelist(struct device *dev)
{
	unsigned int i;

	/*
	 * For R-Car Gen3 use a whitelist to opt in slave devices.
	 * For other SoCs, this returns true anyway.
	 */
	if (!soc_device_match(soc_rcar_gen3))
		return true;

	/* Check whether this R-Car Gen3 SoC can use the IPMMU correctly or not */
	if (!soc_device_match(soc_rcar_gen3_whitelist))
		return false;

	/* Check whether this slave device can work with the IPMMU */
	for (i = 0; i < ARRAY_SIZE(rcar_gen3_slave_whitelist); i++) {
		if (!strcmp(dev_name(dev), rcar_gen3_slave_whitelist[i]))
			return true;
	}

	/* Otherwise, do not allow use of IPMMU */
	return false;
}

static int ipmmu_of_xlate(struct device *dev,
			  struct of_phandle_args *spec)
{
	if (!ipmmu_slave_whitelist(dev))
		return -ENODEV;

	iommu_fwspec_add_ids(dev, spec->args, 1);

	/* Initialize once - xlate() will call multiple times */
	if (to_ipmmu(dev))
		return 0;

	return ipmmu_init_platform_device(dev, spec);
}

static int ipmmu_init_arm_mapping(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct iommu_group *group;
	int ret;

	/* Create a device group and add the device to it. */
	group = iommu_group_alloc();
	if (IS_ERR(group)) {
		dev_err(dev, "Failed to allocate IOMMU group\n");
		return PTR_ERR(group);
	}

	ret = iommu_group_add_device(group, dev);
	iommu_group_put(group);

	if (ret < 0) {
		dev_err(dev, "Failed to add device to IPMMU group\n");
		return ret;
	}

	/*
	 * Create the ARM mapping, used by the ARM DMA mapping core to allocate
	 * VAs. This will allocate a corresponding IOMMU domain.
	 *
	 * TODO:
	 * - Create one mapping per context (TLB).
	 * - Make the mapping size configurable ? We currently use a 2GB mapping
	 *   at a 1GB offset to ensure that NULL VAs will fault.
	 */
	if (!mmu->mapping) {
		struct dma_iommu_mapping *mapping;

		mapping = arm_iommu_create_mapping(&platform_bus_type,
						   SZ_1G, SZ_2G);
		if (IS_ERR(mapping)) {
			dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
			ret = PTR_ERR(mapping);
			goto error;
		}

		mmu->mapping = mapping;
	}

	/* Attach the ARM VA mapping to the device. */
	ret = arm_iommu_attach_device(dev, mmu->mapping);
	if (ret < 0) {
		dev_err(dev, "Failed to attach device to VA mapping\n");
		goto error;
	}

	return 0;

error:
	iommu_group_remove_device(dev);
	if (mmu->mapping)
		arm_iommu_release_mapping(mmu->mapping);

	return ret;
}

static int ipmmu_add_device(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct iommu_group *group;
	int ret;

	/*
	 * Only let through devices that have been verified in xlate()
	 */
	if (!mmu)
		return -ENODEV;

	if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA)) {
		ret = ipmmu_init_arm_mapping(dev);
		if (ret)
			return ret;
	} else {
		group = iommu_group_get_for_dev(dev);
		if (IS_ERR(group))
			return PTR_ERR(group);

		iommu_group_put(group);
	}

	iommu_device_link(&mmu->iommu, dev);
	return 0;
}

static void ipmmu_remove_device(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);

	iommu_device_unlink(&mmu->iommu, dev);
	arm_iommu_detach_device(dev);
	iommu_group_remove_device(dev);
}

static struct iommu_group *ipmmu_find_group(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct iommu_group *group;

	if (mmu->group)
		return iommu_group_ref_get(mmu->group);

	group = iommu_group_alloc();
	if (!IS_ERR(group))
		mmu->group = group;

	return group;
}

static const struct iommu_ops ipmmu_ops = {
	.domain_alloc = ipmmu_domain_alloc,
	.domain_free = ipmmu_domain_free,
	.attach_dev = ipmmu_attach_device,
	.detach_dev = ipmmu_detach_device,
	.map = ipmmu_map,
	.unmap = ipmmu_unmap,
	.flush_iotlb_all = ipmmu_flush_iotlb_all,
	.iotlb_sync = ipmmu_iotlb_sync,
	.iova_to_phys = ipmmu_iova_to_phys,
	.add_device = ipmmu_add_device,
	.remove_device = ipmmu_remove_device,
	.device_group = ipmmu_find_group,
	.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
	.of_xlate = ipmmu_of_xlate,
};

/* -----------------------------------------------------------------------------
 * Probe/remove and init
 */

static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
{
	unsigned int i;

	/* Disable all contexts. */
	for (i = 0; i < mmu->num_ctx; ++i)
		ipmmu_ctx_write(mmu, i, IMCTR, 0);
}

static const struct ipmmu_features ipmmu_features_default = {
	.use_ns_alias_offset = true,
	.has_cache_leaf_nodes = false,
	.number_of_contexts = 1, /* software only tested with one context */
	.num_utlbs = 32,
	.setup_imbuscr = true,
	.twobit_imttbcr_sl0 = false,
	.reserved_context = false,
	.cache_snoop = true,
	.ctx_offset_base = 0,
	.ctx_offset_stride = 0x40,
	.utlb_offset_base = 0,
};

static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
	.use_ns_alias_offset = false,
	.has_cache_leaf_nodes = true,
	.number_of_contexts = 8,
	.num_utlbs = 48,
	.setup_imbuscr = false,
	.twobit_imttbcr_sl0 = true,
	.reserved_context = true,
	.cache_snoop = false,
	.ctx_offset_base = 0,
	.ctx_offset_stride = 0x40,
	.utlb_offset_base = 0,
};

static const struct of_device_id ipmmu_of_ids[] = {
	{
		.compatible = "renesas,ipmmu-vmsa",
		.data = &ipmmu_features_default,
	}, {
		.compatible = "renesas,ipmmu-r8a774a1",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a774b1",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a774c0",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a7795",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a7796",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77965",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77970",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77990",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77995",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		/* Terminator */
	},
};

static int ipmmu_probe(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu;
	struct resource *res;
	int irq;
	int ret;

	mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
	if (!mmu) {
		dev_err(&pdev->dev, "cannot allocate device data\n");
		return -ENOMEM;
	}

	mmu->dev = &pdev->dev;
	spin_lock_init(&mmu->lock);
	bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
	mmu->features = of_device_get_match_data(&pdev->dev);
	memset(mmu->utlb_ctx, IPMMU_CTX_INVALID, mmu->features->num_utlbs);
	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));

	/* Map I/O memory and request IRQ. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mmu->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mmu->base))
		return PTR_ERR(mmu->base);

	/*
	 * The IPMMU has two register banks, for secure and non-secure modes.
	 * The bank mapped at the beginning of the IPMMU address space
	 * corresponds to the running mode of the CPU. When running in secure
	 * mode the non-secure register bank is also available at an offset.
	 *
	 * Secure mode operation isn't clearly documented and is thus currently
	 * not implemented in the driver. Furthermore, preliminary tests of
	 * non-secure operation with the main register bank were not successful.
	 * Offset the registers base unconditionally to point to the non-secure
	 * alias space for now.
	 */
	if (mmu->features->use_ns_alias_offset)
		mmu->base += IM_NS_ALIAS_OFFSET;

	mmu->num_ctx = min(IPMMU_CTX_MAX, mmu->features->number_of_contexts);

	/*
	 * Determine if this IPMMU instance is a root device by checking for
	 * the lack of has_cache_leaf_nodes flag or renesas,ipmmu-main property.
	 */
	if (!mmu->features->has_cache_leaf_nodes ||
	    !of_find_property(pdev->dev.of_node, "renesas,ipmmu-main", NULL))
		mmu->root = mmu;
	else
		mmu->root = ipmmu_find_root();

	/*
	 * Wait until the root device has been registered for sure.
	 */
	if (!mmu->root)
		return -EPROBE_DEFER;

	/* Root devices have mandatory IRQs */
	if (ipmmu_is_root(mmu)) {
		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;

		ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
				       dev_name(&pdev->dev), mmu);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
			return ret;
		}

		ipmmu_device_reset(mmu);

		if (mmu->features->reserved_context) {
			dev_info(&pdev->dev, "IPMMU context 0 is reserved\n");
			set_bit(0, mmu->ctx);
		}
	}

	/*
	 * Register the IPMMU to the IOMMU subsystem in the following cases:
	 * - R-Car Gen2 IPMMU (all devices registered)
	 * - R-Car Gen3 IPMMU (leaf devices only - skip root IPMMU-MM device)
	 */
	if (!mmu->features->has_cache_leaf_nodes || !ipmmu_is_root(mmu)) {
		ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL,
					     dev_name(&pdev->dev));
		if (ret)
			return ret;

		iommu_device_set_ops(&mmu->iommu, &ipmmu_ops);
		iommu_device_set_fwnode(&mmu->iommu,
					&pdev->dev.of_node->fwnode);

		ret = iommu_device_register(&mmu->iommu);
		if (ret)
			return ret;

#if defined(CONFIG_IOMMU_DMA)
		if (!iommu_present(&platform_bus_type))
			bus_set_iommu(&platform_bus_type, &ipmmu_ops);
#endif
	}

	/*
	 * We can't create the ARM mapping here as it requires the bus to have
	 * an IOMMU, which only happens when bus_set_iommu() is called in
	 * ipmmu_init() after the probe function returns.
	 */

	platform_set_drvdata(pdev, mmu);

	return 0;
}

static int ipmmu_remove(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev);

	iommu_device_sysfs_remove(&mmu->iommu);
	iommu_device_unregister(&mmu->iommu);

	arm_iommu_release_mapping(mmu->mapping);

	ipmmu_device_reset(mmu);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
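/*
 * Restore runs in the noirq resume phase, i.e. before client devices resume
 * and can restart DMA through the IPMMU.
 */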
static int ipmmu_resume_noirq(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
	unsigned int i;

	/* Reset root MMU and restore contexts */
	if (ipmmu_is_root(mmu)) {
		ipmmu_device_reset(mmu);

		for (i = 0; i < mmu->num_ctx; i++) {
			if (!mmu->domains[i])
				continue;

			ipmmu_domain_setup_context(mmu->domains[i]);
		}
	}

	/* Re-enable active micro-TLBs */
	for (i = 0; i < mmu->features->num_utlbs; i++) {
		if (mmu->utlb_ctx[i] == IPMMU_CTX_INVALID)
			continue;

		ipmmu_utlb_enable(mmu->root->domains[mmu->utlb_ctx[i]], i);
	}

	return 0;
}

static const struct dev_pm_ops ipmmu_pm = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, ipmmu_resume_noirq)
};
#define DEV_PM_OPS	&ipmmu_pm
#else
#define DEV_PM_OPS	NULL
#endif /* CONFIG_PM_SLEEP */

static struct platform_driver ipmmu_driver = {
	.driver = {
		.name = "ipmmu-vmsa",
		.of_match_table = of_match_ptr(ipmmu_of_ids),
		.pm = DEV_PM_OPS,
	},
	.probe = ipmmu_probe,
	.remove = ipmmu_remove,
};

static int __init ipmmu_init(void)
{
	struct device_node *np;
	static bool setup_done;
	int ret;

	if (setup_done)
		return 0;

	np = of_find_matching_node(NULL, ipmmu_of_ids);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&ipmmu_driver);
	if (ret < 0)
		return ret;

#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &ipmmu_ops);
#endif

	setup_done = true;
	return 0;
}
subsys_initcall(ipmmu_init);