/*
 * IPMMU VMSA
 *
 * Copyright (C) 2014 Renesas Electronics Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 */
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>

#include <asm/dma-iommu.h>
#include <asm/pgalloc.h>

#include "io-pgtable.h"
struct ipmmu_vmsa_device {
	struct device *dev;
	void __iomem *base;
	struct list_head list;

	unsigned int num_utlbs;

	struct dma_iommu_mapping *mapping;
};

struct ipmmu_vmsa_domain {
	struct ipmmu_vmsa_device *mmu;
	struct iommu_domain io_domain;

	struct io_pgtable_cfg cfg;
	struct io_pgtable_ops *iop;

	unsigned int context_id;
	spinlock_t lock;			/* Protects mappings */
};

struct ipmmu_vmsa_archdata {
	struct ipmmu_vmsa_device *mmu;
	unsigned int *utlbs;
	unsigned int num_utlbs;
};

static DEFINE_SPINLOCK(ipmmu_devices_lock);
static LIST_HEAD(ipmmu_devices);

static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct ipmmu_vmsa_domain, io_domain);
}
#define TLB_LOOP_TIMEOUT		100	/* 100us */

/* -----------------------------------------------------------------------------
 * Register Definitions
 */
#define IM_NS_ALIAS_OFFSET		0x800

#define IM_CTX_SIZE			0x40

#define IMCTR				0x0000
#define IMCTR_TRE			(1 << 17)
#define IMCTR_AFE			(1 << 16)
#define IMCTR_RTSEL_MASK		(3 << 4)
#define IMCTR_RTSEL_SHIFT		4
#define IMCTR_TREN			(1 << 3)
#define IMCTR_INTEN			(1 << 2)
#define IMCTR_FLUSH			(1 << 1)
#define IMCTR_MMUEN			(1 << 0)

#define IMCAAR				0x0004

#define IMTTBCR				0x0008
#define IMTTBCR_EAE			(1 << 31)
#define IMTTBCR_PMB			(1 << 30)
#define IMTTBCR_SH1_NON_SHAREABLE	(0 << 28)
#define IMTTBCR_SH1_OUTER_SHAREABLE	(2 << 28)
#define IMTTBCR_SH1_INNER_SHAREABLE	(3 << 28)
#define IMTTBCR_SH1_MASK		(3 << 28)
#define IMTTBCR_ORGN1_NC		(0 << 26)
#define IMTTBCR_ORGN1_WB_WA		(1 << 26)
#define IMTTBCR_ORGN1_WT		(2 << 26)
#define IMTTBCR_ORGN1_WB		(3 << 26)
#define IMTTBCR_ORGN1_MASK		(3 << 26)
#define IMTTBCR_IRGN1_NC		(0 << 24)
#define IMTTBCR_IRGN1_WB_WA		(1 << 24)
#define IMTTBCR_IRGN1_WT		(2 << 24)
#define IMTTBCR_IRGN1_WB		(3 << 24)
#define IMTTBCR_IRGN1_MASK		(3 << 24)
#define IMTTBCR_TSZ1_MASK		(7 << 16)
#define IMTTBCR_TSZ1_SHIFT		16
#define IMTTBCR_SH0_NON_SHAREABLE	(0 << 12)
#define IMTTBCR_SH0_OUTER_SHAREABLE	(2 << 12)
#define IMTTBCR_SH0_INNER_SHAREABLE	(3 << 12)
#define IMTTBCR_SH0_MASK		(3 << 12)
#define IMTTBCR_ORGN0_NC		(0 << 10)
#define IMTTBCR_ORGN0_WB_WA		(1 << 10)
#define IMTTBCR_ORGN0_WT		(2 << 10)
#define IMTTBCR_ORGN0_WB		(3 << 10)
#define IMTTBCR_ORGN0_MASK		(3 << 10)
#define IMTTBCR_IRGN0_NC		(0 << 8)
#define IMTTBCR_IRGN0_WB_WA		(1 << 8)
#define IMTTBCR_IRGN0_WT		(2 << 8)
#define IMTTBCR_IRGN0_WB		(3 << 8)
#define IMTTBCR_IRGN0_MASK		(3 << 8)
#define IMTTBCR_SL0_LVL_2		(0 << 4)
#define IMTTBCR_SL0_LVL_1		(1 << 4)
#define IMTTBCR_TSZ0_MASK		(7 << 0)
#define IMTTBCR_TSZ0_SHIFT		0
#define IMBUSCR				0x000c
#define IMBUSCR_DVM			(1 << 2)
#define IMBUSCR_BUSSEL_SYS		(0 << 0)
#define IMBUSCR_BUSSEL_CCI		(1 << 0)
#define IMBUSCR_BUSSEL_IMCAAR		(2 << 0)
#define IMBUSCR_BUSSEL_CCI_IMCAAR	(3 << 0)
#define IMBUSCR_BUSSEL_MASK		(3 << 0)

#define IMTTLBR0			0x0010
#define IMTTUBR0			0x0014
#define IMTTLBR1			0x0018
#define IMTTUBR1			0x001c

#define IMSTR				0x0020
#define IMSTR_ERRLVL_MASK		(3 << 12)
#define IMSTR_ERRLVL_SHIFT		12
#define IMSTR_ERRCODE_TLB_FORMAT	(1 << 8)
#define IMSTR_ERRCODE_ACCESS_PERM	(4 << 8)
#define IMSTR_ERRCODE_SECURE_ACCESS	(5 << 8)
#define IMSTR_ERRCODE_MASK		(7 << 8)
#define IMSTR_MHIT			(1 << 4)
#define IMSTR_ABORT			(1 << 2)
#define IMSTR_PF			(1 << 1)
#define IMSTR_TF			(1 << 0)

#define IMMAIR0				0x0028
#define IMMAIR1				0x002c
#define IMMAIR_ATTR_MASK		0xff
#define IMMAIR_ATTR_DEVICE		0x04
#define IMMAIR_ATTR_NC			0x44
#define IMMAIR_ATTR_WBRWA		0xff
#define IMMAIR_ATTR_SHIFT(n)		((n) << 3)
#define IMMAIR_ATTR_IDX_NC		0
#define IMMAIR_ATTR_IDX_WBRWA		1
#define IMMAIR_ATTR_IDX_DEV		2

#define IMEAR				0x0030

#define IMPCTR				0x0200
#define IMPSTR				0x0208
#define IMPEAR				0x020c
#define IMPMBA(n)			(0x0280 + ((n) * 4))
#define IMPMBD(n)			(0x02c0 + ((n) * 4))

#define IMUCTR(n)			(0x0300 + ((n) * 16))
#define IMUCTR_FIXADDEN			(1 << 31)
#define IMUCTR_FIXADD_MASK		(0xff << 16)
#define IMUCTR_FIXADD_SHIFT		16
#define IMUCTR_TTSEL_MMU(n)		((n) << 4)
#define IMUCTR_TTSEL_PMB		(8 << 4)
#define IMUCTR_TTSEL_MASK		(15 << 4)
#define IMUCTR_FLUSH			(1 << 1)
#define IMUCTR_MMUEN			(1 << 0)

#define IMUASID(n)			(0x0308 + ((n) * 16))
#define IMUASID_ASID8_MASK		(0xff << 8)
#define IMUASID_ASID8_SHIFT		8
#define IMUASID_ASID0_MASK		(0xff << 0)
#define IMUASID_ASID0_SHIFT		0
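
/*
 * Illustrative only: given the IMSTR masks above, a fault handler can pull
 * individual fields out of the status word like this (a sketch, not a
 * definition from the datasheet):
 *
 *	err_lvl  = (status & IMSTR_ERRLVL_MASK) >> IMSTR_ERRLVL_SHIFT;
 *	err_code = status & IMSTR_ERRCODE_MASK;
 */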
/* -----------------------------------------------------------------------------
 * Read/Write Access
 */

static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
{
	return ioread32(mmu->base + offset);
}

static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
			u32 data)
{
	iowrite32(data, mmu->base + offset);
}

static u32 ipmmu_ctx_read(struct ipmmu_vmsa_domain *domain, unsigned int reg)
{
	return ipmmu_read(domain->mmu, domain->context_id * IM_CTX_SIZE + reg);
}

static void ipmmu_ctx_write(struct ipmmu_vmsa_domain *domain, unsigned int reg,
			    u32 data)
{
	ipmmu_write(domain->mmu, domain->context_id * IM_CTX_SIZE + reg, data);
}
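
/*
 * Note that the per-context registers are banked at IM_CTX_SIZE (0x40)
 * strides: the two helpers above address context n's copy of a register as
 * n * IM_CTX_SIZE + reg, so e.g. IMCTR for context 1 lives at 0x0040.
 */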
/* -----------------------------------------------------------------------------
 * TLB and microTLB Management
 */

/* Wait for any pending TLB invalidations to complete */
static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
{
	unsigned int count = 0;

	while (ipmmu_ctx_read(domain, IMCTR) & IMCTR_FLUSH) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(domain->mmu->dev,
			"TLB sync timed out -- MMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}
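
/* Issue a global TLB flush for the context and wait for it to complete. */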
static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
{
	u32 reg;

	reg = ipmmu_ctx_read(domain, IMCTR);
	reg |= IMCTR_FLUSH;
	ipmmu_ctx_write(domain, IMCTR, reg);

	ipmmu_tlb_sync(domain);
}
/*
 * Enable MMU translation for the microTLB.
 */
static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
			      unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	/*
	 * TODO: Reference-count the microTLB as several bus masters can be
	 * connected to the same microTLB.
	 */

	/* TODO: What should we set the ASID to? */
	ipmmu_write(mmu, IMUASID(utlb), 0);
	/* TODO: Do we need to flush the microTLB? */
	ipmmu_write(mmu, IMUCTR(utlb),
		    IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH |
		    IMUCTR_MMUEN);
}

/*
 * Disable MMU translation for the microTLB.
 */
static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
			       unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	ipmmu_write(mmu, IMUCTR(utlb), 0);
}

static void ipmmu_tlb_flush_all(void *cookie)
{
	struct ipmmu_vmsa_domain *domain = cookie;

	ipmmu_tlb_invalidate(domain);
}

static void ipmmu_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
				void *cookie)
{
	/* The hardware doesn't support selective TLB flush. */
}

static struct iommu_gather_ops ipmmu_gather_ops = {
	.tlb_flush_all = ipmmu_tlb_flush_all,
	.tlb_add_flush = ipmmu_tlb_add_flush,
	.tlb_sync = ipmmu_tlb_flush_all,
};
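
/*
 * Design note: the hardware has no selective (per-range) TLB flush, so
 * tlb_add_flush is a no-op and tlb_sync reuses ipmmu_tlb_flush_all; every
 * io-pgtable sync therefore turns into a full-context invalidate.
 */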
/* -----------------------------------------------------------------------------
 * Domain/Context Management
 */

static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
{
	phys_addr_t ttbr;

	/*
	 * Allocate the page table operations.
	 *
	 * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
	 * access, Long-descriptor format" that the NStable bit being set in a
	 * table descriptor will result in the NStable and NS bits of all child
	 * entries being ignored and considered as being set. The IPMMU seems
	 * not to comply with this, as it generates a secure access page fault
	 * if any of the NStable and NS bits isn't set when running in
	 * non-secure mode.
	 */
	domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
	domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
	domain->cfg.ias = 32;
	domain->cfg.oas = 40;
	domain->cfg.tlb = &ipmmu_gather_ops;

	/*
	 * TODO: Add support for coherent walk through CCI with DVM and remove
	 * cache handling. For now, delegate it to the io-pgtable code.
	 */
	domain->cfg.iommu_dev = domain->mmu->dev;

	domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
					   domain);
	if (!domain->iop)
		return -EINVAL;

	/*
	 * TODO: When adding support for multiple contexts, find an unused
	 * context.
	 */
	domain->context_id = 0;

	/* TTBR0 */
	ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
	ipmmu_ctx_write(domain, IMTTLBR0, ttbr);
	ipmmu_ctx_write(domain, IMTTUBR0, ttbr >> 32);

	/*
	 * TTBCR
	 * We use long descriptors with inner-shareable WBWA tables and allocate
	 * the whole 32-bit VA space to TTBR0.
	 */
	ipmmu_ctx_write(domain, IMTTBCR, IMTTBCR_EAE |
			IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
			IMTTBCR_IRGN0_WB_WA | IMTTBCR_SL0_LVL_1);

	/* MAIR0 */
	ipmmu_ctx_write(domain, IMMAIR0, domain->cfg.arm_lpae_s1_cfg.mair[0]);

	/* IMBUSCR */
	ipmmu_ctx_write(domain, IMBUSCR,
			ipmmu_ctx_read(domain, IMBUSCR) &
			~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));

	/*
	 * IMSTR
	 * Clear all interrupt flags.
	 */
	ipmmu_ctx_write(domain, IMSTR, ipmmu_ctx_read(domain, IMSTR));

	/*
	 * IMCTR
	 * Enable the MMU and interrupt generation. The long-descriptor
	 * translation table format doesn't use TEX remapping. Don't enable AF
	 * software management as we have no use for it. Flush the TLB as
	 * required when modifying the context registers.
	 */
	ipmmu_ctx_write(domain, IMCTR, IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);

	return 0;
}
static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
{
	/*
	 * Disable the context. Flush the TLB as required when modifying the
	 * context registers.
	 *
	 * TODO: Is TLB flush really needed?
	 */
	ipmmu_ctx_write(domain, IMCTR, IMCTR_FLUSH);
	ipmmu_tlb_sync(domain);
}
/* -----------------------------------------------------------------------------
 * Fault Handling
 */

static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
{
	const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
	struct ipmmu_vmsa_device *mmu = domain->mmu;
	u32 status;
	u32 iova;

	status = ipmmu_ctx_read(domain, IMSTR);
	if (!(status & err_mask))
		return IRQ_NONE;

	iova = ipmmu_ctx_read(domain, IMEAR);

	/*
	 * Clear the error status flags. Unlike traditional interrupt flag
	 * registers that must be cleared by writing 1, this status register
	 * seems to require 0. The error address register must be read before,
	 * otherwise its value will be 0.
	 */
	ipmmu_ctx_write(domain, IMSTR, 0);

	/* Log fatal errors. */
	if (status & IMSTR_MHIT)
		dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%08x\n",
				    iova);
	if (status & IMSTR_ABORT)
		dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%08x\n",
				    iova);

	if (!(status & (IMSTR_PF | IMSTR_TF)))
		return IRQ_NONE;

	/*
	 * Try to handle page faults and translation faults.
	 *
	 * TODO: We need to look up the faulty device based on the I/O VA. Use
	 * the IOMMU device for now.
	 */
	if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
		return IRQ_HANDLED;

	dev_err_ratelimited(mmu->dev,
			    "Unhandled fault: status 0x%08x iova 0x%08x\n",
			    status, iova);

	return IRQ_HANDLED;
}
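
/*
 * Top-level interrupt handler. Only a single translation context is
 * supported so far, so the faulting domain can simply be looked up through
 * the device's ARM DMA mapping.
 */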
static irqreturn_t ipmmu_irq(int irq, void *dev)
{
	struct ipmmu_vmsa_device *mmu = dev;
	struct iommu_domain *io_domain;
	struct ipmmu_vmsa_domain *domain;

	if (!mmu->mapping)
		return IRQ_NONE;

	io_domain = mmu->mapping->domain;
	domain = to_vmsa_domain(io_domain);

	return ipmmu_domain_irq(domain);
}
/* -----------------------------------------------------------------------------
 * IOMMU Operations
 */

static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
{
	struct ipmmu_vmsa_domain *domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	spin_lock_init(&domain->lock);

	return &domain->io_domain;
}

static void ipmmu_domain_free(struct iommu_domain *io_domain)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/*
	 * Free the domain resources. We assume that all devices have already
	 * been detached.
	 */
	ipmmu_domain_destroy_context(domain);
	free_io_pgtable_ops(domain->iop);
	kfree(domain);
}
static int ipmmu_attach_device(struct iommu_domain *io_domain,
			       struct device *dev)
{
	struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;
	struct ipmmu_vmsa_device *mmu = archdata->mmu;
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned long flags;
	unsigned int i;
	int ret = 0;

	if (!mmu) {
		dev_err(dev, "Cannot attach to IPMMU\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&domain->lock, flags);

	if (!domain->mmu) {
		/* The domain hasn't been used yet, initialize it. */
		domain->mmu = mmu;
		ret = ipmmu_domain_init_context(domain);
	} else if (domain->mmu != mmu) {
		/*
		 * Something is wrong, we can't attach two devices using
		 * different IOMMUs to the same domain.
		 */
		dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n",
			dev_name(mmu->dev), dev_name(domain->mmu->dev));
		ret = -EINVAL;
	}

	spin_unlock_irqrestore(&domain->lock, flags);

	if (ret < 0)
		return ret;

	for (i = 0; i < archdata->num_utlbs; ++i)
		ipmmu_utlb_enable(domain, archdata->utlbs[i]);

	return 0;
}
static void ipmmu_detach_device(struct iommu_domain *io_domain,
				struct device *dev)
{
	struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned int i;

	for (i = 0; i < archdata->num_utlbs; ++i)
		ipmmu_utlb_disable(domain, archdata->utlbs[i]);

	/*
	 * TODO: Optimize by disabling the context when no device is attached.
	 */
}
static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	if (!domain)
		return -ENODEV;

	return domain->iop->map(domain->iop, iova, paddr, size, prot);
}

static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
			  size_t size)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	return domain->iop->unmap(domain->iop, iova, size);
}

static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
				      dma_addr_t iova)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/* TODO: Is locking needed? */

	return domain->iop->iova_to_phys(domain->iop, iova);
}
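
/*
 * For illustration only: ipmmu_find_utlbs() below expects "iommus" phandles
 * carrying a single cell that holds the micro-TLB index. A hypothetical
 * device tree fragment (node names and addresses made up) would look like:
 *
 *	ipmmu: mmu@e6740000 {
 *		compatible = "renesas,ipmmu-vmsa";
 *		#iommu-cells = <1>;
 *	};
 *
 *	bus-master {
 *		iommus = <&ipmmu 13>;
 *	};
 */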
static int ipmmu_find_utlbs(struct ipmmu_vmsa_device *mmu, struct device *dev,
			    unsigned int *utlbs, unsigned int num_utlbs)
{
	unsigned int i;

	for (i = 0; i < num_utlbs; ++i) {
		struct of_phandle_args args;
		int ret;

		ret = of_parse_phandle_with_args(dev->of_node, "iommus",
						 "#iommu-cells", i, &args);
		if (ret < 0)
			return ret;

		of_node_put(args.np);

		if (args.np != mmu->dev->of_node || args.args_count != 1)
			return -EINVAL;

		utlbs[i] = args.args[0];
	}

	return 0;
}
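
/*
 * Per-device setup: resolve the micro-TLBs assigned to the device, locate
 * the owning IPMMU instance, wrap the device in an IOMMU group and hook it
 * into the ARM DMA mapping.
 */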
static int ipmmu_add_device(struct device *dev)
{
	struct ipmmu_vmsa_archdata *archdata;
	struct ipmmu_vmsa_device *mmu;
	struct iommu_group *group = NULL;
	unsigned int *utlbs;
	unsigned int i;
	int num_utlbs;
	int ret = -ENODEV;

	if (dev->archdata.iommu) {
		dev_warn(dev, "IOMMU driver already assigned to device %s\n",
			 dev_name(dev));
		return -EINVAL;
	}

	/* Find the master corresponding to the device. */

	num_utlbs = of_count_phandle_with_args(dev->of_node, "iommus",
					       "#iommu-cells");
	if (num_utlbs < 0)
		return -ENODEV;

	utlbs = kcalloc(num_utlbs, sizeof(*utlbs), GFP_KERNEL);
	if (!utlbs)
		return -ENOMEM;

	spin_lock(&ipmmu_devices_lock);

	list_for_each_entry(mmu, &ipmmu_devices, list) {
		ret = ipmmu_find_utlbs(mmu, dev, utlbs, num_utlbs);
		if (!ret) {
			/*
			 * TODO: Take a reference to the MMU to protect
			 * against device removal.
			 */
			break;
		}
	}

	spin_unlock(&ipmmu_devices_lock);

	if (ret < 0) {
		kfree(utlbs);
		return -ENODEV;
	}

	for (i = 0; i < num_utlbs; ++i) {
		if (utlbs[i] >= mmu->num_utlbs) {
			ret = -EINVAL;
			goto error;
		}
	}

	/* Create a device group and add the device to it. */
	group = iommu_group_alloc();
	if (IS_ERR(group)) {
		dev_err(dev, "Failed to allocate IOMMU group\n");
		ret = PTR_ERR(group);
		goto error;
	}

	ret = iommu_group_add_device(group, dev);
	iommu_group_put(group);

	if (ret < 0) {
		dev_err(dev, "Failed to add device to IPMMU group\n");
		group = NULL;
		goto error;
	}

	archdata = kzalloc(sizeof(*archdata), GFP_KERNEL);
	if (!archdata) {
		ret = -ENOMEM;
		goto error;
	}

	archdata->mmu = mmu;
	archdata->utlbs = utlbs;
	archdata->num_utlbs = num_utlbs;
	dev->archdata.iommu = archdata;

	/*
	 * Create the ARM mapping, used by the ARM DMA mapping core to allocate
	 * VAs. This will allocate a corresponding IOMMU domain.
	 *
	 * TODO:
	 * - Create one mapping per context (TLB).
	 * - Make the mapping size configurable? We currently use a 2GB mapping
	 *   at a 1GB offset to ensure that NULL VAs will fault.
	 */
	if (!mmu->mapping) {
		struct dma_iommu_mapping *mapping;

		mapping = arm_iommu_create_mapping(&platform_bus_type,
						   SZ_1G, SZ_2G);
		if (IS_ERR(mapping)) {
			dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
			ret = PTR_ERR(mapping);
			goto error;
		}

		mmu->mapping = mapping;
	}

	/* Attach the ARM VA mapping to the device. */
	ret = arm_iommu_attach_device(dev, mmu->mapping);
	if (ret < 0) {
		dev_err(dev, "Failed to attach device to VA mapping\n");
		goto error;
	}

	return 0;

error:
	arm_iommu_release_mapping(mmu->mapping);

	kfree(dev->archdata.iommu);
	kfree(utlbs);

	dev->archdata.iommu = NULL;

	if (!IS_ERR_OR_NULL(group))
		iommu_group_remove_device(dev);

	return ret;
}
static void ipmmu_remove_device(struct device *dev)
{
	struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;

	arm_iommu_detach_device(dev);
	iommu_group_remove_device(dev);

	kfree(archdata->utlbs);
	kfree(archdata);

	dev->archdata.iommu = NULL;
}
static const struct iommu_ops ipmmu_ops = {
	.domain_alloc = ipmmu_domain_alloc,
	.domain_free = ipmmu_domain_free,
	.attach_dev = ipmmu_attach_device,
	.detach_dev = ipmmu_detach_device,
	.map = ipmmu_map,
	.unmap = ipmmu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = ipmmu_iova_to_phys,
	.add_device = ipmmu_add_device,
	.remove_device = ipmmu_remove_device,
	.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
};

/* -----------------------------------------------------------------------------
 * Probe/remove and init
 */
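
/*
 * The loop bound of 4 below reflects the fixed number of translation
 * contexts this driver drives; only context 0 is actually used so far.
 */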
static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
{
	unsigned int i;

	/* Disable all contexts. */
	for (i = 0; i < 4; ++i)
		ipmmu_write(mmu, i * IM_CTX_SIZE + IMCTR, 0);
}
static int ipmmu_probe(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu;
	struct resource *res;
	int irq;
	int ret;

	if (!IS_ENABLED(CONFIG_OF) && !pdev->dev.platform_data) {
		dev_err(&pdev->dev, "missing platform data\n");
		return -EINVAL;
	}

	mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
	if (!mmu) {
		dev_err(&pdev->dev, "cannot allocate device data\n");
		return -ENOMEM;
	}

	mmu->dev = &pdev->dev;
	mmu->num_utlbs = 32;

	/* Map I/O memory and request IRQ. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mmu->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mmu->base))
		return PTR_ERR(mmu->base);

	/*
	 * The IPMMU has two register banks, for secure and non-secure modes.
	 * The bank mapped at the beginning of the IPMMU address space
	 * corresponds to the running mode of the CPU. When running in secure
	 * mode the non-secure register bank is also available at an offset.
	 *
	 * Secure mode operation isn't clearly documented and is thus currently
	 * not implemented in the driver. Furthermore, preliminary tests of
	 * non-secure operation with the main register bank were not successful.
	 * Offset the registers base unconditionally to point to the non-secure
	 * alias space for now.
	 */
	mmu->base += IM_NS_ALIAS_OFFSET;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "no IRQ found\n");
		return irq;
	}

	ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
			       dev_name(&pdev->dev), mmu);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
		return ret;
	}

	ipmmu_device_reset(mmu);

	/*
	 * We can't create the ARM mapping here as it requires the bus to have
	 * an IOMMU, which only happens when bus_set_iommu() is called in
	 * ipmmu_init() after the probe function returns.
	 */
	spin_lock(&ipmmu_devices_lock);
	list_add(&mmu->list, &ipmmu_devices);
	spin_unlock(&ipmmu_devices_lock);

	platform_set_drvdata(pdev, mmu);

	return 0;
}

static int ipmmu_remove(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev);

	spin_lock(&ipmmu_devices_lock);
	list_del(&mmu->list);
	spin_unlock(&ipmmu_devices_lock);

	arm_iommu_release_mapping(mmu->mapping);

	ipmmu_device_reset(mmu);

	return 0;
}
static const struct of_device_id ipmmu_of_ids[] = {
	{ .compatible = "renesas,ipmmu-vmsa", },
	{ }
};

static struct platform_driver ipmmu_driver = {
	.driver = {
		.name = "ipmmu-vmsa",
		.of_match_table = of_match_ptr(ipmmu_of_ids),
	},
	.probe = ipmmu_probe,
	.remove = ipmmu_remove,
};

static int __init ipmmu_init(void)
{
	int ret;

	ret = platform_driver_register(&ipmmu_driver);
	if (ret < 0)
		return ret;

	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &ipmmu_ops);

	return 0;
}

static void __exit ipmmu_exit(void)
{
	return platform_driver_unregister(&ipmmu_driver);
}

subsys_initcall(ipmmu_init);
module_exit(ipmmu_exit);

MODULE_DESCRIPTION("IOMMU API for Renesas VMSA-compatible IPMMU");
MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
MODULE_LICENSE("GPL v2");