/* linux/drivers/iommu/exynos_iommu.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/iommu.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/export.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>

typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

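/*
 * Page table entries encode their type in bits [1:0].  First-level (lv1)
 * entries: 1 = link to a second-level table, 2 = 1MiB section, 0 or 3 = fault.
 * Second-level (lv2) entries: 1 = 64KiB large page, 2 = 4KiB small page,
 * 0 = fault.  lv1 entries equal to ZERO_LV2LINK are also treated as faults.
 */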
#define lv1ent_fault(sent)	((*(sent) == ZERO_LV2LINK) || \
				 ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_zero(sent)	(*(sent) == ZERO_LV2LINK)
#define lv1ent_page_zero(sent)	((*(sent) & 3) == 1)
#define lv1ent_page(sent)	((*(sent) != ZERO_LV2LINK) && \
				 ((*(sent) & 3) == 1))
#define lv1ent_section(sent)	((*(sent) & 3) == 2)

#define lv2ent_fault(pent)	((*(pent) & 3) == 0)
#define lv2ent_small(pent)	((*(pent) & 2) == 2)
#define lv2ent_large(pent)	((*(pent) & 3) == 1)

static u32 sysmmu_page_offset(sysmmu_iova_t iova, u32 size)
{
	return iova & (size - 1);
}

#define section_phys(sent) (*(sent) & SECT_MASK)
#define section_offs(iova) sysmmu_page_offset((iova), SECT_SIZE)
#define lpage_phys(pent) (*(pent) & LPAGE_MASK)
#define lpage_offs(iova) sysmmu_page_offset((iova), LPAGE_SIZE)
#define spage_phys(pent) (*(pent) & SPAGE_MASK)
#define spage_offs(iova) sysmmu_page_offset((iova), SPAGE_SIZE)

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)

static u32 lv1ent_offset(sysmmu_iova_t iova)
{
	return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}

#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)

#define lv2table_base(sent) (*(sent) & 0xFFFFFC00)

#define mk_lv1ent_sect(pa) ((pa) | 2)
#define mk_lv1ent_page(pa) ((pa) | 1)
#define mk_lv2ent_lpage(pa) ((pa) | 1)
#define mk_lv2ent_spage(pa) ((pa) | 2)

#define CTRL_ENABLE	0x5
#define CTRL_BLOCK	0x7
#define CTRL_DISABLE	0x0

#define CFG_LRU		0x1
#define CFG_QOS(n)	((n & 0xF) << 7)
#define CFG_MASK	0x0150FFFF /* Selecting bit 0-15, 20, 22 and 24 */
#define CFG_ACGEN	(1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL	(1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE	(1 << 20) /* System MMU 3.2+ only */

#define REG_MMU_CTRL		0x000
#define REG_MMU_CFG		0x004
#define REG_MMU_STATUS		0x008
#define REG_MMU_FLUSH		0x00C
#define REG_MMU_FLUSH_ENTRY	0x010
#define REG_PT_BASE_ADDR	0x014
#define REG_INT_STATUS		0x018
#define REG_INT_CLEAR		0x01C

#define REG_PAGE_FAULT_ADDR	0x024
#define REG_AW_FAULT_ADDR	0x028
#define REG_AR_FAULT_ADDR	0x02C
#define REG_DEFAULT_SLAVE_ADDR	0x030

#define REG_MMU_VERSION		0x034

#define MMU_MAJ_VER(val)	((val) >> 7)
#define MMU_MIN_VER(val)	((val) & 0x7F)
#define MMU_RAW_VER(reg)	(((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min)	((((maj) & 0xF) << 7) | ((min) & 0x7F))

#define REG_PB0_SADDR		0x04C
#define REG_PB0_EADDR		0x050
#define REG_PB1_SADDR		0x054
#define REG_PB1_EADDR		0x058

#define has_sysmmu(dev)		(dev->archdata.iommu != NULL)

static struct kmem_cache *lv2table_kmem_cache;

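/*
 * zero_lv2_table is a single second-level table that contains only fault
 * entries.  Unused first-level slots point to it (ZERO_LV2LINK) instead of
 * holding a plain fault entry; see the System MMU v3.3 FLPD cache workaround
 * notes in exynos_iommu_domain_alloc() and alloc_lv2entry().
 */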
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))

static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
	return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
	return (sysmmu_pte_t *)phys_to_virt(
				lv2table_base(sent)) + lv2ent_offset(iova);
}

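/*
 * The fault type is derived from the lowest set bit of REG_INT_STATUS;
 * fault_reg_offset[] maps each type to the register that holds the faulting
 * address and sysmmu_fault_name[] to a printable description.
 */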
enum exynos_sysmmu_inttype {
	SYSMMU_PAGEFAULT,
	SYSMMU_AR_MULTIHIT,
	SYSMMU_AW_MULTIHIT,
	SYSMMU_BUSERROR,
	SYSMMU_AR_SECURITY,
	SYSMMU_AR_ACCESS,
	SYSMMU_AW_SECURITY,
	SYSMMU_AW_PROTECTION, /* 7 */
	SYSMMU_FAULT_UNKNOWN,
	SYSMMU_FAULTS_NUM
};

static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
	REG_PAGE_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_DEFAULT_SLAVE_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_AW_FAULT_ADDR
};

static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
	"PAGE FAULT",
	"AR MULTI-HIT FAULT",
	"AW MULTI-HIT FAULT",
	"BUS ERROR",
	"AR SECURITY PROTECTION FAULT",
	"AR ACCESS PROTECTION FAULT",
	"AW SECURITY PROTECTION FAULT",
	"AW ACCESS PROTECTION FAULT",
	"UNKNOWN FAULT"
};

/* attached to dev.archdata.iommu of the master device */
struct exynos_iommu_owner {
	struct list_head client; /* entry of exynos_iommu_domain.clients */
	struct device *dev;
	struct device *sysmmu;
	struct iommu_domain *domain;
	void *vmm_data;		/* IO virtual memory manager's data */
	spinlock_t lock;	/* Lock to preserve consistency of System MMU */
};

struct exynos_iommu_domain {
	struct list_head clients; /* list of sysmmu_drvdata.node */
	sysmmu_pte_t *pgtable;	/* lv1 page table, 16KB */
	short *lv2entcnt;	/* free lv2 entry counter for each section */
	spinlock_t lock;	/* lock for this structure */
	spinlock_t pgtablelock;	/* lock for modifying page table @ pgtable */
	struct iommu_domain domain; /* generic domain data structure */
};

struct sysmmu_drvdata {
	struct device *sysmmu;	/* System MMU's device descriptor */
	struct device *master;	/* Owner of system MMU */
	void __iomem *sfrbase;
	struct clk *clk;
	struct clk *clk_master;
	int activations;
	spinlock_t lock;
	struct iommu_domain *domain;
	phys_addr_t pgtable;
};

static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct exynos_iommu_domain, domain);
}

static bool set_sysmmu_active(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU was not active previously
	   and it needs to be initialized */
	return ++data->activations == 1;
}

static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU needs to be disabled */
	BUG_ON(data->activations < 1);
	return --data->activations == 0;
}

static bool is_sysmmu_active(struct sysmmu_drvdata *data)
{
	return data->activations > 0;
}

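/*
 * sysmmu_block() stalls address translation (CTRL_BLOCK) and polls
 * REG_MMU_STATUS until the blocked state is reported, giving up after a
 * bounded number of reads.  A successful sysmmu_block() must be paired with
 * sysmmu_unblock(), which simply re-enables translation.
 */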
static void sysmmu_unblock(void __iomem *sfrbase)
{
	__raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
}

static unsigned int __raw_sysmmu_version(struct sysmmu_drvdata *data)
{
	return MMU_RAW_VER(__raw_readl(data->sfrbase + REG_MMU_VERSION));
}

static bool sysmmu_block(void __iomem *sfrbase)
{
	int i = 120;

	__raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
		--i;

	if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
		sysmmu_unblock(sfrbase);
		return false;
	}

	return true;
}

static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
{
	__raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
}

static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
				sysmmu_iova_t iova, unsigned int num_inv)
{
	unsigned int i;

	for (i = 0; i < num_inv; i++) {
		__raw_writel((iova & SPAGE_MASK) | 1,
				sfrbase + REG_MMU_FLUSH_ENTRY);
		iova += SPAGE_SIZE;
	}
}

static void __sysmmu_set_ptbase(void __iomem *sfrbase,
				phys_addr_t pgd)
{
	__raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);

	__sysmmu_tlb_invalidate(sfrbase);
}

static void show_fault_information(const char *name,
		enum exynos_sysmmu_inttype itype,
		phys_addr_t pgtable_base, sysmmu_iova_t fault_addr)
{
	sysmmu_pte_t *ent;

	if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
		itype = SYSMMU_FAULT_UNKNOWN;

	pr_err("%s occurred at %#x by %s(Page table base: %pa)\n",
		sysmmu_fault_name[itype], fault_addr, name, &pgtable_base);

	ent = section_entry(phys_to_virt(pgtable_base), fault_addr);
	pr_err("\tLv1 entry: %#x\n", *ent);

	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault_addr);
		pr_err("\tLv2 entry: %#x\n", *ent);
	}
}

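/*
 * Fault interrupt handler: decodes the fault type and faulting address,
 * dumps the relevant page table entries and lets any handler registered via
 * report_iommu_fault() try to recover.  An unrecovered fault is fatal.
 */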
static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	/* SYSMMU is in blocked state when interrupt occurred. */
	struct sysmmu_drvdata *data = dev_id;
	enum exynos_sysmmu_inttype itype;
	sysmmu_iova_t addr = -1;
	int ret = -ENOSYS;

	WARN_ON(!is_sysmmu_active(data));

	spin_lock(&data->lock);

	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	itype = (enum exynos_sysmmu_inttype)
		__ffs(__raw_readl(data->sfrbase + REG_INT_STATUS));
	if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
		itype = SYSMMU_FAULT_UNKNOWN;
	else
		addr = __raw_readl(data->sfrbase + fault_reg_offset[itype]);

	if (itype == SYSMMU_FAULT_UNKNOWN) {
		pr_err("%s: Fault is not occurred by System MMU '%s'!\n",
			__func__, dev_name(data->sysmmu));
		pr_err("%s: Please check if IRQ is correctly configured.\n",
			__func__);
		BUG();
	} else {
		unsigned int base =
				__raw_readl(data->sfrbase + REG_PT_BASE_ADDR);
		show_fault_information(dev_name(data->sysmmu),
					itype, base, addr);
		if (data->domain)
			ret = report_iommu_fault(data->domain,
					data->master, addr, itype);
	}

	/* fault is not recovered by fault handler */
	BUG_ON(ret != 0);

	__raw_writel(1 << itype, data->sfrbase + REG_INT_CLEAR);

	sysmmu_unblock(data->sfrbase);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);

	spin_unlock(&data->lock);

	return IRQ_HANDLED;
}

static void __sysmmu_disable_nocount(struct sysmmu_drvdata *data)
{
	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	__raw_writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
	__raw_writel(0, data->sfrbase + REG_MMU_CFG);

	clk_disable(data->clk);
	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);
}

static bool __sysmmu_disable(struct sysmmu_drvdata *data)
{
	bool disabled;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);

	disabled = set_sysmmu_inactive(data);

	if (disabled) {
		data->pgtable = 0;
		data->domain = NULL;

		__sysmmu_disable_nocount(data);

		dev_dbg(data->sysmmu, "Disabled\n");
	} else {
		dev_dbg(data->sysmmu, "%d times left to disable\n",
					data->activations);
	}

	spin_unlock_irqrestore(&data->lock, flags);

	return disabled;
}

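/*
 * Program REG_MMU_CFG according to the H/W version read from REG_MMU_VERSION:
 * v3.2 and later enable the FLPD cache, v3.3 additionally enables automatic
 * clock gating (CFG_ACGEN) and drops LRU replacement, while the other 3.2+
 * revisions set CFG_SYSSEL instead.
 */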
static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
	unsigned int cfg = CFG_LRU | CFG_QOS(15);
	unsigned int ver;

	ver = __raw_sysmmu_version(data);
	if (MMU_MAJ_VER(ver) == 3) {
		if (MMU_MIN_VER(ver) >= 2) {
			cfg |= CFG_FLPDCACHE;
			if (MMU_MIN_VER(ver) == 3) {
				cfg |= CFG_ACGEN;
				cfg &= ~CFG_LRU;
			} else {
				cfg |= CFG_SYSSEL;
			}
		}
	}

	__raw_writel(cfg, data->sfrbase + REG_MMU_CFG);
}

static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
{
	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);
	clk_enable(data->clk);

	__raw_writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);

	__sysmmu_init_config(data);

	__sysmmu_set_ptbase(data->sfrbase, data->pgtable);

	__raw_writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);
}

static int __sysmmu_enable(struct sysmmu_drvdata *data,
			phys_addr_t pgtable, struct iommu_domain *domain)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (set_sysmmu_active(data)) {
		data->pgtable = pgtable;
		data->domain = domain;

		__sysmmu_enable_nocount(data);

		dev_dbg(data->sysmmu, "Enabled\n");
	} else {
		ret = (pgtable == data->pgtable) ? 1 : -EBUSY;

		dev_dbg(data->sysmmu, "already enabled\n");
	}

	if (WARN_ON(ret < 0))
		set_sysmmu_inactive(data); /* decrement count */

	spin_unlock_irqrestore(&data->lock, flags);

	return ret;
}

/* __exynos_sysmmu_enable: Enables System MMU
 *
 * returns -error if an error occurred and System MMU is not enabled,
 * 0 if the System MMU has been just enabled and 1 if System MMU was already
 * enabled before.
 */
static int __exynos_sysmmu_enable(struct device *dev, phys_addr_t pgtable,
				  struct iommu_domain *domain)
{
	int ret = 0;
	unsigned long flags;
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_drvdata *data;

	BUG_ON(!has_sysmmu(dev));

	spin_lock_irqsave(&owner->lock, flags);

	data = dev_get_drvdata(owner->sysmmu);

	ret = __sysmmu_enable(data, pgtable, domain);
	if (ret >= 0)
		data->master = dev;

	spin_unlock_irqrestore(&owner->lock, flags);

	return ret;
}

int exynos_sysmmu_enable(struct device *dev, phys_addr_t pgtable)
{
	BUG_ON(!memblock_is_memory(pgtable));

	return __exynos_sysmmu_enable(dev, pgtable, NULL);
}

static bool exynos_sysmmu_disable(struct device *dev)
{
	unsigned long flags;
	bool disabled = true;
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_drvdata *data;

	BUG_ON(!has_sysmmu(dev));

	spin_lock_irqsave(&owner->lock, flags);

	data = dev_get_drvdata(owner->sysmmu);

	disabled = __sysmmu_disable(data);
	if (disabled)
		data->master = NULL;

	spin_unlock_irqrestore(&owner->lock, flags);

	return disabled;
}

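/*
 * Only System MMU v3.3 implements the FLPD (first-level page descriptor)
 * cache; on earlier versions this invalidation is a no-op.  The flush is
 * issued through REG_MMU_FLUSH_ENTRY without blocking the MMU.
 */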
static void __sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					      sysmmu_iova_t iova)
{
	if (__raw_sysmmu_version(data) == MAKE_MMU_VER(3, 3))
		__raw_writel(iova | 0x1, data->sfrbase + REG_MMU_FLUSH_ENTRY);
}

static void sysmmu_tlb_invalidate_flpdcache(struct device *dev,
					    sysmmu_iova_t iova)
{
	unsigned long flags;
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_drvdata *data = dev_get_drvdata(owner->sysmmu);

	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data))
		__sysmmu_tlb_invalidate_flpdcache(data, iova);
	spin_unlock_irqrestore(&data->lock, flags);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);
}

static void sysmmu_tlb_invalidate_entry(struct device *dev, sysmmu_iova_t iova,
					size_t size)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	unsigned long flags;
	struct sysmmu_drvdata *data;

	data = dev_get_drvdata(owner->sysmmu);

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data)) {
		unsigned int num_inv = 1;

		if (!IS_ERR(data->clk_master))
			clk_enable(data->clk_master);

		/*
		 * L2TLB invalidation required
		 * 4KB page: 1 invalidation
		 * 64KB page: 16 invalidations
		 * 1MB page: 64 invalidations
		 * because it is set-associative TLB
		 * with 8-way and 64 sets.
		 * 1MB page can be cached in one of all sets.
		 * 64KB page can be one of 16 consecutive sets.
		 */
		if (MMU_MAJ_VER(__raw_sysmmu_version(data)) == 2)
			num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);

		if (sysmmu_block(data->sfrbase)) {
			__sysmmu_tlb_invalidate_entry(
				data->sfrbase, iova, num_inv);
			sysmmu_unblock(data->sfrbase);
		}
		if (!IS_ERR(data->clk_master))
			clk_disable(data->clk_master);
	} else {
		dev_dbg(dev, "disabled. Skipping TLB invalidation @ %#x\n",
			iova);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

void exynos_sysmmu_tlb_invalidate(struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	unsigned long flags;
	struct sysmmu_drvdata *data;

	data = dev_get_drvdata(owner->sysmmu);

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data)) {
		if (!IS_ERR(data->clk_master))
			clk_enable(data->clk_master);
		if (sysmmu_block(data->sfrbase)) {
			__sysmmu_tlb_invalidate(data->sfrbase);
			sysmmu_unblock(data->sfrbase);
		}
		if (!IS_ERR(data->clk_master))
			clk_disable(data->clk_master);
	} else {
		dev_dbg(dev, "disabled. Skipping TLB invalidation\n");
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

static int __init exynos_sysmmu_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct device *dev = &pdev->dev;
	struct sysmmu_drvdata *data;
	struct resource *res;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->sfrbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->sfrbase))
		return PTR_ERR(data->sfrbase);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "Unable to find IRQ resource\n");
		return irq;
	}

	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
				dev_name(dev), data);
	if (ret) {
		dev_err(dev, "Unable to register handler of irq %d\n", irq);
		return ret;
	}

	data->clk = devm_clk_get(dev, "sysmmu");
	if (IS_ERR(data->clk)) {
		dev_err(dev, "Failed to get clock!\n");
		return PTR_ERR(data->clk);
	} else {
		ret = clk_prepare(data->clk);
		if (ret) {
			dev_err(dev, "Failed to prepare clk\n");
			return ret;
		}
	}

	data->clk_master = devm_clk_get(dev, "master");
	if (!IS_ERR(data->clk_master)) {
		ret = clk_prepare(data->clk_master);
		if (ret) {
			clk_unprepare(data->clk);
			dev_err(dev, "Failed to prepare master's clk\n");
			return ret;
		}
	}

	data->sysmmu = dev;
	spin_lock_init(&data->lock);

	platform_set_drvdata(pdev, data);

	pm_runtime_enable(dev);

	return 0;
}

static const struct of_device_id sysmmu_of_match[] __initconst = {
	{ .compatible	= "samsung,exynos-sysmmu", },
	{ },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
	.probe	= exynos_sysmmu_probe,
	.driver	= {
		.name		= "exynos-sysmmu",
		.of_match_table	= sysmmu_of_match,
	}
};

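/*
 * Page table updates are made with normal cacheable CPU stores, so both the
 * inner and outer caches must be cleaned before the System MMU walks the
 * tables; otherwise it could fetch stale entries from memory.
 */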
static inline void pgtable_flush(void *vastart, void *vaend)
{
	dmac_flush_range(vastart, vaend);
	outer_flush_range(virt_to_phys(vastart),
				virt_to_phys(vaend));
}

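/*
 * The first-level table holds NUM_LV1ENTRIES (4096) 32-bit entries, i.e.
 * 16KiB, hence the order-2 page allocation below.  The per-section free
 * lv2-entry counters (short) need 8KiB, hence order 1.
 */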
static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
{
	struct exynos_iommu_domain *exynos_domain;
	int i;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	exynos_domain = kzalloc(sizeof(*exynos_domain), GFP_KERNEL);
	if (!exynos_domain)
		return NULL;

	exynos_domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!exynos_domain->pgtable)
		goto err_pgtable;

	exynos_domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!exynos_domain->lv2entcnt)
		goto err_counter;

	/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
	for (i = 0; i < NUM_LV1ENTRIES; i += 8) {
		exynos_domain->pgtable[i + 0] = ZERO_LV2LINK;
		exynos_domain->pgtable[i + 1] = ZERO_LV2LINK;
		exynos_domain->pgtable[i + 2] = ZERO_LV2LINK;
		exynos_domain->pgtable[i + 3] = ZERO_LV2LINK;
		exynos_domain->pgtable[i + 4] = ZERO_LV2LINK;
		exynos_domain->pgtable[i + 5] = ZERO_LV2LINK;
		exynos_domain->pgtable[i + 6] = ZERO_LV2LINK;
		exynos_domain->pgtable[i + 7] = ZERO_LV2LINK;
	}

	pgtable_flush(exynos_domain->pgtable, exynos_domain->pgtable + NUM_LV1ENTRIES);

	spin_lock_init(&exynos_domain->lock);
	spin_lock_init(&exynos_domain->pgtablelock);
	INIT_LIST_HEAD(&exynos_domain->clients);

	exynos_domain->domain.geometry.aperture_start = 0;
	exynos_domain->domain.geometry.aperture_end   = ~0UL;
	exynos_domain->domain.geometry.force_aperture = true;

	return &exynos_domain->domain;

err_counter:
	free_pages((unsigned long)exynos_domain->pgtable, 2);
err_pgtable:
	kfree(exynos_domain);
	return NULL;
}

static void exynos_iommu_domain_free(struct iommu_domain *domain)
{
	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
	struct exynos_iommu_owner *owner;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&priv->clients));

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry(owner, &priv->clients, client) {
		while (!exynos_sysmmu_disable(owner->dev))
			; /* until System MMU is actually disabled */
	}

	while (!list_empty(&priv->clients))
		list_del_init(priv->clients.next);

	spin_unlock_irqrestore(&priv->lock, flags);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(priv->pgtable + i))
			kmem_cache_free(lv2table_kmem_cache,
				phys_to_virt(lv2table_base(priv->pgtable + i)));

	free_pages((unsigned long)priv->pgtable, 2);
	free_pages((unsigned long)priv->lv2entcnt, 1);
	kfree(priv);
}

static int exynos_iommu_attach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
	phys_addr_t pagetable = virt_to_phys(priv->pgtable);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->lock, flags);

	ret = __exynos_sysmmu_enable(dev, pagetable, domain);
	if (ret == 0) {
		list_add_tail(&owner->client, &priv->clients);
		owner->domain = domain;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret < 0) {
		dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
					__func__, &pagetable);
		return ret;
	}

	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
		__func__, &pagetable, (ret == 0) ? "" : ", again");

	return ret;
}

static void exynos_iommu_detach_device(struct iommu_domain *domain,
				    struct device *dev)
{
	struct exynos_iommu_owner *owner;
	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
	phys_addr_t pagetable = virt_to_phys(priv->pgtable);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry(owner, &priv->clients, client) {
		if (owner == dev->archdata.iommu) {
			if (exynos_sysmmu_disable(dev)) {
				list_del_init(&owner->client);
				owner->domain = NULL;
			}
			break;
		}
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (owner == dev->archdata.iommu)
		dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
					__func__, &pagetable);
	else
		dev_err(dev, "%s: No IOMMU is attached\n", __func__);
}

static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *priv,
		sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
		return ERR_PTR(-EADDRINUSE);
	}

	if (lv1ent_fault(sent)) {
		sysmmu_pte_t *pent;
		bool need_flush_flpd_cache = lv1ent_zero(sent);

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		BUG_ON((unsigned int)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return ERR_PTR(-ENOMEM);

		*sent = mk_lv1ent_page(virt_to_phys(pent));
		*pgcounter = NUM_LV2ENTRIES;
		pgtable_flush(pent, pent + NUM_LV2ENTRIES);
		pgtable_flush(sent, sent + 1);

		/*
		 * If pre-fetched SLPD is a faulty SLPD in zero_l2_table,
		 * FLPD cache may cache the address of zero_l2_table. This
		 * function replaces the zero_l2_table with new L2 page table
		 * to write valid mappings.
		 * Accessing the valid area may cause page fault since FLPD
		 * cache may still cache zero_l2_table for the valid area
		 * instead of new L2 page table that has the mapping
		 * information of the valid area.
		 * Thus any replacement of zero_l2_table with other valid L2
		 * page table must involve FLPD cache invalidation for System
		 * MMU v3.3.
		 * FLPD cache invalidation is performed with TLB invalidation
		 * by VPN without blocking. It is safe to invalidate TLB without
		 * blocking because the target address of TLB invalidation is
		 * not currently mapped.
		 */
		if (need_flush_flpd_cache) {
			struct exynos_iommu_owner *owner;

			spin_lock(&priv->lock);
			list_for_each_entry(owner, &priv->clients, client)
				sysmmu_tlb_invalidate_flpdcache(
							owner->dev, iova);
			spin_unlock(&priv->lock);
		}
	}

	return page_entry(sent, iova);
}

static int lv1set_section(struct exynos_iommu_domain *priv,
			  sysmmu_pte_t *sent, sysmmu_iova_t iova,
			  phys_addr_t paddr, short *pgcnt)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
			iova);
		return -EADDRINUSE;
	}

	if (lv1ent_page(sent)) {
		if (*pgcnt != NUM_LV2ENTRIES) {
			WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
				iova);
			return -EADDRINUSE;
		}

		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
		*pgcnt = 0;
	}

	*sent = mk_lv1ent_sect(paddr);

	pgtable_flush(sent, sent + 1);

	spin_lock(&priv->lock);
	if (lv1ent_page_zero(sent)) {
		struct exynos_iommu_owner *owner;
		/*
		 * Flushing FLPD cache in System MMU v3.3 that may cache a FLPD
		 * entry by speculative prefetch of SLPD which has no mapping.
		 */
		list_for_each_entry(owner, &priv->clients, client)
			sysmmu_tlb_invalidate_flpdcache(owner->dev, iova);
	}
	spin_unlock(&priv->lock);

	return 0;
}

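/*
 * A 4KiB small page is written as a single lv2 entry; a 64KiB large page is
 * written as SPAGES_PER_LPAGE (16) identical entries.  If any slot is already
 * in use, the entries written so far are rolled back and -EADDRINUSE is
 * returned.
 */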
static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
								short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (WARN_ON(!lv2ent_fault(pent)))
			return -EADDRINUSE;

		*pent = mk_lv2ent_spage(paddr);
		pgtable_flush(pent, pent + 1);
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;

		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (WARN_ON(!lv2ent_fault(pent))) {
				if (i > 0)
					memset(pent - i, 0, sizeof(*pent) * i);
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr);
		}
		pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}

/*
 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
 *
 * System MMU v3.x has advanced logic to improve address translation
 * performance with caching more page table entries by a page table walk.
 * However, the logic has a bug that while caching faulty page table entries,
 * System MMU reports page fault if the cached fault entry is hit even though
 * the fault entry is updated to a valid entry after the entry is cached.
 * To prevent caching faulty page table entries which may be updated to valid
 * entries later, the virtual memory manager should care about the workaround
 * for the problem. The following describes the workaround.
 *
 * Any two consecutive I/O virtual address regions must have a hole of 128KiB
 * at maximum to prevent misbehavior of System MMU 3.x (workaround for h/w bug).
 *
 * Precisely, any start address of I/O virtual region must be aligned with
 * the following sizes for System MMU v3.1 and v3.2.
 * System MMU v3.1: 128KiB
 * System MMU v3.2: 256KiB
 *
 * Because System MMU v3.3 caches page table entries more aggressively, it needs
 * more workarounds.
 * - Any two consecutive I/O virtual regions must have a hole of size larger
 *   than or equal to 128KiB.
 * - Start address of an I/O virtual region must be aligned by 128KiB.
 */

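/*
 * exynos_iommu_map() handles exactly one page of a supported size per call
 * (1MiB section, 64KiB large page or 4KiB small page); the IOMMU core splits
 * larger requests according to pgsize_bitmap before calling in.
 */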
static int exynos_iommu_map(struct iommu_domain *domain, unsigned long l_iova,
			 phys_addr_t paddr, size_t size, int prot)
{
	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
	sysmmu_pte_t *entry;
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(priv->pgtable == NULL);

	spin_lock_irqsave(&priv->pgtablelock, flags);

	entry = section_entry(priv->pgtable, iova);

	if (size == SECT_SIZE) {
		ret = lv1set_section(priv, entry, iova, paddr,
					&priv->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		sysmmu_pte_t *pent;

		pent = alloc_lv2entry(priv, entry, iova,
					&priv->lv2entcnt[lv1ent_offset(iova)]);

		if (IS_ERR(pent))
			ret = PTR_ERR(pent);
		else
			ret = lv2set_page(pent, paddr, size,
				       &priv->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret)
		pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
			__func__, ret, size, iova);

	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	return ret;
}

static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *priv,
						sysmmu_iova_t iova, size_t size)
{
	struct exynos_iommu_owner *owner;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry(owner, &priv->clients, client)
		sysmmu_tlb_invalidate_entry(owner->dev, iova, size);

	spin_unlock_irqrestore(&priv->lock, flags);
}

static size_t exynos_iommu_unmap(struct iommu_domain *domain,
					unsigned long l_iova, size_t size)
{
	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	sysmmu_pte_t *ent;
	size_t err_pgsize;
	unsigned long flags;

	BUG_ON(priv->pgtable == NULL);

	spin_lock_irqsave(&priv->pgtablelock, flags);

	ent = section_entry(priv->pgtable, iova);

	if (lv1ent_section(ent)) {
		if (WARN_ON(size < SECT_SIZE)) {
			err_pgsize = SECT_SIZE;
			goto err;
		}

		/* workaround for h/w bug in System MMU v3.3 */
		*ent = ZERO_LV2LINK;
		pgtable_flush(ent, ent + 1);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(sent) == true here */

	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		*ent = 0;
		size = SPAGE_SIZE;
		pgtable_flush(ent, ent + 1);
		priv->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv2ent_large(ent) == true here */
	if (WARN_ON(size < LPAGE_SIZE)) {
		err_pgsize = LPAGE_SIZE;
		goto err;
	}

	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
	pgtable_flush(ent, ent + SPAGES_PER_LPAGE);

	size = LPAGE_SIZE;
	priv->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;

done:
	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	exynos_iommu_tlb_invalidate_entry(priv, iova, size);

	return size;
err:
	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
		__func__, size, iova, err_pgsize);

	return 0;
}

static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t iova)
{
	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
	sysmmu_pte_t *entry;
	unsigned long flags;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&priv->pgtablelock, flags);

	entry = section_entry(priv->pgtable, iova);

	if (lv1ent_section(entry)) {
		phys = section_phys(entry) + section_offs(iova);
	} else if (lv1ent_page(entry)) {
		entry = page_entry(entry, iova);

		if (lv2ent_large(entry))
			phys = lpage_phys(entry) + lpage_offs(iova);
		else if (lv2ent_small(entry))
			phys = spage_phys(entry) + spage_offs(iova);
	}

	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	return phys;
}

static int exynos_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);

	if (!group) {
		group = iommu_group_alloc();
		if (IS_ERR(group)) {
			dev_err(dev, "Failed to allocate IOMMU group\n");
			return PTR_ERR(group);
		}
	}

	ret = iommu_group_add_device(group, dev);
	iommu_group_put(group);

	return ret;
}

static void exynos_iommu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

static const struct iommu_ops exynos_iommu_ops = {
	.domain_alloc = exynos_iommu_domain_alloc,
	.domain_free = exynos_iommu_domain_free,
	.attach_dev = exynos_iommu_attach_device,
	.detach_dev = exynos_iommu_detach_device,
	.map = exynos_iommu_map,
	.unmap = exynos_iommu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = exynos_iommu_iova_to_phys,
	.add_device = exynos_iommu_add_device,
	.remove_device = exynos_iommu_remove_device,
	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
};

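/*
 * Module initialization: bail out if the device tree contains no System MMU
 * node.  Otherwise create the kmem cache for second-level tables (size and
 * alignment are both LV2TABLE_SIZE so a table never crosses its natural
 * alignment), register the platform driver, allocate the shared zero lv2
 * table and finally install exynos_iommu_ops on the platform bus.
 */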
static int __init exynos_iommu_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, sysmmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
				LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
	if (!lv2table_kmem_cache) {
		pr_err("%s: Failed to create kmem cache\n", __func__);
		return -ENOMEM;
	}

	ret = platform_driver_register(&exynos_sysmmu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		goto err_reg_driver;
	}

	zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
	if (zero_lv2_table == NULL) {
		pr_err("%s: Failed to allocate zero level2 page table\n",
			__func__);
		ret = -ENOMEM;
		goto err_zero_lv2;
	}

	ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
	if (ret) {
		pr_err("%s: Failed to register exynos-iommu driver.\n",
								__func__);
		goto err_set_iommu;
	}

	return 0;
err_set_iommu:
	kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
	platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver:
	kmem_cache_destroy(lv2table_kmem_cache);
	return ret;
}
subsys_initcall(exynos_iommu_init);