/* linux/drivers/iommu/exynos_iommu.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include <asm/cacheflush.h>
#include <asm/dma-iommu.h>
#include <asm/pgtable.h>
typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))
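/*
 * Two-level page table layout implied by the sizes above: the first level
 * table maps the IOVA space in 1MiB sections, and a section entry may
 * instead point to a second level table that maps 64KiB large pages and
 * 4KiB small pages (a large page occupies 16 consecutive small-page
 * entries, see SPAGES_PER_LPAGE below).
 */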
#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
			   ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
#define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
			  ((*(sent) & 3) == 1))
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)
static u32 sysmmu_page_offset(sysmmu_iova_t iova, u32 size)
{
	return iova & (size - 1);
}

#define section_phys(sent) (*(sent) & SECT_MASK)
#define section_offs(iova) sysmmu_page_offset((iova), SECT_SIZE)
#define lpage_phys(pent) (*(pent) & LPAGE_MASK)
#define lpage_offs(iova) sysmmu_page_offset((iova), LPAGE_SIZE)
#define spage_phys(pent) (*(pent) & SPAGE_MASK)
#define spage_offs(iova) sysmmu_page_offset((iova), SPAGE_SIZE)

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)

static u32 lv1ent_offset(sysmmu_iova_t iova)
{
	return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}

#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))
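/*
 * Illustrative example of the IOVA split done by the helpers above: for
 * iova = 0x12345678, lv1ent_offset() returns 0x123 (bits 31:20 select the
 * 1MiB section) and lv2ent_offset() returns 0x45 (bits 19:12 select one of
 * the NUM_LV2ENTRIES second-level entries); bits 11:0 are the offset inside
 * a 4KiB small page.
 */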
#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)

#define lv2table_base(sent) (*(sent) & 0xFFFFFC00)

#define mk_lv1ent_sect(pa) ((pa) | 2)
#define mk_lv1ent_page(pa) ((pa) | 1)
#define mk_lv2ent_lpage(pa) ((pa) | 1)
#define mk_lv2ent_spage(pa) ((pa) | 2)

#define CTRL_ENABLE	0x5
#define CTRL_BLOCK	0x7
#define CTRL_DISABLE	0x0

#define CFG_LRU		0x1
#define CFG_QOS(n)	((n & 0xF) << 7)
#define CFG_MASK	0x0150FFFF /* Selecting bit 0-15, 20, 22 and 24 */
#define CFG_ACGEN	(1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL	(1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE	(1 << 20) /* System MMU 3.2+ only */
#define REG_MMU_CTRL		0x000
#define REG_MMU_CFG		0x004
#define REG_MMU_STATUS		0x008
#define REG_MMU_FLUSH		0x00C
#define REG_MMU_FLUSH_ENTRY	0x010
#define REG_PT_BASE_ADDR	0x014
#define REG_INT_STATUS		0x018
#define REG_INT_CLEAR		0x01C

#define REG_PAGE_FAULT_ADDR	0x024
#define REG_AW_FAULT_ADDR	0x028
#define REG_AR_FAULT_ADDR	0x02C
#define REG_DEFAULT_SLAVE_ADDR	0x030

#define REG_MMU_VERSION		0x034

#define MMU_MAJ_VER(val)	((val) >> 7)
#define MMU_MIN_VER(val)	((val) & 0x7F)
#define MMU_RAW_VER(reg)	(((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min)	((((maj) & 0xF) << 7) | ((min) & 0x7F))
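/*
 * Version handling example (illustrative): MMU_RAW_VER() extracts the 11-bit
 * raw version from bits 31:21 of REG_MMU_VERSION; within that value the
 * upper bits are the major and the lower 7 bits the minor version. For
 * System MMU v3.3, MAKE_MMU_VER(3, 3) yields (3 << 7) | 3 = 0x183, which is
 * the value __sysmmu_init_config() stores in data->version for later
 * comparisons.
 */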
#define REG_PB0_SADDR		0x04C
#define REG_PB0_EADDR		0x050
#define REG_PB1_SADDR		0x054
#define REG_PB1_EADDR		0x058

#define has_sysmmu(dev)		(dev->archdata.iommu != NULL)

static struct kmem_cache *lv2table_kmem_cache;
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))
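/*
 * zero_lv2_table is a single lv2 table filled with fault entries. Unused
 * lv1 entries point to it via ZERO_LV2LINK instead of being left as plain
 * fault entries; this is part of the workaround for the System MMU v3.3
 * FLPD cache described in alloc_lv2entry() and lv1set_section() below.
 */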
static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
	return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
	return (sysmmu_pte_t *)phys_to_virt(
				lv2table_base(sent)) + lv2ent_offset(iova);
}
enum exynos_sysmmu_inttype {
	SYSMMU_PAGEFAULT,
	SYSMMU_AR_MULTIHIT,
	SYSMMU_AW_MULTIHIT,
	SYSMMU_BUSERROR,
	SYSMMU_AR_SECURITY,
	SYSMMU_AR_ACCESS,
	SYSMMU_AW_SECURITY,
	SYSMMU_AW_PROTECTION, /* 7 */
	SYSMMU_FAULT_UNKNOWN,
	SYSMMU_FAULTS_NUM
};

static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
	REG_PAGE_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_DEFAULT_SLAVE_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_AW_FAULT_ADDR
};

static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
	"PAGE FAULT",
	"AR MULTI-HIT FAULT",
	"AW MULTI-HIT FAULT",
	"BUS ERROR",
	"AR SECURITY PROTECTION FAULT",
	"AR ACCESS PROTECTION FAULT",
	"AW SECURITY PROTECTION FAULT",
	"AW ACCESS PROTECTION FAULT",
	"UNKNOWN FAULT"
};
/*
 * This structure is attached to dev.archdata.iommu of the master device
 * on device add. It contains a list of SYSMMU controllers defined by
 * device tree, which are bound to the given master device. It is usually
 * referenced by the 'owner' pointer.
 */
struct exynos_iommu_owner {
	struct list_head controllers;	/* list of sysmmu_drvdata.owner_node */
};

/*
 * This structure is an exynos-specific generalization of struct iommu_domain.
 * It contains the list of SYSMMU controllers from all master devices that
 * have been attached to this domain, and the page tables of the IO address
 * space defined by it. It is usually referenced by the 'domain' pointer.
 */
struct exynos_iommu_domain {
	struct list_head clients; /* list of sysmmu_drvdata.domain_node */
	sysmmu_pte_t *pgtable;	/* lv1 page table, 16KB */
	short *lv2entcnt;	/* free lv2 entry counter for each section */
	spinlock_t lock;	/* lock for modifying list of clients */
	spinlock_t pgtablelock;	/* lock for modifying page table @ pgtable */
	struct iommu_domain domain; /* generic domain data structure */
};

/*
 * This structure holds all data of a single SYSMMU controller. This includes
 * hw resources like registers and clocks, pointers and list nodes to connect
 * it to all other structures, internal state and parameters read from device
 * tree. It is usually referenced by the 'data' pointer.
 */
struct sysmmu_drvdata {
	struct device *sysmmu;		/* SYSMMU controller device */
	struct device *master;		/* master device (owner) */
	void __iomem *sfrbase;		/* our registers */
	struct clk *clk;		/* SYSMMU's clock */
	struct clk *clk_master;		/* master's device clock */
	int activations;		/* number of calls to sysmmu_enable */
	spinlock_t lock;		/* lock for modifying state */
	struct exynos_iommu_domain *domain; /* domain we belong to */
	struct list_head domain_node;	/* node for domain clients list */
	struct list_head owner_node;	/* node for owner controllers list */
	phys_addr_t pgtable;		/* assigned page table structure */
	unsigned int version;		/* our version */
};
static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct exynos_iommu_domain, domain);
}

static bool set_sysmmu_active(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU was not active previously
	   and it needs to be initialized */
	return ++data->activations == 1;
}

static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU is needed to be disabled */
	BUG_ON(data->activations < 1);
	return --data->activations == 0;
}

static bool is_sysmmu_active(struct sysmmu_drvdata *data)
{
	return data->activations > 0;
}
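/*
 * The 'activations' counter above is a plain reference count: the hardware
 * is only touched on the 0 -> 1 transition (set_sysmmu_active() returns true)
 * and on the 1 -> 0 transition (set_sysmmu_inactive() returns true). The
 * enable and disable paths call these helpers with data->lock held.
 */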
static void sysmmu_unblock(void __iomem *sfrbase)
{
	__raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
}

static bool sysmmu_block(void __iomem *sfrbase)
{
	int i = 120;

	__raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
		--i;

	if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
		sysmmu_unblock(sfrbase);
		return false;
	}

	return true;
}
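/*
 * Block/unblock handshake used around TLB maintenance: sysmmu_block() writes
 * CTRL_BLOCK and then polls bit 0 of REG_MMU_STATUS (up to 120 iterations)
 * until the MMU reports that translation has stopped; if it never does, the
 * MMU is re-enabled and false is returned so the caller skips the maintenance
 * operation. sysmmu_unblock() simply re-enables translation.
 */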
static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
{
	__raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
}

static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
				sysmmu_iova_t iova, unsigned int num_inv)
{
	unsigned int i;

	for (i = 0; i < num_inv; i++) {
		__raw_writel((iova & SPAGE_MASK) | 1,
				sfrbase + REG_MMU_FLUSH_ENTRY);
		iova += SPAGE_SIZE;
	}
}

static void __sysmmu_set_ptbase(void __iomem *sfrbase,
				phys_addr_t pgd)
{
	__raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);

	__sysmmu_tlb_invalidate(sfrbase);
}
static void show_fault_information(const char *name,
		enum exynos_sysmmu_inttype itype,
		phys_addr_t pgtable_base, sysmmu_iova_t fault_addr)
{
	sysmmu_pte_t *ent;

	if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
		itype = SYSMMU_FAULT_UNKNOWN;

	pr_err("%s occurred at %#x by %s(Page table base: %pa)\n",
		sysmmu_fault_name[itype], fault_addr, name, &pgtable_base);

	ent = section_entry(phys_to_virt(pgtable_base), fault_addr);
	pr_err("\tLv1 entry: %#x\n", *ent);

	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault_addr);
		pr_err("\tLv2 entry: %#x\n", *ent);
	}
}
static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	/* SYSMMU is in blocked state when interrupt occurred. */
	struct sysmmu_drvdata *data = dev_id;
	enum exynos_sysmmu_inttype itype;
	sysmmu_iova_t addr = -1;
	int ret = -ENOSYS;

	WARN_ON(!is_sysmmu_active(data));

	spin_lock(&data->lock);

	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	itype = (enum exynos_sysmmu_inttype)
		__ffs(__raw_readl(data->sfrbase + REG_INT_STATUS));
	if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
		itype = SYSMMU_FAULT_UNKNOWN;
	else
		addr = __raw_readl(data->sfrbase + fault_reg_offset[itype]);

	if (itype == SYSMMU_FAULT_UNKNOWN) {
		pr_err("%s: Fault is not occurred by System MMU '%s'!\n",
			__func__, dev_name(data->sysmmu));
		pr_err("%s: Please check if IRQ is correctly configured.\n",
			__func__);
		BUG();
	} else {
		unsigned int base =
				__raw_readl(data->sfrbase + REG_PT_BASE_ADDR);
		show_fault_information(dev_name(data->sysmmu),
					itype, base, addr);
		if (data->domain)
			ret = report_iommu_fault(&data->domain->domain,
					data->master, addr, itype);
	}

	/* fault is not recovered by fault handler */
	BUG_ON(ret != 0);

	__raw_writel(1 << itype, data->sfrbase + REG_INT_CLEAR);

	sysmmu_unblock(data->sfrbase);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);

	spin_unlock(&data->lock);

	return IRQ_HANDLED;
}
static void __sysmmu_disable_nocount(struct sysmmu_drvdata *data)
{
	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	__raw_writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
	__raw_writel(0, data->sfrbase + REG_MMU_CFG);

	clk_disable(data->clk);
	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);
}
static bool __sysmmu_disable(struct sysmmu_drvdata *data)
{
	bool disabled;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);

	disabled = set_sysmmu_inactive(data);

	if (disabled) {
		data->pgtable = 0;
		data->domain = NULL;

		__sysmmu_disable_nocount(data);

		dev_dbg(data->sysmmu, "Disabled\n");
	} else {
		dev_dbg(data->sysmmu, "%d times left to disable\n",
					data->activations);
	}

	spin_unlock_irqrestore(&data->lock, flags);

	return disabled;
}
static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
	unsigned int cfg = CFG_LRU | CFG_QOS(15);
	unsigned int ver;

	ver = MMU_RAW_VER(__raw_readl(data->sfrbase + REG_MMU_VERSION));
	if (MMU_MAJ_VER(ver) == 3) {
		if (MMU_MIN_VER(ver) >= 2) {
			cfg |= CFG_FLPDCACHE;
			if (MMU_MIN_VER(ver) == 3) {
				cfg |= CFG_ACGEN;
				cfg &= ~CFG_LRU;
			} else {
				cfg |= CFG_SYSSEL;
			}
		}
	}

	__raw_writel(cfg, data->sfrbase + REG_MMU_CFG);
	data->version = ver;
}

static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
{
	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);
	clk_enable(data->clk);

	__raw_writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);

	__sysmmu_init_config(data);

	__sysmmu_set_ptbase(data->sfrbase, data->pgtable);

	__raw_writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);
}
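/*
 * Enable sequence used above: with the clocks on, the MMU is first put into
 * the blocked state, the version-dependent configuration is programmed into
 * REG_MMU_CFG, the page table base is set (which also flushes the TLB), and
 * only then is translation turned on with CTRL_ENABLE.
 */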
static int __sysmmu_enable(struct sysmmu_drvdata *data, phys_addr_t pgtable,
			   struct exynos_iommu_domain *domain)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (set_sysmmu_active(data)) {
		data->pgtable = pgtable;
		data->domain = domain;

		__sysmmu_enable_nocount(data);

		dev_dbg(data->sysmmu, "Enabled\n");
	} else {
		ret = (pgtable == data->pgtable) ? 1 : -EBUSY;

		dev_dbg(data->sysmmu, "already enabled\n");
	}

	if (WARN_ON(ret < 0))
		set_sysmmu_inactive(data); /* decrement count */

	spin_unlock_irqrestore(&data->lock, flags);

	return ret;
}
static void __sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					      sysmmu_iova_t iova)
{
	if (data->version == MAKE_MMU_VER(3, 3))
		__raw_writel(iova | 0x1, data->sfrbase + REG_MMU_FLUSH_ENTRY);
}
static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					    sysmmu_iova_t iova)
{
	unsigned long flags;

	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data))
		__sysmmu_tlb_invalidate_flpdcache(data, iova);
	spin_unlock_irqrestore(&data->lock, flags);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);
}
static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					sysmmu_iova_t iova, size_t size)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data)) {
		unsigned int num_inv = 1;

		if (!IS_ERR(data->clk_master))
			clk_enable(data->clk_master);

		/*
		 * L2TLB invalidation required
		 * 4KB page: 1 invalidation
		 * 64KB page: 16 invalidations
		 * 1MB page: 64 invalidations
		 * because it is set-associative TLB
		 * with 8-way and 64 sets.
		 * 1MB page can be cached in one of all sets.
		 * 64KB page can be one of 16 consecutive sets.
		 */
		if (MMU_MAJ_VER(data->version) == 2)
			num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);

		if (sysmmu_block(data->sfrbase)) {
			__sysmmu_tlb_invalidate_entry(
				data->sfrbase, iova, num_inv);
			sysmmu_unblock(data->sfrbase);
		}
		if (!IS_ERR(data->clk_master))
			clk_disable(data->clk_master);
	} else {
		dev_dbg(data->master,
			"disabled. Skipping TLB invalidation @ %#x\n", iova);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}
static int __init exynos_sysmmu_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct device *dev = &pdev->dev;
	struct sysmmu_drvdata *data;
	struct resource *res;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->sfrbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->sfrbase))
		return PTR_ERR(data->sfrbase);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "Unable to find IRQ resource\n");
		return irq;
	}

	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
				dev_name(dev), data);
	if (ret) {
		dev_err(dev, "Unable to register handler of irq %d\n", irq);
		return ret;
	}

	data->clk = devm_clk_get(dev, "sysmmu");
	if (IS_ERR(data->clk)) {
		dev_err(dev, "Failed to get clock!\n");
		return PTR_ERR(data->clk);
	} else {
		ret = clk_prepare(data->clk);
		if (ret) {
			dev_err(dev, "Failed to prepare clk\n");
			return ret;
		}
	}

	data->clk_master = devm_clk_get(dev, "master");
	if (!IS_ERR(data->clk_master)) {
		ret = clk_prepare(data->clk_master);
		if (ret) {
			clk_unprepare(data->clk);
			dev_err(dev, "Failed to prepare master's clk\n");
			return ret;
		}
	}

	data->sysmmu = dev;
	spin_lock_init(&data->lock);

	platform_set_drvdata(pdev, data);

	pm_runtime_enable(dev);

	return 0;
}
#ifdef CONFIG_PM_SLEEP
static int exynos_sysmmu_suspend(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);

	dev_dbg(dev, "suspend\n");
	if (is_sysmmu_active(data)) {
		__sysmmu_disable_nocount(data);
		pm_runtime_put(dev);
	}
	return 0;
}

static int exynos_sysmmu_resume(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);

	dev_dbg(dev, "resume\n");
	if (is_sysmmu_active(data)) {
		pm_runtime_get_sync(dev);
		__sysmmu_enable_nocount(data);
	}
	return 0;
}
#endif

static const struct dev_pm_ops sysmmu_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume)
};
static const struct of_device_id sysmmu_of_match[] __initconst = {
	{ .compatible	= "samsung,exynos-sysmmu", },
	{ },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
	.probe	= exynos_sysmmu_probe,
	.driver	= {
		.name		= "exynos-sysmmu",
		.of_match_table	= sysmmu_of_match,
		.pm		= &sysmmu_pm_ops,
	}
};
static inline void pgtable_flush(void *vastart, void *vaend)
{
	dmac_flush_range(vastart, vaend);
	outer_flush_range(virt_to_phys(vastart),
				virt_to_phys(vaend));
}
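/*
 * The SYSMMU walks the page tables in physical memory, so every CPU-side
 * update of an lv1 or lv2 entry is followed by pgtable_flush(), which pushes
 * the affected range out of the inner (dmac_flush_range) and outer
 * (outer_flush_range) caches before the hardware may fetch it.
 */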
static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
{
	struct exynos_iommu_domain *domain;
	int i;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!domain->pgtable)
		goto err_pgtable;

	domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!domain->lv2entcnt)
		goto err_counter;

	/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
	for (i = 0; i < NUM_LV1ENTRIES; i += 8) {
		domain->pgtable[i + 0] = ZERO_LV2LINK;
		domain->pgtable[i + 1] = ZERO_LV2LINK;
		domain->pgtable[i + 2] = ZERO_LV2LINK;
		domain->pgtable[i + 3] = ZERO_LV2LINK;
		domain->pgtable[i + 4] = ZERO_LV2LINK;
		domain->pgtable[i + 5] = ZERO_LV2LINK;
		domain->pgtable[i + 6] = ZERO_LV2LINK;
		domain->pgtable[i + 7] = ZERO_LV2LINK;
	}

	pgtable_flush(domain->pgtable, domain->pgtable + NUM_LV1ENTRIES);

	spin_lock_init(&domain->lock);
	spin_lock_init(&domain->pgtablelock);
	INIT_LIST_HEAD(&domain->clients);

	domain->domain.geometry.aperture_start = 0;
	domain->domain.geometry.aperture_end   = ~0UL;
	domain->domain.geometry.force_aperture = true;

	return &domain->domain;

err_counter:
	free_pages((unsigned long)domain->pgtable, 2);
err_pgtable:
	kfree(domain);
	return NULL;
}
static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&domain->clients));

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		if (__sysmmu_disable(data))
			data->master = NULL;
		list_del_init(&data->domain_node);
	}

	spin_unlock_irqrestore(&domain->lock, flags);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(domain->pgtable + i))
			kmem_cache_free(lv2table_kmem_cache,
				phys_to_virt(lv2table_base(domain->pgtable + i)));

	free_pages((unsigned long)domain->pgtable, 2);
	free_pages((unsigned long)domain->lv2entcnt, 1);
	kfree(domain);
}
static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
				      struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data;
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	unsigned long flags;
	int ret = -ENODEV;

	if (!has_sysmmu(dev))
		return -ENODEV;

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_sync(data->sysmmu);
		ret = __sysmmu_enable(data, pagetable, domain);
		if (ret >= 0) {
			data->master = dev;

			spin_lock_irqsave(&domain->lock, flags);
			list_add_tail(&data->domain_node, &domain->clients);
			spin_unlock_irqrestore(&domain->lock, flags);
		}
	}

	if (ret < 0) {
		dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
			__func__, &pagetable);
		return ret;
	}

	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
		__func__, &pagetable, (ret == 0) ? "" : ", again");

	return ret;
}
static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
				       struct device *dev)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;
	bool found = false;

	if (!has_sysmmu(dev))
		return;

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		if (data->master == dev) {
			if (__sysmmu_disable(data)) {
				data->master = NULL;
				list_del_init(&data->domain_node);
			}
			pm_runtime_put(data->sysmmu);
			found = true;
		}
	}
	spin_unlock_irqrestore(&domain->lock, flags);

	if (found)
		dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
					__func__, &pagetable);
	else
		dev_err(dev, "%s: No IOMMU is attached\n", __func__);
}
static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
		sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
		return ERR_PTR(-EADDRINUSE);
	}

	if (lv1ent_fault(sent)) {
		sysmmu_pte_t *pent;
		bool need_flush_flpd_cache = lv1ent_zero(sent);

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		BUG_ON((unsigned int)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return ERR_PTR(-ENOMEM);

		*sent = mk_lv1ent_page(virt_to_phys(pent));
		kmemleak_ignore(pent);
		*pgcounter = NUM_LV2ENTRIES;
		pgtable_flush(pent, pent + NUM_LV2ENTRIES);
		pgtable_flush(sent, sent + 1);

		/*
		 * If pre-fetched SLPD is a faulty SLPD in zero_l2_table,
		 * FLPD cache may cache the address of zero_l2_table. This
		 * function replaces the zero_l2_table with new L2 page table
		 * to write valid mappings.
		 * Accessing the valid area may cause page fault since FLPD
		 * cache may still cache zero_l2_table for the valid area
		 * instead of new L2 page table that has the mapping
		 * information of the valid area.
		 * Thus any replacement of zero_l2_table with other valid L2
		 * page table must involve FLPD cache invalidation for System
		 * MMU v3.3.
		 * FLPD cache invalidation is performed with TLB invalidation
		 * by VPN without blocking. It is safe to invalidate TLB without
		 * blocking because the target address of TLB invalidation is
		 * not currently mapped.
		 */
		if (need_flush_flpd_cache) {
			struct sysmmu_drvdata *data;

			spin_lock(&domain->lock);
			list_for_each_entry(data, &domain->clients, domain_node)
				sysmmu_tlb_invalidate_flpdcache(data, iova);
			spin_unlock(&domain->lock);
		}
	}

	return page_entry(sent, iova);
}
static int lv1set_section(struct exynos_iommu_domain *domain,
			  sysmmu_pte_t *sent, sysmmu_iova_t iova,
			  phys_addr_t paddr, short *pgcnt)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
			iova);
		return -EADDRINUSE;
	}

	if (lv1ent_page(sent)) {
		if (*pgcnt != NUM_LV2ENTRIES) {
			WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
				iova);
			return -EADDRINUSE;
		}

		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
		*pgcnt = 0;
	}

	*sent = mk_lv1ent_sect(paddr);

	pgtable_flush(sent, sent + 1);

	spin_lock(&domain->lock);
	if (lv1ent_page_zero(sent)) {
		struct sysmmu_drvdata *data;
		/*
		 * Flushing FLPD cache in System MMU v3.3 that may cache a FLPD
		 * entry by speculative prefetch of SLPD which has no mapping.
		 */
		list_for_each_entry(data, &domain->clients, domain_node)
			sysmmu_tlb_invalidate_flpdcache(data, iova);
	}
	spin_unlock(&domain->lock);

	return 0;
}
static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
		       short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (WARN_ON(!lv2ent_fault(pent)))
			return -EADDRINUSE;

		*pent = mk_lv2ent_spage(paddr);
		pgtable_flush(pent, pent + 1);
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;

		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (WARN_ON(!lv2ent_fault(pent))) {
				if (i > 0)
					memset(pent - i, 0, sizeof(*pent) * i);
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr);
		}
		pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}
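/*
 * Bookkeeping note: lv2entcnt[] counts free entries per lv2 table. It starts
 * at NUM_LV2ENTRIES when alloc_lv2entry() installs a fresh table,
 * lv2set_page() subtracts 1 (small page) or SPAGES_PER_LPAGE (large page)
 * per mapping, and exynos_iommu_unmap() adds the same amounts back when
 * mappings are removed.
 */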
/*
 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
 *
 * System MMU v3.x has advanced logic to improve address translation
 * performance with caching more page table entries by a page table walk.
 * However, the logic has a bug that while caching faulty page table entries,
 * System MMU reports page fault if the cached fault entry is hit even though
 * the fault entry is updated to a valid entry after the entry is cached.
 * To prevent caching faulty page table entries which may be updated to valid
 * entries later, the virtual memory manager should care about the workaround
 * for the problem. The following describes the workaround.
 *
 * Any two consecutive I/O virtual address regions must have a hole of 128KiB
 * at maximum to prevent misbehavior of System MMU 3.x (workaround for h/w bug).
 *
 * Precisely, any start address of I/O virtual region must be aligned with
 * the following sizes for System MMU v3.1 and v3.2.
 * System MMU v3.1: 128KiB
 * System MMU v3.2: 256KiB
 *
 * Because System MMU v3.3 caches page table entries more aggressively, it needs
 * more workarounds.
 * - Any two consecutive I/O virtual regions must have a hole of size larger
 *   than or equal to 128KiB.
 * - Start address of an I/O virtual region must be aligned by 128KiB.
 */
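/*
 * Restating the v3.3 rule above as a concrete sketch (not an additional
 * requirement of this driver): an IOVA allocator used with System MMU v3.3
 * should round every region start up to a 128KiB boundary and keep at least
 * a 128KiB unmapped gap before the next region.
 */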
static int exynos_iommu_map(struct iommu_domain *iommu_domain,
			    unsigned long l_iova, phys_addr_t paddr, size_t size,
			    int prot)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(domain->pgtable == NULL);

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (size == SECT_SIZE) {
		ret = lv1set_section(domain, entry, iova, paddr,
				     &domain->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		sysmmu_pte_t *pent;

		pent = alloc_lv2entry(domain, entry, iova,
				      &domain->lv2entcnt[lv1ent_offset(iova)]);

		if (IS_ERR(pent))
			ret = PTR_ERR(pent);
		else
			ret = lv2set_page(pent, paddr, size,
				       &domain->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret)
		pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
			__func__, ret, size, iova);

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return ret;
}
static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain,
					      sysmmu_iova_t iova, size_t size)
{
	struct sysmmu_drvdata *data;
	unsigned long flags;

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry(data, &domain->clients, domain_node)
		sysmmu_tlb_invalidate_entry(data, iova, size);

	spin_unlock_irqrestore(&domain->lock, flags);
}
static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
				 unsigned long l_iova, size_t size)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	sysmmu_pte_t *ent;
	size_t err_pgsize;
	unsigned long flags;

	BUG_ON(domain->pgtable == NULL);

	spin_lock_irqsave(&domain->pgtablelock, flags);

	ent = section_entry(domain->pgtable, iova);

	if (lv1ent_section(ent)) {
		if (WARN_ON(size < SECT_SIZE)) {
			err_pgsize = SECT_SIZE;
			goto err;
		}

		/* workaround for h/w bug in System MMU v3.3 */
		*ent = ZERO_LV2LINK;
		pgtable_flush(ent, ent + 1);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(sent) == true here */

	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		*ent = 0;
		size = SPAGE_SIZE;
		pgtable_flush(ent, ent + 1);
		domain->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv2ent_large(ent) == true here */
	if (WARN_ON(size < LPAGE_SIZE)) {
		err_pgsize = LPAGE_SIZE;
		goto err;
	}

	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
	pgtable_flush(ent, ent + SPAGES_PER_LPAGE);

	size = LPAGE_SIZE;
	domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	exynos_iommu_tlb_invalidate_entry(domain, iova, size);

	return size;
err:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
		__func__, size, iova, err_pgsize);

	return 0;
}
static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
					     dma_addr_t iova)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	unsigned long flags;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (lv1ent_section(entry)) {
		phys = section_phys(entry) + section_offs(iova);
	} else if (lv1ent_page(entry)) {
		entry = page_entry(entry, iova);

		if (lv2ent_large(entry))
			phys = lpage_phys(entry) + lpage_offs(iova);
		else if (lv2ent_small(entry))
			phys = spage_phys(entry) + spage_offs(iova);
	}

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return phys;
}
static int exynos_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	if (!has_sysmmu(dev))
		return -ENODEV;

	group = iommu_group_get(dev);

	if (!group) {
		group = iommu_group_alloc();
		if (IS_ERR(group)) {
			dev_err(dev, "Failed to allocate IOMMU group\n");
			return PTR_ERR(group);
		}
	}

	ret = iommu_group_add_device(group, dev);
	iommu_group_put(group);

	return ret;
}

static void exynos_iommu_remove_device(struct device *dev)
{
	if (!has_sysmmu(dev))
		return;

	iommu_group_remove_device(dev);
}
static int exynos_iommu_of_xlate(struct device *dev,
				 struct of_phandle_args *spec)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct platform_device *sysmmu = of_find_device_by_node(spec->np);
	struct sysmmu_drvdata *data;

	if (!sysmmu)
		return -ENODEV;

	data = platform_get_drvdata(sysmmu);
	if (!data)
		return -ENODEV;

	if (!owner) {
		owner = kzalloc(sizeof(*owner), GFP_KERNEL);
		if (!owner)
			return -ENOMEM;

		INIT_LIST_HEAD(&owner->controllers);
		dev->archdata.iommu = owner;
	}

	list_add_tail(&data->owner_node, &owner->controllers);
	return 0;
}
static struct iommu_ops exynos_iommu_ops = {
	.domain_alloc = exynos_iommu_domain_alloc,
	.domain_free = exynos_iommu_domain_free,
	.attach_dev = exynos_iommu_attach_device,
	.detach_dev = exynos_iommu_detach_device,
	.map = exynos_iommu_map,
	.unmap = exynos_iommu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = exynos_iommu_iova_to_phys,
	.add_device = exynos_iommu_add_device,
	.remove_device = exynos_iommu_remove_device,
	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
	.of_xlate = exynos_iommu_of_xlate,
};
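/*
 * pgsize_bitmap above advertises exactly the three mapping sizes this driver
 * implements (1MiB sections, 64KiB large pages and 4KiB small pages), so the
 * IOMMU core splits every request and only ever calls exynos_iommu_map() and
 * exynos_iommu_unmap() with one of these sizes.
 */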
static bool init_done;

static int __init exynos_iommu_init(void)
{
	int ret;

	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
				LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
	if (!lv2table_kmem_cache) {
		pr_err("%s: Failed to create kmem cache\n", __func__);
		return -ENOMEM;
	}

	ret = platform_driver_register(&exynos_sysmmu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		goto err_reg_driver;
	}

	zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
	if (zero_lv2_table == NULL) {
		pr_err("%s: Failed to allocate zero level2 page table\n",
			__func__);
		ret = -ENOMEM;
		goto err_zero_lv2;
	}

	ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
	if (ret) {
		pr_err("%s: Failed to register exynos-iommu driver.\n",
								__func__);
		goto err_set_iommu;
	}

	init_done = true;

	return 0;
err_set_iommu:
	kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
	platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver:
	kmem_cache_destroy(lv2table_kmem_cache);
	return ret;
}
static int __init exynos_iommu_of_setup(struct device_node *np)
{
	struct platform_device *pdev;

	if (!init_done)
		exynos_iommu_init();

	pdev = of_platform_device_create(np, NULL, platform_bus_type.dev_root);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	of_iommu_set_ops(np, &exynos_iommu_ops);
	return 0;
}

IOMMU_OF_DECLARE(exynos_iommu_of, "samsung,exynos-sysmmu",
		 exynos_iommu_of_setup);