// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2011,2016 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/interrupt.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

#define lv1ent_fault(sent)	((*(sent) == ZERO_LV2LINK) || \
				 ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_zero(sent)	(*(sent) == ZERO_LV2LINK)
#define lv1ent_page_zero(sent)	((*(sent) & 3) == 1)
#define lv1ent_page(sent)	((*(sent) != ZERO_LV2LINK) && \
				 ((*(sent) & 3) == 1))

#define lv1ent_section(sent)	((*(sent) & 3) == 2)

#define lv2ent_fault(pent)	((*(pent) & 3) == 0)
#define lv2ent_small(pent)	((*(pent) & 2) == 2)
#define lv2ent_large(pent)	((*(pent) & 3) == 1)

/*
 * v1.x - v3.x SYSMMU supports 32bit physical and 32bit virtual address spaces.
 * v5.0 introduced support for 36bit physical address space by shifting
 * all page entry values by 4 bits.
 * All SYSMMU controllers in the system support the address spaces of the same
 * size, so PG_ENT_SHIFT can be initialized on first SYSMMU probe to proper
 * value (0 or 4).
 */
static short PG_ENT_SHIFT = -1;
#define SYSMMU_PG_ENT_SHIFT 0
#define SYSMMU_V5_PG_ENT_SHIFT 4

static const sysmmu_pte_t *LV1_PROT;
static const sysmmu_pte_t SYSMMU_LV1_PROT[] = {
	((0 << 15) | (0 << 10)), /* no access */
	((1 << 15) | (1 << 10)), /* IOMMU_READ only */
	((0 << 15) | (1 << 10)), /* IOMMU_WRITE not supported, use read/write */
	((0 << 15) | (1 << 10)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV1_PROT[] = {
	(0 << 4), /* no access */
	(1 << 4), /* IOMMU_READ only */
	(2 << 4), /* IOMMU_WRITE only */
	(3 << 4), /* IOMMU_READ | IOMMU_WRITE */
};

static const sysmmu_pte_t *LV2_PROT;
static const sysmmu_pte_t SYSMMU_LV2_PROT[] = {
	((0 << 9) | (0 << 4)), /* no access */
	((1 << 9) | (1 << 4)), /* IOMMU_READ only */
	((0 << 9) | (1 << 4)), /* IOMMU_WRITE not supported, use read/write */
	((0 << 9) | (1 << 4)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV2_PROT[] = {
	(0 << 2), /* no access */
	(1 << 2), /* IOMMU_READ only */
	(2 << 2), /* IOMMU_WRITE only */
	(3 << 2), /* IOMMU_READ | IOMMU_WRITE */
};

#define SYSMMU_SUPPORTED_PROT_BITS (IOMMU_READ | IOMMU_WRITE)

#define sect_to_phys(ent)	(((phys_addr_t) ent) << PG_ENT_SHIFT)
#define section_phys(sent)	(sect_to_phys(*(sent)) & SECT_MASK)
#define section_offs(iova)	(iova & (SECT_SIZE - 1))
#define lpage_phys(pent)	(sect_to_phys(*(pent)) & LPAGE_MASK)
#define lpage_offs(iova)	(iova & (LPAGE_SIZE - 1))
#define spage_phys(pent)	(sect_to_phys(*(pent)) & SPAGE_MASK)
#define spage_offs(iova)	(iova & (SPAGE_SIZE - 1))

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)

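/*
 * Summary of the layout implied by the defines above: the first-level table
 * has 4096 entries, each mapping a 1MiB section or linking to a second-level
 * table; a second-level table has 256 entries, each mapping a 4KiB page, and
 * a 64KiB "large" page occupies 16 consecutive second-level slots. The low
 * two bits of an entry encode its type, as tested by the lv1ent_*()/lv2ent_*()
 * macros: for lv1 entries 1 is a page-table link and 2 is a section, for lv2
 * entries 1 is a large page, 2 is a small page and 0 is a fault.
 */
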
static u32 lv1ent_offset(sysmmu_iova_t iova)
{
	return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}

#define LV1TABLE_SIZE (NUM_LV1ENTRIES * sizeof(sysmmu_pte_t))
#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
#define lv2table_base(sent) (sect_to_phys(*(sent) & 0xFFFFFFC0))

#define mk_lv1ent_sect(pa, prot) ((pa >> PG_ENT_SHIFT) | LV1_PROT[prot] | 2)
#define mk_lv1ent_page(pa)	 ((pa >> PG_ENT_SHIFT) | 1)
#define mk_lv2ent_lpage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 1)
#define mk_lv2ent_spage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 2)

#define CTRL_ENABLE	0x5
#define CTRL_BLOCK	0x7
#define CTRL_DISABLE	0x0

#define CFG_LRU		0x1
#define CFG_EAP		(1 << 2)
#define CFG_QOS(n)	((n & 0xF) << 7)
#define CFG_ACGEN	(1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL	(1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE	(1 << 20) /* System MMU 3.2+ only */

/* common registers */
#define REG_MMU_CTRL		0x000
#define REG_MMU_CFG		0x004
#define REG_MMU_STATUS		0x008
#define REG_MMU_VERSION		0x034

#define MMU_MAJ_VER(val)	((val) >> 7)
#define MMU_MIN_VER(val)	((val) & 0x7F)
#define MMU_RAW_VER(reg)	(((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min)	((((maj) & 0xF) << 7) | ((min) & 0x7F))

/* v1.x - v3.x registers */
#define REG_MMU_FLUSH		0x00C
#define REG_MMU_FLUSH_ENTRY	0x010
#define REG_PT_BASE_ADDR	0x014
#define REG_INT_STATUS		0x018
#define REG_INT_CLEAR		0x01C

#define REG_PAGE_FAULT_ADDR	0x024
#define REG_AW_FAULT_ADDR	0x028
#define REG_AR_FAULT_ADDR	0x02C
#define REG_DEFAULT_SLAVE_ADDR	0x030

/* v5.x registers */
#define REG_V5_PT_BASE_PFN	0x00C
#define REG_V5_MMU_FLUSH_ALL	0x010
#define REG_V5_MMU_FLUSH_ENTRY	0x014
#define REG_V5_MMU_FLUSH_RANGE	0x018
#define REG_V5_MMU_FLUSH_START	0x020
#define REG_V5_MMU_FLUSH_END	0x024
#define REG_V5_INT_STATUS	0x060
#define REG_V5_INT_CLEAR	0x064
#define REG_V5_FAULT_AR_VA	0x070
#define REG_V5_FAULT_AW_VA	0x080

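/*
 * dma_dev below is the SYSMMU device used for dma-mapping the page tables
 * (set to the first probed controller in exynos_sysmmu_probe()).
 * zero_lv2_table is a shared, always-empty second-level table that unused
 * first-level slots are pointed at via ZERO_LV2LINK, which is part of the
 * workaround for the System MMU v3.3 FLPD caching issue described further
 * below.
 */
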
#define has_sysmmu(dev)		(dev_iommu_priv_get(dev) != NULL)

static struct device *dma_dev;
static struct kmem_cache *lv2table_kmem_cache;
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))

static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
	return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
	return (sysmmu_pte_t *)phys_to_virt(
				lv2table_base(sent)) + lv2ent_offset(iova);
}

/*
 * IOMMU fault information register
 */
struct sysmmu_fault_info {
	unsigned int bit;	/* bit number in STATUS register */
	unsigned short addr_reg; /* register to read VA fault address */
	const char *name;	/* human readable fault name */
	unsigned int type;	/* fault type for report_iommu_fault */
};

static const struct sysmmu_fault_info sysmmu_faults[] = {
	{ 0, REG_PAGE_FAULT_ADDR, "PAGE", IOMMU_FAULT_READ },
	{ 1, REG_AR_FAULT_ADDR, "AR MULTI-HIT", IOMMU_FAULT_READ },
	{ 2, REG_AW_FAULT_ADDR, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
	{ 3, REG_DEFAULT_SLAVE_ADDR, "BUS ERROR", IOMMU_FAULT_READ },
	{ 4, REG_AR_FAULT_ADDR, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
	{ 5, REG_AR_FAULT_ADDR, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
	{ 6, REG_AW_FAULT_ADDR, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
	{ 7, REG_AW_FAULT_ADDR, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
};

static const struct sysmmu_fault_info sysmmu_v5_faults[] = {
	{ 0, REG_V5_FAULT_AR_VA, "AR PTW", IOMMU_FAULT_READ },
	{ 1, REG_V5_FAULT_AR_VA, "AR PAGE", IOMMU_FAULT_READ },
	{ 2, REG_V5_FAULT_AR_VA, "AR MULTI-HIT", IOMMU_FAULT_READ },
	{ 3, REG_V5_FAULT_AR_VA, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
	{ 4, REG_V5_FAULT_AR_VA, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
	{ 16, REG_V5_FAULT_AW_VA, "AW PTW", IOMMU_FAULT_WRITE },
	{ 17, REG_V5_FAULT_AW_VA, "AW PAGE", IOMMU_FAULT_WRITE },
	{ 18, REG_V5_FAULT_AW_VA, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
	{ 19, REG_V5_FAULT_AW_VA, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
	{ 20, REG_V5_FAULT_AW_VA, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
};

/*
 * This structure is attached to dev->iommu->priv of the master device
 * when a device is added. It contains the list of SYSMMU controllers defined
 * by the device tree which are bound to the given master device, and is
 * usually referenced by the 'owner' pointer.
 */
struct exynos_iommu_owner {
	struct list_head controllers;	/* list of sysmmu_drvdata.owner_node */
	struct iommu_domain *domain;	/* domain this device is attached */
	struct mutex rpm_lock;		/* for runtime pm of all sysmmus */
};

/*
 * This structure is an exynos-specific generalization of struct iommu_domain.
 * It contains the list of SYSMMU controllers from all master devices that have
 * been attached to this domain, and the page tables of the IO address space
 * defined by it. It is usually referenced by the 'domain' pointer.
 */
struct exynos_iommu_domain {
	struct list_head clients; /* list of sysmmu_drvdata.domain_node */
	sysmmu_pte_t *pgtable;	/* lv1 page table, 16KB */
	short *lv2entcnt;	/* free lv2 entry counter for each section */
	spinlock_t lock;	/* lock for modifying list of clients */
	spinlock_t pgtablelock;	/* lock for modifying page table @ pgtable */
	struct iommu_domain domain; /* generic domain data structure */
};

/*
 * This structure holds all data of a single SYSMMU controller. This includes
 * hw resources like registers and clocks, pointers and list nodes to connect
 * it to all other structures, internal state and parameters read from the
 * device tree. It is usually referenced by the 'data' pointer.
 */
struct sysmmu_drvdata {
	struct device *sysmmu;		/* SYSMMU controller device */
	struct device *master;		/* master device (owner) */
	struct device_link *link;	/* runtime PM link to master */
	void __iomem *sfrbase;		/* our registers */
	struct clk *clk;		/* SYSMMU's clock */
	struct clk *aclk;		/* SYSMMU's aclk clock */
	struct clk *pclk;		/* SYSMMU's pclk clock */
	struct clk *clk_master;		/* master's device clock */
	spinlock_t lock;		/* lock for modifying state */
	bool active;			/* current status */
	struct exynos_iommu_domain *domain; /* domain we belong to */
	struct list_head domain_node;	/* node for domain clients list */
	struct list_head owner_node;	/* node for owner controllers list */
	phys_addr_t pgtable;		/* assigned page table structure */
	unsigned int version;		/* our version */

	struct iommu_device iommu;	/* IOMMU core handle */
};

static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct exynos_iommu_domain, domain);
}

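/*
 * The two helpers below toggle the SYSMMU between the "enabled" and "blocked"
 * states via REG_MMU_CTRL. Blocking stalls address translation so that the TLB
 * or the page table base can be changed safely (see the callers):
 * sysmmu_block() polls bit 0 of REG_MMU_STATUS and, if the blocked state is
 * not reached in time, re-enables the MMU and returns false.
 */
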
static void sysmmu_unblock(struct sysmmu_drvdata *data)
{
	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
}

static bool sysmmu_block(struct sysmmu_drvdata *data)
{
	int i = 120;

	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(readl(data->sfrbase + REG_MMU_STATUS) & 1))
		--i;

	if (!(readl(data->sfrbase + REG_MMU_STATUS) & 1)) {
		sysmmu_unblock(data);
		return false;
	}

	return true;
}

static void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *data)
{
	if (MMU_MAJ_VER(data->version) < 5)
		writel(0x1, data->sfrbase + REG_MMU_FLUSH);
	else
		writel(0x1, data->sfrbase + REG_V5_MMU_FLUSH_ALL);
}

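/*
 * Invalidate @num_inv TLB entries starting at @iova. Controllers older than
 * v5 only provide a single-entry flush register, so the range is flushed one
 * 4KiB page at a time; v5 controllers can flush either a single entry or a
 * whole [start, end] range with one command.
 */
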
static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					  sysmmu_iova_t iova, unsigned int num_inv)
{
	unsigned int i;

	if (MMU_MAJ_VER(data->version) < 5) {
		for (i = 0; i < num_inv; i++) {
			writel((iova & SPAGE_MASK) | 1,
			       data->sfrbase + REG_MMU_FLUSH_ENTRY);
			iova += SPAGE_SIZE;
		}
	} else {
		if (num_inv == 1) {
			writel((iova & SPAGE_MASK) | 1,
			       data->sfrbase + REG_V5_MMU_FLUSH_ENTRY);
		} else {
			writel((iova & SPAGE_MASK),
			       data->sfrbase + REG_V5_MMU_FLUSH_START);
			writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE,
			       data->sfrbase + REG_V5_MMU_FLUSH_END);
			writel(1, data->sfrbase + REG_V5_MMU_FLUSH_RANGE);
		}
	}
}

static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd)
{
	if (MMU_MAJ_VER(data->version) < 5)
		writel(pgd, data->sfrbase + REG_PT_BASE_ADDR);
	else
		writel(pgd >> PAGE_SHIFT,
		       data->sfrbase + REG_V5_PT_BASE_PFN);

	__sysmmu_tlb_invalidate(data);
}

static void __sysmmu_enable_clocks(struct sysmmu_drvdata *data)
{
	BUG_ON(clk_prepare_enable(data->clk_master));
	BUG_ON(clk_prepare_enable(data->clk));
	BUG_ON(clk_prepare_enable(data->pclk));
	BUG_ON(clk_prepare_enable(data->aclk));
}

static void __sysmmu_disable_clocks(struct sysmmu_drvdata *data)
{
	clk_disable_unprepare(data->aclk);
	clk_disable_unprepare(data->pclk);
	clk_disable_unprepare(data->clk);
	clk_disable_unprepare(data->clk_master);
}

static void __sysmmu_get_version(struct sysmmu_drvdata *data)
{
	u32 ver;

	__sysmmu_enable_clocks(data);

	ver = readl(data->sfrbase + REG_MMU_VERSION);

	/* controllers on some SoCs don't report proper version */
	if (ver == 0x80000001u)
		data->version = MAKE_MMU_VER(1, 0);
	else
		data->version = MMU_RAW_VER(ver);

	dev_dbg(data->sysmmu, "hardware version: %d.%d\n",
		MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version));

	__sysmmu_disable_clocks(data);
}

static void show_fault_information(struct sysmmu_drvdata *data,
				   const struct sysmmu_fault_info *finfo,
				   sysmmu_iova_t fault_addr)
{
	sysmmu_pte_t *ent;

	dev_err(data->sysmmu, "%s: %s FAULT occurred at %#x\n",
		dev_name(data->master), finfo->name, fault_addr);
	dev_dbg(data->sysmmu, "Page table base: %pa\n", &data->pgtable);
	ent = section_entry(phys_to_virt(data->pgtable), fault_addr);
	dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault_addr);
		dev_dbg(data->sysmmu, "\tLv2 entry: %#x\n", *ent);
	}
}

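/*
 * Fault interrupt handler: the fault bit is read from the (per-version)
 * interrupt status register, looked up in the matching fault table to find
 * the register holding the faulting VA, and then reported with
 * report_iommu_fault(). Only if a handler recovered the fault is the
 * interrupt cleared and the SYSMMU unblocked; otherwise the BUG_ON() fires.
 */
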
static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	/* SYSMMU is in blocked state when interrupt occurred. */
	struct sysmmu_drvdata *data = dev_id;
	const struct sysmmu_fault_info *finfo;
	unsigned int i, n, itype;
	sysmmu_iova_t fault_addr;
	unsigned short reg_status, reg_clear;
	int ret = -ENOSYS;

	WARN_ON(!data->active);

	if (MMU_MAJ_VER(data->version) < 5) {
		reg_status = REG_INT_STATUS;
		reg_clear = REG_INT_CLEAR;
		finfo = sysmmu_faults;
		n = ARRAY_SIZE(sysmmu_faults);
	} else {
		reg_status = REG_V5_INT_STATUS;
		reg_clear = REG_V5_INT_CLEAR;
		finfo = sysmmu_v5_faults;
		n = ARRAY_SIZE(sysmmu_v5_faults);
	}

	spin_lock(&data->lock);

	clk_enable(data->clk_master);

	itype = __ffs(readl(data->sfrbase + reg_status));
	for (i = 0; i < n; i++, finfo++)
		if (finfo->bit == itype)
			break;
	/* unknown/unsupported fault */
	BUG_ON(i == n);

	/* print debug message */
	fault_addr = readl(data->sfrbase + finfo->addr_reg);
	show_fault_information(data, finfo, fault_addr);

	if (data->domain)
		ret = report_iommu_fault(&data->domain->domain,
					 data->master, fault_addr, finfo->type);
	/* fault is not recovered by fault handler */
	BUG_ON(ret != 0);

	writel(1 << itype, data->sfrbase + reg_clear);

	sysmmu_unblock(data);

	clk_disable(data->clk_master);

	spin_unlock(&data->lock);

	return IRQ_HANDLED;
}

static void __sysmmu_disable(struct sysmmu_drvdata *data)
{
	unsigned long flags;

	clk_enable(data->clk_master);

	spin_lock_irqsave(&data->lock, flags);
	writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
	writel(0, data->sfrbase + REG_MMU_CFG);
	data->active = false;
	spin_unlock_irqrestore(&data->lock, flags);

	__sysmmu_disable_clocks(data);
}

static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
	unsigned int cfg;

	if (data->version <= MAKE_MMU_VER(3, 1))
		cfg = CFG_LRU | CFG_QOS(15);
	else if (data->version <= MAKE_MMU_VER(3, 2))
		cfg = CFG_LRU | CFG_QOS(15) | CFG_FLPDCACHE | CFG_SYSSEL;
	else
		cfg = CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN;

	cfg |= CFG_EAP; /* enable access protection bits check */

	writel(cfg, data->sfrbase + REG_MMU_CFG);
}

static void __sysmmu_enable(struct sysmmu_drvdata *data)
{
	unsigned long flags;

	__sysmmu_enable_clocks(data);

	spin_lock_irqsave(&data->lock, flags);
	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
	__sysmmu_init_config(data);
	__sysmmu_set_ptbase(data, data->pgtable);
	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
	data->active = true;
	spin_unlock_irqrestore(&data->lock, flags);

	/*
	 * SYSMMU driver keeps master's clock enabled only for the short
	 * time, while accessing the registers. For performing address
	 * translation during DMA transaction it relies on the client
	 * driver to enable it.
	 */
	clk_disable(data->clk_master);
}

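/*
 * Invalidate the first-level page table (FLPD) cache entry for @iova. Only
 * active controllers of version 3.3 or newer need this; on v5 controllers the
 * whole TLB is flushed instead, while older ones flush just the single entry.
 */
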
static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					    sysmmu_iova_t iova)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (data->active && data->version >= MAKE_MMU_VER(3, 3)) {
		clk_enable(data->clk_master);
		if (sysmmu_block(data)) {
			if (data->version >= MAKE_MMU_VER(5, 0))
				__sysmmu_tlb_invalidate(data);
			else
				__sysmmu_tlb_invalidate_entry(data, iova, 1);
			sysmmu_unblock(data);
		}
		clk_disable(data->clk_master);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					sysmmu_iova_t iova, size_t size)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (data->active) {
		unsigned int num_inv = 1;

		clk_enable(data->clk_master);

		/*
		 * L2TLB invalidation required
		 * 4KB page: 1 invalidation
		 * 64KB page: 16 invalidations
		 * 1MB page: 64 invalidations
		 * because it is set-associative TLB
		 * with 8-way and 64 sets.
		 * 1MB page can be cached in one of all sets.
		 * 64KB page can be one of 16 consecutive sets.
		 */
		if (MMU_MAJ_VER(data->version) == 2)
			num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);

		if (sysmmu_block(data)) {
			__sysmmu_tlb_invalidate_entry(data, iova, num_inv);
			sysmmu_unblock(data);
		}
		clk_disable(data->clk_master);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

static const struct iommu_ops exynos_iommu_ops;

static int exynos_sysmmu_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct device *dev = &pdev->dev;
	struct sysmmu_drvdata *data;
	struct resource *res;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->sfrbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->sfrbase))
		return PTR_ERR(data->sfrbase);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		return irq;

	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
			       dev_name(dev), data);
	if (ret) {
		dev_err(dev, "Unable to register handler of irq %d\n", irq);
		return ret;
	}

	data->clk = devm_clk_get(dev, "sysmmu");
	if (PTR_ERR(data->clk) == -ENOENT)
		data->clk = NULL;
	else if (IS_ERR(data->clk))
		return PTR_ERR(data->clk);

	data->aclk = devm_clk_get(dev, "aclk");
	if (PTR_ERR(data->aclk) == -ENOENT)
		data->aclk = NULL;
	else if (IS_ERR(data->aclk))
		return PTR_ERR(data->aclk);

	data->pclk = devm_clk_get(dev, "pclk");
	if (PTR_ERR(data->pclk) == -ENOENT)
		data->pclk = NULL;
	else if (IS_ERR(data->pclk))
		return PTR_ERR(data->pclk);

	if (!data->clk && (!data->aclk || !data->pclk)) {
		dev_err(dev, "Failed to get device clock(s)!\n");
		return -ENOSYS;
	}

	data->clk_master = devm_clk_get(dev, "master");
	if (PTR_ERR(data->clk_master) == -ENOENT)
		data->clk_master = NULL;
	else if (IS_ERR(data->clk_master))
		return PTR_ERR(data->clk_master);

	data->sysmmu = dev;
	spin_lock_init(&data->lock);

	ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
				     dev_name(data->sysmmu));
	if (ret)
		return ret;

	ret = iommu_device_register(&data->iommu, &exynos_iommu_ops, dev);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, data);

	__sysmmu_get_version(data);
	if (PG_ENT_SHIFT < 0) {
		if (MMU_MAJ_VER(data->version) < 5) {
			PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
			LV1_PROT = SYSMMU_LV1_PROT;
			LV2_PROT = SYSMMU_LV2_PROT;
		} else {
			PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT;
			LV1_PROT = SYSMMU_V5_LV1_PROT;
			LV2_PROT = SYSMMU_V5_LV2_PROT;
		}
	}

	/*
	 * use the first registered sysmmu device for performing
	 * dma mapping operations on iommu page tables (cpu cache flush)
	 */
	if (!dma_dev)
		dma_dev = &pdev->dev;

	pm_runtime_enable(dev);

	return 0;
}

static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
	struct device *master = data->master;

	if (master) {
		struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);

		mutex_lock(&owner->rpm_lock);
		if (data->domain) {
			dev_dbg(data->sysmmu, "saving state\n");
			__sysmmu_disable(data);
		}
		mutex_unlock(&owner->rpm_lock);
	}
	return 0;
}

static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
	struct device *master = data->master;

	if (master) {
		struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);

		mutex_lock(&owner->rpm_lock);
		if (data->domain) {
			dev_dbg(data->sysmmu, "restoring state\n");
			__sysmmu_enable(data);
		}
		mutex_unlock(&owner->rpm_lock);
	}
	return 0;
}

static const struct dev_pm_ops sysmmu_pm_ops = {
	SET_RUNTIME_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static const struct of_device_id sysmmu_of_match[] = {
	{ .compatible = "samsung,exynos-sysmmu", },
	{ },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
	.probe	= exynos_sysmmu_probe,
	.driver	= {
		.name		= "exynos-sysmmu",
		.of_match_table	= sysmmu_of_match,
		.pm		= &sysmmu_pm_ops,
		.suppress_bind_attrs = true,
	}
};

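/*
 * The page tables are mapped for the SYSMMU with dma_map_single(...,
 * DMA_TO_DEVICE), so every CPU update of a page table entry has to be wrapped
 * in dma_sync_single_for_cpu()/_for_device() to make it visible to the
 * hardware page table walker. exynos_iommu_set_pte() does that for a single
 * entry; lv2set_page() and exynos_iommu_unmap() do it for groups of entries.
 */
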
static inline void exynos_iommu_set_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
{
	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
				DMA_TO_DEVICE);
	*ent = cpu_to_le32(val);
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent),
				   DMA_TO_DEVICE);
}

static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
{
	struct exynos_iommu_domain *domain;
	dma_addr_t handle;
	int i;

	/* Check if correct PTE offsets are initialized */
	BUG_ON(PG_ENT_SHIFT < 0 || !dma_dev);

	if (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!domain->pgtable)
		goto err_pgtable;

	domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!domain->lv2entcnt)
		goto err_counter;

	/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
	for (i = 0; i < NUM_LV1ENTRIES; i++)
		domain->pgtable[i] = ZERO_LV2LINK;

	handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
				DMA_TO_DEVICE);
	/* For mapping page table entries we rely on dma == phys */
	BUG_ON(handle != virt_to_phys(domain->pgtable));
	if (dma_mapping_error(dma_dev, handle))
		goto err_lv2ent;

	spin_lock_init(&domain->lock);
	spin_lock_init(&domain->pgtablelock);
	INIT_LIST_HEAD(&domain->clients);

	domain->domain.geometry.aperture_start = 0;
	domain->domain.geometry.aperture_end   = ~0UL;
	domain->domain.geometry.force_aperture = true;

	return &domain->domain;

err_lv2ent:
	free_pages((unsigned long)domain->lv2entcnt, 1);
err_counter:
	free_pages((unsigned long)domain->pgtable, 2);
err_pgtable:
	kfree(domain);
	return NULL;
}

static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&domain->clients));

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		spin_lock(&data->lock);
		__sysmmu_disable(data);
		data->pgtable = 0;
		data->domain = NULL;
		list_del_init(&data->domain_node);
		spin_unlock(&data->lock);
	}

	spin_unlock_irqrestore(&domain->lock, flags);

	dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE,
			 DMA_TO_DEVICE);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(domain->pgtable + i)) {
			phys_addr_t base = lv2table_base(domain->pgtable + i);

			dma_unmap_single(dma_dev, base, LV2TABLE_SIZE,
					 DMA_TO_DEVICE);
			kmem_cache_free(lv2table_kmem_cache,
					phys_to_virt(base));
		}

	free_pages((unsigned long)domain->pgtable, 2);
	free_pages((unsigned long)domain->lv2entcnt, 1);
	kfree(domain);
}

static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
				       struct device *dev)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;

	if (!has_sysmmu(dev) || owner->domain != iommu_domain)
		return;

	mutex_lock(&owner->rpm_lock);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_noresume(data->sysmmu);
		if (pm_runtime_active(data->sysmmu))
			__sysmmu_disable(data);
		pm_runtime_put(data->sysmmu);
	}

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		spin_lock(&data->lock);
		data->pgtable = 0;
		data->domain = NULL;
		list_del_init(&data->domain_node);
		spin_unlock(&data->lock);
	}
	owner->domain = NULL;
	spin_unlock_irqrestore(&domain->lock, flags);

	mutex_unlock(&owner->rpm_lock);

	dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n", __func__,
		&pagetable);
}

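/*
 * Attach every SYSMMU controller listed in the owner structure of @dev to the
 * given domain: each controller is pointed at the domain's first-level table
 * and added to the domain's client list. Controllers whose device is currently
 * runtime-active are programmed immediately; the rest pick the state up on the
 * next runtime resume (see exynos_sysmmu_resume()).
 */
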
static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
				      struct device *dev)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data;
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	unsigned long flags;

	if (!has_sysmmu(dev))
		return -ENODEV;

	if (owner->domain)
		exynos_iommu_detach_device(owner->domain, dev);

	mutex_lock(&owner->rpm_lock);

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry(data, &owner->controllers, owner_node) {
		spin_lock(&data->lock);
		data->pgtable = pagetable;
		data->domain = domain;
		list_add_tail(&data->domain_node, &domain->clients);
		spin_unlock(&data->lock);
	}
	owner->domain = iommu_domain;
	spin_unlock_irqrestore(&domain->lock, flags);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_noresume(data->sysmmu);
		if (pm_runtime_active(data->sysmmu))
			__sysmmu_enable(data);
		pm_runtime_put(data->sysmmu);
	}

	mutex_unlock(&owner->rpm_lock);

	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa\n", __func__,
		&pagetable);

	return 0;
}

static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
				    sysmmu_pte_t *sent, sysmmu_iova_t iova,
				    short *pgcounter)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
		return ERR_PTR(-EADDRINUSE);
	}

	if (lv1ent_fault(sent)) {
		dma_addr_t handle;
		sysmmu_pte_t *pent;
		bool need_flush_flpd_cache = lv1ent_zero(sent);

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		BUG_ON((uintptr_t)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return ERR_PTR(-ENOMEM);

		exynos_iommu_set_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
		kmemleak_ignore(pent);
		*pgcounter = NUM_LV2ENTRIES;
		handle = dma_map_single(dma_dev, pent, LV2TABLE_SIZE,
					DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, handle)) {
			kmem_cache_free(lv2table_kmem_cache, pent);
			return ERR_PTR(-EADDRINUSE);
		}

		/*
		 * If pre-fetched SLPD is a faulty SLPD in zero_l2_table,
		 * FLPD cache may cache the address of zero_l2_table. This
		 * function replaces the zero_l2_table with new L2 page table
		 * to write valid mappings.
		 * Accessing the valid area may cause page fault since FLPD
		 * cache may still cache zero_l2_table for the valid area
		 * instead of new L2 page table that has the mapping
		 * information of the valid area.
		 * Thus any replacement of zero_l2_table with other valid L2
		 * page table must involve FLPD cache invalidation for System
		 * MMU v3.3.
		 * FLPD cache invalidation is performed with TLB invalidation
		 * by VPN without blocking. It is safe to invalidate TLB without
		 * blocking because the target address of TLB invalidation is
		 * not currently mapped.
		 */
		if (need_flush_flpd_cache) {
			struct sysmmu_drvdata *data;

			spin_lock(&domain->lock);
			list_for_each_entry(data, &domain->clients, domain_node)
				sysmmu_tlb_invalidate_flpdcache(data, iova);
			spin_unlock(&domain->lock);
		}
	}

	return page_entry(sent, iova);
}

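/*
 * Install a 1MiB section mapping at first-level entry @sent. An existing but
 * completely unused second-level table linked from this entry is freed first;
 * a second-level table that still holds mappings makes the request fail with
 * -EADDRINUSE.
 */
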
static int lv1set_section(struct exynos_iommu_domain *domain,
			  sysmmu_pte_t *sent, sysmmu_iova_t iova,
			  phys_addr_t paddr, int prot, short *pgcnt)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
			iova);
		return -EADDRINUSE;
	}

	if (lv1ent_page(sent)) {
		if (*pgcnt != NUM_LV2ENTRIES) {
			WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
				iova);
			return -EADDRINUSE;
		}

		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
		*pgcnt = 0;
	}

	exynos_iommu_set_pte(sent, mk_lv1ent_sect(paddr, prot));

	spin_lock(&domain->lock);
	if (lv1ent_page_zero(sent)) {
		struct sysmmu_drvdata *data;

		/*
		 * Flushing FLPD cache in System MMU v3.3 that may cache a FLPD
		 * entry by speculative prefetch of SLPD which has no mapping.
		 */
		list_for_each_entry(data, &domain->clients, domain_node)
			sysmmu_tlb_invalidate_flpdcache(data, iova);
	}
	spin_unlock(&domain->lock);

	return 0;
}

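/*
 * Fill second-level entries for a 4KiB small page or a 64KiB large page. A
 * large page is written as SPAGES_PER_LPAGE (16) identical consecutive
 * entries, so all of those slots must currently be fault entries; the update
 * is pushed to the SYSMMU with a single dma_sync over the whole group.
 */
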
static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
		       int prot, short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (WARN_ON(!lv2ent_fault(pent)))
			return -EADDRINUSE;

		exynos_iommu_set_pte(pent, mk_lv2ent_spage(paddr, prot));
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;
		dma_addr_t pent_base = virt_to_phys(pent);

		dma_sync_single_for_cpu(dma_dev, pent_base,
					sizeof(*pent) * SPAGES_PER_LPAGE,
					DMA_TO_DEVICE);
		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (WARN_ON(!lv2ent_fault(pent))) {
				if (i > 0)
					memset(pent - i, 0, sizeof(*pent) * i);
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr, prot);
		}
		dma_sync_single_for_device(dma_dev, pent_base,
					   sizeof(*pent) * SPAGES_PER_LPAGE,
					   DMA_TO_DEVICE);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}

/*
 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
 *
 * System MMU v3.x has advanced logic to improve address translation
 * performance with caching more page table entries by a page table walk.
 * However, the logic has a bug that while caching faulty page table entries,
 * System MMU reports page fault if the cached fault entry is hit even though
 * the fault entry is updated to a valid entry after the entry is cached.
 * To prevent caching faulty page table entries which may be updated to valid
 * entries later, the virtual memory manager should care about the workaround
 * for the problem. The following describes the workaround.
 *
 * Any two consecutive I/O virtual address regions must have a hole of 128KiB
 * at maximum to prevent misbehavior of System MMU 3.x (workaround for h/w bug).
 *
 * Precisely, any start address of I/O virtual region must be aligned with
 * the following sizes for System MMU v3.1 and v3.2.
 * System MMU v3.1: 128KiB
 * System MMU v3.2: 256KiB
 *
 * Because System MMU v3.3 caches page table entries more aggressively, it needs
 * more workarounds.
 * - Any two consecutive I/O virtual regions must have a hole of size larger
 *   than or equal to 128KiB.
 * - Start address of an I/O virtual region must be aligned by 128KiB.
 */
static int exynos_iommu_map(struct iommu_domain *iommu_domain,
			    unsigned long l_iova, phys_addr_t paddr, size_t size,
			    int prot, gfp_t gfp)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(domain->pgtable == NULL);
	prot &= SYSMMU_SUPPORTED_PROT_BITS;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (size == SECT_SIZE) {
		ret = lv1set_section(domain, entry, iova, paddr, prot,
				     &domain->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		sysmmu_pte_t *pent;

		pent = alloc_lv2entry(domain, entry, iova,
				      &domain->lv2entcnt[lv1ent_offset(iova)]);

		if (IS_ERR(pent))
			ret = PTR_ERR(pent);
		else
			ret = lv2set_page(pent, paddr, size, prot,
					  &domain->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret)
		pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
			__func__, ret, size, iova);

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return ret;
}

static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain,
					      sysmmu_iova_t iova, size_t size)
{
	struct sysmmu_drvdata *data;
	unsigned long flags;

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry(data, &domain->clients, domain_node)
		sysmmu_tlb_invalidate_entry(data, iova, size);

	spin_unlock_irqrestore(&domain->lock, flags);
}

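/*
 * Unmap one page (section, large or small) at @l_iova. A section is not
 * cleared to a plain fault entry but re-pointed at the shared zero_lv2_table
 * (ZERO_LV2LINK) as part of the System MMU v3.3 FLPD-cache workaround
 * described above; the unmapped range is then flushed from the TLBs of all
 * attached controllers.
 */
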
static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
				 unsigned long l_iova, size_t size,
				 struct iommu_iotlb_gather *gather)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	sysmmu_pte_t *ent;
	size_t err_pgsize;
	unsigned long flags;

	BUG_ON(domain->pgtable == NULL);

	spin_lock_irqsave(&domain->pgtablelock, flags);

	ent = section_entry(domain->pgtable, iova);

	if (lv1ent_section(ent)) {
		if (WARN_ON(size < SECT_SIZE)) {
			err_pgsize = SECT_SIZE;
			goto err;
		}

		/* workaround for h/w bug in System MMU v3.3 */
		exynos_iommu_set_pte(ent, ZERO_LV2LINK);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(sent) == true here */

	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		exynos_iommu_set_pte(ent, 0);
		size = SPAGE_SIZE;
		domain->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv2ent_large(ent) == true here */
	if (WARN_ON(size < LPAGE_SIZE)) {
		err_pgsize = LPAGE_SIZE;
		goto err;
	}

	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent),
				sizeof(*ent) * SPAGES_PER_LPAGE,
				DMA_TO_DEVICE);
	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent),
				   sizeof(*ent) * SPAGES_PER_LPAGE,
				   DMA_TO_DEVICE);
	size = LPAGE_SIZE;
	domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;

done:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	exynos_iommu_tlb_invalidate_entry(domain, iova, size);

	return size;

err:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
		__func__, size, iova, err_pgsize);

	return 0;
}

static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
					     dma_addr_t iova)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	unsigned long flags;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (lv1ent_section(entry)) {
		phys = section_phys(entry) + section_offs(iova);
	} else if (lv1ent_page(entry)) {
		entry = page_entry(entry, iova);

		if (lv2ent_large(entry))
			phys = lpage_phys(entry) + lpage_offs(iova);
		else if (lv2ent_small(entry))
			phys = spage_phys(entry) + spage_offs(iova);
	}

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return phys;
}

static struct iommu_device *exynos_iommu_probe_device(struct device *dev)
{
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data;

	if (!has_sysmmu(dev))
		return ERR_PTR(-ENODEV);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		/*
		 * SYSMMU will be runtime activated via device link
		 * (dependency) to its master device, so there are no
		 * direct calls to pm_runtime_get/put in this driver.
		 */
		data->link = device_link_add(dev, data->sysmmu,
					     DL_FLAG_STATELESS |
					     DL_FLAG_PM_RUNTIME);
	}

	/* There is always at least one entry, see exynos_iommu_of_xlate() */
	data = list_first_entry(&owner->controllers,
				struct sysmmu_drvdata, owner_node);

	return &data->iommu;
}

static void exynos_iommu_release_device(struct device *dev)
{
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data;

	if (!has_sysmmu(dev))
		return;

	if (owner->domain) {
		struct iommu_group *group = iommu_group_get(dev);

		if (group) {
			WARN_ON(owner->domain !=
				iommu_group_default_domain(group));
			exynos_iommu_detach_device(owner->domain, dev);
			iommu_group_put(group);
		}
	}

	list_for_each_entry(data, &owner->controllers, owner_node)
		device_link_del(data->link);
}

static int exynos_iommu_of_xlate(struct device *dev,
				 struct of_phandle_args *spec)
{
	struct platform_device *sysmmu = of_find_device_by_node(spec->np);
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data, *entry;

	if (!sysmmu)
		return -ENODEV;

	data = platform_get_drvdata(sysmmu);
	if (!data) {
		put_device(&sysmmu->dev);
		return -ENODEV;
	}

	if (!owner) {
		owner = kzalloc(sizeof(*owner), GFP_KERNEL);
		if (!owner) {
			put_device(&sysmmu->dev);
			return -ENOMEM;
		}

		INIT_LIST_HEAD(&owner->controllers);
		mutex_init(&owner->rpm_lock);
		dev_iommu_priv_set(dev, owner);
	}

	list_for_each_entry(entry, &owner->controllers, owner_node)
		if (entry == data)
			return 0;

	list_add_tail(&data->owner_node, &owner->controllers);
	data->master = dev;

	return 0;
}

static const struct iommu_ops exynos_iommu_ops = {
	.domain_alloc = exynos_iommu_domain_alloc,
	.domain_free = exynos_iommu_domain_free,
	.attach_dev = exynos_iommu_attach_device,
	.detach_dev = exynos_iommu_detach_device,
	.map = exynos_iommu_map,
	.unmap = exynos_iommu_unmap,
	.iova_to_phys = exynos_iommu_iova_to_phys,
	.device_group = generic_device_group,
	.probe_device = exynos_iommu_probe_device,
	.release_device = exynos_iommu_release_device,
	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
	.of_xlate = exynos_iommu_of_xlate,
};

static int __init exynos_iommu_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, sysmmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
				LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
	if (!lv2table_kmem_cache) {
		pr_err("%s: Failed to create kmem cache\n", __func__);
		return -ENOMEM;
	}

	ret = platform_driver_register(&exynos_sysmmu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		goto err_reg_driver;
	}

	zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
	if (zero_lv2_table == NULL) {
		pr_err("%s: Failed to allocate zero level2 page table\n",
			__func__);
		ret = -ENOMEM;
		goto err_zero_lv2;
	}

	ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
	if (ret) {
		pr_err("%s: Failed to register exynos-iommu driver.\n",
								__func__);
		goto err_set_iommu;
	}

	return 0;
err_set_iommu:
	kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
	platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver:
	kmem_cache_destroy(lv2table_kmem_cache);
	return ret;
}
core_initcall(exynos_iommu_init);