2019-05-29 07:12:40 -07:00
// SPDX-License-Identifier: GPL-2.0-only
2013-07-15 10:20:57 +05:30
/*
 * Copyright (C) 2013 Freescale Semiconductor, Inc.
 * Author: Varun Sethi <varun.sethi@freescale.com>
 */
# define pr_fmt(fmt) "fsl-pamu-domain: %s: " fmt, __func__
# include "fsl_pamu_domain.h"
2022-04-02 12:08:38 +02:00
# include <linux/platform_device.h>
2015-01-28 08:34:33 -06:00
# include <sysdev/fsl_pci.h>
2013-07-15 10:20:57 +05:30
/*
* Global spinlock that needs to be held while
* configuring PAMU .
*/
static DEFINE_SPINLOCK ( iommu_lock ) ;
static struct kmem_cache * fsl_pamu_domain_cache ;
static struct kmem_cache * iommu_devinfo_cache ;
static DEFINE_SPINLOCK ( device_domain_lock ) ;
2017-08-23 16:28:09 +02:00
struct iommu_device pamu_iommu ; /* IOMMU core code handle */
2015-03-26 13:43:18 +01:00
/* Map a generic iommu_domain back to the fsl_dma_domain embedding it. */
static struct fsl_dma_domain *to_fsl_dma_domain(struct iommu_domain *dom)
{
	struct fsl_dma_domain *fsl_domain;

	fsl_domain = container_of(dom, struct fsl_dma_domain, iommu_domain);
	return fsl_domain;
}
2013-07-15 10:20:57 +05:30
/*
 * Create the two slab caches used by this driver: one for domain
 * objects and one for per-device bookkeeping entries.
 *
 * Returns 0 on success or -ENOMEM if either cache cannot be created;
 * the first cache is destroyed again if the second one fails.
 */
static int __init iommu_init_mempool(void)
{
	fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
						  sizeof(struct fsl_dma_domain),
						  0, SLAB_HWCACHE_ALIGN, NULL);
	if (!fsl_pamu_domain_cache) {
		pr_debug("Couldn't create fsl iommu_domain cache\n");
		return -ENOMEM;
	}

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
						sizeof(struct device_domain_info),
						0, SLAB_HWCACHE_ALIGN, NULL);
	if (!iommu_devinfo_cache) {
		pr_debug("Couldn't create devinfo cache\n");
		kmem_cache_destroy(fsl_pamu_domain_cache);
		return -ENOMEM;
	}

	return 0;
}
static int update_liodn_stash ( int liodn , struct fsl_dma_domain * dma_domain ,
2015-01-28 08:34:33 -06:00
u32 val )
2013-07-15 10:20:57 +05:30
{
2021-04-15 16:44:42 +02:00
int ret = 0 ;
2013-07-15 10:20:57 +05:30
unsigned long flags ;
spin_lock_irqsave ( & iommu_lock , flags ) ;
2021-04-01 17:52:41 +02:00
ret = pamu_update_paace_stash ( liodn , val ) ;
if ( ret ) {
2021-04-15 16:44:42 +02:00
pr_debug ( " Failed to update SPAACE for liodn %d \n " , liodn ) ;
2013-07-15 10:20:57 +05:30
spin_unlock_irqrestore ( & iommu_lock , flags ) ;
2021-04-01 17:52:41 +02:00
return ret ;
2013-07-15 10:20:57 +05:30
}
spin_unlock_irqrestore ( & iommu_lock , flags ) ;
return ret ;
}
/* Set the geometry parameters for a LIODN */
2021-04-01 17:52:44 +02:00
static int pamu_set_liodn ( struct fsl_dma_domain * dma_domain , struct device * dev ,
int liodn )
2013-07-15 10:20:57 +05:30
{
u32 omi_index = ~ ( u32 ) 0 ;
unsigned long flags ;
2021-04-01 17:52:41 +02:00
int ret ;
2013-07-15 10:20:57 +05:30
/*
* Configure the omi_index at the geometry setup time .
* This is a static value which depends on the type of
* device and would not change thereafter .
*/
get_ome_index ( & omi_index , dev ) ;
spin_lock_irqsave ( & iommu_lock , flags ) ;
ret = pamu_disable_liodn ( liodn ) ;
2021-04-01 17:52:44 +02:00
if ( ret )
goto out_unlock ;
2021-04-01 17:52:49 +02:00
ret = pamu_config_ppaace ( liodn , omi_index , dma_domain - > stash_id , 0 ) ;
2021-04-01 17:52:44 +02:00
if ( ret )
goto out_unlock ;
2021-04-01 17:52:49 +02:00
ret = pamu_config_ppaace ( liodn , ~ ( u32 ) 0 , dma_domain - > stash_id ,
2021-04-01 17:52:44 +02:00
PAACE_AP_PERMS_QUERY | PAACE_AP_PERMS_UPDATE ) ;
out_unlock :
2013-07-15 10:20:57 +05:30
spin_unlock_irqrestore ( & iommu_lock , flags ) ;
if ( ret ) {
2021-04-01 17:52:41 +02:00
pr_debug ( " PAACE configuration failed for liodn %d \n " ,
liodn ) ;
2013-07-15 10:20:57 +05:30
}
return ret ;
}
2021-04-01 17:52:41 +02:00
/*
 * Drop one device/LIODN entry from its domain: unlink it from the
 * domain's device list, disable the LIODN in hardware, clear the
 * device's iommu private pointer and free the bookkeeping structure.
 *
 * Caller must hold the owning domain's domain_lock (see detach_device());
 * iommu_lock and device_domain_lock are taken here, in that order.
 */
static void remove_device_ref(struct device_domain_info *info)
{
	unsigned long flags;

	list_del(&info->link);
	spin_lock_irqsave(&iommu_lock, flags);
	pamu_disable_liodn(info->liodn);
	spin_unlock_irqrestore(&iommu_lock, flags);
	spin_lock_irqsave(&device_domain_lock, flags);
	dev_iommu_priv_set(info->dev, NULL);
	kmem_cache_free(iommu_devinfo_cache, info);
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
static void detach_device ( struct device * dev , struct fsl_dma_domain * dma_domain )
{
struct device_domain_info * info , * tmp ;
unsigned long flags ;
spin_lock_irqsave ( & dma_domain - > domain_lock , flags ) ;
/* Remove the device from the domain device list */
list_for_each_entry_safe ( info , tmp , & dma_domain - > devices , link ) {
if ( ! dev | | ( info - > dev = = dev ) )
2021-04-01 17:52:41 +02:00
remove_device_ref ( info ) ;
2013-07-15 10:20:57 +05:30
}
spin_unlock_irqrestore ( & dma_domain - > domain_lock , flags ) ;
}
static void attach_device ( struct fsl_dma_domain * dma_domain , int liodn , struct device * dev )
{
struct device_domain_info * info , * old_domain_info ;
unsigned long flags ;
spin_lock_irqsave ( & device_domain_lock , flags ) ;
/*
* Check here if the device is already attached to domain or not .
* If the device is already attached to a domain detach it .
*/
2020-06-25 15:08:30 +02:00
old_domain_info = dev_iommu_priv_get ( dev ) ;
2013-07-15 10:20:57 +05:30
if ( old_domain_info & & old_domain_info - > domain ! = dma_domain ) {
spin_unlock_irqrestore ( & device_domain_lock , flags ) ;
detach_device ( dev , old_domain_info - > domain ) ;
spin_lock_irqsave ( & device_domain_lock , flags ) ;
}
info = kmem_cache_zalloc ( iommu_devinfo_cache , GFP_ATOMIC ) ;
info - > dev = dev ;
info - > liodn = liodn ;
info - > domain = dma_domain ;
list_add ( & info - > link , & dma_domain - > devices ) ;
/*
* In case of devices with multiple LIODNs just store
* the info for the first LIODN as all
* LIODNs share the same domain
*/
2020-06-25 15:08:30 +02:00
if ( ! dev_iommu_priv_get ( dev ) )
dev_iommu_priv_set ( dev , info ) ;
2013-07-15 10:20:57 +05:30
spin_unlock_irqrestore ( & device_domain_lock , flags ) ;
}
static phys_addr_t fsl_pamu_iova_to_phys ( struct iommu_domain * domain ,
2015-01-28 08:34:33 -06:00
dma_addr_t iova )
2013-07-15 10:20:57 +05:30
{
2015-01-28 08:34:33 -06:00
if ( iova < domain - > geometry . aperture_start | |
iova > domain - > geometry . aperture_end )
2013-07-15 10:20:57 +05:30
return 0 ;
2021-04-01 17:52:42 +02:00
return iova ;
2013-07-15 10:20:57 +05:30
}
2022-08-15 16:26:49 +01:00
static bool fsl_pamu_capable ( struct device * dev , enum iommu_cap cap )
2013-07-15 10:20:57 +05:30
{
return cap = = IOMMU_CAP_CACHE_COHERENCY ;
}
2015-03-26 13:43:18 +01:00
/*
 * iommu_domain_ops.free: release a domain created by
 * fsl_pamu_domain_alloc().  Every attached device is detached first
 * (detach_device() treats a NULL dev as "all devices").
 */
static void fsl_pamu_domain_free(struct iommu_domain *domain)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);

	/* remove all the devices from the device list */
	detach_device(NULL, dma_domain);
	kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
}
2015-03-26 13:43:18 +01:00
static struct iommu_domain * fsl_pamu_domain_alloc ( unsigned type )
2013-07-15 10:20:57 +05:30
{
struct fsl_dma_domain * dma_domain ;
2023-09-13 10:43:38 -03:00
/*
* FIXME : This isn ' t creating an unmanaged domain since the
* default_domain_ops do not have any map / unmap function it doesn ' t meet
* the requirements for __IOMMU_DOMAIN_PAGING . The only purpose seems to
* allow drivers / soc / fsl / qbman / qman_portal . c to do
* fsl_pamu_configure_l1_stash ( )
*/
2015-03-26 13:43:18 +01:00
if ( type ! = IOMMU_DOMAIN_UNMANAGED )
return NULL ;
2021-04-01 17:52:40 +02:00
dma_domain = kmem_cache_zalloc ( fsl_pamu_domain_cache , GFP_KERNEL ) ;
if ( ! dma_domain )
2015-03-26 13:43:18 +01:00
return NULL ;
2021-04-01 17:52:40 +02:00
dma_domain - > stash_id = ~ ( u32 ) 0 ;
INIT_LIST_HEAD ( & dma_domain - > devices ) ;
spin_lock_init ( & dma_domain - > domain_lock ) ;
/* default geometry 64 GB i.e. maximum system address */
2015-03-26 13:43:18 +01:00
dma_domain - > iommu_domain . geometry . aperture_start = 0 ;
dma_domain - > iommu_domain . geometry . aperture_end = ( 1ULL < < 36 ) - 1 ;
dma_domain - > iommu_domain . geometry . force_aperture = true ;
2013-07-15 10:20:57 +05:30
2015-03-26 13:43:18 +01:00
return & dma_domain - > iommu_domain ;
2013-07-15 10:20:57 +05:30
}
/* Update stash destination for all LIODNs associated with the domain */
static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
{
	struct device_domain_info *info;

	/* Stop at the first LIODN that fails to update. */
	list_for_each_entry(info, &dma_domain->devices, link) {
		int err = update_liodn_stash(info->liodn, dma_domain, val);

		if (err)
			return err;
	}

	return 0;
}
/*
 * iommu_domain_ops.attach_dev: attach @dev to a PAMU domain by
 * programming and enabling every LIODN listed in the "fsl,liodn"
 * device-tree property of the device (or, for PCI devices, of their
 * host controller).
 *
 * Returns 0 on success, -ENODEV if the property is missing or contains
 * an out-of-range LIODN, or the first error from pamu_set_liodn() /
 * pamu_enable_liodn().  On a mid-loop failure, LIODNs handled earlier
 * remain attached and enabled.
 */
static int fsl_pamu_attach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	unsigned long flags;
	int len, ret = 0, i;
	const u32 *liodn;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Use LIODN of the PCI controller while attaching a
	 * PCI device.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * make dev point to pci controller device
		 * so we can get the LIODN programmed by
		 * u-boot.
		 */
		dev = pci_ctl->parent;
	}

	liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (!liodn) {
		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
		return -ENODEV;
	}

	/* domain_lock serialises against detach and stash updates. */
	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	for (i = 0; i < len / sizeof(u32); i++) {
		/* Ensure that LIODN value is valid */
		if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
			pr_debug("Invalid liodn %d, attach device failed for %pOF\n",
				 liodn[i], dev->of_node);
			ret = -ENODEV;
			break;
		}

		attach_device(dma_domain, liodn[i], dev);
		ret = pamu_set_liodn(dma_domain, dev, liodn[i]);
		if (ret)
			break;
		ret = pamu_enable_liodn(liodn[i]);
		if (ret)
			break;
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}
2023-09-13 10:43:38 -03:00
/*
 * FIXME: fsl/pamu is completely broken in terms of how it works with the iommu
 * API. Immediately after probe the HW is left in an IDENTITY translation and
 * the driver provides a non-working UNMANAGED domain that it can switch over
 * to. However it cannot switch back to an IDENTITY translation, instead it
 * switches to what looks like BLOCKING.
 */
/*
 * attach_dev for the platform (default) domain: attaching a device back
 * to the default domain detaches it from whatever UNMANAGED domain it
 * was in.  Always returns 0.
 */
static int fsl_pamu_platform_attach(struct iommu_domain *platform_domain,
				    struct device *dev)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct fsl_dma_domain *dma_domain;
	const u32 *prop;
	int len;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Hack to keep things working as they always have, only leaving an
	 * UNMANAGED domain makes it BLOCKING.
	 */
	if (domain == platform_domain || !domain ||
	    domain->type != IOMMU_DOMAIN_UNMANAGED)
		return 0;

	dma_domain = to_fsl_dma_domain(domain);

	/*
	 * Use LIODN of the PCI controller while detaching a
	 * PCI device.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * make dev point to pci controller device
		 * so we can get the LIODN programmed by
		 * u-boot.
		 */
		dev = pci_ctl->parent;
	}

	/* Only devices with an "fsl,liodn" property were ever attached. */
	prop = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (prop)
		detach_device(dev, dma_domain);
	else
		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);

	return 0;
}
2023-09-13 10:43:38 -03:00
/* Ops for the default (platform) domain: attach-to-default == detach. */
static struct iommu_domain_ops fsl_pamu_platform_ops = {
	.attach_dev = fsl_pamu_platform_attach,
};

/* Singleton default domain exposed via fsl_pamu_ops.default_domain. */
static struct iommu_domain fsl_pamu_platform_domain = {
	.type = IOMMU_DOMAIN_PLATFORM,
	.ops = &fsl_pamu_platform_ops,
};
2013-07-15 10:20:57 +05:30
/* Set the domain stash attribute */
2021-04-01 17:52:43 +02:00
int fsl_pamu_configure_l1_stash ( struct iommu_domain * domain , u32 cpu )
2013-07-15 10:20:57 +05:30
{
2021-04-01 17:52:43 +02:00
struct fsl_dma_domain * dma_domain = to_fsl_dma_domain ( domain ) ;
2013-07-15 10:20:57 +05:30
unsigned long flags ;
int ret ;
spin_lock_irqsave ( & dma_domain - > domain_lock , flags ) ;
2021-04-01 17:52:43 +02:00
dma_domain - > stash_id = get_stash_id ( PAMU_ATTR_CACHE_L1 , cpu ) ;
2013-07-15 10:20:57 +05:30
if ( dma_domain - > stash_id = = ~ ( u32 ) 0 ) {
pr_debug ( " Invalid stash attributes \n " ) ;
spin_unlock_irqrestore ( & dma_domain - > domain_lock , flags ) ;
return - EINVAL ;
}
ret = update_domain_stash ( dma_domain , dma_domain - > stash_id ) ;
spin_unlock_irqrestore ( & dma_domain - > domain_lock , flags ) ;
return ret ;
}
/*
 * Return true when the PCI controller supports endpoint partitioning,
 * i.e. per-function IOMMU groups (controller block revision >= 0x204).
 */
static bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
{
	u32 version;

	/* Check the PCI controller version number by reading BRR1 register */
	version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2));
	version &= PCI_FSL_BRR1_VER;
	/* If PCI controller version is >= 0x204 we can partition endpoints */
	return version >= 0x204;
}
2023-05-16 21:35:28 -03:00
static struct iommu_group * fsl_pamu_device_group ( struct device * dev )
2013-07-15 10:20:57 +05:30
{
struct iommu_group * group ;
2023-05-16 21:35:28 -03:00
struct pci_dev * pdev ;
2013-07-15 10:20:57 +05:30
2013-08-14 11:42:29 +02:00
/*
2023-05-16 21:35:28 -03:00
* For platform devices we allocate a separate group for each of the
* devices .
2013-07-15 10:20:57 +05:30
*/
2023-05-16 21:35:28 -03:00
if ( ! dev_is_pci ( dev ) )
return generic_device_group ( dev ) ;
2013-07-15 10:20:57 +05:30
/*
2023-05-16 21:35:28 -03:00
* We can partition PCIe devices so assign device group to the device
2013-07-15 10:20:57 +05:30
*/
2023-05-16 21:35:28 -03:00
pdev = to_pci_dev ( dev ) ;
if ( check_pci_ctl_endpt_part ( pci_bus_to_host ( pdev - > bus ) ) )
return pci_device_group ( & pdev - > dev ) ;
2013-07-15 10:20:57 +05:30
2023-05-16 21:35:28 -03:00
/*
* All devices connected to the controller will share the same device
* group .
*
* Due to ordering between fsl_pamu_init ( ) and fsl_pci_init ( ) it is
* guaranteed that the pci_ctl - > parent platform_device will have the
* iommu driver bound and will already have a group set . So we just
* re - use this group as the group for every device in the hose .
*/
group = iommu_group_get ( pci_bus_to_host ( pdev - > bus ) - > parent ) ;
if ( WARN_ON ( ! group ) )
return ERR_PTR ( - EINVAL ) ;
return group ;
2015-10-21 23:51:40 +02:00
}
2013-07-15 10:20:57 +05:30
2020-04-29 15:36:56 +02:00
static struct iommu_device * fsl_pamu_probe_device ( struct device * dev )
2015-10-21 23:51:40 +02:00
{
2023-05-16 21:35:27 -03:00
int len ;
/*
* uboot must fill the fsl , liodn for platform devices to be supported by
* the iommu .
*/
if ( ! dev_is_pci ( dev ) & &
! of_get_property ( dev - > of_node , " fsl,liodn " , & len ) )
return ERR_PTR ( - ENODEV ) ;
2020-04-29 15:36:56 +02:00
return & pamu_iommu ;
2013-07-15 10:20:57 +05:30
}
2014-06-27 09:03:12 +02:00
/* Driver entry points registered with the IOMMU core in pamu_domain_init(). */
static const struct iommu_ops fsl_pamu_ops = {
	.default_domain = &fsl_pamu_platform_domain,
	.capable = fsl_pamu_capable,
	.domain_alloc = fsl_pamu_domain_alloc,
	.probe_device = fsl_pamu_probe_device,
	.device_group = fsl_pamu_device_group,
	/* Note: no map/unmap — see the FIXME in fsl_pamu_domain_alloc(). */
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = fsl_pamu_attach_device,
		.iova_to_phys = fsl_pamu_iova_to_phys,
		.free = fsl_pamu_domain_free,
	}
};
2015-01-28 08:34:33 -06:00
/*
 * One-time driver setup: create the slab caches, then register the PAMU
 * with the IOMMU core (sysfs entry plus device registration).
 *
 * Fix vs. original: the slab caches created by iommu_init_mempool() were
 * leaked when iommu_device_sysfs_add() or iommu_device_register()
 * failed; both caches are now destroyed on every error path.
 */
int __init pamu_domain_init(void)
{
	int ret;

	ret = iommu_init_mempool();
	if (ret)
		return ret;

	ret = iommu_device_sysfs_add(&pamu_iommu, NULL, NULL, "iommu0");
	if (ret)
		goto err_free_caches;

	ret = iommu_device_register(&pamu_iommu, &fsl_pamu_ops, NULL);
	if (ret) {
		iommu_device_sysfs_remove(&pamu_iommu);
		pr_err("Can't register iommu device\n");
		goto err_free_caches;
	}

	return 0;

err_free_caches:
	kmem_cache_destroy(iommu_devinfo_cache);
	kmem_cache_destroy(fsl_pamu_domain_cache);
	return ret;
}