// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016, Semihalf
 * Author: Tomasz Nowicki <tn@semihalf.com>
 *
 * This file implements early detection/parsing of I/O mapping
 * reported to OS through firmware via I/O Remapping Table (IORT)
 * IORT document number: ARM DEN 0049A
 */

#define pr_fmt(fmt)	"ACPI: IORT: " fmt

#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dma-map-ops.h>

#define IORT_TYPE_MASK(type)	(1 << (type))
#define IORT_MSI_TYPE		(1 << ACPI_IORT_NODE_ITS_GROUP)
#define IORT_IOMMU_TYPE		((1 << ACPI_IORT_NODE_SMMU) |	\
				(1 << ACPI_IORT_NODE_SMMU_V3))

struct iort_its_msi_chip {
	struct list_head	list;
	struct fwnode_handle	*fw_node;
	phys_addr_t		base_addr;
	u32			translation_id;
};

struct iort_fwnode {
	struct list_head list;
	struct acpi_iort_node *iort_node;
	struct fwnode_handle *fwnode;
};
static LIST_HEAD(iort_fwnode_list);
static DEFINE_SPINLOCK(iort_fwnode_lock);

/**
 * iort_set_fwnode() - Create iort_fwnode and use it to register
 *		       iommu data in the iort_fwnode_list
 *
 * @iort_node: IORT table node associated with the IOMMU
 * @fwnode: fwnode associated with the IORT node
 *
 * Returns: 0 on success
 *          <0 on failure
 */
static inline int iort_set_fwnode(struct acpi_iort_node *iort_node,
				  struct fwnode_handle *fwnode)
{
	struct iort_fwnode *np;

	np = kzalloc(sizeof(struct iort_fwnode), GFP_ATOMIC);
	if (WARN_ON(!np))
		return -ENOMEM;

	INIT_LIST_HEAD(&np->list);
	np->iort_node = iort_node;
	np->fwnode = fwnode;

	spin_lock(&iort_fwnode_lock);
	list_add_tail(&np->list, &iort_fwnode_list);
	spin_unlock(&iort_fwnode_lock);

	return 0;
}

/**
 * iort_get_fwnode() - Retrieve fwnode associated with an IORT node
 *
 * @node: IORT table node to be looked-up
 *
 * Returns: fwnode_handle pointer on success, NULL on failure
 */
static inline struct fwnode_handle *iort_get_fwnode(
			struct acpi_iort_node *node)
{
	struct iort_fwnode *curr;
	struct fwnode_handle *fwnode = NULL;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry(curr, &iort_fwnode_list, list) {
		if (curr->iort_node == node) {
			fwnode = curr->fwnode;
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);

	return fwnode;
}

/**
 * iort_delete_fwnode() - Delete fwnode associated with an IORT node
 *
 * @node: IORT table node associated with fwnode to delete
 */
static inline void iort_delete_fwnode(struct acpi_iort_node *node)
{
	struct iort_fwnode *curr, *tmp;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry_safe(curr, tmp, &iort_fwnode_list, list) {
		if (curr->iort_node == node) {
			list_del(&curr->list);
			kfree(curr);
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);
}

/**
 * iort_get_iort_node() - Retrieve iort_node associated with an fwnode
 *
 * @fwnode: fwnode associated with device to be looked-up
 *
 * Returns: iort_node pointer on success, NULL on failure
 */
static inline struct acpi_iort_node *iort_get_iort_node(
			struct fwnode_handle *fwnode)
{
	struct iort_fwnode *curr;
	struct acpi_iort_node *iort_node = NULL;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry(curr, &iort_fwnode_list, list) {
		if (curr->fwnode == fwnode) {
			iort_node = curr->iort_node;
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);

	return iort_node;
}

typedef acpi_status (*iort_find_node_callback)
	(struct acpi_iort_node *node, void *context);

/* Root pointer to the mapped IORT table */
static struct acpi_table_header *iort_table;

static LIST_HEAD(iort_msi_chip_list);
static DEFINE_SPINLOCK(iort_msi_chip_lock);

/**
 * iort_register_domain_token() - register domain token along with related
 * ITS ID and base address to the list so that it can be retrieved later on.
 * @trans_id: ITS ID.
 * @base: ITS base address.
 * @fw_node: Domain token.
 *
 * Returns: 0 on success, -ENOMEM if no memory when allocating list element
 */
int iort_register_domain_token(int trans_id, phys_addr_t base,
			       struct fwnode_handle *fw_node)
{
	struct iort_its_msi_chip *its_msi_chip;

	its_msi_chip = kzalloc(sizeof(*its_msi_chip), GFP_KERNEL);
	if (!its_msi_chip)
		return -ENOMEM;

	its_msi_chip->fw_node = fw_node;
	its_msi_chip->translation_id = trans_id;
	its_msi_chip->base_addr = base;

	spin_lock(&iort_msi_chip_lock);
	list_add(&its_msi_chip->list, &iort_msi_chip_list);
	spin_unlock(&iort_msi_chip_lock);

	return 0;
}
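
/*
 * Illustrative usage sketch: an ACPI-probed ITS driver would be
 * expected to register its domain token roughly as follows (the
 * variable names here are hypothetical):
 *
 *	err = iort_register_domain_token(its_entry->translation_id,
 *					 its_base_res.start, dom_handle);
 *	if (err)
 *		pr_warn("Could not register ITS domain token\n");
 */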

/**
 * iort_deregister_domain_token() - Deregister domain token based on ITS ID
 * @trans_id: ITS ID.
 *
 * Returns: none.
 */
void iort_deregister_domain_token(int trans_id)
{
	struct iort_its_msi_chip *its_msi_chip, *t;

	spin_lock(&iort_msi_chip_lock);
	list_for_each_entry_safe(its_msi_chip, t, &iort_msi_chip_list, list) {
		if (its_msi_chip->translation_id == trans_id) {
			list_del(&its_msi_chip->list);
			kfree(its_msi_chip);
			break;
		}
	}
	spin_unlock(&iort_msi_chip_lock);
}

/**
 * iort_find_domain_token() - Find domain token based on given ITS ID
 * @trans_id: ITS ID.
 *
 * Returns: domain token if found on the list, NULL otherwise
 */
struct fwnode_handle *iort_find_domain_token(int trans_id)
{
	struct fwnode_handle *fw_node = NULL;
	struct iort_its_msi_chip *its_msi_chip;

	spin_lock(&iort_msi_chip_lock);
	list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
		if (its_msi_chip->translation_id == trans_id) {
			fw_node = its_msi_chip->fw_node;
			break;
		}
	}
	spin_unlock(&iort_msi_chip_lock);

	return fw_node;
}
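
/*
 * Walk the mapped IORT table linearly, invoking @callback on every node
 * of the requested @type until the callback reports a match; bail out
 * if a node pointer would run past the end of the table.
 */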
static struct acpi_iort_node *iort_scan_node(enum acpi_iort_node_type type,
					     iort_find_node_callback callback,
					     void *context)
{
	struct acpi_iort_node *iort_node, *iort_end;
	struct acpi_table_iort *iort;
	int i;

	if (!iort_table)
		return NULL;

	/* Get the first IORT node */
	iort = (struct acpi_table_iort *)iort_table;
	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				 iort->node_offset);
	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				iort_table->length);

	for (i = 0; i < iort->node_count; i++) {
		if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
			       "IORT node pointer overflows, bad table!\n"))
			return NULL;

		if (iort_node->type == type &&
		    ACPI_SUCCESS(callback(iort_node, context)))
			return iort_node;

		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
					 iort_node->length);
	}

	return NULL;
}
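
/*
 * iort_scan_node() callback: match an IORT node against a device. Named
 * components are matched by the full ACPI pathname of the device's ACPI
 * companion, PCI root complexes by the bus's PCI segment (domain) number.
 */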
static acpi_status iort_match_node_callback(struct acpi_iort_node *node,
					    void *context)
{
	struct device *dev = context;
	acpi_status status = AE_NOT_FOUND;

	if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) {
		struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_device *adev;
		struct acpi_iort_named_component *ncomp;
		struct device *nc_dev = dev;

		/*
		 * Walk the device tree to find a device with an
		 * ACPI companion; there is no point in scanning
		 * IORT for a device matching a named component if
		 * the device does not have an ACPI companion to
		 * start with.
		 */
		do {
			adev = ACPI_COMPANION(nc_dev);
			if (adev)
				break;

			nc_dev = nc_dev->parent;
		} while (nc_dev);

		if (!adev)
			goto out;

		status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
		if (ACPI_FAILURE(status)) {
			dev_warn(nc_dev, "Can't get device full path name\n");
			goto out;
		}

		ncomp = (struct acpi_iort_named_component *)node->node_data;
		status = !strcmp(ncomp->device_name, buf.pointer) ?
							AE_OK : AE_NOT_FOUND;
		acpi_os_free(buf.pointer);
	} else if (node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
		struct acpi_iort_root_complex *pci_rc;
		struct pci_bus *bus;

		bus = to_pci_bus(dev);
		pci_rc = (struct acpi_iort_root_complex *)node->node_data;

		/*
		 * It is assumed that PCI segment numbers map one-to-one
		 * with root complexes. Each segment number can represent
		 * only one root complex.
		 */
		status = pci_rc->pci_segment_number == pci_domain_nr(bus) ?
							AE_OK : AE_NOT_FOUND;
	}
out:
	return status;
}
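
/*
 * Translate @rid_in through a single ID mapping entry into @rid_out.
 * Returns 0 on success, -ENXIO if the input ID cannot be mapped, and
 * -EAGAIN for the ambiguous end-of-range case explained in the comments
 * below.
 */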
static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
		       u32 *rid_out, bool check_overlap)
{
	/* Single mapping does not care for input id */
	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
		if (type == ACPI_IORT_NODE_NAMED_COMPONENT ||
		    type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
			*rid_out = map->output_base;
			return 0;
		}

		pr_warn(FW_BUG "[map %p] SINGLE MAPPING flag not allowed for node type %d, skipping ID map\n",
			map, type);
		return -ENXIO;
	}

	if (rid_in < map->input_base ||
	    (rid_in > map->input_base + map->id_count))
		return -ENXIO;

	if (check_overlap) {
		/*
		 * We already found a mapping for this input ID at the end of
		 * another region. If it coincides with the start of this
		 * region, we assume the prior match was due to the off-by-1
		 * issue mentioned below, and allow it to be superseded.
		 * Otherwise, things are *really* broken, and we just disregard
		 * duplicate matches entirely to retain compatibility.
		 */
		pr_err(FW_BUG "[map %p] conflicting mapping for input ID 0x%x\n",
		       map, rid_in);
		if (rid_in != map->input_base)
			return -ENXIO;

		pr_err(FW_BUG "applying workaround.\n");
	}

	*rid_out = map->output_base + (rid_in - map->input_base);

	/*
	 * Due to confusion regarding the meaning of the id_count field (which
	 * carries the number of IDs *minus 1*), we may have to disregard this
	 * match if it is at the end of the range, and overlaps with the start
	 * of another one.
	 */
	if (map->id_count > 0 && rid_in == map->input_base + map->id_count)
		return -EAGAIN;
	return 0;
}
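
/*
 * Return the parent node referenced by the @index-th mapping entry of
 * @node, storing the mapping's output base in @id_out; only valid for
 * single mappings of node types that carry a device ID.
 */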
static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
					       u32 *id_out, int index)
{
	struct acpi_iort_node *parent;
	struct acpi_iort_id_mapping *map;

	if (!node->mapping_offset || !node->mapping_count ||
				     index >= node->mapping_count)
		return NULL;

	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
			   node->mapping_offset + index * sizeof(*map));

	/* Firmware bug! */
	if (!map->output_reference) {
		pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
		       node, node->type);
		return NULL;
	}

	parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
			      map->output_reference);

	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
		if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
		    node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX ||
		    node->type == ACPI_IORT_NODE_SMMU_V3 ||
		    node->type == ACPI_IORT_NODE_PMCG) {
			*id_out = map->output_base;
			return parent;
		}
	}

	return NULL;
}
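
/*
 * Return the index of the special ID mapping of @node (the SMMUv3
 * device ID mapping or the PMCG overflow interrupt mapping), or
 * -EINVAL if the node has no such mapping.
 */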
static int iort_get_id_mapping_index(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	struct acpi_iort_pmcg *pmcg;

	switch (node->type) {
	case ACPI_IORT_NODE_SMMU_V3:
		/*
		 * SMMUv3 dev ID mapping index was introduced in revision 1
		 * table, not available in revision 0
		 */
		if (node->revision < 1)
			return -EINVAL;

		smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
		/*
		 * ID mapping index is only ignored if all interrupts are
		 * GSIV based
		 */
		if (smmu->event_gsiv && smmu->pri_gsiv && smmu->gerr_gsiv
		    && smmu->sync_gsiv)
			return -EINVAL;

		if (smmu->id_mapping_index >= node->mapping_count) {
			pr_err(FW_BUG "[node %p type %d] ID mapping index overflows valid mappings\n",
			       node, node->type);
			return -EINVAL;
		}

		return smmu->id_mapping_index;
	case ACPI_IORT_NODE_PMCG:
		pmcg = (struct acpi_iort_pmcg *)node->node_data;
		if (pmcg->overflow_gsiv || node->mapping_count == 0)
			return -EINVAL;

		return 0;
	default:
		return -EINVAL;
	}
}
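
/*
 * Walk up the ID mapping tree from @node, translating @id_in at each
 * level, until a node whose type is set in @type_mask is found; the
 * translated ID is returned through @id_out.
 */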
static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node,
					       u32 id_in, u32 *id_out,
					       u8 type_mask)
{
	u32 id = id_in;

	/* Parse the ID mapping tree to find specified node type */
	while (node) {
		struct acpi_iort_id_mapping *map;
		int i, index, rc = 0;
		u32 out_ref = 0, map_id = id;

		if (IORT_TYPE_MASK(node->type) & type_mask) {
			if (id_out)
				*id_out = id;
			return node;
		}

		if (!node->mapping_offset || !node->mapping_count)
			goto fail_map;

		map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
				   node->mapping_offset);

		/* Firmware bug! */
		if (!map->output_reference) {
			pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
			       node, node->type);
			goto fail_map;
		}

		/*
		 * Get the special ID mapping index (if any) and skip its
		 * associated ID map to prevent erroneous multi-stage
		 * IORT ID translations.
		 */
		index = iort_get_id_mapping_index(node);

		/* Do the ID translation */
		for (i = 0; i < node->mapping_count; i++, map++) {
			/* if it is special mapping index, skip it */
			if (i == index)
				continue;

			rc = iort_id_map(map, node->type, map_id, &id, out_ref);
			if (!rc)
				break;
			if (rc == -EAGAIN)
				out_ref = map->output_reference;
		}

		if (i == node->mapping_count && !out_ref)
			goto fail_map;

		node = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				    rc ? out_ref : map->output_reference);
	}

fail_map:
	/* Map input ID to output ID unchanged on mapping failure */
	if (id_out)
		*id_out = id_in;

	return NULL;
}

static struct acpi_iort_node *iort_node_map_platform_id(
		struct acpi_iort_node *node, u32 *id_out, u8 type_mask,
		int index)
{
	struct acpi_iort_node *parent;
	u32 id;

	/* step 1: retrieve the initial dev id */
	parent = iort_node_get_id(node, &id, index);
	if (!parent)
		return NULL;

	/*
	 * optional step 2: map the initial dev id if its parent is not
	 * the target type we want, map it again for the use cases such
	 * as NC (named component) -> SMMU -> ITS. If the type is matched,
	 * return the initial dev id and its parent pointer directly.
	 */
	if (!(IORT_TYPE_MASK(parent->type) & type_mask))
		parent = iort_node_map_id(parent, id, id_out, type_mask);
	else
		if (id_out)
			*id_out = id;

	return parent;
}
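
/*
 * Find the IORT node describing @dev: PCI devices are looked up through
 * their root complex node, platform devices through the cached fwnode
 * list or, failing that, through a named component scan.
 */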
static struct acpi_iort_node *iort_find_dev_node(struct device *dev)
{
	struct pci_bus *pbus;

	if (!dev_is_pci(dev)) {
		struct acpi_iort_node *node;
		/*
		 * scan iort_fwnode_list to see if it's an iort platform
		 * device (such as SMMU, PMCG), its iort node already cached
		 * and associated with fwnode when iort platform devices
		 * were initialized.
		 */
		node = iort_get_iort_node(dev->fwnode);
		if (node)
			return node;
		/*
		 * if not, then it should be a platform device defined in
		 * DSDT/SSDT (with Named Component node in IORT)
		 */
		return iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
				      iort_match_node_callback, dev);
	}

	pbus = to_pci_dev(dev)->bus;

	return iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
			      iort_match_node_callback, &pbus->dev);
}

/**
 * iort_msi_map_id() - Map a MSI input ID for a device
 * @dev: The device for which the mapping is to be done.
 * @input_id: The device input ID.
 *
 * Returns: mapped MSI ID on success, input ID otherwise
 */
u32 iort_msi_map_id(struct device *dev, u32 input_id)
{
	struct acpi_iort_node *node;
	u32 dev_id;

	node = iort_find_dev_node(dev);
	if (!node)
		return input_id;

	iort_node_map_id(node, input_id, &dev_id, IORT_MSI_TYPE);
	return dev_id;
}

/**
 * iort_pmsi_get_dev_id() - Get the device id for a device
 * @dev: The device for which the mapping is to be done.
 * @dev_id: The device ID found.
 *
 * Returns: 0 if a dev id was successfully found, -ENODEV on error
 */
int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
{
	int i, index;
	struct acpi_iort_node *node;

	node = iort_find_dev_node(dev);
	if (!node)
		return -ENODEV;

	index = iort_get_id_mapping_index(node);
	/* if there is a valid index, go get the dev_id directly */
	if (index >= 0) {
		if (iort_node_get_id(node, dev_id, index))
			return 0;
	} else {
		for (i = 0; i < node->mapping_count; i++) {
			if (iort_node_map_platform_id(node, dev_id,
						      IORT_MSI_TYPE, i))
				return 0;
		}
	}

	return -ENODEV;
}
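
/*
 * Look up the physical base address registered for ITS @its_id by
 * iort_register_domain_token().
 */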
static int __maybe_unused iort_find_its_base(u32 its_id, phys_addr_t *base)
{
	struct iort_its_msi_chip *its_msi_chip;
	int ret = -ENODEV;

	spin_lock(&iort_msi_chip_lock);
	list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
		if (its_msi_chip->translation_id == its_id) {
			*base = its_msi_chip->base_addr;
			ret = 0;
			break;
		}
	}
	spin_unlock(&iort_msi_chip_lock);

	return ret;
}

/**
 * iort_dev_find_its_id() - Find the ITS identifier for a device
 * @dev: The device.
 * @id: Device's ID
 * @idx: Index of the ITS identifier list.
 * @its_id: ITS identifier.
 *
 * Returns: 0 on success, appropriate error value otherwise
 */
static int iort_dev_find_its_id(struct device *dev, u32 id,
				unsigned int idx, int *its_id)
{
	struct acpi_iort_its_group *its;
	struct acpi_iort_node *node;

	node = iort_find_dev_node(dev);
	if (!node)
		return -ENXIO;

	node = iort_node_map_id(node, id, NULL, IORT_MSI_TYPE);
	if (!node)
		return -ENXIO;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)node->node_data;
	if (idx >= its->its_count) {
		dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n",
			idx, its->its_count);
		return -ENXIO;
	}

	*its_id = its->identifiers[idx];
	return 0;
}

/**
 * iort_get_device_domain() - Find MSI domain related to a device
 * @dev: The device.
 * @id: Requester ID for the device.
 * @bus_token: irq domain bus token.
 *
 * Returns: the MSI domain for this device, NULL otherwise
 */
struct irq_domain *iort_get_device_domain(struct device *dev, u32 id,
					  enum irq_domain_bus_token bus_token)
{
	struct fwnode_handle *handle;
	int its_id;

	if (iort_dev_find_its_id(dev, id, 0, &its_id))
		return NULL;

	handle = iort_find_domain_token(its_id);
	if (!handle)
		return NULL;

	return irq_find_matching_fwnode(handle, bus_token);
}
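
/*
 * Set the MSI domain of @dev from the ITS group referenced by the
 * node's special ID mapping index, if the node has one.
 */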
static void iort_set_device_domain(struct device *dev,
				   struct acpi_iort_node *node)
{
	struct acpi_iort_its_group *its;
	struct acpi_iort_node *msi_parent;
	struct acpi_iort_id_mapping *map;
	struct fwnode_handle *iort_fwnode;
	struct irq_domain *domain;
	int index;

	index = iort_get_id_mapping_index(node);
	if (index < 0)
		return;

	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
			   node->mapping_offset + index * sizeof(*map));

	/* Firmware bug! */
	if (!map->output_reference ||
	    !(map->flags & ACPI_IORT_ID_SINGLE_MAPPING)) {
		pr_err(FW_BUG "[node %p type %d] Invalid MSI mapping\n",
		       node, node->type);
		return;
	}

	msi_parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				  map->output_reference);

	if (!msi_parent || msi_parent->type != ACPI_IORT_NODE_ITS_GROUP)
		return;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)msi_parent->node_data;

	iort_fwnode = iort_find_domain_token(its->identifiers[0]);
	if (!iort_fwnode)
		return;

	domain = irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
	if (domain)
		dev_set_msi_domain(dev, domain);
}

/**
 * iort_get_platform_device_domain() - Find MSI domain related to a
 * platform device
 * @dev: the dev pointer associated with the platform device
 *
 * Returns: the MSI domain for this device, NULL otherwise
 */
static struct irq_domain *iort_get_platform_device_domain(struct device *dev)
{
	struct acpi_iort_node *node, *msi_parent = NULL;
	struct fwnode_handle *iort_fwnode;
	struct acpi_iort_its_group *its;
	int i;

	/* find its associated iort node */
	node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
			      iort_match_node_callback, dev);
	if (!node)
		return NULL;

	/* then find its msi parent node */
	for (i = 0; i < node->mapping_count; i++) {
		msi_parent = iort_node_map_platform_id(node, NULL,
						       IORT_MSI_TYPE, i);
		if (msi_parent)
			break;
	}

	if (!msi_parent)
		return NULL;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)msi_parent->node_data;

	iort_fwnode = iort_find_domain_token(its->identifiers[0]);
	if (!iort_fwnode)
		return NULL;

	return irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
}

void acpi_configure_pmsi_domain(struct device *dev)
{
	struct irq_domain *msi_domain;

	msi_domain = iort_get_platform_device_domain(dev);
	if (msi_domain)
		dev_set_msi_domain(dev, msi_domain);
}

#ifdef CONFIG_IOMMU_API
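
/*
 * Return the IOMMU node of @dev if the platform requires ITS MSI
 * regions to be reserved; currently this is only the case for the
 * HiSilicon Hi161x SMMUv3.
 */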
static struct acpi_iort_node *iort_get_msi_resv_iommu(struct device *dev)
{
	struct acpi_iort_node *iommu;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	iommu = iort_get_iort_node(fwspec->iommu_fwnode);

	if (iommu && (iommu->type == ACPI_IORT_NODE_SMMU_V3)) {
		struct acpi_iort_smmu_v3 *smmu;

		smmu = (struct acpi_iort_smmu_v3 *)iommu->node_data;
		if (smmu->model == ACPI_IORT_SMMU_V3_HISILICON_HI161X)
			return iommu;
	}

	return NULL;
}

/**
 * iort_iommu_msi_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @head: Reserved region list from iommu_get_resv_regions()
 *
 * Returns: Number of msi reserved regions on success (0 if platform
 * doesn't require the reservation or no associated msi regions),
 * appropriate error value otherwise. The ITS interrupt translation
 * spaces (ITS_base + SZ_64K, SZ_64K) associated with the device
 * are the msi reserved regions.
 */
int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct acpi_iort_its_group *its;
	struct acpi_iort_node *iommu_node, *its_node = NULL;
	int i, resv = 0;

	iommu_node = iort_get_msi_resv_iommu(dev);
	if (!iommu_node)
		return 0;

	/*
	 * Current logic to reserve ITS regions relies on HW topologies
	 * where a given PCI or named component maps its IDs to only one
	 * ITS group; if a PCI or named component can map its IDs to
	 * different ITS groups through IORT mappings this function has
	 * to be reworked to ensure we reserve regions for all ITS groups
	 * a given PCI or named component may map IDs to.
	 */

	for (i = 0; i < fwspec->num_ids; i++) {
		its_node = iort_node_map_id(iommu_node,
					fwspec->ids[i],
					NULL, IORT_MSI_TYPE);
		if (its_node)
			break;
	}

	if (!its_node)
		return 0;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)its_node->node_data;

	for (i = 0; i < its->its_count; i++) {
		phys_addr_t base;

		if (!iort_find_its_base(its->identifiers[i], &base)) {
			int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
			struct iommu_resv_region *region;

			region = iommu_alloc_resv_region(base + SZ_64K, SZ_64K,
							 prot, IOMMU_RESV_MSI);
			if (region) {
				list_add_tail(&region->list, head);
				resv++;
			}
		}
	}

	return (resv == its->its_count) ? resv : -ENODEV;
}

static inline bool iort_iommu_driver_enabled(u8 type)
{
	switch (type) {
	case ACPI_IORT_NODE_SMMU_V3:
		return IS_ENABLED(CONFIG_ARM_SMMU_V3);
	case ACPI_IORT_NODE_SMMU:
		return IS_ENABLED(CONFIG_ARM_SMMU);
	default:
		pr_warn("IORT node type %u does not describe an SMMU\n", type);
		return false;
	}
}

static bool iort_pci_rc_supports_ats(struct acpi_iort_node *node)
{
	struct acpi_iort_root_complex *pci_rc;

	pci_rc = (struct acpi_iort_root_complex *)node->node_data;
	return pci_rc->ats_attribute & ACPI_IORT_ATS_SUPPORTED;
}

static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node,
			    u32 streamid)
{
	const struct iommu_ops *ops;
	struct fwnode_handle *iort_fwnode;

	if (!node)
		return -ENODEV;

	iort_fwnode = iort_get_fwnode(node);
	if (!iort_fwnode)
		return -ENODEV;

	/*
	 * If the ops look-up fails, this means that either
	 * the SMMU drivers have not been probed yet or that
	 * the SMMU drivers are not built in the kernel;
	 * Depending on whether the SMMU drivers are built-in
	 * in the kernel or not, defer the IOMMU configuration
	 * or just abort it.
	 */
	ops = iommu_ops_from_fwnode(iort_fwnode);
	if (!ops)
		return iort_iommu_driver_enabled(node->type) ?
		       -EPROBE_DEFER : -ENODEV;

	return acpi_iommu_fwspec_init(dev, streamid, iort_fwnode, ops);
}

struct iort_pci_alias_info {
	struct device *dev;
	struct acpi_iort_node *node;
};
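
/* pci_for_each_dma_alias() callback: map one alias to its SMMU stream ID */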
static int iort_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
{
	struct iort_pci_alias_info *info = data;
	struct acpi_iort_node *parent;
	u32 streamid;

	parent = iort_node_map_id(info->node, alias, &streamid,
				  IORT_IOMMU_TYPE);
	return iort_iommu_xlate(info->dev, parent, streamid);
}

static void iort_named_component_init(struct device *dev,
				      struct acpi_iort_node *node)
{
	struct property_entry props[3] = {};
	struct acpi_iort_named_component *nc;

	nc = (struct acpi_iort_named_component *)node->node_data;
	props[0] = PROPERTY_ENTRY_U32("pasid-num-bits",
				      FIELD_GET(ACPI_IORT_NC_PASID_BITS,
						nc->node_flags));
	if (nc->node_flags & ACPI_IORT_NC_STALL_SUPPORTED)
		props[1] = PROPERTY_ENTRY_BOOL("dma-can-stall");

	if (device_create_managed_software_node(dev, props, NULL))
		dev_warn(dev, "Could not add device properties\n");
}

static int iort_nc_iommu_map(struct device *dev, struct acpi_iort_node *node)
{
	struct acpi_iort_node *parent;
	int err = -ENODEV, i = 0;
	u32 streamid = 0;

	do {
		parent = iort_node_map_platform_id(node, &streamid,
						   IORT_IOMMU_TYPE,
						   i++);

		if (parent)
			err = iort_iommu_xlate(dev, parent, streamid);
	} while (parent && !err);

	return err;
}

static int iort_nc_iommu_map_id(struct device *dev,
				struct acpi_iort_node *node,
				const u32 *in_id)
{
	struct acpi_iort_node *parent;
	u32 streamid;

	parent = iort_node_map_id(node, *in_id, &streamid, IORT_IOMMU_TYPE);
	if (parent)
		return iort_iommu_xlate(dev, parent, streamid);

	return -ENODEV;
}

/**
 * iort_iommu_configure_id - Set-up IOMMU configuration for a device.
 *
 * @dev: device to configure
 * @id_in: optional input id const value pointer
 *
 * Returns: 0 on success, <0 on failure
 */
int iort_iommu_configure_id(struct device *dev, const u32 *id_in)
{
	struct acpi_iort_node *node;
	int err = -ENODEV;

	if (dev_is_pci(dev)) {
		struct iommu_fwspec *fwspec;
		struct pci_bus *bus = to_pci_dev(dev)->bus;
		struct iort_pci_alias_info info = { .dev = dev };

		node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
				      iort_match_node_callback, &bus->dev);
		if (!node)
			return -ENODEV;

		info.node = node;
		err = pci_for_each_dma_alias(to_pci_dev(dev),
					     iort_pci_iommu_init, &info);

		fwspec = dev_iommu_fwspec_get(dev);
		if (fwspec && iort_pci_rc_supports_ats(node))
			fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
	} else {
		node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
				      iort_match_node_callback, dev);
		if (!node)
			return -ENODEV;

		err = id_in ? iort_nc_iommu_map_id(dev, node, id_in) :
			      iort_nc_iommu_map(dev, node);

		if (!err)
			iort_named_component_init(dev, node);
	}

	return err;
}

#else
int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
{ return 0; }
int iort_iommu_configure_id(struct device *dev, const u32 *input_id)
{ return -ENODEV; }
#endif
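
/*
 * Derive the DMA addressing limit of a named component from its
 * memory_address_limit field.
 */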
static int nc_dma_get_range(struct device *dev, u64 *size)
{
	struct acpi_iort_node *node;
	struct acpi_iort_named_component *ncomp;

	node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
			      iort_match_node_callback, dev);
	if (!node)
		return -ENODEV;

	ncomp = (struct acpi_iort_named_component *)node->node_data;

	if (!ncomp->memory_address_limit) {
		pr_warn(FW_BUG "Named component missing memory address limit\n");
		return -EINVAL;
	}

	*size = ncomp->memory_address_limit >= 64 ? U64_MAX :
			1ULL << ncomp->memory_address_limit;

	return 0;
}
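
/*
 * Derive the DMA addressing limit of a PCI root complex from its
 * memory_address_limit field (only valid for root complex nodes of
 * revision 1 or later).
 */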
static int rc_dma_get_range(struct device *dev, u64 *size)
{
	struct acpi_iort_node *node;
	struct acpi_iort_root_complex *rc;
	struct pci_bus *pbus = to_pci_dev(dev)->bus;

	node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
			      iort_match_node_callback, &pbus->dev);
	if (!node || node->revision < 1)
		return -ENODEV;

	rc = (struct acpi_iort_root_complex *)node->node_data;

	if (!rc->memory_address_limit) {
		pr_warn(FW_BUG "Root complex missing memory address limit\n");
		return -EINVAL;
	}

	*size = rc->memory_address_limit >= 64 ? U64_MAX :
			1ULL << rc->memory_address_limit;

	return 0;
}

/**
 * iort_dma_get_ranges() - Look up DMA addressing limit for the device
 * @dev: device to lookup
 * @size: DMA range size result pointer
 *
 * Return: 0 on success, an error otherwise.
 */
int iort_dma_get_ranges(struct device *dev, u64 *size)
{
	if (dev_is_pci(dev))
		return rc_dma_get_range(dev, size);
	else
		return nc_dma_get_range(dev, size);
}

static void __init acpi_iort_register_irq(int hwirq, const char *name,
					  int trigger,
					  struct resource *res)
{
	int irq = acpi_register_gsi(NULL, hwirq, trigger,
				    ACPI_ACTIVE_HIGH);

	if (irq <= 0) {
		pr_err("could not register gsi hwirq %d name [%s]\n", hwirq,
		       name);
		return;
	}

	res->start = irq;
	res->end = irq;
	res->flags = IORESOURCE_IRQ;
	res->name = name;
}

static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	/* Always present mem resource */
	int num_res = 1;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	if (smmu->event_gsiv)
		num_res++;

	if (smmu->pri_gsiv)
		num_res++;

	if (smmu->gerr_gsiv)
		num_res++;

	if (smmu->sync_gsiv)
		num_res++;

	return num_res;
}

static bool arm_smmu_v3_is_combined_irq(struct acpi_iort_smmu_v3 *smmu)
{
	/*
	 * Cavium ThunderX2 implementation doesn't support unique irq
	 * lines. Use a single irq line for all the SMMUv3 interrupts.
	 */
	if (smmu->model != ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
		return false;

	/*
	 * ThunderX2 doesn't support MSIs from the SMMU, so we're checking
	 * SPI numbers here.
	 */
	return smmu->event_gsiv == smmu->pri_gsiv &&
	       smmu->event_gsiv == smmu->gerr_gsiv &&
	       smmu->event_gsiv == smmu->sync_gsiv;
}

static unsigned long arm_smmu_v3_resource_size(struct acpi_iort_smmu_v3 *smmu)
{
	/*
	 * Override the size, for Cavium ThunderX2 implementation
	 * which doesn't support the page 1 SMMU register space.
	 */
	if (smmu->model == ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
		return SZ_64K;

	return SZ_128K;
}

static void __init arm_smmu_v3_init_resources(struct resource *res,
					      struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	int num_res = 0;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	res[num_res].start = smmu->base_address;
	res[num_res].end = smmu->base_address +
				arm_smmu_v3_resource_size(smmu) - 1;
	res[num_res].flags = IORESOURCE_MEM;
	num_res++;
	if (arm_smmu_v3_is_combined_irq(smmu)) {
		if (smmu->event_gsiv)
			acpi_iort_register_irq(smmu->event_gsiv, "combined",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);
	} else {

		if (smmu->event_gsiv)
			acpi_iort_register_irq(smmu->event_gsiv, "eventq",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);

		if (smmu->pri_gsiv)
			acpi_iort_register_irq(smmu->pri_gsiv, "priq",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);

		if (smmu->gerr_gsiv)
			acpi_iort_register_irq(smmu->gerr_gsiv, "gerror",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);

		if (smmu->sync_gsiv)
			acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);
	}
}

static void __init arm_smmu_v3_dma_configure(struct device *dev,
					     struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	enum dev_dma_attr attr;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	attr = (smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE) ?
			DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;

	/* We expect the dma masks to be equivalent for all SMMUv3 set-ups */
	dev->dma_mask = &dev->coherent_dma_mask;

	/* Configure DMA for the page table walker */
	acpi_dma_configure(dev, attr);
}

#if defined(CONFIG_ACPI_NUMA)
/*
 * set numa proximity domain for smmuv3 device
 */
static int __init arm_smmu_v3_set_proximity(struct device *dev,
					    struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;

	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
	if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
		int dev_node = pxm_to_node(smmu->pxm);

		if (dev_node != NUMA_NO_NODE && !node_online(dev_node))
			return -EINVAL;

		set_dev_node(dev, dev_node);
		pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n",
			smmu->base_address,
			smmu->pxm);
	}
	return 0;
}
#else
#define arm_smmu_v3_set_proximity NULL
#endif

static int __init arm_smmu_count_resources(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	/*
	 * Only consider the global fault interrupt and ignore the
	 * configuration access interrupt.
	 *
	 * MMIO address and global fault interrupt resources are always
	 * present so add them to the context interrupt count as a static
	 * value.
	 */
	return smmu->context_interrupt_count + 2;
}

static void __init arm_smmu_init_resources(struct resource *res,
					   struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;
	int i, hw_irq, trigger, num_res = 0;
	u64 *ctx_irq, *glb_irq;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	res[num_res].start = smmu->base_address;
	res[num_res].end = smmu->base_address + smmu->span - 1;
	res[num_res].flags = IORESOURCE_MEM;
	num_res++;

	glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset);
	/* Global IRQs */
	hw_irq = IORT_IRQ_MASK(glb_irq[0]);
	trigger = IORT_IRQ_TRIGGER_MASK(glb_irq[0]);

	acpi_iort_register_irq(hw_irq, "arm-smmu-global", trigger,
			       &res[num_res++]);

	/* Context IRQs */
	ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset);
	for (i = 0; i < smmu->context_interrupt_count; i++) {
		hw_irq = IORT_IRQ_MASK(ctx_irq[i]);
		trigger = IORT_IRQ_TRIGGER_MASK(ctx_irq[i]);

		acpi_iort_register_irq(hw_irq, "arm-smmu-context", trigger,
				       &res[num_res++]);
	}
}

static void __init arm_smmu_dma_configure(struct device *dev,
					  struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;
	enum dev_dma_attr attr;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	attr = (smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK) ?
			DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;

	/* We expect the dma masks to be equivalent for SMMU set-ups */
	dev->dma_mask = &dev->coherent_dma_mask;

	/* Configure DMA for the page table walker */
	acpi_dma_configure(dev, attr);
}

static int __init arm_smmu_v3_pmcg_count_resources(struct acpi_iort_node *node)
{
	struct acpi_iort_pmcg *pmcg;

	/* Retrieve PMCG specific data */
	pmcg = (struct acpi_iort_pmcg *)node->node_data;

	/*
	 * There are always 2 memory resources.
	 * If the overflow_gsiv is present then add that for a total of 3.
	 */
	return pmcg->overflow_gsiv ? 3 : 2;
}

static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res,
						   struct acpi_iort_node *node)
{
	struct acpi_iort_pmcg *pmcg;

	/* Retrieve PMCG specific data */
	pmcg = (struct acpi_iort_pmcg *)node->node_data;

	res[0].start = pmcg->page0_base_address;
	res[0].end = pmcg->page0_base_address + SZ_4K - 1;
	res[0].flags = IORESOURCE_MEM;
	/*
	 * The initial version in DEN0049C lacked a way to describe register
	 * page 1, which makes it broken for most PMCG implementations; in
	 * that case, just let the driver fail gracefully if it expects to
	 * find a second memory resource.
	 */
	if (node->revision > 0) {
		res[1].start = pmcg->page1_base_address;
		res[1].end = pmcg->page1_base_address + SZ_4K - 1;
		res[1].flags = IORESOURCE_MEM;
	}

	if (pmcg->overflow_gsiv)
		acpi_iort_register_irq(pmcg->overflow_gsiv, "overflow",
				       ACPI_EDGE_SENSITIVE, &res[2]);
}

static struct acpi_platform_list pmcg_plat_info[] __initdata = {
	/* HiSilicon Hip08 Platform */
	{"HISI  ", "HIP08   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
	 "Erratum #162001800", IORT_SMMU_V3_PMCG_HISI_HIP08},
	{ }
};

static int __init arm_smmu_v3_pmcg_add_platdata(struct platform_device *pdev)
{
	u32 model;
	int idx;

	idx = acpi_match_platform_list(pmcg_plat_info);
	if (idx >= 0)
		model = pmcg_plat_info[idx].data;
	else
		model = IORT_SMMU_V3_PMCG_GENERIC;

	return platform_device_add_data(pdev, &model, sizeof(model));
}

struct iort_dev_config {
	const char *name;
	int (*dev_init)(struct acpi_iort_node *node);
	void (*dev_dma_configure)(struct device *dev,
				  struct acpi_iort_node *node);
	int (*dev_count_resources)(struct acpi_iort_node *node);
	void (*dev_init_resources)(struct resource *res,
				   struct acpi_iort_node *node);
int ( * dev_set_proximity ) ( struct device * dev ,
2017-08-02 10:58:25 -07:00
struct acpi_iort_node * node ) ;
2019-03-26 15:17:50 +00:00
	int (*dev_add_platdata)(struct platform_device *pdev);
2016-11-21 10:01:41 +00:00
};
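As referenced in the commit message above, a minimal sketch of how an SMMUv3
dev_set_proximity hook can implement the offline-node check; the function name
here is illustrative (the real arm_smmu_v3_set_proximity is defined earlier in
this file), while ACPI_IORT_SMMU_V3_PXM_VALID, pxm_to_node(), node_online()
and set_dev_node() are standard kernel/ACPICA symbols:

#if defined(CONFIG_ACPI_NUMA)
static int __init example_smmu_v3_set_proximity(struct device *dev,
						struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;

	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
	if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
		int dev_node = pxm_to_node(smmu->pxm);

		/* Fail platform device creation instead of oopsing later */
		if (dev_node != NUMA_NO_NODE && !node_online(dev_node))
			return -EINVAL;

		set_dev_node(dev, dev_node);
	}
	return 0;
}
#endif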
2017-09-20 17:03:58 +01:00
static const struct iort_dev_config iort_arm_smmu_v3_cfg __initconst = {
2016-11-21 10:01:43 +00:00
	.name = "arm-smmu-v3",
2019-03-26 15:17:50 +00:00
	.dev_dma_configure = arm_smmu_v3_dma_configure,
2017-09-20 17:03:58 +01:00
	.dev_count_resources = arm_smmu_v3_count_resources,
	.dev_init_resources = arm_smmu_v3_init_resources,
	.dev_set_proximity = arm_smmu_v3_set_proximity,
2016-11-21 10:01:43 +00:00
};
2017-09-20 17:03:58 +01:00
static const struct iort_dev_config iort_arm_smmu_cfg __initconst = {
2016-11-21 10:01:45 +00:00
	.name = "arm-smmu",
2019-03-26 15:17:50 +00:00
	.dev_dma_configure = arm_smmu_dma_configure,
2017-09-20 17:03:58 +01:00
	.dev_count_resources = arm_smmu_count_resources,
2019-03-26 15:17:50 +00:00
	.dev_init_resources = arm_smmu_init_resources,
};

static const struct iort_dev_config iort_arm_smmu_v3_pmcg_cfg __initconst = {
	.name = "arm-smmu-v3-pmcg",
	.dev_count_resources = arm_smmu_v3_pmcg_count_resources,
	.dev_init_resources = arm_smmu_v3_pmcg_init_resources,
	.dev_add_platdata = arm_smmu_v3_pmcg_add_platdata,
2016-11-21 10:01:45 +00:00
};
2017-09-20 17:03:58 +01:00
static __init const struct iort_dev_config *iort_get_dev_cfg(
2017-09-28 14:03:33 +01:00
			struct acpi_iort_node *node)
2016-11-21 10:01:41 +00:00
{
2016-11-21 10:01:43 +00:00
	switch (node->type) {
	case ACPI_IORT_NODE_SMMU_V3:
		return &iort_arm_smmu_v3_cfg;
2016-11-21 10:01:45 +00:00
	case ACPI_IORT_NODE_SMMU:
		return &iort_arm_smmu_cfg;
2019-03-26 15:17:50 +00:00
	case ACPI_IORT_NODE_PMCG:
		return &iort_arm_smmu_v3_pmcg_cfg;
2016-11-21 10:01:43 +00:00
	default:
		return NULL;
	}
2016-11-21 10:01:41 +00:00
}
/**
2017-09-20 17:03:58 +01:00
 * iort_add_platform_device() - Allocate a platform device for IORT node
 * @node: Pointer to device ACPI IORT node
2020-10-14 10:31:39 +01:00
 * @ops: Pointer to IORT device config struct
2016-11-21 10:01:41 +00:00
*
 * Returns: 0 on success, <0 failure
*/
2017-09-20 17:03:58 +01:00
static int __init iort_add_platform_device(struct acpi_iort_node *node,
					   const struct iort_dev_config *ops)
2016-11-21 10:01:41 +00:00
{
	struct fwnode_handle *fwnode;
	struct platform_device *pdev;
	struct resource *r;
	int ret, count;

	pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO);
	if (!pdev)
2017-01-17 16:36:23 +03:00
		return -ENOMEM;
2016-11-21 10:01:41 +00:00
2019-04-08 23:21:12 +08:00
	if (ops->dev_set_proximity) {
		ret = ops->dev_set_proximity(&pdev->dev, node);
		if (ret)
			goto dev_put;
	}
2017-08-02 10:58:25 -07:00
2017-09-20 17:03:58 +01:00
	count = ops->dev_count_resources(node);
2016-11-21 10:01:41 +00:00
	r = kcalloc(count, sizeof(*r), GFP_KERNEL);
	if (!r) {
		ret = -ENOMEM;
		goto dev_put;
	}
2017-09-20 17:03:58 +01:00
	ops->dev_init_resources(r, node);
2016-11-21 10:01:41 +00:00
	ret = platform_device_add_resources(pdev, r, count);
	/*
	 * Resources are duplicated in platform_device_add_resources,
	 * free their allocated memory
	 */
	kfree(r);

	if (ret)
		goto dev_put;
	/*
2019-03-26 15:17:50 +00:00
	 * Platform devices based on PMCG nodes use platform_data to
	 * pass the hardware model info to the driver. For others, add
	 * a copy of the IORT node pointer to platform_data, to be used
	 * to retrieve IORT data information.
2016-11-21 10:01:41 +00:00
	 */
2019-03-26 15:17:50 +00:00
	if (ops->dev_add_platdata)
		ret = ops->dev_add_platdata(pdev);
	else
		ret = platform_device_add_data(pdev, &node, sizeof(node));
2016-11-21 10:01:41 +00:00
	if (ret)
		goto dev_put;

	fwnode = iort_get_fwnode(node);
	if (!fwnode) {
		ret = -ENODEV;
		goto dev_put;
	}

	pdev->dev.fwnode = fwnode;
2019-03-26 15:17:50 +00:00
	if (ops->dev_dma_configure)
		ops->dev_dma_configure(&pdev->dev, node);
2016-11-21 10:01:41 +00:00
2017-10-13 15:09:50 +08:00
	iort_set_device_domain(&pdev->dev, node);
2016-11-21 10:01:41 +00:00
	ret = platform_device_add(pdev);
	if (ret)
		goto dma_deconfigure;

	return 0;

dma_deconfigure:
2018-08-24 10:28:18 +02:00
	arch_teardown_dma_ops(&pdev->dev);
2016-11-21 10:01:41 +00:00
dev_put:
	platform_device_put(pdev);

	return ret;
}
2018-12-19 22:46:58 +00:00
#ifdef CONFIG_PCI
static void __init iort_enable_acs(struct acpi_iort_node *iort_node)
2017-10-02 18:28:44 +01:00
{
2018-12-19 22:46:58 +00:00
	static bool acs_enabled __initdata;

	if (acs_enabled)
		return;
2017-10-02 18:28:44 +01:00
	if (iort_node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
		struct acpi_iort_node *parent;
		struct acpi_iort_id_mapping *map;
		int i;

		map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, iort_node,
				   iort_node->mapping_offset);

		for (i = 0; i < iort_node->mapping_count; i++, map++) {
			if (!map->output_reference)
				continue;

			parent = ACPI_ADD_PTR(struct acpi_iort_node,
					iort_table, map->output_reference);
			/*
			 * If we detect a RC->SMMU mapping, make sure
			 * we enable ACS on the system.
			 */
			if ((parent->type == ACPI_IORT_NODE_SMMU) ||
			    (parent->type == ACPI_IORT_NODE_SMMU_V3)) {
				pci_request_acs();
2018-12-19 22:46:58 +00:00
				acs_enabled = true;
				return;
2017-10-02 18:28:44 +01:00
			}
		}
	}
}
2018-12-19 22:46:58 +00:00
#else
static inline void iort_enable_acs(struct acpi_iort_node *iort_node) { }
#endif
2017-10-02 18:28:44 +01:00
2016-11-21 10:01:41 +00:00
static void __init iort_init_platform_devices(void)
{
	struct acpi_iort_node *iort_node, *iort_end;
	struct acpi_table_iort *iort;
	struct fwnode_handle *fwnode;
	int i, ret;
2017-09-20 17:03:58 +01:00
	const struct iort_dev_config *ops;
2016-11-21 10:01:41 +00:00
	/*
	 * iort_table and iort both point to the start of IORT table, but
	 * have different struct types
	 */
	iort = (struct acpi_table_iort *)iort_table;

	/* Get the first IORT node */
	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				 iort->node_offset);
	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				iort_table->length);

	for (i = 0; i < iort->node_count; i++) {
		if (iort_node >= iort_end) {
			pr_err("iort node pointer overflows, bad table\n");
			return;
		}
2018-12-19 22:46:58 +00:00
		iort_enable_acs(iort_node);
2017-10-02 18:28:44 +01:00
2017-09-20 17:03:58 +01:00
		ops = iort_get_dev_cfg(iort_node);
		if (ops) {
2016-11-21 10:01:41 +00:00
			fwnode = acpi_alloc_fwnode_static();
			if (!fwnode)
				return;

			iort_set_fwnode(iort_node, fwnode);
2017-09-20 17:03:58 +01:00
			ret = iort_add_platform_device(iort_node, ops);
2016-11-21 10:01:41 +00:00
			if (ret) {
				iort_delete_fwnode(iort_node);
				acpi_free_fwnode_static(fwnode);
				return;
			}
		}

		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
					 iort_node->length);
	}
}
2016-09-12 20:54:20 +02:00
void __init acpi_iort_init(void)
{
	acpi_status status;
2020-05-08 12:05:53 +08:00
	/* iort_table will be used at runtime after the iort init,
	 * so we don't need to call acpi_put_table() to release
	 * the IORT table mapping.
	 */
2016-09-12 20:54:20 +02:00
	status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table);
2016-11-21 10:01:34 +00:00
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			const char *msg = acpi_format_exception(status);

			pr_err("Failed to get table, %s\n", msg);
		}

		return;
2016-09-12 20:54:20 +02:00
	}
2016-11-21 10:01:34 +00:00
2016-11-21 10:01:41 +00:00
	iort_init_platform_devices();
2016-09-12 20:54:20 +02:00
}
arm64: mm: Set ZONE_DMA size based on early IORT scan
We recently introduced a 1 GB sized ZONE_DMA to cater for platforms
incorporating masters that can address less than 32 bits of DMA, in
particular the Raspberry Pi 4, which has 4 or 8 GB of DRAM, but has
peripherals that can only address up to 1 GB (and its PCIe host
bridge can only access the bottom 3 GB).
Instructing the DMA layer about these limitations is straightforward,
even though we had to fix some issues regarding memory limits set in
the IORT for named components, and regarding the handling of ACPI _DMA
methods. However, the DMA layer also needs to be able to allocate
memory that is guaranteed to meet those DMA constraints, for bounce
buffering as well as allocating the backing for consistent mappings.
This is why the 1 GB ZONE_DMA was introduced recently. Unfortunately,
it turns out that having a 1 GB ZONE_DMA as well as a ZONE_DMA32 causes
problems with kdump, and potentially in other places where allocations
cannot cross zone boundaries. Therefore, we should avoid having two
separate DMA zones when possible.
So let's do an early scan of the IORT, and only create the ZONE_DMA
if we encounter any devices that need it. This puts the burden on
the firmware to describe such limitations in the IORT, which may be
redundant (and less precise) if _DMA methods are also being provided.
However, it should be noted that this situation is highly unusual for
arm64 ACPI machines. Also, the DMA subsystem still gives precedence to
the _DMA method if implemented, and so we will not lose the ability to
perform streaming DMA outside the ZONE_DMA if the _DMA method permits
it (an illustrative caller-side sketch follows the implementation below).
[nsaenz: unified implementation with DT's counterpart]
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
Tested-by: Jeremy Linton <jeremy.linton@arm.com>
Acked-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Acked-by: Hanjun Guo <guohanjun@huawei.com>
Cc: Jeremy Linton <jeremy.linton@arm.com>
Cc: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Cc: Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
Cc: Rob Herring <robh+dt@kernel.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Hanjun Guo <guohanjun@huawei.com>
Cc: Sudeep Holla <sudeep.holla@arm.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Link: https://lore.kernel.org/r/20201119175400.9995-7-nsaenzjulienne@suse.de
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
2020-11-19 18:53:58 +01:00
#ifdef CONFIG_ZONE_DMA
/*
 * Extract the highest CPU physical address accessible to all DMA masters in
 * the system. PHYS_ADDR_MAX is returned when no constrained device is found.
*/
phys_addr_t __init acpi_iort_dma_get_max_cpu_address(void)
{
	phys_addr_t limit = PHYS_ADDR_MAX;
	struct acpi_iort_node *node, *end;
	struct acpi_table_iort *iort;
	acpi_status status;
	int i;

	if (acpi_disabled)
		return limit;
	status = acpi_get_table(ACPI_SIG_IORT, 0,
				(struct acpi_table_header **)&iort);
	if (ACPI_FAILURE(status))
		return limit;

	node = ACPI_ADD_PTR(struct acpi_iort_node, iort, iort->node_offset);
	end = ACPI_ADD_PTR(struct acpi_iort_node, iort, iort->header.length);

	for (i = 0; i < iort->node_count; i++) {
		if (node >= end)
			break;
		switch (node->type) {
			struct acpi_iort_named_component *ncomp;
			struct acpi_iort_root_complex *rc;
			phys_addr_t local_limit;

		case ACPI_IORT_NODE_NAMED_COMPONENT:
			ncomp = (struct acpi_iort_named_component *)node->node_data;
			local_limit = DMA_BIT_MASK(ncomp->memory_address_limit);
			limit = min_not_zero(limit, local_limit);
			break;

		case ACPI_IORT_NODE_PCI_ROOT_COMPLEX:
			if (node->revision < 1)
				break;

			rc = (struct acpi_iort_root_complex *)node->node_data;
			local_limit = DMA_BIT_MASK(rc->memory_address_limit);
			limit = min_not_zero(limit, local_limit);
			break;
		}
		node = ACPI_ADD_PTR(struct acpi_iort_node, node, node->length);
	}

	acpi_put_table(&iort->header);

	return limit;
}
#endif
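As noted in the commit message above, an illustrative caller-side sketch (not
the verbatim arm64 code) of how an arch init path can fold this limit into its
ZONE_DMA sizing; zone_dma_bits is the DMA core's limit variable, while
example_max_zone_phys() is a hypothetical stand-in for the arch's
zone-boundary helper:

/* Hypothetical helper: cap (1 << zone_bits) at the top of RAM */
static phys_addr_t __init example_max_zone_phys(unsigned int zone_bits)
{
	return min(1ULL << zone_bits, (u64)memblock_end_of_DRAM());
}

static void __init example_zone_dma_init(unsigned long *max_zone_pfns)
{
	unsigned int acpi_zone_dma_bits;

	/* Bits needed to reach the most constrained master's limit */
	acpi_zone_dma_bits = fls64(acpi_iort_dma_get_max_cpu_address());

	/* Never extend ZONE_DMA past the traditional 32-bit boundary */
	zone_dma_bits = min(32U, acpi_zone_dma_bits);

	max_zone_pfns[ZONE_DMA] = PFN_DOWN(example_max_zone_phys(zone_dma_bits));
}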