/*
 * Copyright (C) 2007-2009 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/sysdev.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <asm/pci-direct.h>
#include <asm/amd_iommu_proto.h>
#include <asm/amd_iommu_types.h>
#include <asm/amd_iommu.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/x86_init.h>

/*
 * definitions for the ACPI scanning code
 */
#define IVRS_HEADER_LENGTH 48

#define ACPI_IVHD_TYPE                  0x10
#define ACPI_IVMD_TYPE_ALL              0x20
#define ACPI_IVMD_TYPE                  0x21
#define ACPI_IVMD_TYPE_RANGE            0x22

#define IVHD_DEV_ALL                    0x01
#define IVHD_DEV_SELECT                 0x02
#define IVHD_DEV_SELECT_RANGE_START     0x03
#define IVHD_DEV_RANGE_END              0x04
#define IVHD_DEV_ALIAS                  0x42
#define IVHD_DEV_ALIAS_RANGE            0x43
#define IVHD_DEV_EXT_SELECT             0x46
#define IVHD_DEV_EXT_SELECT_RANGE       0x47

#define IVHD_FLAG_HT_TUN_EN_MASK        0x01
#define IVHD_FLAG_PASSPW_EN_MASK        0x02
#define IVHD_FLAG_RESPASSPW_EN_MASK     0x04
#define IVHD_FLAG_ISOC_EN_MASK          0x08

#define IVMD_FLAG_EXCL_RANGE            0x08
#define IVMD_FLAG_UNITY_MAP             0x01

#define ACPI_DEVFLAG_INITPASS           0x01
#define ACPI_DEVFLAG_EXTINT             0x02
#define ACPI_DEVFLAG_NMI                0x04
#define ACPI_DEVFLAG_SYSMGT1            0x10
#define ACPI_DEVFLAG_SYSMGT2            0x20
#define ACPI_DEVFLAG_LINT0              0x40
#define ACPI_DEVFLAG_LINT1              0x80
#define ACPI_DEVFLAG_ATSDIS             0x10000000

/*
 * ACPI table definitions
 *
 * These data structures are laid over the table to parse the important values
 * out of it.
 */

/*
 * structure describing one IOMMU in the ACPI table. Typically followed by one
 * or more ivhd_entrys.
 */
struct ivhd_header {
	u8  type;
	u8  flags;
	u16 length;
	u16 devid;
	u16 cap_ptr;
	u64 mmio_phys;
	u16 pci_seg;
	u16 info;
	u32 reserved;
} __attribute__((packed));

/*
 * A device entry describing which devices a specific IOMMU translates and
 * which requestor ids they use.
 */
struct ivhd_entry {
	u8  type;
	u16 devid;
	u8  flags;
	u32 ext;
} __attribute__((packed));

/*
 * An AMD IOMMU memory definition structure. It defines things like exclusion
 * ranges for devices and regions that should be unity mapped.
 */
struct ivmd_header {
	u8  type;
	u8  flags;
	u16 length;
	u16 devid;
	u16 aux;
	u64 resv;
	u64 range_start;
	u64 range_length;
} __attribute__((packed));

bool amd_iommu_dump;
static int __initdata amd_iommu_detected;

u16 amd_iommu_last_bdf;			/* largest PCI device id we have
					   to handle */
LIST_HEAD(amd_iommu_unity_map);		/* a list of required unity mappings
					   we find in ACPI */
bool amd_iommu_unmap_flush;		/* if true, flush on every unmap */

LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the
					   system */

/* Array to assign indices to IOMMUs */
struct amd_iommu *amd_iommus[MAX_IOMMUS];
int amd_iommus_present;

/* IOMMUs have a non-present cache? */
bool amd_iommu_np_cache __read_mostly;

/*
 * List of protection domains - used during resume
 */
LIST_HEAD(amd_iommu_pd_list);
spinlock_t amd_iommu_pd_lock;

/*
 * Pointer to the device table which is shared by all AMD IOMMUs
 * it is indexed by the PCI device id or the HT unit id and contains
 * information about the domain the device belongs to as well as the
 * page table root pointer.
 */
struct dev_table_entry *amd_iommu_dev_table;

/*
 * The alias table is a driver specific data structure which contains the
 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
 * More than one device can share the same requestor id.
 */
u16 *amd_iommu_alias_table;

/*
 * The rlookup table is used to find the IOMMU which is responsible
 * for a specific device. It is also indexed by the PCI device id.
 */
struct amd_iommu **amd_iommu_rlookup_table;

/*
 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * to know which ones are already in use.
 */
unsigned long *amd_iommu_pd_alloc_bitmap;

static u32 dev_table_size;	/* size of the device table */
static u32 alias_table_size;	/* size of the alias table */
static u32 rlookup_table_size;	/* size of the rlookup table */

static inline void update_last_devid(u16 devid)
{
	if (devid > amd_iommu_last_bdf)
		amd_iommu_last_bdf = devid;
}
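
/*
 * Rounds a table size up to a whole power-of-two number of pages.
 * Illustrative numbers (not taken from this file): with
 * amd_iommu_last_bdf == 0xffff and 32-byte entries the table needs
 * 2MB, so tbl_size(32) returns 1UL << 21.
 */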
static inline unsigned long tbl_size(int entry_size)
{
	unsigned shift = PAGE_SHIFT +
		get_order(((int)amd_iommu_last_bdf + 1) * entry_size);

	return 1UL << shift;
}

/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program the IOMMU device registers in
 * MMIO space required for that driver.
 *
 ****************************************************************************/

/*
 * This function sets the exclusion range in the IOMMU. DMA accesses to the
 * exclusion range are passed through untranslated
 */
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
	u64 start = iommu->exclusion_start & PAGE_MASK;
	u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
	u64 entry;

	if (!iommu->exclusion_start)
		return;

	entry = start | MMIO_EXCL_ENABLE_MASK;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
			&entry, sizeof(entry));

	entry = limit;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
			&entry, sizeof(entry));
}

/* Programs the physical address of the device table into the IOMMU hardware */
static void __init iommu_set_device_table(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->mmio_base == NULL);

	entry = virt_to_phys(amd_iommu_dev_table);
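	/*
	 * The low nine bits of the base register hold the table size in
	 * 4K pages, encoded as (pages - 1); hence the "- 1" below.
	 */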
	entry |= (dev_table_size >> 12) - 1;
	memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
			&entry, sizeof(entry));
}

/* Generic functions to enable/disable certain features of the IOMMU. */
static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
	u32 ctrl;

	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl |= (1 << bit);
	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
	u32 ctrl;

	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~(1 << bit);
	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
	printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx\n",
	       dev_name(&iommu->dev->dev), iommu->cap_ptr);

	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}

static void iommu_disable(struct amd_iommu *iommu)
{
	/* Disable command buffer */
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	/* Disable event logging and event interrupts */
	iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);

	/* Disable IOMMU hardware itself */
	iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
}

/*
 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
 * the system has one.
 */
static u8 * __init iommu_map_mmio_space(u64 address)
{
	u8 *ret;

	if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu"))
		return NULL;

	ret = ioremap_nocache(address, MMIO_REGION_LENGTH);
	if (ret != NULL)
		return ret;

	release_mem_region(address, MMIO_REGION_LENGTH);

	return NULL;
}

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
	if (iommu->mmio_base)
		iounmap(iommu->mmio_base);
	release_mem_region(iommu->mmio_phys, MMIO_REGION_LENGTH);
}

/****************************************************************************
 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we try to find out the highest device id this
 * code has to handle. Upon this information the size of the shared data
 * structures is determined later.
 *
 ****************************************************************************/

/*
 * This function calculates the length of a given IVHD entry
 */
static inline int ivhd_entry_length(u8 *ivhd)
{
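	/* bits 7:6 of the type byte encode the entry size: 4, 8, 16 or 32 bytes */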
	return 0x04 << (*ivhd >> 6);
}

/*
 * This function reads the last device id the IOMMU has to handle from the PCI
 * capability header for this IOMMU
 */
static int __init find_last_devid_on_pci(int bus, int dev, int fn, int cap_ptr)
{
	u32 cap;

	cap = read_pci_config(bus, dev, fn, cap_ptr + MMIO_RANGE_OFFSET);
	update_last_devid(calc_devid(MMIO_GET_BUS(cap), MMIO_GET_LD(cap)));

	return 0;
}

/*
 * After reading the highest device id from the IOMMU PCI capability header
 * this function looks if there is a higher device id defined in the ACPI table
 */
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
	u8 *p = (void *)h, *end = (void *)h;
	struct ivhd_entry *dev;

	p += sizeof(*h);
	end += h->length;

	find_last_devid_on_pci(PCI_BUS(h->devid),
			PCI_SLOT(h->devid),
			PCI_FUNC(h->devid),
			h->cap_ptr);

	while (p < end) {
		dev = (struct ivhd_entry *)p;
		switch (dev->type) {
		case IVHD_DEV_SELECT:
		case IVHD_DEV_RANGE_END:
		case IVHD_DEV_ALIAS:
		case IVHD_DEV_EXT_SELECT:
			/* all the above subfield types refer to device ids */
			update_last_devid(dev->devid);
			break;
		default:
			break;
		}
		p += ivhd_entry_length(p);
	}

	WARN_ON(p != end);

	return 0;
}

/*
 * Iterate over all IVHD entries in the ACPI table and find the highest device
 * id which we need to handle. This is the first of three functions which parse
 * the ACPI table. So we check the checksum here.
 */
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
	int i;
	u8 checksum = 0, *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;

	/*
	 * Validate checksum here so we don't need to do it when
	 * we actually parse the table
	 */
	for (i = 0; i < table->length; ++i)
		checksum += p[i];
	if (checksum != 0)
		/* ACPI table corrupt */
		return -ENODEV;

	p += IVRS_HEADER_LENGTH;

	end += table->length;
	while (p < end) {
		h = (struct ivhd_header *)p;
		switch (h->type) {
		case ACPI_IVHD_TYPE:
			find_last_devid_from_ivhd(h);
			break;
		default:
			break;
		}
		p += h->length;
	}
	WARN_ON(p != end);

	return 0;
}

/****************************************************************************
 *
 * The following functions belong to the code path which parses the ACPI table
 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
 * data structures, initialize the device/alias/rlookup table and also
 * basically initialize the hardware.
 *
 ****************************************************************************/

/*
 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 * write commands to that buffer later and the IOMMU will execute them
 * asynchronously
 */
static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
{
	u8 *cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
			get_order(CMD_BUFFER_SIZE));

	if (cmd_buf == NULL)
		return NULL;

	iommu->cmd_buf_size = CMD_BUFFER_SIZE;

	return cmd_buf;
}

/*
 * This function resets the command buffer if the IOMMU stopped fetching
 * commands from it.
 */
void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
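	/* stop command fetching, zero head and tail, then re-enable fetching */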
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
}

/*
 * This function writes the command buffer address to the hardware and
 * enables it.
 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->cmd_buf == NULL);

	entry = (u64)virt_to_phys(iommu->cmd_buf);
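	/* the size field announces a 512-entry command buffer to the hardware */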
	entry |= MMIO_CMD_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
		    &entry, sizeof(entry));

	amd_iommu_reset_cmd_buffer(iommu);
}

static void __init free_command_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->cmd_buf,
		   get_order(iommu->cmd_buf_size));
}

/* allocates the memory where the IOMMU will log its events to */
static u8 * __init alloc_event_buffer(struct amd_iommu *iommu)
{
	iommu->evt_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						get_order(EVT_BUFFER_SIZE));

	if (iommu->evt_buf == NULL)
		return NULL;

	iommu->evt_buf_size = EVT_BUFFER_SIZE;

	return iommu->evt_buf;
}

static void iommu_enable_event_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->evt_buf == NULL);

	entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;

	memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}

static void __init free_event_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}

/* sets a specific bit in the device table entry. */
static void set_dev_entry_bit(u16 devid, u8 bit)
{
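	/* a device table entry is 256 bits wide, addressed as eight 32-bit
	   words: bits 7:5 of 'bit' pick the word, bits 4:0 the bit in it */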
	int i = (bit >> 5) & 0x07;
	int _bit = bit & 0x1f;

	amd_iommu_dev_table[devid].data[i] |= (1 << _bit);
}

static int get_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 5) & 0x07;
	int _bit = bit & 0x1f;

	return (amd_iommu_dev_table[devid].data[i] & (1 << _bit)) >> _bit;
}
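
/*
 * Erratum 63 workaround: a device table entry whose SYSMGT field reads
 * 01b needs the IW bit set as well.
 */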
void amd_iommu_apply_erratum_63(u16 devid)
{
	int sysmgt;

	sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
		 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);

	if (sysmgt == 0x01)
		set_dev_entry_bit(devid, DEV_ENTRY_IW);
}

/* Writes the specific IOMMU for a device into the rlookup table */
static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
	amd_iommu_rlookup_table[devid] = iommu;
}

/*
 * This function takes the device specific flags read from the ACPI
 * table and sets up the device table entry with that information
 */
static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
					   u16 devid, u32 flags, u32 ext_flags)
{
	if (flags & ACPI_DEVFLAG_INITPASS)
		set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
	if (flags & ACPI_DEVFLAG_EXTINT)
		set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
	if (flags & ACPI_DEVFLAG_NMI)
		set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
	if (flags & ACPI_DEVFLAG_SYSMGT1)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
	if (flags & ACPI_DEVFLAG_SYSMGT2)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
	if (flags & ACPI_DEVFLAG_LINT0)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
	if (flags & ACPI_DEVFLAG_LINT1)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);

	amd_iommu_apply_erratum_63(devid);

	set_iommu_for_device(iommu, devid);
}

/*
 * Reads the device exclusion range from ACPI and initializes the IOMMU with
 * it
 */
static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
{
	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

	if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
		return;

	if (iommu) {
		/*
		 * We can only configure exclusion ranges per IOMMU, not
		 * per device. But we can enable the exclusion range per
		 * device. This is done here
		 */
		set_dev_entry_bit(m->devid, DEV_ENTRY_EX);
		iommu->exclusion_start = m->range_start;
		iommu->exclusion_length = m->range_length;
	}
}

/*
 * This function reads some important data from the IOMMU PCI space and
 * initializes the driver data structure with it. It reads the hardware
 * capabilities and the first/last device entries
 */
static void __init init_iommu_from_pci(struct amd_iommu *iommu)
{
	int cap_ptr = iommu->cap_ptr;
	u32 range, misc;

	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
			      &iommu->cap);
	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
			      &range);
	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
			      &misc);

	iommu->first_device = calc_devid(MMIO_GET_BUS(range),
					 MMIO_GET_FD(range));
	iommu->last_device = calc_devid(MMIO_GET_BUS(range),
					MMIO_GET_LD(range));
	iommu->evt_msi_num = MMIO_MSI_NUM(misc);
}

/*
 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 * initializes the hardware and our data structures with it.
 */
static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
					struct ivhd_header *h)
{
	u8 *p = (u8 *)h;
	u8 *end = p, flags = 0;
	u16 dev_i, devid = 0, devid_start = 0, devid_to = 0;
	u32 ext_flags = 0;
	bool alias = false;
	struct ivhd_entry *e;

	/*
	 * First set the recommended feature enable bits from ACPI
	 * into the IOMMU control registers
	 */
	h->flags & IVHD_FLAG_HT_TUN_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
		iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);

	h->flags & IVHD_FLAG_PASSPW_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_PASSPW_EN);

	h->flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);

	h->flags & IVHD_FLAG_ISOC_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
		iommu_feature_disable(iommu, CONTROL_ISOC_EN);

	/*
	 * make IOMMU memory accesses cache coherent
	 */
	iommu_feature_enable(iommu, CONTROL_COHERENT_EN);

	/*
	 * Done. Now parse the device entries
	 */
	p += sizeof(struct ivhd_header);
	end += h->length;

	while (p < end) {
		e = (struct ivhd_entry *)p;
		switch (e->type) {
		case IVHD_DEV_ALL:

			DUMP_printk(" DEV_ALL\t\t\t first devid: %02x:%02x.%x"
				    " last device %02x:%02x.%x flags: %02x\n",
				    PCI_BUS(iommu->first_device),
				    PCI_SLOT(iommu->first_device),
				    PCI_FUNC(iommu->first_device),
				    PCI_BUS(iommu->last_device),
				    PCI_SLOT(iommu->last_device),
				    PCI_FUNC(iommu->last_device),
				    e->flags);

			for (dev_i = iommu->first_device;
					dev_i <= iommu->last_device; ++dev_i)
				set_dev_entry_from_acpi(iommu, dev_i,
							e->flags, 0);
			break;
		case IVHD_DEV_SELECT:

			DUMP_printk(" DEV_SELECT\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x\n",
				    PCI_BUS(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
			break;
		case IVHD_DEV_SELECT_RANGE_START:

			DUMP_printk(" DEV_SELECT_RANGE_START\t "
				    "devid: %02x:%02x.%x flags: %02x\n",
				    PCI_BUS(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = 0;
			alias = false;
			break;
		case IVHD_DEV_ALIAS:

			DUMP_printk(" DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x devid_to: %02x:%02x.%x\n",
				    PCI_BUS(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));
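
			/* the alias target devid is carried in bits 31:8 of ext */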
			devid = e->devid;
			devid_to = e->ext >> 8;
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
			set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
			amd_iommu_alias_table[devid] = devid_to;
			break;
		case IVHD_DEV_ALIAS_RANGE:

			DUMP_printk(" DEV_ALIAS_RANGE\t\t "
				    "devid: %02x:%02x.%x flags: %02x "
				    "devid_to: %02x:%02x.%x\n",
				    PCI_BUS(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid_start = e->devid;
			flags = e->flags;
			devid_to = e->ext >> 8;
			ext_flags = 0;
			alias = true;
			break;
		case IVHD_DEV_EXT_SELECT:

			DUMP_printk(" DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
				    "flags: %02x ext: %08x\n",
				    PCI_BUS(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags,
						e->ext);
			break;
		case IVHD_DEV_EXT_SELECT_RANGE:

			DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: "
				    "%02x:%02x.%x flags: %02x ext: %08x\n",
				    PCI_BUS(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = e->ext;
			alias = false;
			break;
		case IVHD_DEV_RANGE_END:

			DUMP_printk(" DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
				    PCI_BUS(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid));

			devid = e->devid;
			for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
				if (alias) {
					amd_iommu_alias_table[dev_i] = devid_to;
					set_dev_entry_from_acpi(iommu,
						devid_to, flags, ext_flags);
				}
				set_dev_entry_from_acpi(iommu, dev_i,
							flags, ext_flags);
			}
			break;
		default:
			break;
		}

		p += ivhd_entry_length(p);
	}
}

/* Initializes the device->iommu mapping for the driver */
static int __init init_iommu_devices(struct amd_iommu *iommu)
{
	u16 i;

	for (i = iommu->first_device; i <= iommu->last_device; ++i)
		set_iommu_for_device(iommu, i);

	return 0;
}

static void __init free_iommu_one(struct amd_iommu *iommu)
{
	free_command_buffer(iommu);
	free_event_buffer(iommu);
	iommu_unmap_mmio_space(iommu);
}

static void __init free_iommu_all(void)
{
	struct amd_iommu *iommu, *next;

	for_each_iommu_safe(iommu, next) {
		list_del(&iommu->list);
		free_iommu_one(iommu);
		kfree(iommu);
	}
}

/*
 * This function glues the initialization function for one IOMMU
 * together and also allocates the command buffer and programs the
 * hardware. It does NOT enable the IOMMU. This is done afterwards.
 */
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
	spin_lock_init(&iommu->lock);

	/* Add IOMMU to internal data structures */
	list_add_tail(&iommu->list, &amd_iommu_list);
	iommu->index = amd_iommus_present++;

	if (unlikely(iommu->index >= MAX_IOMMUS)) {
		WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n");
		return -ENOSYS;
	}

	/* Index is fine - add IOMMU to the array */
	amd_iommus[iommu->index] = iommu;

	/*
	 * Copy data from ACPI table entry to the iommu struct
	 */
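	/* the low byte of devid is the PCI devfn on bus PCI_BUS(devid) */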
	iommu->dev = pci_get_bus_and_slot(PCI_BUS(h->devid), h->devid & 0xff);
	if (!iommu->dev)
		return 1;

	iommu->cap_ptr = h->cap_ptr;
	iommu->pci_seg = h->pci_seg;
	iommu->mmio_phys = h->mmio_phys;
	iommu->mmio_base = iommu_map_mmio_space(h->mmio_phys);
	if (!iommu->mmio_base)
		return -ENOMEM;

	iommu->cmd_buf = alloc_command_buffer(iommu);
	if (!iommu->cmd_buf)
		return -ENOMEM;

	iommu->evt_buf = alloc_event_buffer(iommu);
	if (!iommu->evt_buf)
		return -ENOMEM;

	iommu->int_enabled = false;

	init_iommu_from_pci(iommu);
	init_iommu_from_acpi(iommu, h);
	init_iommu_devices(iommu);

	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
		amd_iommu_np_cache = true;

	return pci_enable_device(iommu->dev);
}

/*
 * Iterates over all IOMMU entries in the ACPI table, allocates the
 * IOMMU structure and initializes it with init_iommu_one()
 */
static int __init init_iommu_all(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;
	struct amd_iommu *iommu;
	int ret;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		h = (struct ivhd_header *)p;
		switch (*p) {
		case ACPI_IVHD_TYPE:

			DUMP_printk("device: %02x:%02x.%01x cap: %04x "
				    "seg: %d flags: %01x info %04x\n",
				    PCI_BUS(h->devid), PCI_SLOT(h->devid),
				    PCI_FUNC(h->devid), h->cap_ptr,
				    h->pci_seg, h->flags, h->info);
			DUMP_printk(" mmio-addr: %016llx\n",
				    h->mmio_phys);

			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
			if (iommu == NULL)
				return -ENOMEM;
			ret = init_iommu_one(iommu, h);
			if (ret)
				return ret;
			break;
		default:
			break;
		}
		p += h->length;
	}
	WARN_ON(p != end);

	return 0;
}

/****************************************************************************
 *
 * The following functions initialize the MSI interrupts for all IOMMUs
 * in the system. It's a bit challenging because there could be multiple
 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
 * pci_dev.
 *
 ****************************************************************************/

static int iommu_setup_msi(struct amd_iommu *iommu)
{
	int r;

	if (pci_enable_msi(iommu->dev))
		return 1;

	r = request_irq(iommu->dev->irq, amd_iommu_int_handler,
			IRQF_SAMPLE_RANDOM,
			"AMD-Vi",
			NULL);

	if (r) {
		pci_disable_msi(iommu->dev);
		return 1;
	}

	iommu->int_enabled = true;
	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);

	return 0;
}

static int iommu_init_msi(struct amd_iommu *iommu)
{
	if (iommu->int_enabled)
		return 0;

	if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI))
		return iommu_setup_msi(iommu);

	return 1;
}

/****************************************************************************
 *
 * The next functions belong to the third pass of parsing the ACPI
 * table. In this last pass the memory mapping requirements are
 * gathered (like exclusion and unity mapping ranges).
 *
 ****************************************************************************/

static void __init free_unity_maps(void)
{
	struct unity_map_entry *entry, *next;

	list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
		list_del(&entry->list);
		kfree(entry);
	}
}

/* called when we find an exclusion range definition in ACPI */
static int __init init_exclusion_range(struct ivmd_header *m)
{
	int i;

	switch (m->type) {
	case ACPI_IVMD_TYPE:
		set_device_exclusion_range(m->devid, m);
		break;
	case ACPI_IVMD_TYPE_ALL:
		for (i = 0; i <= amd_iommu_last_bdf; ++i)
			set_device_exclusion_range(i, m);
		break;
	case ACPI_IVMD_TYPE_RANGE:
		for (i = m->devid; i <= m->aux; ++i)
			set_device_exclusion_range(i, m);
		break;
	default:
		break;
	}

	return 0;
}

/* called for unity map ACPI definition */
static int __init init_unity_map_range(struct ivmd_header *m)
{
	struct unity_map_entry *e = NULL;
	char *s;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	switch (m->type) {
	default:
		kfree(e);
		return 0;
	case ACPI_IVMD_TYPE:
		s = "IVMD_TYPE\t\t\t";
		e->devid_start = e->devid_end = m->devid;
		break;
	case ACPI_IVMD_TYPE_ALL:
		s = "IVMD_TYPE_ALL\t\t";
		e->devid_start = 0;
		e->devid_end = amd_iommu_last_bdf;
		break;
	case ACPI_IVMD_TYPE_RANGE:
		s = "IVMD_TYPE_RANGE\t\t";
		e->devid_start = m->devid;
		e->devid_end = m->aux;
		break;
	}
	e->address_start = PAGE_ALIGN(m->range_start);
	e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
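	/* bits 2:1 of the IVMD flags carry the IR/IW permissions for the range */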
	e->prot = m->flags >> 1;

	DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
		    " range_start: %016llx range_end: %016llx flags: %x\n", s,
		    PCI_BUS(e->devid_start), PCI_SLOT(e->devid_start),
		    PCI_FUNC(e->devid_start), PCI_BUS(e->devid_end),
		    PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
		    e->address_start, e->address_end, m->flags);

	list_add_tail(&e->list, &amd_iommu_unity_map);

	return 0;
}

/* iterates over all memory definitions we find in the ACPI table */
static int __init init_memory_definitions(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivmd_header *m;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		m = (struct ivmd_header *)p;
		if (m->flags & IVMD_FLAG_EXCL_RANGE)
			init_exclusion_range(m);
		else if (m->flags & IVMD_FLAG_UNITY_MAP)
			init_unity_map_range(m);

		p += m->length;
	}

	return 0;
}

/*
 * Init the device table to not allow DMA access for devices and
 * suppress all page faults
 */
static void init_device_table(void)
{
	u16 devid;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		set_dev_entry_bit(devid, DEV_ENTRY_VALID);
		set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
	}
}

/*
 * This function finally enables all IOMMUs found in the system after
 * they have been initialized
 */
static void enable_iommus(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		iommu_disable(iommu);
		iommu_set_device_table(iommu);
		iommu_enable_command_buffer(iommu);
		iommu_enable_event_buffer(iommu);
		iommu_set_exclusion_range(iommu);
		iommu_init_msi(iommu);
		iommu_enable(iommu);
	}
}

static void disable_iommus(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_disable(iommu);
}

/*
 * Suspend/Resume support
 * disable suspend until real resume implemented
 */

static int amd_iommu_resume(struct sys_device *dev)
{
	/* re-load the hardware */
	enable_iommus();

	/*
	 * we have to flush after the IOMMUs are enabled because a
	 * disabled IOMMU will never execute the commands we send
	 */
	amd_iommu_flush_all_devices();
	amd_iommu_flush_all_domains();

	return 0;
}

static int amd_iommu_suspend(struct sys_device *dev, pm_message_t state)
{
	/* disable IOMMUs to go out of the way for BIOS */
	disable_iommus();

	return 0;
}

static struct sysdev_class amd_iommu_sysdev_class = {
	.name = "amd_iommu",
	.suspend = amd_iommu_suspend,
	.resume = amd_iommu_resume,
};

static struct sys_device device_amd_iommu = {
	.id = 0,
	.cls = &amd_iommu_sysdev_class,
};

/*
 * This is the core init function for AMD IOMMU hardware in the system.
 * This function is called from the generic x86 DMA layer initialization
 * code.
 *
 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
 * three times:
 *
 *	1 pass) Find the highest PCI device id the driver has to handle.
 *		Upon this information the size of the data structures is
 *		determined that needs to be allocated.
 *
 *	2 pass) Initialize the data structures just allocated with the
 *		information in the ACPI table about available AMD IOMMUs
 *		in the system. It also maps the PCI devices in the
 *		system to specific IOMMUs
 *
 *	3 pass) After the basic data structures are allocated and
 *		initialized we update them with information about memory
 *		remapping requirements parsed out of the ACPI table in
 *		this last pass.
 *
 * After that the hardware is initialized and ready to go. In the last
 * step we do some Linux specific things like registering the driver in
 * the dma_ops interface and initializing the suspend/resume support
 * functions. Finally it prints some information about AMD IOMMUs and
 * the driver state and enables the hardware.
 */
static int __init amd_iommu_init(void)
{
	int i, ret = 0;

	/*
	 * First parse ACPI tables to find the largest Bus/Dev/Func
	 * we need to handle. Upon this information the shared data
	 * structures for the IOMMUs in the system will be allocated
	 */
	if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0)
		return -ENODEV;

	dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
	alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
	rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);

	ret = -ENOMEM;

	/* Device table - directly used by all IOMMUs */
	amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
				      get_order(dev_table_size));
	if (amd_iommu_dev_table == NULL)
		goto out;

	/*
	 * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
	 * IOMMU sees for that device
	 */
	amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
			get_order(alias_table_size));
	if (amd_iommu_alias_table == NULL)
		goto free;

	/* IOMMU rlookup table - find the IOMMU for a specific device */
	amd_iommu_rlookup_table = (void *)__get_free_pages(
			GFP_KERNEL | __GFP_ZERO,
			get_order(rlookup_table_size));
	if (amd_iommu_rlookup_table == NULL)
		goto free;

	amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
					    GFP_KERNEL | __GFP_ZERO,
					    get_order(MAX_DOMAIN_ID/8));
	if (amd_iommu_pd_alloc_bitmap == NULL)
		goto free;

	/* init the device table */
	init_device_table();

	/*
	 * let all alias entries point to themselves
	 */
	for (i = 0; i <= amd_iommu_last_bdf; ++i)
		amd_iommu_alias_table[i] = i;

	/*
	 * never allocate domain 0 because it's used as the non-allocated and
	 * error value placeholder
	 */
	amd_iommu_pd_alloc_bitmap[0] = 1;

	spin_lock_init(&amd_iommu_pd_lock);

	/*
	 * now the data structures are allocated and basically initialized
	 * start the real acpi table scan
	 */
	ret = -ENODEV;
	if (acpi_table_parse("IVRS", init_iommu_all) != 0)
		goto free;

	if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
		goto free;

	ret = sysdev_class_register(&amd_iommu_sysdev_class);
	if (ret)
		goto free;

	ret = sysdev_register(&device_amd_iommu);
	if (ret)
		goto free;

	ret = amd_iommu_init_devices();
	if (ret)
		goto free;

	if (iommu_pass_through)
		ret = amd_iommu_init_passthrough();
	else
		ret = amd_iommu_init_dma_ops();
	if (ret)
		goto free;

	amd_iommu_init_notifier();

	enable_iommus();

	if (iommu_pass_through)
		goto out;

	if (amd_iommu_unmap_flush)
		printk(KERN_INFO "AMD-Vi: IO/TLB flush on unmap enabled\n");
	else
		printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n");

	x86_platform.iommu_shutdown = disable_iommus;
out:
	return ret;

free:
	amd_iommu_uninit_devices();

	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
		   get_order(MAX_DOMAIN_ID/8));

	free_pages((unsigned long)amd_iommu_rlookup_table,
		   get_order(rlookup_table_size));

	free_pages((unsigned long)amd_iommu_alias_table,
		   get_order(alias_table_size));

	free_pages((unsigned long)amd_iommu_dev_table,
		   get_order(dev_table_size));

	free_iommu_all();

	free_unity_maps();

	goto out;
}

/****************************************************************************
 *
 * Early detect code. This code runs at IOMMU detection time in the DMA
 * layer. It just looks if there is an IVRS ACPI table to detect AMD
 * IOMMUs
 *
 ****************************************************************************/
static int __init early_amd_iommu_detect(struct acpi_table_header *table)
{
	return 0;
}

void __init amd_iommu_detect(void)
{
	if (no_iommu || (iommu_detected && !gart_iommu_aperture))
		return;

	if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) {
		iommu_detected = 1;
		amd_iommu_detected = 1;
		x86_init.iommu.iommu_init = amd_iommu_init;

		/* Make sure ACS will be enabled */
		pci_request_acs();
	}
}

/****************************************************************************
 *
 * Parsing functions for the AMD IOMMU specific kernel command line
 * options.
 *
 ****************************************************************************/

static int __init parse_amd_iommu_dump(char *str)
{
	amd_iommu_dump = true;

	return 1;
}

static int __init parse_amd_iommu_options(char *str)
{
	for (; *str; ++str) {
		if (strncmp(str, "fullflush", 9) == 0)
			amd_iommu_unmap_flush = true;
	}

	return 1;
}

__setup("amd_iommu_dump", parse_amd_iommu_dump);
__setup("amd_iommu=", parse_amd_iommu_options);