/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */
#ifndef _INTEL_IOMMU_H_
#define _INTEL_IOMMU_H_

#include <linux/types.h>
#include <linux/msi.h>
#include <linux/sysdev.h>
#include "iova.h"
#include <linux/io.h>
#include <asm/cacheflush.h>
#include "dma_remapping.h"
/*
 * Intel IOMMU register specification per version 1.0 public spec.
 */
#define DMAR_VER_REG      0x0  /* Arch version supported by this IOMMU */
#define DMAR_CAP_REG      0x8  /* Hardware supported capabilities */
#define DMAR_ECAP_REG     0x10 /* Extended capabilities supported */
#define DMAR_GCMD_REG     0x18 /* Global command register */
#define DMAR_GSTS_REG     0x1c /* Global status register */
#define DMAR_RTADDR_REG   0x20 /* Root entry table */
#define DMAR_CCMD_REG     0x28 /* Context command reg */
#define DMAR_FSTS_REG     0x34 /* Fault Status register */
#define DMAR_FECTL_REG    0x38 /* Fault control register */
#define DMAR_FEDATA_REG   0x3c /* Fault event interrupt data register */
#define DMAR_FEADDR_REG   0x40 /* Fault event interrupt addr register */
#define DMAR_FEUADDR_REG  0x44 /* Upper address register */
#define DMAR_AFLOG_REG    0x58 /* Advanced Fault control */
#define DMAR_PMEN_REG     0x64 /* Enable Protected Memory Region */
#define DMAR_PLMBASE_REG  0x68 /* PMRR Low addr */
#define DMAR_PLMLIMIT_REG 0x6c /* PMRR low limit */
#define DMAR_PHMBASE_REG  0x70 /* pmrr high base addr */
#define DMAR_PHMLIMIT_REG 0x78 /* pmrr high limit */
#define DMAR_IQH_REG      0x80 /* Invalidation queue head register */
#define DMAR_IQT_REG      0x88 /* Invalidation queue tail register */
#define DMAR_IQA_REG      0x90 /* Invalidation queue addr register */
#define DMAR_ICS_REG      0x98 /* Invalidation complete status register */
#define DMAR_IRTA_REG     0xb8 /* Interrupt remapping table addr register */
#define OFFSET_STRIDE (9)
/*
#define dmar_readl(dmar, reg) readl(dmar + reg)
#define dmar_readq(dmar, reg) ({ \
		u32 lo, hi; \
		lo = readl(dmar + reg); \
		hi = readl(dmar + reg + 4); \
		(((u64) hi) << 32) + lo; })
*/
static inline u64 dmar_readq(void __iomem *addr)
{
	u32 lo, hi;
	lo = readl(addr);
	hi = readl(addr + 4);
	return (((u64) hi) << 32) + lo;
}

static inline void dmar_writeq(void __iomem *addr, u64 val)
{
	writel((u32)val, addr);
	writel((u32)(val >> 32), addr + 4);
}
#define DMAR_VER_MAJOR(v)	(((v) & 0xf0) >> 4)
#define DMAR_VER_MINOR(v)	((v) & 0x0f)
/*
 * Decoding Capability Register
 */
#define cap_read_drain(c)	(((c) >> 55) & 1)
#define cap_write_drain(c)	(((c) >> 54) & 1)
#define cap_max_amask_val(c)	(((c) >> 48) & 0x3f)
#define cap_num_fault_regs(c)	((((c) >> 40) & 0xff) + 1)
#define cap_pgsel_inv(c)	(((c) >> 39) & 1)
#define cap_super_page_val(c)	(((c) >> 34) & 0xf)
#define cap_super_offset(c)	(((find_first_bit(&cap_super_page_val(c), 4)) \
					* OFFSET_STRIDE) + 21)
#define cap_fault_reg_offset(c)	((((c) >> 24) & 0x3ff) * 16)
#define cap_max_fault_reg_offset(c) \
	(cap_fault_reg_offset(c) + cap_num_fault_regs(c) * 16)
#define cap_zlr(c)		(((c) >> 22) & 1)
#define cap_isoch(c)		(((c) >> 23) & 1)
#define cap_mgaw(c)		((((c) >> 16) & 0x3f) + 1)
#define cap_sagaw(c)		(((c) >> 8) & 0x1f)
#define cap_caching_mode(c)	(((c) >> 7) & 1)
#define cap_phmr(c)		(((c) >> 6) & 1)
#define cap_plmr(c)		(((c) >> 5) & 1)
#define cap_rwbf(c)		(((c) >> 4) & 1)
#define cap_afl(c)		(((c) >> 3) & 1)
#define cap_ndoms(c)		(((unsigned long)1) << (4 + 2 * ((c) & 0x7)))
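
/*
 * Decoding sketch (illustrative, not part of the original header): given a
 * raw capability value read via dmar_readq(), the macros above extract the
 * individual fields, for example:
 *
 *	u64 cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
 *	int nfr = cap_num_fault_regs(cap);		// fault recording registers
 *	unsigned long ndomains = cap_ndoms(cap);	// supported domain IDs
 */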
/*
 * Extended Capability Register
 */
#define ecap_niotlb_iunits(e)	((((e) >> 24) & 0xff) + 1)
#define ecap_iotlb_offset(e)	((((e) >> 8) & 0x3ff) * 16)
#define ecap_max_iotlb_offset(e) \
	(ecap_iotlb_offset(e) + ecap_niotlb_iunits(e) * 16)
#define ecap_coherent(e)	((e) & 0x1)
#define ecap_qis(e)		((e) & 0x2)
#define ecap_eim_support(e)	((e >> 4) & 0x1)
#define ecap_ir_support(e)	((e >> 3) & 0x1)
#define ecap_max_handle_mask(e) ((e >> 20) & 0xf)
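
/*
 * Usage sketch (illustrative, not part of the original header): the extended
 * capability register locates the per-unit IOTLB invalidation registers, so
 * a hypothetical caller could compute their mapped address as:
 *
 *	void __iomem *iotlb_reg = iommu->reg + ecap_iotlb_offset(iommu->ecap);
 */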
/* IOTLB_REG */
#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
#define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
#define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
#define DMA_TLB_IIRG(type) ((type >> 60) & 7)
#define DMA_TLB_IAIG(val) (((val) >> 57) & 7)
#define DMA_TLB_READ_DRAIN (((u64)1) << 49)
#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
#define DMA_TLB_DID(id)	(((u64)((id) & 0xffff)) << 32)
#define DMA_TLB_IVT (((u64)1) << 63)
#define DMA_TLB_IH_NONLEAF (((u64)1) << 6)
#define DMA_TLB_MAX_SIZE (0x3f)
/* INVALID_DESC */
#define DMA_ID_TLB_GLOBAL_FLUSH	(((u64)1) << 3)
#define DMA_ID_TLB_DSI_FLUSH	(((u64)2) << 3)
#define DMA_ID_TLB_PSI_FLUSH	(((u64)3) << 3)
#define DMA_ID_TLB_READ_DRAIN	(((u64)1) << 7)
#define DMA_ID_TLB_WRITE_DRAIN	(((u64)1) << 6)
#define DMA_ID_TLB_DID(id)	(((u64)((id & 0xffff) << 16)))
#define DMA_ID_TLB_IH_NONLEAF	(((u64)1) << 6)
#define DMA_ID_TLB_ADDR(addr)	(addr)
#define DMA_ID_TLB_ADDR_MASK(mask)	(mask)
/* PMEN_REG */
#define DMA_PMEN_EPM (((u32)1)<<31)
#define DMA_PMEN_PRS (((u32)1)<<0)
/* GCMD_REG */
#define DMA_GCMD_TE (((u32)1) << 31)
#define DMA_GCMD_SRTP (((u32)1) << 30)
#define DMA_GCMD_SFL (((u32)1) << 29)
#define DMA_GCMD_EAFL (((u32)1) << 28)
#define DMA_GCMD_WBF (((u32)1) << 27)
#define DMA_GCMD_QIE (((u32)1) << 26)
#define DMA_GCMD_SIRTP (((u32)1) << 24)
#define DMA_GCMD_IRE (((u32) 1) << 25)
/* GSTS_REG */
#define DMA_GSTS_TES (((u32)1) << 31)
#define DMA_GSTS_RTPS (((u32)1) << 30)
#define DMA_GSTS_FLS (((u32)1) << 29)
#define DMA_GSTS_AFLS (((u32)1) << 28)
#define DMA_GSTS_WBFS (((u32)1) << 27)
#define DMA_GSTS_QIES (((u32)1) << 26)
#define DMA_GSTS_IRTPS (((u32)1) << 24)
#define DMA_GSTS_IRES (((u32)1) << 25)
/* CCMD_REG */
#define DMA_CCMD_ICC (((u64)1) << 63)
#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
#define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61)
#define DMA_CCMD_DEVICE_INVL (((u64)3) << 61)
#define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32)
#define DMA_CCMD_MASK_NOBIT 0
#define DMA_CCMD_MASK_1BIT 1
#define DMA_CCMD_MASK_2BIT 2
#define DMA_CCMD_MASK_3BIT 3
#define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16)
#define DMA_CCMD_DID(d) ((u64)((d) & 0xffff))

/* FECTL_REG */
#define DMA_FECTL_IM (((u32)1) << 31)

/* FSTS_REG */
#define DMA_FSTS_PPF ((u32)2)
#define DMA_FSTS_PFO ((u32)1)
#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)

/* FRCD_REG, 32 bits access */
#define DMA_FRCD_F (((u32)1) << 31)
#define dma_frcd_type(d) ((d >> 30) & 1)
#define dma_frcd_fault_reason(c) (c & 0xff)
#define dma_frcd_source_id(c) (c & 0xffff)
#define dma_frcd_page_addr(d) (d & (((u64)-1) << 12)) /* low 64 bit */
#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) /* 10sec */

#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
{\
	cycles_t start_time = get_cycles();\
	while (1) {\
		sts = op(iommu->reg + offset);\
		if (cond)\
			break;\
		if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\
			panic("DMAR hardware is malfunctioning\n");\
		cpu_relax();\
	}\
}
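
/*
 * Usage sketch (illustrative, not part of the original header): callers poll
 * a register with IOMMU_WAIT_OP() until the expected status bit shows up,
 * for example waiting for a translation-enable command to take effect:
 *
 *	u32 sts;
 *
 *	writel(iommu->gcmd | DMA_GCMD_TE, iommu->reg + DMAR_GCMD_REG);
 *	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_TES), sts);
 */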
#define QI_LENGTH	256	/* queue length */

enum {
	QI_FREE,
	QI_IN_USE,
	QI_DONE
};

#define QI_CC_TYPE		0x1
#define QI_IOTLB_TYPE		0x2
#define QI_DIOTLB_TYPE		0x3
#define QI_IEC_TYPE		0x4
#define QI_IWD_TYPE		0x5

#define QI_IEC_SELECTIVE	(((u64)1) << 4)
#define QI_IEC_IIDEX(idx)	(((u64)(idx & 0xffff) << 32))
#define QI_IEC_IM(m)		(((u64)(m & 0x1f) << 27))

#define QI_IWD_STATUS_DATA(d)	(((u64)d) << 32)
#define QI_IWD_STATUS_WRITE	(((u64)1) << 5)

struct qi_desc {
	u64 low, high;
};
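
/*
 * Descriptor sketch (illustrative, not part of the original header): an
 * invalidation wait descriptor could be assembled from the QI_IWD_* fields
 * above roughly as follows, with "status" standing in for a u32 slot that
 * the hardware writes back to:
 *
 *	struct qi_desc wait;
 *
 *	wait.low = QI_IWD_STATUS_DATA(QI_DONE) | QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
 *	wait.high = virt_to_phys(&status);
 */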
struct q_inval {
	spinlock_t	q_lock;
	struct qi_desc	*desc;		/* invalidation queue */
	int		*desc_status;	/* desc status */
	int		free_head;	/* first free entry */
	int		free_tail;	/* last free entry */
	int		free_cnt;
};
#ifdef CONFIG_INTR_REMAP
/* 1MB - maximum possible interrupt remapping table size */
#define INTR_REMAP_PAGE_ORDER	8
#define INTR_REMAP_TABLE_REG_SIZE	0xf

#define INTR_REMAP_TABLE_ENTRIES	65536
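
/*
 * Sizing note (illustrative): with 16-byte interrupt remap table entries,
 * 65536 entries take 65536 * 16 = 1MB, which matches an allocation of
 * 2^INTR_REMAP_PAGE_ORDER (256) 4KB pages.
 */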
struct ir_table {
	struct irte *base;
};
#endif
struct intel_iommu {
	void __iomem	*reg; /* Pointer to hardware regs, virtual addr */
	u64		cap;
	u64		ecap;
	int		seg;
	u32		gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
	spinlock_t	register_lock; /* protect register handling */
	int		seq_id;	/* sequence id of the iommu */

#ifdef CONFIG_DMAR
	unsigned long	*domain_ids; /* bitmap of domains */
	struct dmar_domain **domains; /* ptr to domains */
	spinlock_t	lock; /* protect context, domain ids */
	struct root_entry *root_entry; /* virtual address */

	unsigned int irq;
	unsigned char name[7];    /* Device Name */
	struct msi_msg saved_msg;
	struct sys_device sysdev;
#endif
	struct q_inval  *qi;		/* Queued invalidation info */
#ifdef CONFIG_INTR_REMAP
	struct ir_table *ir_table;	/* Interrupt remapping info */
#endif
};
static inline void __iommu_flush_cache(
	struct intel_iommu *iommu, void *addr, int size)
{
	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(addr, size);
}
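
/*
 * Usage sketch (illustrative, not part of the original header): after the
 * driver updates an in-memory structure that the IOMMU walks (a context or
 * page-table entry, say), a non-coherent unit needs the corresponding cache
 * lines flushed before the hardware can observe the update:
 *
 *	__iommu_flush_cache(iommu, entry, sizeof(*entry));
 *
 * ("entry" here is a hypothetical pointer to the just-written table entry.)
 */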
extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);

extern int alloc_iommu(struct dmar_drhd_unit *drhd);
extern void free_iommu(struct intel_iommu *iommu);
extern int dmar_enable_qi(struct intel_iommu *iommu);
extern void qi_global_iec(struct intel_iommu *iommu);

extern void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
#endif