/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author: Allen M. Kay <allen.m.kay@intel.com>
 * Author: Weidong Han <weidong.han@intel.com>
 * Author: Ben-Ami Yassour <benami@il.ibm.com>
 */

#include <linux/list.h>
#include <linux/kvm_host.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>

static bool allow_unsafe_assigned_interrupts;
module_param_named(allow_unsafe_assigned_interrupts,
		   allow_unsafe_assigned_interrupts, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(allow_unsafe_assigned_interrupts,
 "Enable device assignment on platforms without interrupt remapping support.");
static int kvm_iommu_unmap_memslots(struct kvm *kvm);
static void kvm_iommu_put_pages(struct kvm *kvm,
				gfn_t base_gfn, unsigned long npages);

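/*
 * Pin all pages backing [gfn, gfn + size) in the given memslot and return
 * the pfn of the first page.  Only the first translation is error-checked;
 * the remaining lookups exist solely to elevate the page reference counts,
 * so that the range can later be unmapped and unpinned in 4kb steps.
 */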
static pfn_t kvm_pin_pages(struct kvm *kvm, struct kvm_memory_slot *slot,
			   gfn_t gfn, unsigned long size)
{
	gfn_t end_gfn;
	pfn_t pfn;

	pfn     = gfn_to_pfn_memslot(kvm, slot, gfn);
	end_gfn = gfn + (size >> PAGE_SHIFT);
	gfn    += 1;

	if (is_error_pfn(pfn))
		return pfn;

	while (gfn < end_gfn)
		gfn_to_pfn_memslot(kvm, slot, gfn++);

	return pfn;
}

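/*
 * Map every page of the memslot into the VM's IOMMU domain, using the
 * largest page size that the host backing, the memslot bounds and the gfn
 * alignment allow.  Pages are pinned before mapping; on failure, everything
 * mapped so far is unmapped and unpinned again.
 */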
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	gfn_t gfn, end_gfn;
	pfn_t pfn;
	int r = 0;
	struct iommu_domain *domain = kvm->arch.iommu_domain;
	int flags;

	/* check if iommu exists and in use */
	if (!domain)
		return 0;

	gfn     = slot->base_gfn;
	end_gfn = gfn + slot->npages;

	flags = IOMMU_READ | IOMMU_WRITE;
	if (kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY)
		flags |= IOMMU_CACHE;

	while (gfn < end_gfn) {
		unsigned long page_size;

		/* Check if already mapped */
		if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) {
			gfn += 1;
			continue;
		}

		/* Get the page size we could use to map */
		page_size = kvm_host_page_size(kvm, gfn);

		/* Make sure the page_size does not exceed the memslot */
		while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn)
			page_size >>= 1;

		/* Make sure gfn is aligned to the page size we want to map */
		while ((gfn << PAGE_SHIFT) & (page_size - 1))
			page_size >>= 1;

		/*
		 * Pin all pages we are about to map in memory. This is
		 * important because we unmap and unpin in 4kb steps later.
		 */
		pfn = kvm_pin_pages(kvm, slot, gfn, page_size);
		if (is_error_pfn(pfn)) {
			gfn += 1;
			continue;
		}

		/* Map into IO address space */
		r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
			      get_order(page_size), flags);
		if (r) {
			printk(KERN_ERR "kvm_iommu_map_address: "
			       "iommu failed to map pfn=%llx\n", pfn);
			goto unmap_pages;
		}

		gfn += page_size >> PAGE_SHIFT;
	}

	return 0;

unmap_pages:
	kvm_iommu_put_pages(kvm, slot->base_gfn, gfn);
	return r;
}

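/* Map all memslots into the IOMMU domain, under SRCU protection. */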
static int kvm_iommu_map_memslots(struct kvm *kvm)
{
	int i, idx, r = 0;
	struct kvm_memslots *slots;

	idx = srcu_read_lock(&kvm->srcu);
	slots = kvm_memslots(kvm);

	for (i = 0; i < slots->nmemslots; i++) {
		r = kvm_iommu_map_pages(kvm, &slots->memslots[i]);
		if (r)
			break;
	}
	srcu_read_unlock(&kvm->srcu, idx);

	return r;
}

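/*
 * Attach an assigned PCI device to the VM's IOMMU domain.  If the domain
 * turns out to be cache coherent, the memslots are remapped with
 * IOMMU_CACHE set.
 */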
int kvm_assign_device(struct kvm *kvm,
		      struct kvm_assigned_dev_kernel *assigned_dev)
{
	struct pci_dev *pdev = NULL;
	struct iommu_domain *domain = kvm->arch.iommu_domain;
	int r, last_flags;

	/* check if iommu exists and in use */
	if (!domain)
		return 0;

	pdev = assigned_dev->dev;
	if (pdev == NULL)
		return -ENODEV;

	r = iommu_attach_device(domain, &pdev->dev);
	if (r) {
		printk(KERN_ERR "assign device %x:%x:%x.%x failed\n",
			pci_domain_nr(pdev->bus),
			pdev->bus->number,
			PCI_SLOT(pdev->devfn),
			PCI_FUNC(pdev->devfn));
		return r;
	}

	last_flags = kvm->arch.iommu_flags;
	if (iommu_domain_has_cap(kvm->arch.iommu_domain,
				 IOMMU_CAP_CACHE_COHERENCY))
		kvm->arch.iommu_flags |= KVM_IOMMU_CACHE_COHERENCY;

	/* Check if we need to update the IOMMU page table for guest memory */
	if ((last_flags ^ kvm->arch.iommu_flags) ==
			KVM_IOMMU_CACHE_COHERENCY) {
		kvm_iommu_unmap_memslots(kvm);
		r = kvm_iommu_map_memslots(kvm);
		if (r)
			goto out_unmap;
	}

	printk(KERN_DEBUG "assign device %x:%x:%x.%x\n",
		assigned_dev->host_segnr,
		assigned_dev->host_busnr,
		PCI_SLOT(assigned_dev->host_devfn),
		PCI_FUNC(assigned_dev->host_devfn));

	return 0;
out_unmap:
	kvm_iommu_unmap_memslots(kvm);
	return r;
}

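/* Detach an assigned PCI device from the VM's IOMMU domain. */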
int kvm_deassign_device(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *assigned_dev)
{
	struct iommu_domain *domain = kvm->arch.iommu_domain;
	struct pci_dev *pdev = NULL;

	/* check if iommu exists and in use */
	if (!domain)
		return 0;

	pdev = assigned_dev->dev;
	if (pdev == NULL)
		return -ENODEV;

	iommu_detach_device(domain, &pdev->dev);

	printk(KERN_DEBUG "deassign device %x:%x:%x.%x\n",
		assigned_dev->host_segnr,
		assigned_dev->host_busnr,
		PCI_SLOT(assigned_dev->host_devfn),
		PCI_FUNC(assigned_dev->host_devfn));

	return 0;
}

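/*
 * Allocate the VM's IOMMU domain and map all guest memory into it.  Unless
 * allow_unsafe_assigned_interrupts is set, this fails with -EPERM on
 * hardware without interrupt remapping, since an assigned device could
 * otherwise be used to inject arbitrary interrupts into the host.
 */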
int kvm_iommu_map_guest(struct kvm *kvm)
{
	int r;

	if (!iommu_present(&pci_bus_type)) {
		printk(KERN_ERR "%s: iommu not found\n", __func__);
		return -ENODEV;
	}

	kvm->arch.iommu_domain = iommu_domain_alloc(&pci_bus_type);
	if (!kvm->arch.iommu_domain)
		return -ENOMEM;

	if (!allow_unsafe_assigned_interrupts &&
	    !iommu_domain_has_cap(kvm->arch.iommu_domain,
				  IOMMU_CAP_INTR_REMAP)) {
		printk(KERN_WARNING "%s: No interrupt remapping support, "
		       "disallowing device assignment. "
		       "Re-enable with \"allow_unsafe_assigned_interrupts=1\" "
		       "module option.\n", __func__);
		iommu_domain_free(kvm->arch.iommu_domain);
		kvm->arch.iommu_domain = NULL;
		return -EPERM;
	}

	r = kvm_iommu_map_memslots(kvm);
	if (r)
		goto out_unmap;

	return 0;

out_unmap:
	kvm_iommu_unmap_memslots(kvm);
	return r;
}

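/* Release the references taken by kvm_pin_pages(), one 4kb page at a time. */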
static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
{
	unsigned long i;

	for (i = 0; i < npages; ++i)
		kvm_release_pfn_clean(pfn + i);
}

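/*
 * Unmap [base_gfn, base_gfn + npages) from the IOMMU domain and unpin the
 * backing pages.  iommu_unmap() returns the order it actually unmapped, so
 * large mappings are torn down in a single step.
 */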
static void kvm_iommu_put_pages(struct kvm *kvm,
				gfn_t base_gfn, unsigned long npages)
{
	struct iommu_domain *domain;
	gfn_t end_gfn, gfn;
	pfn_t pfn;
	u64 phys;

	domain  = kvm->arch.iommu_domain;
	end_gfn = base_gfn + npages;
	gfn     = base_gfn;

	/* check if iommu exists and in use */
	if (!domain)
		return;

	while (gfn < end_gfn) {
		unsigned long unmap_pages;
		int order;

		/* Get physical address */
		phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));
		pfn  = phys >> PAGE_SHIFT;

		/* Unmap address from IO address space */
		order       = iommu_unmap(domain, gfn_to_gpa(gfn), 0);
		unmap_pages = 1ULL << order;

		/* Unpin all pages we just unmapped to not leak any memory */
		kvm_unpin_pages(kvm, pfn, unmap_pages);

		gfn += unmap_pages;
	}
}

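/* Unmap and unpin all memslots, under SRCU protection. */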
static int kvm_iommu_unmap_memslots(struct kvm *kvm)
{
	int i, idx;
	struct kvm_memslots *slots;

	idx = srcu_read_lock(&kvm->srcu);
	slots = kvm_memslots(kvm);

	for (i = 0; i < slots->nmemslots; i++) {
		kvm_iommu_put_pages(kvm, slots->memslots[i].base_gfn,
				    slots->memslots[i].npages);
	}
	srcu_read_unlock(&kvm->srcu, idx);

	return 0;
}

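/* Tear down all IOMMU mappings for the VM and free its domain. */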
int kvm_iommu_unmap_guest(struct kvm *kvm)
{
	struct iommu_domain *domain = kvm->arch.iommu_domain;

	/* check if iommu exists and in use */
	if (!domain)
		return 0;

	kvm_iommu_unmap_memslots(kvm);
	iommu_domain_free(domain);
	return 0;
}