/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Copyright IBM Corporation, 2008
 * Author: Allen M. Kay <allen.m.kay@intel.com>
 * Author: Weidong Han <weidong.han@intel.com>
 * Author: Ben-Ami Yassour <benami@il.ibm.com>
 */
# include <linux/list.h>
# include <linux/kvm_host.h>
# include <linux/pci.h>
# include <linux/dmar.h>
2008-12-03 16:43:34 +03:00
# include <linux/iommu.h>
2008-09-14 04:48:28 +04:00
# include <linux/intel-iommu.h>
static int kvm_iommu_unmap_memslots ( struct kvm * kvm ) ;
static void kvm_iommu_put_pages ( struct kvm * kvm ,
gfn_t base_gfn , unsigned long npages ) ;
/*
 * Map guest pages [base_gfn, base_gfn + npages) into the VM's IOMMU
 * domain so an assigned device can DMA to guest memory.
 *
 * Pages that already have an IOMMU translation are skipped.  When the
 * domain was attached with a cache-coherent (snooping) IOMMU, mappings
 * are created with IOMMU_CACHE in addition to read/write.
 *
 * Returns 0 on success (or when no IOMMU domain is in use), otherwise
 * the negative error from iommu_map_range(); on failure every page
 * mapped by this call is unmapped again before returning.
 */
int kvm_iommu_map_pages(struct kvm *kvm,
			gfn_t base_gfn, unsigned long npages)
{
	gfn_t gfn = base_gfn;
	pfn_t pfn;
	int i, r = 0;
	struct iommu_domain *domain = kvm->arch.iommu_domain;
	int flags;

	/* check if iommu exists and in use */
	if (!domain)
		return 0;

	flags = IOMMU_READ | IOMMU_WRITE;
	if (kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY)
		flags |= IOMMU_CACHE;

	for (i = 0; i < npages; i++) {
		/* check if already mapped */
		if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn)))
			continue;

		pfn = gfn_to_pfn(kvm, gfn);
		r = iommu_map_range(domain,
				    gfn_to_gpa(gfn),
				    pfn_to_hpa(pfn),
				    PAGE_SIZE, flags);
		if (r) {
			printk(KERN_ERR "kvm_iommu_map_address:"
			       "iommu failed to map pfn=%lx\n", pfn);
			/*
			 * Fix: gfn_to_pfn() took a reference on this page
			 * but it was never mapped, so kvm_iommu_put_pages()
			 * below will not release it.  Drop it here to avoid
			 * leaking the page reference.
			 */
			kvm_release_pfn_clean(pfn);
			goto unmap_pages;
		}
		gfn++;
	}
	return 0;

unmap_pages:
	/* Tear down the i pages successfully mapped before the failure. */
	kvm_iommu_put_pages(kvm, base_gfn, i);
	return r;
}
static int kvm_iommu_map_memslots ( struct kvm * kvm )
{
2009-01-03 18:37:53 +03:00
int i , r = 0 ;
2008-09-14 04:48:28 +04:00
for ( i = 0 ; i < kvm - > nmemslots ; i + + ) {
r = kvm_iommu_map_pages ( kvm , kvm - > memslots [ i ] . base_gfn ,
kvm - > memslots [ i ] . npages ) ;
if ( r )
break ;
}
2009-02-05 21:23:46 +03:00
2008-09-14 04:48:28 +04:00
return r ;
}
2008-12-02 16:03:39 +03:00
int kvm_assign_device ( struct kvm * kvm ,
struct kvm_assigned_dev_kernel * assigned_dev )
2008-09-14 04:48:28 +04:00
{
struct pci_dev * pdev = NULL ;
2008-12-03 16:43:34 +03:00
struct iommu_domain * domain = kvm - > arch . iommu_domain ;
2009-04-27 16:35:43 +04:00
int r , last_flags ;
2008-09-14 04:48:28 +04:00
2008-12-02 16:03:39 +03:00
/* check if iommu exists and in use */
if ( ! domain )
return 0 ;
pdev = assigned_dev - > dev ;
if ( pdev = = NULL )
2008-09-14 04:48:28 +04:00
return - ENODEV ;
2008-12-02 16:03:39 +03:00
2008-12-03 16:43:34 +03:00
r = iommu_attach_device ( domain , & pdev - > dev ) ;
2008-12-02 16:03:39 +03:00
if ( r ) {
printk ( KERN_ERR " assign device %x:%x.%x failed " ,
pdev - > bus - > number ,
PCI_SLOT ( pdev - > devfn ) ,
PCI_FUNC ( pdev - > devfn ) ) ;
return r ;
2008-09-14 04:48:28 +04:00
}
2009-04-27 16:35:43 +04:00
last_flags = kvm - > arch . iommu_flags ;
if ( iommu_domain_has_cap ( kvm - > arch . iommu_domain ,
IOMMU_CAP_CACHE_COHERENCY ) )
kvm - > arch . iommu_flags | = KVM_IOMMU_CACHE_COHERENCY ;
/* Check if need to update IOMMU page table for guest memory */
if ( ( last_flags ^ kvm - > arch . iommu_flags ) = =
KVM_IOMMU_CACHE_COHERENCY ) {
kvm_iommu_unmap_memslots ( kvm ) ;
r = kvm_iommu_map_memslots ( kvm ) ;
if ( r )
goto out_unmap ;
}
2008-12-02 16:03:39 +03:00
printk ( KERN_DEBUG " assign device: host bdf = %x:%x:%x \n " ,
assigned_dev - > host_busnr ,
PCI_SLOT ( assigned_dev - > host_devfn ) ,
PCI_FUNC ( assigned_dev - > host_devfn ) ) ;
2008-09-14 04:48:28 +04:00
2008-12-02 16:03:39 +03:00
return 0 ;
2009-04-27 16:35:43 +04:00
out_unmap :
kvm_iommu_unmap_memslots ( kvm ) ;
return r ;
2008-12-02 16:03:39 +03:00
}
2008-09-14 04:48:28 +04:00
2008-12-02 16:24:23 +03:00
int kvm_deassign_device ( struct kvm * kvm ,
struct kvm_assigned_dev_kernel * assigned_dev )
{
2008-12-03 16:43:34 +03:00
struct iommu_domain * domain = kvm - > arch . iommu_domain ;
2008-12-02 16:24:23 +03:00
struct pci_dev * pdev = NULL ;
/* check if iommu exists and in use */
if ( ! domain )
return 0 ;
pdev = assigned_dev - > dev ;
if ( pdev = = NULL )
return - ENODEV ;
2008-12-03 16:43:34 +03:00
iommu_detach_device ( domain , & pdev - > dev ) ;
2008-12-02 16:24:23 +03:00
printk ( KERN_DEBUG " deassign device: host bdf = %x:%x:%x \n " ,
assigned_dev - > host_busnr ,
PCI_SLOT ( assigned_dev - > host_devfn ) ,
PCI_FUNC ( assigned_dev - > host_devfn ) ) ;
return 0 ;
}
2008-12-02 16:03:39 +03:00
int kvm_iommu_map_guest ( struct kvm * kvm )
{
int r ;
2008-12-03 16:43:34 +03:00
if ( ! iommu_found ( ) ) {
printk ( KERN_ERR " %s: iommu not found \n " , __func__ ) ;
2008-09-14 04:48:28 +04:00
return - ENODEV ;
}
2008-12-03 16:43:34 +03:00
kvm - > arch . iommu_domain = iommu_domain_alloc ( ) ;
if ( ! kvm - > arch . iommu_domain )
2008-12-02 16:03:39 +03:00
return - ENOMEM ;
2008-09-14 04:48:28 +04:00
r = kvm_iommu_map_memslots ( kvm ) ;
if ( r )
goto out_unmap ;
return 0 ;
out_unmap :
kvm_iommu_unmap_memslots ( kvm ) ;
return r ;
}
static void kvm_iommu_put_pages ( struct kvm * kvm ,
2008-12-02 16:03:39 +03:00
gfn_t base_gfn , unsigned long npages )
2008-09-14 04:48:28 +04:00
{
gfn_t gfn = base_gfn ;
pfn_t pfn ;
2008-12-03 16:43:34 +03:00
struct iommu_domain * domain = kvm - > arch . iommu_domain ;
2008-12-02 16:03:39 +03:00
unsigned long i ;
u64 phys ;
/* check if iommu exists and in use */
if ( ! domain )
return ;
2008-09-14 04:48:28 +04:00
for ( i = 0 ; i < npages ; i + + ) {
2008-12-03 16:43:34 +03:00
phys = iommu_iova_to_phys ( domain , gfn_to_gpa ( gfn ) ) ;
2008-12-02 16:03:39 +03:00
pfn = phys > > PAGE_SHIFT ;
2008-09-14 04:48:28 +04:00
kvm_release_pfn_clean ( pfn ) ;
gfn + + ;
}
2008-12-02 16:03:39 +03:00
2008-12-03 16:43:34 +03:00
iommu_unmap_range ( domain , gfn_to_gpa ( base_gfn ) , PAGE_SIZE * npages ) ;
2008-09-14 04:48:28 +04:00
}
static int kvm_iommu_unmap_memslots ( struct kvm * kvm )
{
int i ;
2009-02-05 21:23:46 +03:00
2008-09-14 04:48:28 +04:00
for ( i = 0 ; i < kvm - > nmemslots ; i + + ) {
kvm_iommu_put_pages ( kvm , kvm - > memslots [ i ] . base_gfn ,
kvm - > memslots [ i ] . npages ) ;
}
return 0 ;
}
int kvm_iommu_unmap_guest ( struct kvm * kvm )
{
2008-12-03 16:43:34 +03:00
struct iommu_domain * domain = kvm - > arch . iommu_domain ;
2008-09-14 04:48:28 +04:00
/* check if iommu exists and in use */
if ( ! domain )
return 0 ;
kvm_iommu_unmap_memslots ( kvm ) ;
2008-12-03 16:43:34 +03:00
iommu_domain_free ( domain ) ;
2008-09-14 04:48:28 +04:00
return 0 ;
}