// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2015 Intel Corporation.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/intel-iommu.h>
#include <linux/mmu_notifier.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/intel-svm.h>
#include <linux/rculist.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/dmar.h>
#include <linux/interrupt.h>
#include <linux/mm_types.h>
#include <linux/ioasid.h>
#include <asm/page.h>
#include <asm/fpu/api.h>

#include "pasid.h"
static irqreturn_t prq_event_thread(int irq, void *d);
static void intel_svm_drain_prq(struct device *dev, u32 pasid);

#define PRQ_ORDER	0
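
/*
 * Note: with PRQ_ORDER 0 the page request queue occupies a single 4KiB
 * page; each hardware page request descriptor (struct page_req_dsc below)
 * is 32 bytes, so the ring holds 128 entries (see PRQ_RING_MASK).
 */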
int intel_svm_enable_prq(struct intel_iommu *iommu)
{
	struct page *pages;
	int irq, ret;

	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
	if (!pages) {
		pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
			iommu->name);
		return -ENOMEM;
	}
	iommu->prq = page_address(pages);

	irq = dmar_alloc_hwirq(DMAR_UNITS_SUPPORTED + iommu->seq_id, iommu->node, iommu);
	if (irq <= 0) {
		pr_err("IOMMU: %s: Failed to create IRQ vector for page request queue\n",
		       iommu->name);
		ret = -EINVAL;
	err:
		free_pages((unsigned long)iommu->prq, PRQ_ORDER);
		iommu->prq = NULL;
		return ret;
	}
	iommu->pr_irq = irq;

	snprintf(iommu->prq_name, sizeof(iommu->prq_name), "dmar%d-prq", iommu->seq_id);

	ret = request_threaded_irq(irq, NULL, prq_event_thread, IRQF_ONESHOT,
				   iommu->prq_name, iommu);
	if (ret) {
		pr_err("IOMMU: %s: Failed to request IRQ for page request queue\n",
		       iommu->name);
		dmar_free_hwirq(irq);
		iommu->pr_irq = 0;
		goto err;
	}
	dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQA_REG, virt_to_phys(iommu->prq) | PRQ_ORDER);

	init_completion(&iommu->prq_complete);

	return 0;
}

int intel_svm_finish_prq(struct intel_iommu *iommu)
{
	dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL);

	if (iommu->pr_irq) {
		free_irq(iommu->pr_irq, iommu);
		dmar_free_hwirq(iommu->pr_irq);
		iommu->pr_irq = 0;
	}

	free_pages((unsigned long)iommu->prq, PRQ_ORDER);
	iommu->prq = NULL;

	return 0;
}

static inline bool intel_svm_capable(struct intel_iommu *iommu)
{
	return iommu->flags & VTD_FLAG_SVM_CAPABLE;
}
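
/*
 * SVM shares the CPU page tables with the device, so the IOMMU's
 * first-level paging capabilities must match what the CPU may hand it:
 * 1GB pages if the CPU uses them, and 5-level paging if LA57 is enabled.
 * Otherwise SVM is left disabled for this IOMMU.
 */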
void intel_svm_check(struct intel_iommu *iommu)
{
	if (!pasid_supported(iommu))
		return;

	if (cpu_feature_enabled(X86_FEATURE_GBPAGES) &&
	    !cap_fl1gp_support(iommu->cap)) {
		pr_err("%s SVM disabled, incompatible 1GB page capability\n",
		       iommu->name);
		return;
	}

	if (cpu_feature_enabled(X86_FEATURE_LA57) &&
	    !cap_5lp_support(iommu->cap)) {
		pr_err("%s SVM disabled, incompatible paging mode\n",
		       iommu->name);
		return;
	}

	iommu->flags |= VTD_FLAG_SVM_CAPABLE;
}
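
/*
 * Invalidate the IOTLB (and, if ATS is enabled, the device TLB) for one
 * device bound to @svm. A @pages value of -1 requests a non-global,
 * PASID-wide flush; otherwise a page-selective flush of @pages pages
 * starting at @address is issued.
 */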
static void intel_flush_svm_range_dev(struct intel_svm *svm, struct intel_svm_dev *sdev,
				      unsigned long address, unsigned long pages, int ih)
{
	struct qi_desc desc;

	if (pages == -1) {
		desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
			QI_EIOTLB_DID(sdev->did) |
			QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
			QI_EIOTLB_TYPE;
		desc.qw1 = 0;
	} else {
		int mask = ilog2(__roundup_pow_of_two(pages));

		desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
				QI_EIOTLB_DID(sdev->did) |
				QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
				QI_EIOTLB_TYPE;
		desc.qw1 = QI_EIOTLB_ADDR(address) |
				QI_EIOTLB_IH(ih) |
				QI_EIOTLB_AM(mask);
	}
	desc.qw2 = 0;
	desc.qw3 = 0;
	qi_submit_sync(svm->iommu, &desc, 1, 0);

	if (sdev->dev_iotlb) {
		desc.qw0 = QI_DEV_EIOTLB_PASID(svm->pasid) |
				QI_DEV_EIOTLB_SID(sdev->sid) |
				QI_DEV_EIOTLB_QDEP(sdev->qdep) |
				QI_DEIOTLB_TYPE;
		if (pages == -1) {
			desc.qw1 = QI_DEV_EIOTLB_ADDR(-1ULL >> 1) |
					QI_DEV_EIOTLB_SIZE;
		} else if (pages > 1) {
			/* The least significant zero bit indicates the size. So,
			 * for example, an "address" value of 0x12345f000 will
			 * flush from 0x123440000 to 0x12347ffff (256KiB). */
			unsigned long last = address + ((unsigned long)(pages - 1) << VTD_PAGE_SHIFT);
			unsigned long mask = __rounddown_pow_of_two(address ^ last);

			desc.qw1 = QI_DEV_EIOTLB_ADDR((address & ~mask) |
					(mask - 1)) | QI_DEV_EIOTLB_SIZE;
		} else {
			desc.qw1 = QI_DEV_EIOTLB_ADDR(address);
		}
		desc.qw2 = 0;
		desc.qw3 = 0;
		qi_submit_sync(svm->iommu, &desc, 1, 0);
	}
}

static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
				  unsigned long pages, int ih)
{
	struct intel_svm_dev *sdev;

	rcu_read_lock();
	list_for_each_entry_rcu(sdev, &svm->devs, list)
		intel_flush_svm_range_dev(svm, sdev, address, pages, ih);
	rcu_read_unlock();
}

/* Pages have been freed at this point */
static void intel_invalidate_range(struct mmu_notifier *mn,
				   struct mm_struct *mm,
				   unsigned long start, unsigned long end)
{
	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);

	intel_flush_svm_range(svm, start,
			      (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0);
}

static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
	struct intel_svm_dev *sdev;

	/* This might end up being called from exit_mmap(), *before* the page
	 * tables are cleared. And __mmu_notifier_release() will delete us from
	 * the list of notifiers so that our invalidate_range() callback doesn't
	 * get called when the page tables are cleared. So we need to protect
	 * against hardware accessing those page tables.
	 *
	 * We do it by clearing the entry in the PASID table and then flushing
	 * the IOTLB and the PASID table caches. This might upset hardware;
	 * perhaps we'll want to point the PASID to a dummy PGD (like the zero
	 * page) so that we end up taking a fault that the hardware really
	 * *has* to handle gracefully without affecting other processes.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(sdev, &svm->devs, list)
		intel_pasid_tear_down_entry(svm->iommu, sdev->dev,
					    svm->pasid, true);
	rcu_read_unlock();
}

static const struct mmu_notifier_ops intel_mmuops = {
	.release = intel_mm_release,
	.invalidate_range = intel_invalidate_range,
};

static DEFINE_MUTEX(pasid_mutex);
static LIST_HEAD(global_svm_list);
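
/*
 * Iterate over the devices bound to @svm, but only run the loop body for
 * the entry whose ->dev matches @d; the "{} else" guard skips all other
 * entries while keeping the macro usable as a for-loop.
 */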
#define for_each_svm_dev(sdev, svm, d)			\
	list_for_each_entry((sdev), &(svm)->devs, list)	\
		if ((d) != (sdev)->dev) {} else

static int pasid_to_svm_sdev(struct device *dev, unsigned int pasid,
			     struct intel_svm **rsvm,
			     struct intel_svm_dev **rsdev)
{
	struct intel_svm_dev *d, *sdev = NULL;
	struct intel_svm *svm;

	/* The caller should hold the pasid_mutex lock */
	if (WARN_ON(!mutex_is_locked(&pasid_mutex)))
		return -EINVAL;

	if (pasid == INVALID_IOASID || pasid >= PASID_MAX)
		return -EINVAL;

	svm = ioasid_find(NULL, pasid, NULL);
	if (IS_ERR(svm))
		return PTR_ERR(svm);

	if (!svm)
		goto out;

	/*
	 * If we found an svm for the PASID, there must be at least one device
	 * bound to it.
	 */
	if (WARN_ON(list_empty(&svm->devs)))
		return -EINVAL;

	rcu_read_lock();
	list_for_each_entry_rcu(d, &svm->devs, list) {
		if (d->dev == dev) {
			sdev = d;
			break;
		}
	}
	rcu_read_unlock();

out:
	*rsvm = svm;
	*rsdev = sdev;

	return 0;
}

int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
			  struct iommu_gpasid_bind_data *data)
{
	struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
	struct intel_svm_dev *sdev = NULL;
	struct dmar_domain *dmar_domain;
	struct device_domain_info *info;
	struct intel_svm *svm = NULL;
	unsigned long iflags;
	int ret = 0;

	if (WARN_ON(!iommu) || !data)
		return -EINVAL;

	if (data->format != IOMMU_PASID_FORMAT_INTEL_VTD)
		return -EINVAL;

	/* IOMMU core ensures argsz is more than the start of the union */
	if (data->argsz < offsetofend(struct iommu_gpasid_bind_data, vendor.vtd))
		return -EINVAL;

	/* Make sure no undefined flags are used in vendor data */
	if (data->vendor.vtd.flags & ~(IOMMU_SVA_VTD_GPASID_LAST - 1))
		return -EINVAL;

	if (!dev_is_pci(dev))
		return -ENOTSUPP;

	/* VT-d supports devices with full 20 bit PASIDs only */
	if (pci_max_pasids(to_pci_dev(dev)) != PASID_MAX)
		return -EINVAL;

	/*
	 * We only check the host PASID range; we have no knowledge to check
	 * the guest PASID range.
	 */
	if (data->hpasid <= 0 || data->hpasid >= PASID_MAX)
		return -EINVAL;

	info = get_domain_info(dev);
	if (!info)
		return -EINVAL;

	dmar_domain = to_dmar_domain(domain);

	mutex_lock(&pasid_mutex);
	ret = pasid_to_svm_sdev(dev, data->hpasid, &svm, &sdev);
	if (ret)
		goto out;

	if (sdev) {
		/*
		 * Do not allow multiple bindings of the same device-PASID since
		 * there is only one SL page table per PASID. We may revisit
		 * this once sharing a PGD across domains is supported.
		 */
		dev_warn_ratelimited(dev, "Already bound with PASID %u\n",
				     svm->pasid);
		ret = -EBUSY;
		goto out;
	}

	if (!svm) {
		/* We come here when the PASID has never been bound to a device. */
		svm = kzalloc(sizeof(*svm), GFP_KERNEL);
		if (!svm) {
			ret = -ENOMEM;
			goto out;
		}
		/* REVISIT: the upper layer/VFIO can track the host process that
		 * binds the PASID. ioasid_set = mm might be sufficient for vfio
		 * to check pasid VMM ownership. We can drop the following line
		 * once VFIO and IOASID set check is in place.
		 */
		svm->mm = get_task_mm(current);
		svm->pasid = data->hpasid;
		if (data->flags & IOMMU_SVA_GPASID_VAL) {
			svm->gpasid = data->gpasid;
			svm->flags |= SVM_FLAG_GUEST_PASID;
		}
		ioasid_set_data(data->hpasid, svm);
		INIT_LIST_HEAD_RCU(&svm->devs);
		mmput(svm->mm);
	}

	sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
	if (!sdev) {
		ret = -ENOMEM;
		goto out;
	}
	sdev->dev = dev;
	sdev->sid = PCI_DEVID(info->bus, info->devfn);

	/* Only count users if device has aux domains */
	if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
		sdev->users = 1;

	/* Set up device context entry for PASID if not enabled already */
	ret = intel_iommu_enable_pasid(iommu, sdev->dev);
	if (ret) {
		dev_err_ratelimited(dev, "Failed to enable PASID capability\n");
		kfree(sdev);
		goto out;
	}

	/*
	 * The PASID table is per device for better security. Therefore, for
	 * each bind of a new device, even with an existing PASID, we need to
	 * call the nested mode setup function here.
	 */
	spin_lock_irqsave(&iommu->lock, iflags);
	ret = intel_pasid_setup_nested(iommu, dev,
				       (pgd_t *)(uintptr_t)data->gpgd,
				       data->hpasid, &data->vendor.vtd, dmar_domain,
				       data->addr_width);
	spin_unlock_irqrestore(&iommu->lock, iflags);
	if (ret) {
		dev_err_ratelimited(dev, "Failed to set up PASID %llu in nested mode, Err %d\n",
				    data->hpasid, ret);
		/*
		 * The PASID entry should be in a cleared state if nested mode
		 * setup failed. So we only need to clear the IOASID tracking
		 * data so that the free call will succeed.
		 */
		kfree(sdev);
		goto out;
	}

	svm->flags |= SVM_FLAG_GUEST_MODE;

	init_rcu_head(&sdev->rcu);
	list_add_rcu(&sdev->list, &svm->devs);
out:
	if (!IS_ERR_OR_NULL(svm) && list_empty(&svm->devs)) {
		ioasid_set_data(data->hpasid, NULL);
		kfree(svm);
	}

	mutex_unlock(&pasid_mutex);
	return ret;
}

int intel_svm_unbind_gpasid(struct device *dev, u32 pasid)
{
	struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
	struct intel_svm_dev *sdev;
	struct intel_svm *svm;
	int ret;

	if (WARN_ON(!iommu))
		return -EINVAL;

	mutex_lock(&pasid_mutex);
	ret = pasid_to_svm_sdev(dev, pasid, &svm, &sdev);
	if (ret)
		goto out;

	if (sdev) {
		if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
			sdev->users--;
		if (!sdev->users) {
			list_del_rcu(&sdev->list);
			intel_pasid_tear_down_entry(iommu, dev,
						    svm->pasid, false);
			intel_svm_drain_prq(dev, svm->pasid);
			kfree_rcu(sdev, rcu);

			if (list_empty(&svm->devs)) {
				/*
				 * We do not free the IOASID here because the
				 * IOMMU driver did not allocate it. Unlike
				 * native SVM, the IOASID for guest use was
				 * allocated prior to the bind call. In any
				 * case, if the free call comes before the
				 * unbind, the IOMMU driver will get notified
				 * and perform the cleanup.
				 */
				ioasid_set_data(pasid, NULL);
				kfree(svm);
			}
		}
	}
out:
	mutex_unlock(&pasid_mutex);
	return ret;
}
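
/*
 * Publish a new PASID value in mm->pasid and kick update_pasid() on every
 * CPU currently running this mm's tasks, so the per-CPU PASID state is
 * refreshed under mm->context.lock.
 */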
static void _load_pasid(void *unused)
{
	update_pasid();
}

static void load_pasid(struct mm_struct *mm, u32 pasid)
{
	mutex_lock(&mm->context.lock);

	/* Synchronize with READ_ONCE in update_pasid(). */
	smp_store_release(&mm->pasid, pasid);

	/* Update PASID MSR on all CPUs running the mm's tasks. */
	on_each_cpu_mask(mm_cpumask(mm), _load_pasid, NULL, true);

	mutex_unlock(&mm->context.lock);
}

/* Caller must hold pasid_mutex, mm reference */
static int
intel_svm_bind_mm(struct device *dev, unsigned int flags,
		  struct svm_dev_ops *ops,
		  struct mm_struct *mm, struct intel_svm_dev **sd)
{
	struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
	struct device_domain_info *info;
	struct intel_svm_dev *sdev;
	struct intel_svm *svm = NULL;
	unsigned long iflags;
	int pasid_max;
	int ret;

	if (!iommu || dmar_disabled)
		return -EINVAL;

	if (!intel_svm_capable(iommu))
		return -ENOTSUPP;

	if (dev_is_pci(dev)) {
		pasid_max = pci_max_pasids(to_pci_dev(dev));
		if (pasid_max < 0)
			return -EINVAL;
	} else
		pasid_max = 1 << 20;

	/* Binding a supervisor PASID requires mm == NULL */
	if (flags & SVM_FLAG_SUPERVISOR_MODE) {
		if (!ecap_srs(iommu->ecap) || mm) {
			pr_err("Supervisor PASID with user provided mm.\n");
			return -EINVAL;
		}
	}

	if (!(flags & SVM_FLAG_PRIVATE_PASID)) {
		struct intel_svm *t;

		list_for_each_entry(t, &global_svm_list, list) {
			if (t->mm != mm || (t->flags & SVM_FLAG_PRIVATE_PASID))
				continue;

			svm = t;
			if (svm->pasid >= pasid_max) {
				dev_warn(dev,
					 "Limited PASID width. Cannot use existing PASID %d\n",
					 svm->pasid);
				ret = -ENOSPC;
				goto out;
			}

			/* Find the matching device in svm list */
			for_each_svm_dev(sdev, svm, dev) {
				if (sdev->ops != ops) {
					ret = -EBUSY;
					goto out;
				}
				sdev->users++;
				goto success;
			}

			break;
		}
	}

	sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
	if (!sdev) {
		ret = -ENOMEM;
		goto out;
	}
	sdev->dev = dev;

	ret = intel_iommu_enable_pasid(iommu, dev);
	if (ret) {
		kfree(sdev);
		goto out;
	}

	info = get_domain_info(dev);
	sdev->did = FLPT_DEFAULT_DID;
	sdev->sid = PCI_DEVID(info->bus, info->devfn);
	if (info->ats_enabled) {
		sdev->dev_iotlb = 1;
		sdev->qdep = info->ats_qdep;
		if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
			sdev->qdep = 0;
	}

	/* Finish the setup now we know we're keeping it */
	sdev->users = 1;
	sdev->ops = ops;
	init_rcu_head(&sdev->rcu);

	if (!svm) {
		svm = kzalloc(sizeof(*svm), GFP_KERNEL);
		if (!svm) {
			ret = -ENOMEM;
			kfree(sdev);
			goto out;
		}
		svm->iommu = iommu;

		if (pasid_max > intel_pasid_max_id)
			pasid_max = intel_pasid_max_id;

		/* Do not use PASID 0, reserved for RID to PASID */
		svm->pasid = ioasid_alloc(NULL, PASID_MIN,
					  pasid_max - 1, svm);
		if (svm->pasid == INVALID_IOASID) {
			kfree(svm);
			kfree(sdev);
			ret = -ENOSPC;
			goto out;
		}
		svm->notifier.ops = &intel_mmuops;
		svm->mm = mm;
		svm->flags = flags;
		INIT_LIST_HEAD_RCU(&svm->devs);
		INIT_LIST_HEAD(&svm->list);
		ret = -ENOMEM;
		if (mm) {
			ret = mmu_notifier_register(&svm->notifier, mm);
			if (ret) {
				ioasid_put(svm->pasid);
				kfree(svm);
				kfree(sdev);
				goto out;
			}
		}

		spin_lock_irqsave(&iommu->lock, iflags);
		ret = intel_pasid_setup_first_level(iommu, dev,
				mm ? mm->pgd : init_mm.pgd,
				svm->pasid, FLPT_DEFAULT_DID,
				(mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
				(cpu_feature_enabled(X86_FEATURE_LA57) ?
				 PASID_FLAG_FL5LP : 0));
		spin_unlock_irqrestore(&iommu->lock, iflags);
		if (ret) {
			if (mm)
				mmu_notifier_unregister(&svm->notifier, mm);
			ioasid_put(svm->pasid);
			kfree(svm);
			kfree(sdev);
			goto out;
		}

		list_add_tail(&svm->list, &global_svm_list);
		if (mm) {
			/* The newly allocated pasid is loaded to the mm. */
			load_pasid(mm, svm->pasid);
		}
	} else {
		/*
		 * Binding a new device with an existing PASID, so we need to
		 * set up the PASID entry.
		 */
		spin_lock_irqsave(&iommu->lock, iflags);
		ret = intel_pasid_setup_first_level(iommu, dev,
				mm ? mm->pgd : init_mm.pgd,
				svm->pasid, FLPT_DEFAULT_DID,
				(mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
				(cpu_feature_enabled(X86_FEATURE_LA57) ?
				 PASID_FLAG_FL5LP : 0));
		spin_unlock_irqrestore(&iommu->lock, iflags);
		if (ret) {
			kfree(sdev);
			goto out;
		}
	}
	list_add_rcu(&sdev->list, &svm->devs);
success:
	sdev->pasid = svm->pasid;
	sdev->sva.dev = dev;
	if (sd)
		*sd = sdev;
	ret = 0;
out:
	return ret;
}

/* Caller must hold pasid_mutex */
static int intel_svm_unbind_mm(struct device *dev, u32 pasid)
{
	struct intel_svm_dev *sdev;
	struct intel_iommu *iommu;
	struct intel_svm *svm;
	int ret = -EINVAL;

	iommu = device_to_iommu(dev, NULL, NULL);
	if (!iommu)
		goto out;

	ret = pasid_to_svm_sdev(dev, pasid, &svm, &sdev);
	if (ret)
		goto out;

	if (sdev) {
		sdev->users--;
		if (!sdev->users) {
			list_del_rcu(&sdev->list);
			/* Flush the PASID cache and IOTLB for this device.
			 * Note that we do depend on the hardware *not* using
			 * the PASID any more. Just as we depend on other
			 * devices never using PASIDs that they have no right
			 * to use. We have a *shared* PASID table, because it's
			 * large and has to be physically contiguous. So it's
			 * hard to be as defensive as we might like. */
			intel_pasid_tear_down_entry(iommu, dev,
						    svm->pasid, false);
			intel_svm_drain_prq(dev, svm->pasid);
			kfree_rcu(sdev, rcu);

			if (list_empty(&svm->devs)) {
				ioasid_put(svm->pasid);
				if (svm->mm) {
					mmu_notifier_unregister(&svm->notifier, svm->mm);
					/* Clear mm's pasid. */
					load_pasid(svm->mm, PASID_DISABLED);
				}
				list_del(&svm->list);
				/* We mandate that no page faults may be outstanding
				 * for the PASID when intel_svm_unbind_mm() is called.
				 * If that is not obeyed, subtle errors will happen.
				 * Let's make them less subtle... */
				memset(svm, 0x6b, sizeof(*svm));
				kfree(svm);
			}
		}
	}
out:
	return ret;
}
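
/*
 * The hardware writes one 32-byte descriptor per page request into the
 * ring allocated in intel_svm_enable_prq(); prq_event_thread() below
 * walks that ring and services each request.
 */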
/* Page request queue descriptor */
struct page_req_dsc {
	union {
		struct {
			u64 type:8;
			u64 pasid_present:1;
			u64 priv_data_present:1;
			u64 rsvd:6;
			u64 rid:16;
			u64 pasid:20;
			u64 exe_req:1;
			u64 pm_req:1;
			u64 rsvd2:10;
		};
		u64 qw_0;
	};
	union {
		struct {
			u64 rd_req:1;
			u64 wr_req:1;
			u64 lpig:1;
			u64 prg_index:9;
			u64 addr:52;
		};
		u64 qw_1;
	};
	u64 priv_data[2];
};
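
/*
 * The ring is (0x1000 << PRQ_ORDER) bytes long and each descriptor is
 * 0x20 bytes, so masking a head/tail offset with PRQ_RING_MASK both
 * aligns it to a descriptor boundary and wraps it around the ring.
 */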
#define PRQ_RING_MASK	((0x1000 << PRQ_ORDER) - 0x20)

static bool access_error(struct vm_area_struct *vma, struct page_req_dsc *req)
{
	unsigned long requested = 0;

	if (req->exe_req)
		requested |= VM_EXEC;

	if (req->rd_req)
		requested |= VM_READ;

	if (req->wr_req)
		requested |= VM_WRITE;

	return (requested & ~vma->vm_flags) != 0;
}

static bool is_canonical_address(u64 addr)
{
	int shift = 64 - (__VIRTUAL_MASK_SHIFT + 1);
	long saddr = (long)addr;

	return (((saddr << shift) >> shift) == saddr);
}

/**
 * intel_svm_drain_prq - Drain page requests and responses for a pasid
 * @dev: target device
 * @pasid: pasid for draining
 *
 * Drain all pending page requests and responses related to @pasid in both
 * software and hardware. This is supposed to be called after the device
 * driver has stopped DMA, the pasid entry has been cleared, and both IOTLB
 * and DevTLB have been invalidated.
 *
 * It waits until all pending page requests for @pasid in the page fault
 * queue are completed by the prq handling thread. Then follow the steps
 * described in VT-d spec CH7.10 to drain all page requests and page
 * responses pending in the hardware.
 */
static void intel_svm_drain_prq(struct device *dev, u32 pasid)
{
	struct device_domain_info *info;
	struct dmar_domain *domain;
	struct intel_iommu *iommu;
	struct qi_desc desc[3];
	struct pci_dev *pdev;
	int head, tail;
	u16 sid, did;
	int qdep;

	info = get_domain_info(dev);
	if (WARN_ON(!info || !dev_is_pci(dev)))
		return;

	if (!info->pri_enabled)
		return;

	iommu = info->iommu;
	domain = info->domain;
	pdev = to_pci_dev(dev);
	sid = PCI_DEVID(info->bus, info->devfn);
	did = domain->iommu_did[iommu->seq_id];
	qdep = pci_ats_queue_depth(pdev);

	/*
	 * Check and wait until all pending page requests in the queue are
	 * handled by the prq handling thread.
	 */
prq_retry:
	reinit_completion(&iommu->prq_complete);
	tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
	head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
	while (head != tail) {
		struct page_req_dsc *req;

		req = &iommu->prq[head / sizeof(*req)];
		if (!req->pasid_present || req->pasid != pasid) {
			head = (head + sizeof(*req)) & PRQ_RING_MASK;
			continue;
		}

		wait_for_completion(&iommu->prq_complete);
		goto prq_retry;
	}

	/*
	 * Perform steps described in VT-d spec CH7.10 to drain page
	 * requests and responses in hardware.
	 */
	memset(desc, 0, sizeof(desc));
	desc[0].qw0 = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_FENCE |
			QI_IWD_TYPE;
	desc[1].qw0 = QI_EIOTLB_PASID(pasid) |
			QI_EIOTLB_DID(did) |
			QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
			QI_EIOTLB_TYPE;
	desc[2].qw0 = QI_DEV_EIOTLB_PASID(pasid) |
			QI_DEV_EIOTLB_SID(sid) |
			QI_DEV_EIOTLB_QDEP(qdep) |
			QI_DEIOTLB_TYPE |
			QI_DEV_IOTLB_PFSID(info->pfsid);
qi_retry:
	reinit_completion(&iommu->prq_complete);
	qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN);
	if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
		wait_for_completion(&iommu->prq_complete);
		goto qi_retry;
	}
}

static int prq_to_iommu_prot(struct page_req_dsc *req)
{
	int prot = 0;

	if (req->rd_req)
		prot |= IOMMU_FAULT_PERM_READ;
	if (req->wr_req)
		prot |= IOMMU_FAULT_PERM_WRITE;
	if (req->exe_req)
		prot |= IOMMU_FAULT_PERM_EXEC;
	if (req->pm_req)
		prot |= IOMMU_FAULT_PERM_PRIV;

	return prot;
}

static int
intel_svm_prq_report(struct device *dev, struct page_req_dsc *desc)
{
	struct iommu_fault_event event;

	if (!dev || !dev_is_pci(dev))
		return -ENODEV;

	/* Fill in event data for device specific processing */
	memset(&event, 0, sizeof(struct iommu_fault_event));
	event.fault.type = IOMMU_FAULT_PAGE_REQ;
	event.fault.prm.addr = desc->addr;
	event.fault.prm.pasid = desc->pasid;
	event.fault.prm.grpid = desc->prg_index;
	event.fault.prm.perm = prq_to_iommu_prot(desc);

	if (desc->lpig)
		event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
	if (desc->pasid_present) {
		event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
		event.fault.prm.flags |= IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
	}
	if (desc->priv_data_present) {
		/*
		 * Set the last-page-in-group bit if private data is present:
		 * a page response is then required, just as it is for LPIG.
		 * iommu_report_device_fault() doesn't understand this vendor
		 * specific requirement, so we set last_page as a workaround.
		 */
		event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
		event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA;
		memcpy(event.fault.prm.private_data, desc->priv_data,
		       sizeof(desc->priv_data));
	}

	return iommu_report_device_fault(dev, &event);
}
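
/*
 * Threaded handler for the page request queue interrupt: walk the ring
 * from head to tail, look up the intel_svm for each request's PASID,
 * service the fault against that mm (or forward it via
 * intel_svm_prq_report() for guest-mode PASIDs), and post a page group
 * response where one is required.
 */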
static irqreturn_t prq_event_thread(int irq, void *d)
{
	struct intel_svm_dev *sdev = NULL;
	struct intel_iommu *iommu = d;
	struct intel_svm *svm = NULL;
	int head, tail, handled = 0;

	/* Clear PPR bit before reading head/tail registers, to
	 * ensure that we get a new interrupt if needed. */
	writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);

	tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
	head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
	while (head != tail) {
		struct vm_area_struct *vma;
		struct page_req_dsc *req;
		struct qi_desc resp;
		int result;
		vm_fault_t ret;
		u64 address;

		handled = 1;
		req = &iommu->prq[head / sizeof(*req)];
		result = QI_RESP_FAILURE;
		address = (u64)req->addr << VTD_PAGE_SHIFT;
		if (!req->pasid_present) {
			pr_err("%s: Page request without PASID: %08llx %08llx\n",
			       iommu->name, ((unsigned long long *)req)[0],
			       ((unsigned long long *)req)[1]);
			goto no_pasid;
		}

		if (!svm || svm->pasid != req->pasid) {
			rcu_read_lock();
			svm = ioasid_find(NULL, req->pasid, NULL);
			/* It *can't* go away, because the driver is not permitted
			 * to unbind the mm while any page faults are outstanding.
			 * So we only need RCU to protect the internal idr code. */
			rcu_read_unlock();
			if (IS_ERR_OR_NULL(svm)) {
				pr_err("%s: Page request for invalid PASID %d: %08llx %08llx\n",
				       iommu->name, req->pasid, ((unsigned long long *)req)[0],
				       ((unsigned long long *)req)[1]);
				goto no_pasid;
			}
		}

		if (!sdev || sdev->sid != req->rid) {
			struct intel_svm_dev *t;

			sdev = NULL;
			rcu_read_lock();
			list_for_each_entry_rcu(t, &svm->devs, list) {
				if (t->sid == req->rid) {
					sdev = t;
					break;
				}
			}
			rcu_read_unlock();
		}

		result = QI_RESP_INVALID;
		/* Since we're using init_mm.pgd directly, we should never take
		 * any faults on kernel addresses. */
		if (!svm->mm)
			goto bad_req;

		/* If address is not canonical, return invalid response */
		if (!is_canonical_address(address))
			goto bad_req;

		/*
		 * If prq is to be handled outside iommu driver via receiver of
		 * the fault notifiers, we skip the page response here.
		 */
		if (svm->flags & SVM_FLAG_GUEST_MODE) {
			if (sdev && !intel_svm_prq_report(sdev->dev, req))
				goto prq_advance;
			else
				goto bad_req;
		}

		/* If the mm is already defunct, don't handle faults. */
		if (!mmget_not_zero(svm->mm))
			goto bad_req;

		mmap_read_lock(svm->mm);
		vma = find_extend_vma(svm->mm, address);
		if (!vma || address < vma->vm_start)
			goto invalid;

		if (access_error(vma, req))
			goto invalid;

		ret = handle_mm_fault(vma, address,
				      req->wr_req ? FAULT_FLAG_WRITE : 0,
				      NULL);
		if (ret & VM_FAULT_ERROR)
			goto invalid;

		result = QI_RESP_SUCCESS;
invalid:
		mmap_read_unlock(svm->mm);
		mmput(svm->mm);
bad_req:
		WARN_ON(!sdev);
		if (sdev && sdev->ops && sdev->ops->fault_cb) {
			int rwxp = (req->rd_req << 3) | (req->wr_req << 2) |
				(req->exe_req << 1) | (req->pm_req);
			sdev->ops->fault_cb(sdev->dev, req->pasid, req->addr,
					    req->priv_data, rwxp, result);
		}
		/* We get here in the error case where the PASID lookup failed,
		   and these can be NULL. Do not use them below this point! */
		sdev = NULL;
		svm = NULL;
no_pasid:
		if (req->lpig || req->priv_data_present) {
			/*
			 * Per VT-d spec. v3.0 ch7.7, system software must
			 * respond with page group response if private data
			 * is present (PDP) or last page in group (LPIG) bit
			 * is set. This is an additional VT-d feature beyond
			 * PCI ATS spec.
			 */
			resp.qw0 = QI_PGRP_PASID(req->pasid) |
				QI_PGRP_DID(req->rid) |
				QI_PGRP_PASID_P(req->pasid_present) |
				QI_PGRP_PDP(req->priv_data_present) |
				QI_PGRP_RESP_CODE(result) |
				QI_PGRP_RESP_TYPE;
			resp.qw1 = QI_PGRP_IDX(req->prg_index) |
				QI_PGRP_LPIG(req->lpig);
			resp.qw2 = 0;
			resp.qw3 = 0;
			if (req->priv_data_present)
				memcpy(&resp.qw2, req->priv_data,
				       sizeof(req->priv_data));
			qi_submit_sync(iommu, &resp, 1, 0);
		}
prq_advance:
		head = (head + sizeof(*req)) & PRQ_RING_MASK;
	}

	dmar_writeq(iommu->reg + DMAR_PQH_REG, tail);

	/*
	 * Clear the page request overflow bit and wake up all threads that
	 * are waiting for the completion of this handling.
	 */
	if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO)
		writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG);

	if (!completion_done(&iommu->prq_complete))
		complete(&iommu->prq_complete);

	return IRQ_RETVAL(handled);
}
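
/*
 * Wrappers exposed to the generic SVA bind/unbind path: each takes
 * pasid_mutex and forwards to intel_svm_bind_mm()/intel_svm_unbind_mm()
 * above.
 */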
#define to_intel_svm_dev(handle) container_of(handle, struct intel_svm_dev, sva)

struct iommu_sva *
intel_svm_bind(struct device *dev, struct mm_struct *mm, void *drvdata)
{
	struct iommu_sva *sva = ERR_PTR(-EINVAL);
	struct intel_svm_dev *sdev = NULL;
	unsigned int flags = 0;
	int ret;

	/*
	 * TODO: Consolidate with generic iommu-sva bind after it is merged.
	 * It will require shared SVM data structures, i.e. combine io_mm
	 * and intel_svm etc.
	 */
	if (drvdata)
		flags = *(unsigned int *)drvdata;
	mutex_lock(&pasid_mutex);
	ret = intel_svm_bind_mm(dev, flags, NULL, mm, &sdev);
	if (ret)
		sva = ERR_PTR(ret);
	else if (sdev)
		sva = &sdev->sva;
	else
		WARN(!sdev, "SVM bind succeeded with no sdev!\n");

	mutex_unlock(&pasid_mutex);

	return sva;
}

void intel_svm_unbind(struct iommu_sva *sva)
{
	struct intel_svm_dev *sdev;

	mutex_lock(&pasid_mutex);
	sdev = to_intel_svm_dev(sva);
	intel_svm_unbind_mm(sdev->dev, sdev->pasid);
	mutex_unlock(&pasid_mutex);
}

u32 intel_svm_get_pasid(struct iommu_sva *sva)
{
	struct intel_svm_dev *sdev;
	u32 pasid;

	mutex_lock(&pasid_mutex);
	sdev = to_intel_svm_dev(sva);
	pasid = sdev->pasid;
	mutex_unlock(&pasid_mutex);

	return pasid;
}
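
/*
 * Complete a page request that was earlier reported to an external handler
 * via intel_svm_prq_report(): validate the response against the bound
 * PASID/mm and, when required, post a page group response descriptor to
 * the hardware.
 */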
int intel_svm_page_response(struct device *dev,
			    struct iommu_fault_event *evt,
			    struct iommu_page_response *msg)
{
	struct iommu_fault_page_request *prm;
	struct intel_svm_dev *sdev = NULL;
	struct intel_svm *svm = NULL;
	struct intel_iommu *iommu;
	bool private_present;
	bool pasid_present;
	bool last_page;
	u8 bus, devfn;
	int ret = 0;
	u16 sid;

	if (!dev || !dev_is_pci(dev))
		return -ENODEV;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	if (!msg || !evt)
		return -EINVAL;

	mutex_lock(&pasid_mutex);

	prm = &evt->fault.prm;
	sid = PCI_DEVID(bus, devfn);
	pasid_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
	private_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA;
	last_page = prm->flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;

	if (!pasid_present) {
		ret = -EINVAL;
		goto out;
	}

	if (prm->pasid == 0 || prm->pasid >= PASID_MAX) {
		ret = -EINVAL;
		goto out;
	}

	ret = pasid_to_svm_sdev(dev, prm->pasid, &svm, &sdev);
	if (ret || !sdev) {
		ret = -ENODEV;
		goto out;
	}

	/*
	 * For responses from userspace, need to make sure that the
	 * pasid has been bound to its mm.
	 */
	if (svm->flags & SVM_FLAG_GUEST_MODE) {
		struct mm_struct *mm;

		mm = get_task_mm(current);
		if (!mm) {
			ret = -EINVAL;
			goto out;
		}

		if (mm != svm->mm) {
			ret = -ENODEV;
			mmput(mm);
			goto out;
		}

		mmput(mm);
	}

	/*
	 * Per VT-d spec. v3.0 ch7.7, system software must respond
	 * with page group response if private data is present (PDP)
	 * or last page in group (LPIG) bit is set. This is an
	 * additional VT-d requirement beyond PCI ATS spec.
	 */
	if (last_page || private_present) {
		struct qi_desc desc;

		desc.qw0 = QI_PGRP_PASID(prm->pasid) | QI_PGRP_DID(sid) |
				QI_PGRP_PASID_P(pasid_present) |
				QI_PGRP_PDP(private_present) |
				QI_PGRP_RESP_CODE(msg->code) |
				QI_PGRP_RESP_TYPE;
		desc.qw1 = QI_PGRP_IDX(prm->grpid) | QI_PGRP_LPIG(last_page);
		desc.qw2 = 0;
		desc.qw3 = 0;
		if (private_present)
			memcpy(&desc.qw2, prm->private_data,
			       sizeof(prm->private_data));

		qi_submit_sync(iommu, &desc, 1, 0);
	}
out:
	mutex_unlock(&pasid_mutex);
	return ret;
}