virtio, vhost: bugfixes
Fixes in the iommu and balloon devices. Disable the meta-data
optimization for now - I hope we can get it fixed shortly, but there's
no point in making users suffer crashes while we are working on that.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

-----BEGIN PGP SIGNATURE-----

iQEcBAABAgAGBQJdPV3yAAoJECgfDbjSjVRp5qAIAIbzdgGkkuill7++e05fo3zJ
Vus5ApnFb+VopuiKFAxHyrRhvFun2dftcpOEFC6qpZ1xMcErRa1JTDp+Z70gLPcf
ZYrT7WoJv202cTQLjlrKwMA4C+hNTGf86KZWls+uzTXngbsrzib99M89wjOTP6UW
fslOtznbaHw/oPqQSiL40vNUEhU6thnvSxWpaIGJTnU9cx508Q7dE8TpLA5UpuNj
0y0+0HJrwlNdO2CSOay+dLEkZ/3M0vbXxwcmMNwoPIOx3N58ScCTLF3w6/Zuudco
XGhUzY6K5UqonVRVoxXMsQru9ZiAhKGMnf3+ugUojm+riPFOrWBbMNkU7mmNIo0=
=nw3y
-----END PGP SIGNATURE-----

Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull virtio/vhost fixes from Michael Tsirkin:

 - Fixes in the iommu and balloon devices.

 - Disable the meta-data optimization for now - I hope we can get it
   fixed shortly, but there's no point in making users suffer crashes
   while we are working on that.

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
  vhost: disable metadata prefetch optimization
  iommu/virtio: Update to most recent specification
  balloon: fix up comments
  mm/balloon_compaction: avoid duplicate page removal
commit 2a11c76e53
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -2,7 +2,7 @@
 /*
  * Virtio driver for the paravirtualized IOMMU
  *
- * Copyright (C) 2018 Arm Limited
+ * Copyright (C) 2019 Arm Limited
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -47,7 +47,10 @@ struct viommu_dev {
 	/* Device configuration */
 	struct iommu_domain_geometry	geometry;
 	u64				pgsize_bitmap;
-	u8				domain_bits;
+	u32				first_domain;
+	u32				last_domain;
+	/* Supported MAP flags */
+	u32				map_flags;
 	u32				probe_size;
 };
 
@@ -62,6 +65,7 @@ struct viommu_domain {
 	struct viommu_dev		*viommu;
 	struct mutex			mutex; /* protects viommu pointer */
 	unsigned int			id;
+	u32				map_flags;
 
 	spinlock_t			mappings_lock;
 	struct rb_root_cached		mappings;
@@ -113,6 +117,8 @@ static int viommu_get_req_errno(void *buf, size_t len)
 		return -ENOENT;
 	case VIRTIO_IOMMU_S_FAULT:
 		return -EFAULT;
+	case VIRTIO_IOMMU_S_NOMEM:
+		return -ENOMEM;
 	case VIRTIO_IOMMU_S_IOERR:
 	case VIRTIO_IOMMU_S_DEVERR:
 	default:
@@ -607,15 +613,15 @@ static int viommu_domain_finalise(struct viommu_dev *viommu,
 {
 	int ret;
 	struct viommu_domain *vdomain = to_viommu_domain(domain);
-	unsigned int max_domain = viommu->domain_bits > 31 ? ~0 :
-				  (1U << viommu->domain_bits) - 1;
 
 	vdomain->viommu		= viommu;
+	vdomain->map_flags	= viommu->map_flags;
 
 	domain->pgsize_bitmap	= viommu->pgsize_bitmap;
 	domain->geometry	= viommu->geometry;
 
-	ret = ida_alloc_max(&viommu->domain_ids, max_domain, GFP_KERNEL);
+	ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
+			      viommu->last_domain, GFP_KERNEL);
 	if (ret >= 0)
 		vdomain->id = (unsigned int)ret;
 
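A note on the allocator change in viommu_domain_finalise() above:
ida_alloc_max() takes a single inclusive maximum, which the old code
derived from domain_bits with overflow-prone shift arithmetic, while
ida_alloc_range() hands the kernel's IDA an inclusive
[first_domain, last_domain] window taken straight from the device
config. Below is a minimal userspace sketch of those range semantics;
the bitmap allocator and all names in it are illustrative stand-ins,
not the kernel's IDA. The virtio-iommu.c diff continues after the
sketch.

#include <stdint.h>
#include <stdio.h>

#define ID_SPACE 64

static uint64_t used;	/* one bit per ID, for a toy 64-ID space */

/* Return the first free ID in [first, last], or -1 if none (-ENOSPC-like). */
static int alloc_range(unsigned int first, unsigned int last)
{
	for (unsigned int id = first; id <= last && id < ID_SPACE; id++) {
		if (!(used & (1ULL << id))) {
			used |= 1ULL << id;
			return (int)id;
		}
	}
	return -1;
}

int main(void)
{
	printf("%d\n", alloc_range(4, 7));	/* 4 */
	printf("%d\n", alloc_range(4, 7));	/* 5 */
	printf("%d\n", alloc_range(6, 6));	/* 6 */
	return 0;
}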
@@ -710,7 +716,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
 		      phys_addr_t paddr, size_t size, int prot)
 {
 	int ret;
-	int flags;
+	u32 flags;
 	struct virtio_iommu_req_map map;
 	struct viommu_domain *vdomain = to_viommu_domain(domain);
 
@@ -718,6 +724,9 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
 		(prot & IOMMU_WRITE ? VIRTIO_IOMMU_MAP_F_WRITE : 0) |
 		(prot & IOMMU_MMIO ? VIRTIO_IOMMU_MAP_F_MMIO : 0);
 
+	if (flags & ~vdomain->map_flags)
+		return -EINVAL;
+
 	ret = viommu_add_mapping(vdomain, iova, paddr, size, flags);
 	if (ret)
 		return ret;
@@ -1027,7 +1036,8 @@ static int viommu_probe(struct virtio_device *vdev)
 		goto err_free_vqs;
 	}
 
-	viommu->domain_bits = 32;
+	viommu->map_flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE;
+	viommu->last_domain = ~0U;
 
 	/* Optional features */
 	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
@@ -1038,9 +1048,13 @@ static int viommu_probe(struct virtio_device *vdev)
 			     struct virtio_iommu_config, input_range.end,
 			     &input_end);
 
-	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_BITS,
-			     struct virtio_iommu_config, domain_bits,
-			     &viommu->domain_bits);
+	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
+			     struct virtio_iommu_config, domain_range.start,
+			     &viommu->first_domain);
+
+	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
+			     struct virtio_iommu_config, domain_range.end,
+			     &viommu->last_domain);
 
 	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_PROBE,
 			     struct virtio_iommu_config, probe_size,
@@ -1052,6 +1066,9 @@ static int viommu_probe(struct virtio_device *vdev)
 		.force_aperture = true,
 	};
 
+	if (virtio_has_feature(vdev, VIRTIO_IOMMU_F_MMIO))
+		viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO;
+
 	viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap;
 
 	virtio_device_ready(vdev);
@@ -1130,9 +1147,10 @@ static void viommu_config_changed(struct virtio_device *vdev)
 
 static unsigned int features[] = {
 	VIRTIO_IOMMU_F_MAP_UNMAP,
-	VIRTIO_IOMMU_F_DOMAIN_BITS,
 	VIRTIO_IOMMU_F_INPUT_RANGE,
+	VIRTIO_IOMMU_F_DOMAIN_RANGE,
 	VIRTIO_IOMMU_F_PROBE,
+	VIRTIO_IOMMU_F_MMIO,
 };
 
 static struct virtio_device_id id_table[] = {
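The viommu_map() hunk above adds a capability check: any requested
mapping flag that the device did not advertise in map_flags now fails
early with -EINVAL. A standalone sketch of that mask test follows; the
flag values mirror VIRTIO_IOMMU_MAP_F_* purely for illustration and the
function names are hypothetical.

#include <stdint.h>
#include <stdio.h>

#define MAP_F_READ  (1u << 0)
#define MAP_F_WRITE (1u << 1)
#define MAP_F_MMIO  (1u << 2)

/* Return 0 on success, -1 (think -EINVAL) if any flag is unsupported. */
static int check_map_flags(uint32_t supported, uint32_t requested)
{
	if (requested & ~supported)
		return -1;
	return 0;
}

int main(void)
{
	uint32_t supported = MAP_F_READ | MAP_F_WRITE;	/* device lacks MMIO */

	printf("%d\n", check_map_flags(supported, MAP_F_READ));			/* 0 */
	printf("%d\n", check_map_flags(supported, MAP_F_READ | MAP_F_MMIO));	/* -1 */
	return 0;
}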
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -96,7 +96,7 @@ struct vhost_uaddr {
 };
 
 #if defined(CONFIG_MMU_NOTIFIER) && ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 0
-#define VHOST_ARCH_CAN_ACCEL_UACCESS 1
+#define VHOST_ARCH_CAN_ACCEL_UACCESS 0
 #else
 #define VHOST_ARCH_CAN_ACCEL_UACCESS 0
 #endif
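The vhost.h change is the "disable metadata prefetch optimization" fix
from the pull request: both arms of the #if now define
VHOST_ARCH_CAN_ACCEL_UACCESS to 0, so every code path guarded by the
macro compiles out until the feature is fixed. A minimal sketch of this
compile-time kill-switch pattern, with hypothetical names rather than
the vhost code:

#include <stdio.h>

#define CAN_ACCEL 0	/* was 1 on qualifying configs; forced off for now */

static void access_slow(void) { puts("safe copy-based uaccess path"); }
#if CAN_ACCEL
static void access_fast(void) { puts("direct-mapped metadata path"); }
#endif

int main(void)
{
#if CAN_ACCEL
	access_fast();	/* never built while the optimization is disabled */
#else
	access_slow();
#endif
	return 0;
}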
--- a/include/uapi/linux/virtio_iommu.h
+++ b/include/uapi/linux/virtio_iommu.h
@@ -1,8 +1,8 @@
 /* SPDX-License-Identifier: BSD-3-Clause */
 /*
- * Virtio-iommu definition v0.9
+ * Virtio-iommu definition v0.12
  *
- * Copyright (C) 2018 Arm Ltd.
+ * Copyright (C) 2019 Arm Ltd.
 */
 #ifndef _UAPI_LINUX_VIRTIO_IOMMU_H
 #define _UAPI_LINUX_VIRTIO_IOMMU_H
@@ -11,26 +11,31 @@
 
 /* Feature bits */
 #define VIRTIO_IOMMU_F_INPUT_RANGE		0
-#define VIRTIO_IOMMU_F_DOMAIN_BITS		1
+#define VIRTIO_IOMMU_F_DOMAIN_RANGE		1
 #define VIRTIO_IOMMU_F_MAP_UNMAP		2
 #define VIRTIO_IOMMU_F_BYPASS			3
 #define VIRTIO_IOMMU_F_PROBE			4
+#define VIRTIO_IOMMU_F_MMIO			5
 
-struct virtio_iommu_range {
-	__u64					start;
-	__u64					end;
+struct virtio_iommu_range_64 {
+	__le64					start;
+	__le64					end;
+};
+
+struct virtio_iommu_range_32 {
+	__le32					start;
+	__le32					end;
 };
 
 struct virtio_iommu_config {
 	/* Supported page sizes */
-	__u64					page_size_mask;
+	__le64					page_size_mask;
 	/* Supported IOVA range */
-	struct virtio_iommu_range		input_range;
+	struct virtio_iommu_range_64		input_range;
 	/* Max domain ID size */
-	__u8					domain_bits;
-	__u8					padding[3];
+	struct virtio_iommu_range_32		domain_range;
 	/* Probe buffer size */
-	__u32					probe_size;
+	__le32					probe_size;
 };
 
 /* Request types */
@@ -49,6 +54,7 @@ struct virtio_iommu_config {
 #define VIRTIO_IOMMU_S_RANGE			0x05
 #define VIRTIO_IOMMU_S_NOENT			0x06
 #define VIRTIO_IOMMU_S_FAULT			0x07
+#define VIRTIO_IOMMU_S_NOMEM			0x08
 
 struct virtio_iommu_req_head {
 	__u8					type;
@@ -78,12 +84,10 @@ struct virtio_iommu_req_detach {
 
 #define VIRTIO_IOMMU_MAP_F_READ			(1 << 0)
 #define VIRTIO_IOMMU_MAP_F_WRITE		(1 << 1)
-#define VIRTIO_IOMMU_MAP_F_EXEC			(1 << 2)
-#define VIRTIO_IOMMU_MAP_F_MMIO			(1 << 3)
+#define VIRTIO_IOMMU_MAP_F_MMIO			(1 << 2)
 
 #define VIRTIO_IOMMU_MAP_F_MASK			(VIRTIO_IOMMU_MAP_F_READ |	\
 						 VIRTIO_IOMMU_MAP_F_WRITE |	\
-						 VIRTIO_IOMMU_MAP_F_EXEC |	\
 						 VIRTIO_IOMMU_MAP_F_MMIO)
 
 struct virtio_iommu_req_map {
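The UAPI header above switches config fields from native-endian
__u64/__u32 to explicit __le64/__le32: virtio config space is
little-endian by specification, so a big-endian guest must byte-swap on
every access. A small userspace sketch of that round trip, using
glibc's <endian.h> helpers as stand-ins for the kernel's
cpu_to_le64()/le64_to_cpu():

#include <endian.h>	/* glibc-specific htole64()/le64toh() */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cpu_value = 0x0000000000fff000ULL;	/* e.g. a page_size_mask */
	uint64_t wire = htole64(cpu_value);	/* as stored in config space */
	uint64_t back = le64toh(wire);		/* as read back by the driver */

	printf("round-trip ok: %d\n", back == cpu_value);
	return 0;
}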
--- a/mm/balloon_compaction.c
+++ b/mm/balloon_compaction.c
@@ -21,7 +21,6 @@ static void balloon_page_enqueue_one(struct balloon_dev_info *b_dev_info,
 	 * memory corruption is possible and we should stop execution.
 	 */
 	BUG_ON(!trylock_page(page));
-	list_del(&page->lru);
 	balloon_page_insert(b_dev_info, page);
 	unlock_page(page);
 	__count_vm_event(BALLOON_INFLATE);
@@ -33,8 +32,8 @@ static void balloon_page_enqueue_one(struct balloon_dev_info *b_dev_info,
  * @b_dev_info: balloon device descriptor where we will insert a new page to
  * @pages: pages to enqueue - allocated using balloon_page_alloc.
  *
- * Driver must call it to properly enqueue a balloon pages before definitively
- * removing it from the guest system.
+ * Driver must call this function to properly enqueue balloon pages before
+ * definitively removing them from the guest system.
  *
  * Return: number of pages that were enqueued.
  */
@@ -47,6 +46,7 @@ size_t balloon_page_list_enqueue(struct balloon_dev_info *b_dev_info,
 
 	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
 	list_for_each_entry_safe(page, tmp, pages, lru) {
+		list_del(&page->lru);
 		balloon_page_enqueue_one(b_dev_info, page);
 		n_pages++;
 	}
@@ -63,12 +63,13 @@ EXPORT_SYMBOL_GPL(balloon_page_list_enqueue);
  * @n_req_pages: number of requested pages.
  *
  * Driver must call this function to properly de-allocate a previous enlisted
- * balloon pages before definetively releasing it back to the guest system.
+ * balloon pages before definitively releasing it back to the guest system.
  * This function tries to remove @n_req_pages from the ballooned pages and
  * return them to the caller in the @pages list.
 *
- * Note that this function may fail to dequeue some pages temporarily empty due
- * to compaction isolated pages.
+ * Note that this function may fail to dequeue some pages even if the balloon
+ * isn't empty - since the page list can be temporarily empty due to compaction
+ * of isolated pages.
 *
 * Return: number of pages that were added to the @pages list.
 */
@@ -112,12 +113,13 @@ EXPORT_SYMBOL_GPL(balloon_page_list_dequeue);
 
 /*
  * balloon_page_alloc - allocates a new page for insertion into the balloon
- *			page list.
+ *		       page list.
 *
- * Driver must call it to properly allocate a new enlisted balloon page.
- * Driver must call balloon_page_enqueue before definitively removing it from
- * the guest system. This function returns the page address for the recently
- * allocated page or NULL in the case we fail to allocate a new page this turn.
+ * Driver must call this function to properly allocate a new balloon page.
+ * Driver must call balloon_page_enqueue before definitively removing the page
+ * from the guest system.
+ *
+ * Return: struct page for the allocated page or NULL on allocation failure.
 */
 struct page *balloon_page_alloc(void)
 {
@@ -128,15 +130,17 @@ struct page *balloon_page_alloc(void)
 EXPORT_SYMBOL_GPL(balloon_page_alloc);
 
 /*
- * balloon_page_enqueue - allocates a new page and inserts it into the balloon
- *			  page list.
- * @b_dev_info: balloon device descriptor where we will insert a new page to
+ * balloon_page_enqueue - inserts a new page into the balloon page list.
+ *
+ * @b_dev_info: balloon device descriptor where we will insert a new page
 * @page: new page to enqueue - allocated using balloon_page_alloc.
 *
- * Driver must call it to properly enqueue a new allocated balloon page
- * before definitively removing it from the guest system.
- * This function returns the page address for the recently enqueued page or
- * NULL in the case we fail to allocate a new page this turn.
+ * Drivers must call this function to properly enqueue a new allocated balloon
+ * page before definitively removing the page from the guest system.
+ *
+ * Drivers must not call balloon_page_enqueue on pages that have been pushed to
+ * a list with balloon_page_push before removing them with balloon_page_pop. To
+ * enqueue a list of pages, use balloon_page_list_enqueue instead.
 */
 void balloon_page_enqueue(struct balloon_dev_info *b_dev_info,
 			  struct page *page)
@@ -151,14 +155,23 @@ EXPORT_SYMBOL_GPL(balloon_page_enqueue);
 
 /*
  * balloon_page_dequeue - removes a page from balloon's page list and returns
- *			  the its address to allow the driver release the page.
+ *			  its address to allow the driver to release the page.
 * @b_dev_info: balloon device decriptor where we will grab a page from.
 *
- * Driver must call it to properly de-allocate a previous enlisted balloon page
- * before definetively releasing it back to the guest system.
- * This function returns the page address for the recently dequeued page or
- * NULL in the case we find balloon's page list temporarily empty due to
- * compaction isolated pages.
+ * Driver must call this function to properly dequeue a previously enqueued page
+ * before definitively releasing it back to the guest system.
+ *
+ * Caller must perform its own accounting to ensure that this
+ * function is called only if some pages are actually enqueued.
+ *
+ * Note that this function may fail to dequeue some pages even if there are
+ * some enqueued pages - since the page list can be temporarily empty due to
+ * the compaction of isolated pages.
+ *
+ * TODO: remove the caller accounting requirements, and allow caller to wait
+ * until all pages can be dequeued.
+ *
+ * Return: struct page for the dequeued page, or NULL if no page was dequeued.
 */
 struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
 {
@@ -171,9 +184,9 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
 	if (n_pages != 1) {
 		/*
 		 * If we are unable to dequeue a balloon page because the page
-		 * list is empty and there is no isolated pages, then something
+		 * list is empty and there are no isolated pages, then something
 		 * went out of track and some balloon pages are lost.
-		 * BUG() here, otherwise the balloon driver may get stuck into
+		 * BUG() here, otherwise the balloon driver may get stuck in
 		 * an infinite loop while attempting to release all its pages.
 		 */
 		spin_lock_irqsave(&b_dev_info->pages_lock, flags);
@@ -224,8 +237,8 @@ int balloon_page_migrate(struct address_space *mapping,
 
 	/*
 	 * We can not easily support the no copy case here so ignore it as it
-	 * is unlikely to be use with ballon pages. See include/linux/hmm.h for
-	 * user of the MIGRATE_SYNC_NO_COPY mode.
+	 * is unlikely to be used with balloon pages. See include/linux/hmm.h
+	 * for a user of the MIGRATE_SYNC_NO_COPY mode.
 	 */
 	if (mode == MIGRATE_SYNC_NO_COPY)
 		return -EINVAL;
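The first balloon hunk is the "avoid duplicate page removal" fix:
list_del() moves out of balloon_page_enqueue_one() and into
balloon_page_list_enqueue(), because pages arriving through
balloon_page_enqueue() are not on any list, so the unconditional unlink
touched stale pointers. A standalone toy version of a kernel-style
circular list shows why unlinking a node that is not enqueued corrupts
memory; this is a sketch, not the mm/ implementation.

#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next;
	n->prev = h;
	h->next->prev = n;
	h->next = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->prev = n->next = NULL;	/* poison, like the kernel's LIST_POISON */
}

int main(void)
{
	struct list_head list, a;

	list_init(&list);
	list_add(&a, &list);
	list_del(&a);		/* fine: a was actually enqueued */
	/* list_del(&a); */	/* double unlink: dereferences the poison above */
	printf("list empty: %d\n", list.next == &list);
	return 0;
}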