An I/O request of a User VM, which is constructed by the hypervisor, is distributed by the ACRN Hypervisor Service Module to an I/O client corresponding to the address range of the I/O request.

For each User VM, there is a shared 4-KByte memory region used for I/O request communication between the hypervisor and the Service VM. An I/O request is a 256-byte structure buffer, 'struct acrn_io_request', which is filled by an I/O handler of the hypervisor when a trapped I/O access happens in a User VM. ACRN userspace in the Service VM first allocates a 4-KByte page and passes the GPA (Guest Physical Address) of the buffer to the hypervisor. The buffer is used as an array of 16 I/O request slots, each slot being 256 bytes. This array is indexed by vCPU ID.

An I/O client, 'struct acrn_ioreq_client', is responsible for handling User VM I/O requests whose accessed GPA falls in a certain range. Multiple I/O clients can be associated with each User VM. There is a special client associated with each User VM, called the default client, that handles all I/O requests that do not fit into the range of any other I/O client. The ACRN userspace acts as the default client for each User VM.

The state transitions of an ACRN I/O request are as follows.

   FREE -> PENDING -> PROCESSING -> COMPLETE -> FREE -> ...

FREE: this I/O request slot is empty
PENDING: a valid I/O request is pending in this slot
PROCESSING: the I/O request is being processed
COMPLETE: the I/O request has been processed

An I/O request in COMPLETE or FREE state is owned by the hypervisor. HSM and ACRN userspace are in charge of processing the others.

The processing flow of I/O requests is as follows:

a) The I/O handler of the hypervisor fills an I/O request with PENDING state when a trapped I/O access happens in a User VM.
b) The hypervisor makes an upcall, which is a notification interrupt, to the Service VM.
c) The upcall handler schedules a worker to dispatch I/O requests.
d) The worker looks for the PENDING I/O requests, assigns them to different registered clients based on the address of the I/O accesses, updates their state to PROCESSING, and notifies the corresponding client to handle them.
e) The notified client handles the assigned I/O requests.
f) The HSM updates I/O request states to COMPLETE and notifies the hypervisor of the completion via hypercalls.

Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Zhi Wang <zhi.a.wang@intel.com>
Cc: Zhenyu Wang <zhenyuw@linux.intel.com>
Cc: Yu Wang <yu1.wang@intel.com>
Cc: Reinette Chatre <reinette.chatre@intel.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Reviewed-by: Zhi Wang <zhi.a.wang@intel.com>
Reviewed-by: Reinette Chatre <reinette.chatre@intel.com>
Acked-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Shuo Liu <shuo.a.liu@intel.com>
Link: https://lore.kernel.org/r/20210207031040.49576-10-shuo.a.liu@intel.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
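The buffer layout and request life cycle described above can be pictured with a short sketch. This is illustrative only and is not the kernel's actual definitions: the names io_request_state, io_request_slot and io_request_buffer, and the field split inside a slot, are assumptions; only the sizes (a 4-KByte page holding 16 slots of 256 bytes, indexed by vCPU ID) and the four states come from the description above.

/*
 * Illustrative sketch only: mirrors the layout described in the commit
 * message, not the actual struct acrn_io_request definition.
 */
#include <linux/build_bug.h>
#include <linux/types.h>

/* Request slot states, as described in the commit message. */
enum io_request_state {
	IOREQ_STATE_FREE,	/* slot is empty, owned by the hypervisor */
	IOREQ_STATE_PENDING,	/* a valid I/O request is pending in the slot */
	IOREQ_STATE_PROCESSING,	/* the request is being handled by a client */
	IOREQ_STATE_COMPLETE,	/* handled; the hypervisor reclaims the slot */
};

/* Each slot is 256 bytes; the exact field layout here is hypothetical. */
struct io_request_slot {
	__u32 type;				/* kind of trapped access */
	__u32 state;				/* enum io_request_state */
	__u8  payload[256 - 2 * sizeof(__u32)];	/* access details */
};

/* One shared 4-KByte page per User VM: 16 slots, indexed by vCPU ID. */
struct io_request_buffer {
	struct io_request_slot slot[16];
};

static_assert(sizeof(struct io_request_slot) == 256);
static_assert(sizeof(struct io_request_buffer) == 4096);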
// SPDX-License-Identifier: GPL-2.0
/*
 * ACRN_HSM: Virtual Machine management
 *
 * Copyright (C) 2020 Intel Corporation. All rights reserved.
 *
 * Authors:
 *	Jason Chen CJ <jason.cj.chen@intel.com>
 *	Yakui Zhao <yakui.zhao@intel.com>
 */
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include "acrn_drv.h"

/* List of VMs */
LIST_HEAD(acrn_vm_list);
/*
 * acrn_vm_list is read in a worker thread which dispatches I/O requests
 * and is written to in the VM creation ioctl. Use the rwlock mechanism
 * to protect it.
 */
DEFINE_RWLOCK(acrn_vm_list_lock);

struct acrn_vm *acrn_vm_create(struct acrn_vm *vm,
			       struct acrn_vm_creation *vm_param)
{
	int ret;

	ret = hcall_create_vm(virt_to_phys(vm_param));
	if (ret < 0 || vm_param->vmid == ACRN_INVALID_VMID) {
		dev_err(acrn_dev.this_device,
			"Failed to create VM! Error: %d\n", ret);
		return NULL;
	}

	mutex_init(&vm->regions_mapping_lock);
	INIT_LIST_HEAD(&vm->ioreq_clients);
	spin_lock_init(&vm->ioreq_clients_lock);
	vm->vmid = vm_param->vmid;
	vm->vcpu_num = vm_param->vcpu_num;

	if (acrn_ioreq_init(vm, vm_param->ioreq_buf) < 0) {
		hcall_destroy_vm(vm_param->vmid);
		vm->vmid = ACRN_INVALID_VMID;
		return NULL;
	}

	write_lock_bh(&acrn_vm_list_lock);
	list_add(&vm->list, &acrn_vm_list);
	write_unlock_bh(&acrn_vm_list_lock);

	dev_dbg(acrn_dev.this_device, "VM %u created.\n", vm->vmid);
	return vm;
}

int acrn_vm_destroy(struct acrn_vm *vm)
{
	int ret;

	if (vm->vmid == ACRN_INVALID_VMID ||
	    test_and_set_bit(ACRN_VM_FLAG_DESTROYED, &vm->flags))
		return 0;

	/* Remove from global VM list */
	write_lock_bh(&acrn_vm_list_lock);
	list_del_init(&vm->list);
	write_unlock_bh(&acrn_vm_list_lock);

	acrn_ioreq_deinit(vm);

	ret = hcall_destroy_vm(vm->vmid);
	if (ret < 0) {
		dev_err(acrn_dev.this_device,
			"Failed to destroy VM %u\n", vm->vmid);
		clear_bit(ACRN_VM_FLAG_DESTROYED, &vm->flags);
		return ret;
	}

	acrn_vm_all_ram_unmap(vm);

	dev_dbg(acrn_dev.this_device, "VM %u destroyed.\n", vm->vmid);
	vm->vmid = ACRN_INVALID_VMID;
	return 0;
}
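For reference, a hypothetical caller sketch follows; it is not part of this patch. It only illustrates how the interfaces above could be used: acrn_vm_create() on the VM creation path (destruction is symmetric via acrn_vm_destroy()), plus the reader side of acrn_vm_list_lock described in the comment near the top of the file. The helper names handle_create_ioctl() and find_vm_by_id(), and the u16 type of the VM ID, are assumptions.

/* Hypothetical usage sketch, not part of vm.c. */
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>

#include "acrn_drv.h"

static int handle_create_ioctl(struct acrn_vm *vm,
			       struct acrn_vm_creation *vm_param)
{
	/* acrn_vm_create() returns NULL on failure and the VM on success. */
	if (!acrn_vm_create(vm, vm_param))
		return -EINVAL;

	return 0;
}

/*
 * Reader side of acrn_vm_list_lock: the I/O request dispatch worker only
 * reads acrn_vm_list, so it takes the read lock, while VM creation and
 * destruction above take the write lock.
 */
static struct acrn_vm *find_vm_by_id(u16 vmid)
{
	struct acrn_vm *vm, *found = NULL;

	read_lock(&acrn_vm_list_lock);
	list_for_each_entry(vm, &acrn_vm_list, list) {
		if (vm->vmid == vmid) {
			found = vm;
			break;
		}
	}
	read_unlock(&acrn_vm_list_lock);

	return found;
}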