lguest: write more information to userspace about pending traps.

This is preparation for userspace handling MMIO and ioport accesses.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
commit 69a09dc174
parent 18c137371b
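
A quick note on what "more information" means on the wire: read() on the lguest fd used to hand the Launcher a single unsigned long (the NOTIFY address); it now fills in the struct lguest_pending introduced further down in include/linux/lguest_launcher.h. Below is a minimal userspace-side sketch of that layout (not part of the patch; it mirrors the header change with <stdint.h> types instead of the kernel's __u8/__u32, and assumes C11 and the usual x86 layout rules):

    /* Sketch only: userspace mirror of the new ABI. No padding is needed:
     * 1 + 7 bytes, then a 4-byte addr naturally aligned at offset 8. */
    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    struct lguest_pending {
            uint8_t  trap;          /* which trap is pending */
            uint8_t  insn[7];       /* instruction bytes, for MMIO/GPF traps */
            uint32_t addr;          /* NOTIFY argument or faulting address */
    };

    /* read() of the lguest fd now returns exactly this many bytes, so the
     * Launcher compares the read() return value against the struct size. */
    static_assert(offsetof(struct lguest_pending, addr) == 8,
                  "addr sits right after trap + insn[7]");
    static_assert(sizeof(struct lguest_pending) == 12,
                  "one pending record is 12 bytes, not one unsigned long");
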
--- a/drivers/lguest/core.c
+++ b/drivers/lguest/core.c
@@ -229,16 +229,17 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
                  * It's possible the Guest did a NOTIFY hypercall to the
                  * Launcher.
                  */
-                if (cpu->pending_notify) {
+                if (cpu->pending.trap) {
                         /*
                          * Does it just needs to write to a registered
                          * eventfd (ie. the appropriate virtqueue thread)?
                          */
                         if (!send_notify_to_eventfd(cpu)) {
                                 /* OK, we tell the main Launcher. */
-                                if (put_user(cpu->pending_notify, user))
+                                if (copy_to_user(user, &cpu->pending,
+                                                 sizeof(cpu->pending)))
                                         return -EFAULT;
-                                return sizeof(cpu->pending_notify);
+                                return sizeof(cpu->pending);
                         }
                 }
 
--- a/drivers/lguest/hypercalls.c
+++ b/drivers/lguest/hypercalls.c
@@ -118,7 +118,8 @@ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
                 cpu->halted = 1;
                 break;
         case LHCALL_NOTIFY:
-                cpu->pending_notify = args->arg1;
+                cpu->pending.trap = LGUEST_TRAP_ENTRY;
+                cpu->pending.addr = args->arg1;
                 break;
         default:
                 /* It should be an architecture-specific hypercall. */
@@ -189,7 +190,7 @@ static void do_async_hcalls(struct lg_cpu *cpu)
                  * Stop doing hypercalls if they want to notify the Launcher:
                  * it needs to service this first.
                  */
                if (cpu->pending_notify)
-                if (cpu->pending_notify)
+                if (cpu->pending.trap)
                         break;
         }
 }
@@ -280,7 +281,7 @@ void do_hypercalls(struct lg_cpu *cpu)
          * NOTIFY to the Launcher, we want to return now. Otherwise we do
          * the hypercall.
          */
-        if (!cpu->pending_notify) {
+        if (!cpu->pending.trap) {
                 do_hcall(cpu, cpu->hcall);
                 /*
                  * Tricky point: we reset the hcall pointer to mark the
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -50,7 +50,8 @@ struct lg_cpu {
         /* Bitmap of what has changed: see CHANGED_* above. */
         int changed;
 
-        unsigned long pending_notify; /* pfn from LHCALL_NOTIFY */
+        /* Pending operation. */
+        struct lguest_pending pending;
 
         unsigned long *reg_read; /* register from LHREQ_GETREG */
 
--- a/drivers/lguest/lguest_user.c
+++ b/drivers/lguest/lguest_user.c
@@ -29,6 +29,10 @@ bool send_notify_to_eventfd(struct lg_cpu *cpu)
         unsigned int i;
         struct lg_eventfd_map *map;
 
+        /* We only connect LHCALL_NOTIFY to event fds, not other traps. */
+        if (cpu->pending.trap != LGUEST_TRAP_ENTRY)
+                return false;
+
         /*
          * This "rcu_read_lock()" helps track when someone is still looking at
          * the (RCU-using) eventfds array. It's not actually a lock at all;
@@ -52,9 +56,9 @@ bool send_notify_to_eventfd(struct lg_cpu *cpu)
          * we'll continue to use the old array and just won't see the new one.
          */
         for (i = 0; i < map->num; i++) {
-                if (map->map[i].addr == cpu->pending_notify) {
+                if (map->map[i].addr == cpu->pending.addr) {
                         eventfd_signal(map->map[i].event, 1);
-                        cpu->pending_notify = 0;
+                        cpu->pending.trap = 0;
                         break;
                 }
         }
@@ -62,7 +66,7 @@ bool send_notify_to_eventfd(struct lg_cpu *cpu)
         rcu_read_unlock();
 
         /* If we cleared the notification, it's because we found a match. */
-        return cpu->pending_notify == 0;
+        return cpu->pending.trap == 0;
 }
 
 /*L:055
@@ -282,8 +286,8 @@ static ssize_t read(struct file *file, char __user *user, size_t size, loff_t *o)
          * If we returned from read() last time because the Guest sent I/O,
          * clear the flag.
          */
-        if (cpu->pending_notify)
-                cpu->pending_notify = 0;
+        if (cpu->pending.trap)
+                cpu->pending.trap = 0;
 
         /* Run the Guest until something interesting happens. */
         return run_guest(cpu, (unsigned long __user *)user);
--- a/include/linux/lguest_launcher.h
+++ b/include/linux/lguest_launcher.h
@@ -67,6 +67,19 @@ enum lguest_req
         LHREQ_SETREG, /* + offset within struct pt_regs, value. */
 };
 
+/*
+ * This is what read() of the lguest fd populates. trap ==
+ * LGUEST_TRAP_ENTRY for an LHCALL_NOTIFY (addr is the
+ * argument), 14 for a page fault in the MMIO region (addr is
+ * the trap address, insn is the instruction), or 13 for a GPF
+ * (insn is the instruction).
+ */
+struct lguest_pending {
+        __u8 trap;
+        __u8 insn[7];
+        __u32 addr;
+};
+
 /*
  * The alignment to use between consumer and producer parts of vring.
  * x86 pagesize for historical reasons.
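
The Launcher change below only understands trap 0x1F (LGUEST_TRAP_ENTRY, i.e. an LHCALL_NOTIFY) and bails out on anything else; the 14 (MMIO page fault) and 13 (GPF) values are only documented by the comment above and are not yet generated by this patch. Purely as a hypothetical sketch, a fuller Launcher-side dispatch might eventually look something like this (self-contained example, reusing the userspace mirror of struct lguest_pending from the earlier note; the printf bodies are placeholders, not lguest code):

    #include <err.h>
    #include <stdint.h>
    #include <stdio.h>

    #define LGUEST_TRAP_ENTRY 0x1F  /* hypercall trap number used by lguest */

    /* Userspace mirror of the new structure, as in the note above. */
    struct lguest_pending {
            uint8_t  trap;
            uint8_t  insn[7];
            uint32_t addr;
    };

    /* Hypothetical dispatch on one record returned by read() of the lguest fd. */
    static void handle_pending(const struct lguest_pending *p)
    {
            switch (p->trap) {
            case LGUEST_TRAP_ENTRY:
                    /* LHCALL_NOTIFY: addr is the Guest's notification argument. */
                    printf("notify on address %#08x\n", p->addr);
                    break;
            case 14:
                    /* Page fault in the MMIO region: addr is the trap address,
                     * insn holds the instruction bytes to emulate. */
                    printf("MMIO access at %#08x, insn byte 0 %#02x\n",
                           p->addr, p->insn[0]);
                    break;
            case 13:
                    /* General protection fault: only insn is meaningful. */
                    printf("GPF, insn byte 0 %#02x\n", p->insn[0]);
                    break;
            default:
                    errx(1, "Unknown trap %i addr %#08x", p->trap, p->addr);
            }
    }
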
--- a/tools/lguest/lguest.c
+++ b/tools/lguest/lguest.c
@@ -1820,17 +1820,21 @@ static void __attribute__((noreturn)) restart_guest(void)
 static void __attribute__((noreturn)) run_guest(void)
 {
         for (;;) {
-                unsigned long notify_addr;
+                struct lguest_pending notify;
                 int readval;
 
                 /* We read from the /dev/lguest device to run the Guest. */
-                readval = pread(lguest_fd, &notify_addr,
-                                sizeof(notify_addr), cpu_id);
+                readval = pread(lguest_fd, &notify, sizeof(notify), cpu_id);
 
                 /* One unsigned long means the Guest did HCALL_NOTIFY */
-                if (readval == sizeof(notify_addr)) {
-                        verbose("Notify on address %#lx\n", notify_addr);
-                        handle_output(notify_addr);
+                if (readval == sizeof(notify)) {
+                        if (notify.trap == 0x1F) {
+                                verbose("Notify on address %#08x\n",
+                                        notify.addr);
+                                handle_output(notify.addr);
+                        } else
+                                errx(1, "Unknown trap %i addr %#08x\n",
+                                     notify.trap, notify.addr);
                 /* ENOENT means the Guest died. Reading tells us why. */
                 } else if (errno == ENOENT) {
                         char reason[1024] = { 0 };