lguest: update commentary
Every so often, after code shuffles, I need to go through and unbitrot the
Lguest Journey (see drivers/lguest/README).  Since we now use RCU in a simple
form in one place I took the opportunity to expand that explanation.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
commit a91d74a3c4 (parent 2e04ef7691)
@@ -49,7 +49,7 @@
 #include "linux/virtio_ring.h"
 #include "asm/bootparam.h"
 /*L:110
- * We can ignore the 39 include files we need for this program, but I do want
+ * We can ignore the 42 include files we need for this program, but I do want
  * to draw attention to the use of kernel-style types.
  *
  * As Linus said, "C is a Spartan language, and so should your naming be." I
@@ -305,6 +305,11 @@ static void *map_zeroed_pages(unsigned int num)
 		    PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE, fd, 0);
 	if (addr == MAP_FAILED)
 		err(1, "Mmaping %u pages of /dev/zero", num);
+
+	/*
+	 * One neat mmap feature is that you can close the fd, and it
+	 * stays mapped.
+	 */
 	close(fd);
 
 	return addr;
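That new comment is worth a pause: a MAP_PRIVATE mapping holds its own reference to the underlying object, so closing the file descriptor that created it doesn't unmap anything. A minimal standalone sketch of the same trick (not part of the patch):

    #include <err.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
    	int fd = open("/dev/zero", O_RDONLY);
    	char *addr;

    	if (fd < 0)
    		err(1, "Opening /dev/zero");
    	addr = mmap(NULL, getpagesize(),
    		    PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
    	if (addr == MAP_FAILED)
    		err(1, "Mmaping /dev/zero");

    	/* The mapping keeps its own reference: closing fd doesn't unmap. */
    	close(fd);

    	strcpy(addr, "still mapped");
    	printf("%s\n", addr);
    	return 0;
    }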
@@ -557,7 +562,7 @@ static void tell_kernel(unsigned long start)
 }
 /*:*/
 
-/*
+/*L:200
  * Device Handling.
  *
  * When the Guest gives us a buffer, it sends an array of addresses and sizes.
@@ -608,7 +613,10 @@ static unsigned next_desc(struct vring_desc *desc,
 	return next;
 }
 
-/* This actually sends the interrupt for this virtqueue */
+/*
+ * This actually sends the interrupt for this virtqueue, if we've used a
+ * buffer.
+ */
 static void trigger_irq(struct virtqueue *vq)
 {
 	unsigned long buf[] = { LHREQ_IRQ, vq->config.irq };
@@ -629,12 +637,12 @@ static void trigger_irq(struct virtqueue *vq)
 }
 
 /*
- * This looks in the virtqueue and for the first available buffer, and converts
+ * This looks in the virtqueue for the first available buffer, and converts
  * it to an iovec for convenient access.  Since descriptors consist of some
  * number of output then some number of input descriptors, it's actually two
  * iovecs, but we pack them into one and note how many of each there were.
 *
- * This function returns the descriptor number found.
+ * This function waits if necessary, and returns the descriptor number found.
  */
 static unsigned wait_for_vq_desc(struct virtqueue *vq,
 				 struct iovec iov[],
@@ -644,10 +652,14 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq,
 	struct vring_desc *desc;
 	u16 last_avail = lg_last_avail(vq);
 
+	/* There's nothing available? */
 	while (last_avail == vq->vring.avail->idx) {
 		u64 event;
 
-		/* OK, tell Guest about progress up to now. */
+		/*
+		 * Since we're about to sleep, now is a good time to tell the
+		 * Guest about what we've used up to now.
+		 */
 		trigger_irq(vq);
 
 		/* OK, now we need to know about added descriptors. */
@@ -734,8 +746,9 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq,
 }
 
 /*
- * After we've used one of their buffers, we tell them about it.  We'll then
- * want to send them an interrupt, using trigger_irq().
+ * After we've used one of their buffers, we tell the Guest about it.  Sometime
+ * later we'll want to send them an interrupt using trigger_irq(); note that
+ * wait_for_vq_desc() does that for us if it has to wait.
  */
 static void add_used(struct virtqueue *vq, unsigned int head, int len)
 {
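For readers new to virtio, the "two iovecs packed into one" trick above is easy to sketch: descriptors chain through a next field, with read-only (Guest output) buffers first, then write-only (Guest input) ones. A simplified illustration, not the patch's code — it assumes the standard vring descriptor layout, and stands in lguest's from_guest_phys() with a guest_mem base pointer:

    #include <stdint.h>
    #include <sys/uio.h>

    #define VRING_DESC_F_NEXT   1  /* The buffer continues via desc.next. */
    #define VRING_DESC_F_WRITE  2  /* Write-only for us: a Guest input buffer. */

    struct vring_desc {
    	uint64_t addr;		/* Guest-physical address of the buffer. */
    	uint32_t len;
    	uint16_t flags;
    	uint16_t next;
    };

    /* Guest memory is mapped contiguously, as lguest does. */
    static char *guest_mem;

    /*
     * Walk one descriptor chain starting at "head": output descriptors come
     * before input ones, so the chain packs into a single iovec array with
     * two counts.
     */
    static void chain_to_iov(const struct vring_desc *desc, unsigned int head,
    			 struct iovec iov[],
    			 unsigned int *out_num, unsigned int *in_num)
    {
    	unsigned int i = head;

    	*out_num = *in_num = 0;
    	for (;;) {
    		iov[*out_num + *in_num].iov_base = guest_mem + desc[i].addr;
    		iov[*out_num + *in_num].iov_len = desc[i].len;
    		if (desc[i].flags & VRING_DESC_F_WRITE)
    			(*in_num)++;
    		else
    			(*out_num)++;
    		if (!(desc[i].flags & VRING_DESC_F_NEXT))
    			break;
    		i = desc[i].next;
    	}
    }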
@@ -782,12 +795,12 @@ static void console_input(struct virtqueue *vq)
 	struct console_abort *abort = vq->dev->priv;
 	struct iovec iov[vq->vring.num];
 
-	/* Make sure there's a descriptor waiting. */
+	/* Make sure there's a descriptor available. */
 	head = wait_for_vq_desc(vq, iov, &out_num, &in_num);
 	if (out_num)
 		errx(1, "Output buffers in console in queue?");
 
-	/* Read it in. */
+	/* Read into it.  This is where we usually wait. */
 	len = readv(STDIN_FILENO, iov, in_num);
 	if (len <= 0) {
 		/* Ran out of input? */
@@ -800,6 +813,7 @@ static void console_input(struct virtqueue *vq)
 			pause();
 	}
 
+	/* Tell the Guest we used a buffer. */
 	add_used_and_trigger(vq, head, len);
 
 	/*
@@ -834,15 +848,23 @@ static void console_output(struct virtqueue *vq)
 	unsigned int head, out, in;
 	struct iovec iov[vq->vring.num];
 
+	/* We usually wait in here, for the Guest to give us something. */
 	head = wait_for_vq_desc(vq, iov, &out, &in);
 	if (in)
 		errx(1, "Input buffers in console output queue?");
+
+	/* writev can return a partial write, so we loop here. */
 	while (!iov_empty(iov, out)) {
 		int len = writev(STDOUT_FILENO, iov, out);
 		if (len <= 0)
 			err(1, "Write to stdout gave %i", len);
 		iov_consume(iov, out, len);
 	}
+
+	/*
+	 * We're finished with that buffer: if we're going to sleep,
+	 * wait_for_vq_desc() will prod the Guest with an interrupt.
+	 */
 	add_used(vq, head, 0);
 }
 
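The loop the new comment points at is the standard cure for short writes: writev() may transfer fewer bytes than asked, so consume what went out and retry. A self-contained sketch of the idiom, with iov_consume() written out here for illustration (the patch's helper of the same name lives elsewhere in lguest.c):

    #include <err.h>
    #include <sys/uio.h>
    #include <unistd.h>

    /* Skip "len" bytes of an iovec array, shrinking entries as we pass. */
    static void iov_consume(struct iovec iov[], unsigned num, size_t len)
    {
    	unsigned i;

    	for (i = 0; i < num; i++) {
    		size_t used = len < iov[i].iov_len ? len : iov[i].iov_len;
    		iov[i].iov_base = (char *)iov[i].iov_base + used;
    		iov[i].iov_len -= used;
    		len -= used;
    	}
    }

    static int iov_empty(const struct iovec iov[], unsigned num)
    {
    	unsigned i;

    	for (i = 0; i < num; i++)
    		if (iov[i].iov_len)
    			return 0;
    	return 1;
    }

    int main(void)
    {
    	char part1[] = "Hello, ", part2[] = "world!\n";
    	struct iovec iov[2] = {
    		{ part1, sizeof(part1) - 1 },
    		{ part2, sizeof(part2) - 1 },
    	};

    	/* writev() may write less than asked for, so loop until done. */
    	while (!iov_empty(iov, 2)) {
    		ssize_t len = writev(STDOUT_FILENO, iov, 2);
    		if (len <= 0)
    			err(1, "Write to stdout gave %zi", len);
    		iov_consume(iov, 2, len);
    	}
    	return 0;
    }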
@@ -862,15 +884,30 @@ static void net_output(struct virtqueue *vq)
 	unsigned int head, out, in;
 	struct iovec iov[vq->vring.num];
 
+	/* We usually wait in here for the Guest to give us a packet. */
 	head = wait_for_vq_desc(vq, iov, &out, &in);
 	if (in)
 		errx(1, "Input buffers in net output queue?");
+	/*
+	 * Send the whole thing through to /dev/net/tun.  It expects the exact
+	 * same format: what a coincidence!
+	 */
 	if (writev(net_info->tunfd, iov, out) < 0)
 		errx(1, "Write to tun failed?");
+
+	/*
+	 * Done with that one; wait_for_vq_desc() will send the interrupt if
+	 * all packets are processed.
+	 */
 	add_used(vq, head, 0);
 }
 
-/* Will reading from this file descriptor block? */
+/*
+ * Handling network input is a bit trickier, because I've tried to optimize it.
+ *
+ * First we have a helper routine which tells us if reading from this file
+ * descriptor (ie. the /dev/net/tun device) will block:
+ */
 static bool will_block(int fd)
 {
 	fd_set fdset;
@@ -880,7 +917,11 @@ static bool will_block(int fd)
 	return select(fd+1, &fdset, NULL, NULL, &zero) != 1;
 }
 
-/* This handles packets coming in from the tun device to our Guest. */
+/*
+ * This handles packets coming in from the tun device to our Guest.  Like all
+ * service routines, it gets called again as soon as it returns, so you don't
+ * see a while(1) loop here.
+ */
 static void net_input(struct virtqueue *vq)
 {
 	int len;
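will_block(), split across the two hunks above, is a zero-timeout select(): a poll that asks "would a read block right now?" without sleeping. A standalone sketch exercising the same check on a pipe:

    #include <err.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <sys/select.h>
    #include <sys/time.h>
    #include <unistd.h>

    /* Will reading from this file descriptor block? */
    static bool will_block(int fd)
    {
    	fd_set fdset;
    	struct timeval zero = { 0, 0 };

    	FD_ZERO(&fdset);
    	FD_SET(fd, &fdset);
    	/* Zero timeout means "poll": 1 ready fd means a read won't block. */
    	return select(fd + 1, &fdset, NULL, NULL, &zero) != 1;
    }

    int main(void)
    {
    	int pipefd[2];

    	if (pipe(pipefd) != 0)
    		err(1, "pipe");
    	printf("empty pipe would block: %d\n", will_block(pipefd[0]));
    	if (write(pipefd[1], "x", 1) != 1)
    		err(1, "write");
    	printf("after a byte arrives:   %d\n", will_block(pipefd[0]));
    	return 0;
    }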
@@ -888,21 +929,38 @@ static void net_input(struct virtqueue *vq)
 	struct iovec iov[vq->vring.num];
 	struct net_info *net_info = vq->dev->priv;
 
+	/*
+	 * Get a descriptor to write an incoming packet into.  This will also
+	 * send an interrupt if they're out of descriptors.
+	 */
 	head = wait_for_vq_desc(vq, iov, &out, &in);
 	if (out)
 		errx(1, "Output buffers in net input queue?");
 
-	/* Deliver interrupt now, since we're about to sleep. */
+	/*
+	 * If it looks like we'll block reading from the tun device, send them
+	 * an interrupt.
+	 */
 	if (vq->pending_used && will_block(net_info->tunfd))
 		trigger_irq(vq);
 
+	/*
+	 * Read in the packet.  This is where we normally wait (when there's no
+	 * incoming network traffic).
+	 */
 	len = readv(net_info->tunfd, iov, in);
 	if (len <= 0)
 		err(1, "Failed to read from tun.");
+
+	/*
+	 * Mark that packet buffer as used, but don't interrupt here.  We want
+	 * to wait until we've done as much work as we can.
+	 */
 	add_used(vq, head, len);
 }
+/*:*/
 
-/* This is the helper to create threads. */
+/* This is the helper to create threads: run the service routine in a loop. */
 static int do_thread(void *_vq)
 {
 	struct virtqueue *vq = _vq;
@@ -950,11 +1008,14 @@ static void reset_device(struct device *dev)
 	signal(SIGCHLD, (void *)kill_launcher);
 }
 
+/*L:216
+ * This actually creates the thread which services the virtqueue for a device.
+ */
 static void create_thread(struct virtqueue *vq)
 {
 	/*
-	 * Create stack for thread and run it.  Since the stack grows upwards,
-	 * we point the stack pointer to the end of this region.
+	 * Create stack for thread.  Since the stack grows upwards, we point
+	 * the stack pointer to the end of this region.
 	 */
 	char *stack = malloc(32768);
 	unsigned long args[] = { LHREQ_EVENTFD,
@@ -966,17 +1027,22 @@ static void create_thread(struct virtqueue *vq)
 		err(1, "Creating eventfd");
 	args[2] = vq->eventfd;
 
-	/* Attach an eventfd to this virtqueue: it will go off
-	 * when the Guest does an LHCALL_NOTIFY for this vq. */
+	/*
+	 * Attach an eventfd to this virtqueue: it will go off when the Guest
+	 * does an LHCALL_NOTIFY for this vq.
+	 */
 	if (write(lguest_fd, &args, sizeof(args)) != 0)
 		err(1, "Attaching eventfd");
 
-	/* CLONE_VM: because it has to access the Guest memory, and
-	 * SIGCHLD so we get a signal if it dies. */
+	/*
+	 * CLONE_VM: because it has to access the Guest memory, and SIGCHLD so
+	 * we get a signal if it dies.
+	 */
 	vq->thread = clone(do_thread, stack + 32768, CLONE_VM | SIGCHLD, vq);
 	if (vq->thread == (pid_t)-1)
 		err(1, "Creating clone");
-	/* We close our local copy, now the child has it. */
+
+	/* We close our local copy now the child has it. */
 	close(vq->eventfd);
 }
 
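create_thread() is using clone() as a bare-metal pthread_create(): CLONE_VM shares the whole address space, and SIGCHLD makes the child's death visible to wait(). A self-contained sketch of the same pattern (the 32768-byte stack size matches the patch; everything else here is arbitrary):

    #define _GNU_SOURCE
    #include <err.h>
    #include <sched.h>
    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/wait.h>

    static char message[] = "hello from the service thread";

    static int service(void *arg)
    {
    	printf("%s\n", (char *)arg);
    	return 0;
    }

    int main(void)
    {
    	/* clone() wants the *top* of the stack region on x86. */
    	char *stack = malloc(32768);
    	pid_t child;

    	if (!stack)
    		err(1, "malloc");

    	/* CLONE_VM: share memory; SIGCHLD: tell us when it dies. */
    	child = clone(service, stack + 32768, CLONE_VM | SIGCHLD, message);
    	if (child == (pid_t)-1)
    		err(1, "Creating clone");
    	if (waitpid(child, NULL, 0) < 0)
    		err(1, "waitpid");
    	free(stack);
    	return 0;
    }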
@@ -1028,7 +1094,10 @@ static void update_device_status(struct device *dev)
 		}
 	}
 }
 
-/* This is the generic routine we call when the Guest uses LHCALL_NOTIFY. */
+/*L:215
+ * This is the generic routine we call when the Guest uses LHCALL_NOTIFY.  In
+ * particular, it's used to notify us of device status changes during boot.
+ */
 static void handle_output(unsigned long addr)
 {
 	struct device *i;
@@ -1037,18 +1106,32 @@ static void handle_output(unsigned long addr)
 	for (i = devices.dev; i; i = i->next) {
 		struct virtqueue *vq;
 
-		/* Notifications to device descriptors update device status. */
+		/*
+		 * Notifications to device descriptors mean they updated the
+		 * device status.
+		 */
 		if (from_guest_phys(addr) == i->desc) {
 			update_device_status(i);
 			return;
 		}
 
-		/* Devices *can* be used before status is set to DRIVER_OK. */
+		/*
+		 * Devices *can* be used before status is set to DRIVER_OK.
+		 * The original plan was that they would never do this: they
+		 * would always finish setting up their status bits before
+		 * actually touching the virtqueues.  In practice, we allowed
+		 * them to, and they do (eg. the disk probes for partition
+		 * tables as part of initialization).
+		 *
+		 * If we see this, we start the device: once it's running, we
+		 * expect the device to catch all the notifications.
+		 */
 		for (vq = i->vq; vq; vq = vq->next) {
 			if (addr != vq->config.pfn*getpagesize())
 				continue;
 			if (i->running)
 				errx(1, "Notification on running %s", i->name);
+			/* This just calls create_thread() for each virtqueue */
 			start_device(i);
 			return;
 		}
@@ -1132,6 +1215,11 @@ static void add_virtqueue(struct device *dev, unsigned int num_descs,
 	vq->next = NULL;
 	vq->last_avail_idx = 0;
 	vq->dev = dev;
+
+	/*
+	 * This is the routine the service thread will run, and its Process ID
+	 * once it's running.
+	 */
 	vq->service = service;
 	vq->thread = (pid_t)-1;
 
@@ -1202,7 +1290,8 @@ static void set_config(struct device *dev, unsigned len, const void *conf)
 
 /*
  * This routine does all the creation and setup of a new device, including
- * calling new_dev_desc() to allocate the descriptor and device memory.
+ * calling new_dev_desc() to allocate the descriptor and device memory.  We
+ * don't actually start the service threads until later.
 *
 * See what I mean about userspace being boring?
 */
@@ -1478,19 +1567,7 @@ static void setup_tun_net(char *arg)
 	verbose("device %u: tun %s: %s\n",
 		devices.device_num, tapif, arg);
 }
-
-/*
- * Our block (disk) device should be really simple: the Guest asks for a block
- * number and we read or write that position in the file.  Unfortunately, that
- * was amazingly slow: the Guest waits until the read is finished before
- * running anything else, even if it could have been doing useful work.
- *
- * We could use async I/O, except it's reputed to suck so hard that characters
- * actually go missing from your code when you try to use it.
- *
- * So this was one reason why lguest now does all virtqueue servicing in
- * separate threads: it's more efficient and more like a real device.
- */
+/*:*/
 
 /* This hangs off device->priv. */
 struct vblk_info
@@ -1512,8 +1589,16 @@ struct vblk_info
 /*L:210
 * The Disk
 *
- * Remember that the block device is handled by a separate I/O thread.  We head
- * straight into the core of that thread here:
+ * The disk only has one virtqueue, so it only has one thread.  It is really
+ * simple: the Guest asks for a block number and we read or write that position
+ * in the file.
+ *
+ * Before we serviced each virtqueue in a separate thread, that was unacceptably
+ * slow: the Guest waits until the read is finished before running anything
+ * else, even if it could have been doing useful work.
+ *
+ * We could have used async I/O, except it's reputed to suck so hard that
+ * characters actually go missing from your code when you try to use it.
 */
 static void blk_request(struct virtqueue *vq)
 {
@@ -1525,7 +1610,10 @@ static void blk_request(struct virtqueue *vq)
 	struct iovec iov[vq->vring.num];
 	off64_t off;
 
-	/* Get the next request. */
+	/*
+	 * Get the next request, where we normally wait.  It triggers the
+	 * interrupt to acknowledge previously serviced requests (if any).
+	 */
 	head = wait_for_vq_desc(vq, iov, &out_num, &in_num);
 
 	/*
@@ -1539,6 +1627,10 @@ static void blk_request(struct virtqueue *vq)
 
 	out = convert(&iov[0], struct virtio_blk_outhdr);
 	in = convert(&iov[out_num+in_num-1], u8);
+	/*
+	 * For historical reasons, block operations are expressed in 512 byte
+	 * "sectors".
+	 */
 	off = out->sector * 512;
 
 	/*
@@ -1614,6 +1706,7 @@ static void blk_request(struct virtqueue *vq)
 	if (out->type & VIRTIO_BLK_T_BARRIER)
 		fdatasync(vblk->fd);
 
+	/* Finished that request. */
 	add_used(vq, head, wlen);
 }
 
@@ -1682,9 +1775,8 @@ static void rng_input(struct virtqueue *vq)
 		errx(1, "Output buffers in rng?");
 
 	/*
-	 * This is why we convert to iovecs: the readv() call uses them, and so
-	 * it reads straight into the Guest's buffer.  We loop to make sure we
-	 * fill it.
+	 * Just like the console write, we loop to cover the whole iovec.
+	 * In this case, short reads actually happen quite a bit.
 	 */
 	while (!iov_empty(iov, in_num)) {
 		len = readv(rng_info->rfd, iov, in_num);
@@ -1818,7 +1910,9 @@ int main(int argc, char *argv[])
 	devices.lastdev = NULL;
 	devices.next_irq = 1;
 
+	/* We're CPU 0.  In fact, that's the only CPU possible right now. */
 	cpu_id = 0;
+
 	/*
 	 * We need to know how much memory so we can set up the device
 	 * descriptor and memory pages for the devices as we parse the command
@@ -1926,7 +2020,7 @@ int main(int argc, char *argv[])
 	 */
 	tell_kernel(start);
 
-	/* Ensure that we terminate if a child dies. */
+	/* Ensure that we terminate if a device-servicing child dies. */
 	signal(SIGCHLD, kill_launcher);
 
 	/* If we exit via err(), this kills all the threads, restores tty. */
@@ -35,10 +35,10 @@
 * operations?  There are two ways: the direct way is to make a "hypercall",
 * to make requests of the Host Itself.
 *
- * We use the KVM hypercall mechanism.  Seventeen hypercalls are
- * available: the hypercall number is put in the %eax register, and the
- * arguments (when required) are placed in %ebx, %ecx, %edx and %esi.
- * If a return value makes sense, it's returned in %eax.
+ * We use the KVM hypercall mechanism, though completely different hypercall
+ * numbers.  Seventeen hypercalls are available: the hypercall number is put in
+ * the %eax register, and the arguments (when required) are placed in %ebx,
+ * %ecx, %edx and %esi.  If a return value makes sense, it's returned in %eax.
 *
 * Grossly invalid calls result in Sudden Death at the hands of the vengeful
 * Host, rather than returning failure.  This reflects Winston Churchill's
@@ -154,6 +154,7 @@ static void lazy_hcall1(unsigned long call,
 		async_hcall(call, arg1, 0, 0, 0);
 }
 
+/* You can imagine what lazy_hcall2, 3 and 4 look like. :*/
 static void lazy_hcall2(unsigned long call,
 			unsigned long arg1,
 			unsigned long arg2)
@@ -189,8 +190,10 @@ static void lazy_hcall4(unsigned long call,
 }
 #endif
 
-/* When lazy mode is turned off reset the per-cpu lazy mode variable and then
- * issue the do-nothing hypercall to flush any stored calls. */
+/*G:036
+ * When lazy mode is turned off reset the per-cpu lazy mode variable and then
+ * issue the do-nothing hypercall to flush any stored calls.
+:*/
 static void lguest_leave_lazy_mmu_mode(void)
 {
 	kvm_hypercall0(LHCALL_FLUSH_ASYNC);
@@ -250,13 +253,11 @@ extern void lg_irq_enable(void);
 extern void lg_restore_fl(unsigned long flags);
 
 /*M:003
- * Note that we don't check for outstanding interrupts when we re-enable them
- * (or when we unmask an interrupt).  This seems to work for the moment, since
- * interrupts are rare and we'll just get the interrupt on the next timer tick,
- * but now we can run with CONFIG_NO_HZ, we should revisit this.  One way would
- * be to put the "irq_enabled" field in a page by itself, and have the Host
- * write-protect it when an interrupt comes in when irqs are disabled.  There
- * will then be a page fault as soon as interrupts are re-enabled.
+ * We could be more efficient in our checking of outstanding interrupts, rather
+ * than using a branch.  One way would be to put the "irq_enabled" field in a
+ * page by itself, and have the Host write-protect it when an interrupt comes
+ * in when irqs are disabled.  There will then be a page fault as soon as
+ * interrupts are re-enabled.
 *
 * A better method is to implement soft interrupt disable generally for x86:
 * instead of disabling interrupts, we set a flag.  If an interrupt does come
@@ -568,7 +569,7 @@ static void lguest_write_cr4(unsigned long val)
 * cr3 ---> +---------+
 *	    |  --------->+---------+
 *	    |	|	 | PADDR1  |
- *	  Top-level	 | PADDR2  |
+ *	  Mid-level	 | PADDR2  |
 *	  (PMD) page	 |	   |
 *	    |	|	 | Lower-level |
 *	    |	|	 | (PTE) page  |
@@ -588,23 +589,62 @@ static void lguest_write_cr4(unsigned long val)
 *  Index into top     Index into second     Offset within page
 *  page directory     page pagetable page
 *
- * The kernel spends a lot of time changing both the top-level page directory
- * and lower-level pagetable pages.  The Guest doesn't know physical addresses,
- * so while it maintains these page tables exactly like normal, it also needs
- * to keep the Host informed whenever it makes a change: the Host will create
- * the real page tables based on the Guests'.
+ * Now, unfortunately, this isn't the whole story: Intel added Physical Address
+ * Extension (PAE) to allow 32 bit systems to use 64GB of memory (ie. 36 bits).
+ * These are held in 64-bit page table entries, so we can now only fit 512
+ * entries in a page, and the neat three-level tree breaks down.
+ *
+ * The result is a four level page table:
+ *
+ * cr3 --> [ 4 Upper  ]
+ *	   [   Level  ]
+ *	   [  Entries ]
+ *	   [(PUD Page)]---> +---------+
+ *			    |  --------->+---------+
+ *			    |	|	 | PADDR1  |
+ *			  Mid-level	 | PADDR2  |
+ *			  (PMD) page	 |	   |
+ *			    |	|	 | Lower-level |
+ *			    |	|	 | (PTE) page  |
+ *			    |	|	 |	   |
+ *			      ....	    ....
+ *
+ *
+ * And the virtual address is decoded as:
+ *
+ *      1 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ *      |<-2->|<--- 9 bits ---->|<---- 9 bits --->|<------ 12 bits ------>|
+ *   Index into    Index into mid    Index into lower    Offset within page
+ *   top entries   directory page    pagetable page
+ *
+ * It's too hard to switch between these two formats at runtime, so Linux only
+ * supports one or the other depending on whether CONFIG_X86_PAE is set.  Many
+ * distributions turn it on, and not just for people with silly amounts of
+ * memory: the larger PTE entries allow room for the NX bit, which lets the
+ * kernel disable execution of pages and increase security.
+ *
+ * This was a problem for lguest, which couldn't run on these distributions;
+ * then Matias Zabaljauregui figured it all out and implemented it, and only a
+ * handful of puppies were crushed in the process!
+ *
+ * Back to our point: the kernel spends a lot of time changing both the
+ * top-level page directory and lower-level pagetable pages.  The Guest doesn't
+ * know physical addresses, so while it maintains these page tables exactly
+ * like normal, it also needs to keep the Host informed whenever it makes a
+ * change: the Host will create the real page tables based on the Guests'.
 */
 
 /*
- * The Guest calls this to set a second-level entry (pte), ie. to map a page
- * into a process' address space.  We set the entry then tell the Host the
- * toplevel and address this corresponds to.  The Guest uses one pagetable per
- * process, so we need to tell the Host which one we're changing (mm->pgd).
+ * The Guest calls this after it has set a second-level entry (pte), ie. to map
+ * a page into a process' address space.  We tell the Host the toplevel and
+ * address this corresponds to.  The Guest uses one pagetable per process, so
+ * we need to tell the Host which one we're changing (mm->pgd).
 */
 static void lguest_pte_update(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep)
 {
 #ifdef CONFIG_X86_PAE
+	/* PAE needs to hand a 64 bit page table entry, so it uses two args. */
 	lazy_hcall4(LHCALL_SET_PTE, __pa(mm->pgd), addr,
 		    ptep->pte_low, ptep->pte_high);
 #else
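Those two diagrams invite a sanity check: 2 + 9 + 9 + 12 = 32, and the indices fall out with plain shifts and masks. A tiny standalone example (ordinary arithmetic, no kernel headers; the sample address is arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint32_t vaddr = 0xC0401000;	/* Any 32-bit virtual address. */

    	/* PAE: 2 bits of PGD, 9 of PMD, 9 of PTE, 12 of page offset. */
    	unsigned pgd_idx = (vaddr >> 30) & 0x3;
    	unsigned pmd_idx = (vaddr >> 21) & 0x1ff;
    	unsigned pte_idx = (vaddr >> 12) & 0x1ff;
    	unsigned offset  = vaddr & 0xfff;

    	printf("pgd %u, pmd %u, pte %u, offset 0x%x\n",
    	       pgd_idx, pmd_idx, pte_idx, offset);
    	return 0;
    }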
@@ -612,6 +652,7 @@ static void lguest_pte_update(struct mm_struct *mm, unsigned long addr,
 #endif
 }
 
+/* This is the "set and update" combo-meal-deal version. */
 static void lguest_set_pte_at(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep, pte_t pteval)
 {
@@ -672,6 +713,11 @@ static void lguest_set_pte(pte_t *ptep, pte_t pteval)
 }
 
 #ifdef CONFIG_X86_PAE
+/*
+ * With 64-bit PTE values, we need to be careful setting them: if we set 32
+ * bits at a time, the hardware could see a weird half-set entry.  These
+ * versions ensure we update all 64 bits at once.
+ */
 static void lguest_set_pte_atomic(pte_t *ptep, pte_t pte)
 {
 	native_set_pte_atomic(ptep, pte);
@@ -679,13 +725,14 @@ static void lguest_set_pte_atomic(pte_t *ptep, pte_t pte)
 	lazy_hcall1(LHCALL_FLUSH_TLB, 1);
 }
 
-void lguest_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+static void lguest_pte_clear(struct mm_struct *mm, unsigned long addr,
+			     pte_t *ptep)
 {
 	native_pte_clear(mm, addr, ptep);
 	lguest_pte_update(mm, addr, ptep);
 }
 
-void lguest_pmd_clear(pmd_t *pmdp)
+static void lguest_pmd_clear(pmd_t *pmdp)
 {
 	lguest_set_pmd(pmdp, __pmd(0));
 }
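The "weird half-set entry" the comment above warns about is a general torn-write problem: two 32-bit stores into a live 64-bit entry leave a window where the hardware walker sees half old, half new. A userspace sketch of the single-store discipline, standing in a C11 atomic for the kernel's native_set_pte_atomic():

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* A stand-in for a 64-bit PAE page table entry. */
    static _Atomic uint64_t pte;

    static void set_pte_atomic(uint64_t val)
    {
    	/*
    	 * One 64-bit store: a concurrent walker sees either the old or
    	 * the new entry, never 32 bits of each.
    	 */
    	atomic_store(&pte, val);
    }

    int main(void)
    {
    	set_pte_atomic(((uint64_t)0x12345 << 12) | 0x67);
    	printf("pte = %#llx\n", (unsigned long long)atomic_load(&pte));
    	return 0;
    }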
@@ -784,6 +831,14 @@ static void __init lguest_init_IRQ(void)
 	irq_ctx_init(smp_processor_id());
 }
 
+/*
+ * With CONFIG_SPARSE_IRQ, interrupt descriptors are allocated as-needed, so
+ * rather than set them in lguest_init_IRQ we are called here every time an
+ * lguest device needs an interrupt.
+ *
+ * FIXME: irq_to_desc_alloc_node() can fail due to lack of memory, we should
+ * pass that up!
+ */
 void lguest_setup_irq(unsigned int irq)
 {
 	irq_to_desc_alloc_node(irq, 0);
@@ -1298,7 +1353,7 @@ __init void lguest_init(void)
 	 */
 	switch_to_new_gdt(0);
 
-	/* As described in head_32.S, we map the first 128M of memory. */
+	/* We actually boot with all memory mapped, but let's say 128MB. */
 	max_pfn_mapped = (128*1024*1024) >> PAGE_SHIFT;
 
 	/*
@@ -102,6 +102,7 @@ send_interrupts:
 	 * create one manually here.
 	 */
 	.byte 0x0f,0x01,0xc1 /* KVM_HYPERCALL */
+	/* Put eax back the way we found it. */
 	popl %eax
 	ret
 
@@ -125,6 +126,7 @@ ENTRY(lg_restore_fl)
 	jnz send_interrupts
 	/* Again, the normal path has used no extra registers.  Clever, huh? */
 	ret
+/*:*/
 
 /* These demark the EIP range where host should never deliver interrupts. */
 .global lguest_noirq_start
@@ -217,10 +217,15 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
 
 		/*
 		 * It's possible the Guest did a NOTIFY hypercall to the
-		 * Launcher, in which case we return from the read() now.
+		 * Launcher.
 		 */
 		if (cpu->pending_notify) {
+			/*
+			 * Does it just need to write to a registered
+			 * eventfd (ie. the appropriate virtqueue thread)?
+			 */
 			if (!send_notify_to_eventfd(cpu)) {
+				/* OK, we tell the main Launcher. */
 				if (put_user(cpu->pending_notify, user))
 					return -EFAULT;
 				return sizeof(cpu->pending_notify);
@@ -59,7 +59,7 @@ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
 	case LHCALL_SHUTDOWN: {
 		char msg[128];
 		/*
-		 * Shutdown is such a trivial hypercall that we do it in four
+		 * Shutdown is such a trivial hypercall that we do it in five
 		 * lines right here.
 		 *
 		 * If the lgread fails, it will call kill_guest() itself; the
@@ -245,6 +245,10 @@ static void initialize(struct lg_cpu *cpu)
 * device), the Guest will still see the old page.  In practice, this never
 * happens: why would the Guest read a page which it has never written to?  But
 * a similar scenario might one day bite us, so it's worth mentioning.
+ *
+ * Note that if we used a shared anonymous mapping in the Launcher instead of
+ * mapping /dev/zero private, we wouldn't worry about copy-on-write.  And we
+ * need that to switch the Launcher to processes (away from threads) anyway.
 :*/
 
 /*H:100
@@ -236,7 +236,7 @@ static void lg_notify(struct virtqueue *vq)
 extern void lguest_setup_irq(unsigned int irq);
 
 /*
- * This routine finds the first virtqueue described in the configuration of
+ * This routine finds the Nth virtqueue described in the configuration of
 * this device and sets it up.
 *
 * This is kind of an ugly duckling.  It'd be nicer to have a standard
@@ -244,9 +244,6 @@ extern void lguest_setup_irq(unsigned int irq);
 * everyone wants to do it differently.  The KVM coders want the Guest to
 * allocate its own pages and tell the Host where they are, but for lguest it's
 * simpler for the Host to simply tell us where the pages are.
- *
- * So we provide drivers with a "find the Nth virtqueue and set it up"
- * function.
 */
 static struct virtqueue *lg_find_vq(struct virtio_device *vdev,
 				    unsigned index,
@@ -422,7 +419,11 @@ static void add_lguest_device(struct lguest_device_desc *d,
 
 	/* This devices' parent is the lguest/ dir. */
 	ldev->vdev.dev.parent = lguest_root;
-	/* We have a unique device index thanks to the dev_index counter. */
+	/*
+	 * The device type comes straight from the descriptor.  There's also a
+	 * device vendor field in the virtio_device struct, which we leave as
+	 * 0.
+	 */
 	ldev->vdev.id.device = d->type;
 	/*
 	 * We have a simple set of routines for querying the device's
@@ -1,9 +1,8 @@
-/*P:200
- * This contains all the /dev/lguest code, whereby the userspace launcher
+/*P:200 This contains all the /dev/lguest code, whereby the userspace launcher
 * controls and communicates with the Guest.  For example, the first write will
- * tell us the Guest's memory layout, pagetable, entry point and kernel address
- * offset.  A read will run the Guest until something happens, such as a signal
- * or the Guest doing a NOTIFY out to the Launcher.
+ * tell us the Guest's memory layout and entry point.  A read will run the
+ * Guest until something happens, such as a signal or the Guest doing a NOTIFY
+ * out to the Launcher.
 :*/
 #include <linux/uaccess.h>
 #include <linux/miscdevice.h>
@@ -13,14 +12,41 @@
 #include <linux/file.h>
 #include "lg.h"
 
+/*L:056
+ * Before we move on, let's jump ahead and look at what the kernel does when
+ * it needs to look up the eventfds.  That will complete our picture of how we
+ * use RCU.
+ *
+ * The notification value is in cpu->pending_notify: we return true if it went
+ * to an eventfd.
+ */
 bool send_notify_to_eventfd(struct lg_cpu *cpu)
 {
 	unsigned int i;
 	struct lg_eventfd_map *map;
 
-	/* lg->eventfds is RCU-protected */
+	/*
+	 * This "rcu_read_lock()" helps track when someone is still looking at
+	 * the (RCU-using) eventfds array.  It's not actually a lock at all;
+	 * indeed it's a noop in many configurations.  (You didn't expect me to
+	 * explain all the RCU secrets here, did you?)
+	 */
 	rcu_read_lock();
+	/*
+	 * rcu_dereference is the counter-side of rcu_assign_pointer(); it
+	 * makes sure we don't access the memory pointed to by
+	 * cpu->lg->eventfds before cpu->lg->eventfds is set.  Sounds crazy,
+	 * but Alpha allows this!  Paul McKenney points out that a really
+	 * aggressive compiler could have the same effect:
+	 *   http://lists.ozlabs.org/pipermail/lguest/2009-July/001560.html
+	 *
+	 * So play safe, use rcu_dereference to get the rcu-protected pointer:
+	 */
 	map = rcu_dereference(cpu->lg->eventfds);
+	/*
+	 * Simple array search: even if they add an eventfd while we do this,
+	 * we'll continue to use the old array and just won't see the new one.
+	 */
 	for (i = 0; i < map->num; i++) {
 		if (map->map[i].addr == cpu->pending_notify) {
 			eventfd_signal(map->map[i].event, 1);
@@ -28,14 +54,43 @@ bool send_notify_to_eventfd(struct lg_cpu *cpu)
 			break;
 		}
 	}
+	/* We're done with the rcu-protected variable cpu->lg->eventfds. */
 	rcu_read_unlock();
 
+	/* If we cleared the notification, it's because we found a match. */
 	return cpu->pending_notify == 0;
 }
 
+/*L:055
+ * One of the more tricksy tricks in the Linux Kernel is a technique called
+ * Read Copy Update.  Since one point of lguest is to teach lguest journeyers
+ * about kernel coding, I use it here.  (In case you're curious, other purposes
+ * include learning about virtualization and instilling a deep appreciation for
+ * simplicity and puppies).
+ *
+ * We keep a simple array which maps LHCALL_NOTIFY values to eventfds, but we
+ * add new eventfds without ever blocking readers from accessing the array.
+ * The current Launcher only does this during boot, so that never happens.  But
+ * Read Copy Update is cool, and adding a lock risks damaging even more puppies
+ * than this code does.
+ *
+ * We allocate a brand new one-larger array, copy the old one and add our new
+ * element.  Then we make the lg eventfd pointer point to the new array.
+ * That's the easy part: now we need to free the old one, but we need to make
+ * sure no slow CPU somewhere is still looking at it.  That's what
+ * synchronize_rcu does for us: waits until every CPU has indicated that it has
+ * moved on to know it's no longer using the old one.
+ *
+ * If that's unclear, see http://en.wikipedia.org/wiki/Read-copy-update.
+ */
 static int add_eventfd(struct lguest *lg, unsigned long addr, int fd)
 {
 	struct lg_eventfd_map *new, *old = lg->eventfds;
 
+	/*
+	 * We don't allow notifications on value 0 anyway (pending_notify of
+	 * 0 means "nothing pending").
+	 */
 	if (!addr)
 		return -EINVAL;
 
@@ -62,12 +117,20 @@ static int add_eventfd(struct lguest *lg, unsigned long addr, int fd)
 	}
 	new->num++;
 
-	/* Now put new one in place. */
+	/*
+	 * Now put new one in place: rcu_assign_pointer() is a fancy way of
+	 * doing "lg->eventfds = new", but it uses memory barriers to make
+	 * absolutely sure that the contents of "new" written above is nailed
+	 * down before we actually do the assignment.
+	 *
+	 * We have to think about these kinds of things when we're operating on
+	 * live data without locks.
+	 */
 	rcu_assign_pointer(lg->eventfds, new);
 
 	/*
 	 * We're not in a big hurry.  Wait until no one's looking at old
-	 * version, then delete it.
+	 * version, then free it.
 	 */
 	synchronize_rcu();
 	kfree(old);
@@ -75,6 +138,14 @@ static int add_eventfd(struct lguest *lg, unsigned long addr, int fd)
 	return 0;
 }
 
+/*L:052
+ * Receiving notifications from the Guest is usually done by attaching a
+ * particular LHCALL_NOTIFY value to an event file descriptor.  The eventfd
+ * will become readable when the Guest does an LHCALL_NOTIFY with that value.
+ *
+ * This is really convenient for processing each virtqueue in a separate
+ * thread.
+ */
 static int attach_eventfd(struct lguest *lg, const unsigned long __user *input)
 {
 	unsigned long addr, fd;
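Pulling the read side and the update side together, the whole RCU dance those comments describe fits in a screenful. A condensed, kernel-style sketch with the lguest-specific types stripped away (the updater is assumed to be serialized by a mutex, as attach_eventfd() does below; names here are illustrative):

    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/types.h>

    struct map {
    	unsigned int num;
    	unsigned long entries[];
    };

    static struct map *current_map;

    /* Read side: cheap, never blocks, tolerates a concurrent update. */
    static bool lookup(unsigned long val)
    {
    	struct map *m;
    	unsigned int i;
    	bool found = false;

    	rcu_read_lock();
    	m = rcu_dereference(current_map);
    	for (i = 0; i < m->num; i++) {
    		if (m->entries[i] == val) {
    			found = true;
    			break;
    		}
    	}
    	rcu_read_unlock();
    	return found;
    }

    /* Update side: copy into a one-larger array, publish, wait, then free. */
    static int add_entry(unsigned long val)
    {
    	struct map *old = current_map, *new;

    	new = kmalloc(sizeof(*new)
    		      + (old->num + 1) * sizeof(old->entries[0]), GFP_KERNEL);
    	if (!new)
    		return -ENOMEM;
    	memcpy(new->entries, old->entries,
    	       old->num * sizeof(old->entries[0]));
    	new->entries[old->num] = val;
    	new->num = old->num + 1;

    	rcu_assign_pointer(current_map, new); /* New readers see "new". */
    	synchronize_rcu();	/* Wait until no reader can hold "old"... */
    	kfree(old);		/* ...only then is freeing it safe. */
    	return 0;
    }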
@@ -86,6 +157,11 @@ static int attach_eventfd(struct lguest *lg, const unsigned long __user *input)
 	if (get_user(fd, input) != 0)
 		return -EFAULT;
 
+	/*
+	 * Just make sure two callers don't add eventfds at once.  We really
+	 * only need to lock against callers adding to the same Guest, so using
+	 * the Big Lguest Lock is overkill.  But this is setup, not a fast path.
+	 */
 	mutex_lock(&lguest_lock);
 	err = add_eventfd(lg, addr, fd);
 	mutex_unlock(&lguest_lock);
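An eventfd, for those who haven't met one, is a tiny kernel counter behind a file descriptor: the kernel's eventfd_signal() (seen in send_notify_to_eventfd() above) adds to it and wakes any reader, while a read() returns and drains the count. A minimal userspace taste of that lifecycle:

    #include <err.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/eventfd.h>
    #include <unistd.h>

    int main(void)
    {
    	int efd = eventfd(0, 0);
    	uint64_t count;

    	if (efd < 0)
    		err(1, "Creating eventfd");

    	/* The "signal" side: each write adds to the eventfd's counter. */
    	count = 1;
    	if (write(efd, &count, sizeof(count)) != sizeof(count))
    		err(1, "Signalling eventfd");

    	/* The service-thread side: read returns and resets the counter. */
    	if (read(efd, &count, sizeof(count)) != sizeof(count))
    		err(1, "Reading eventfd");
    	printf("eventfd fired, count %llu\n", (unsigned long long)count);
    	return 0;
    }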
@ -106,6 +182,10 @@ static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input)
|
|||||||
if (irq >= LGUEST_IRQS)
|
if (irq >= LGUEST_IRQS)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Next time the Guest runs, the core code will see if it can deliver
|
||||||
|
* this interrupt.
|
||||||
|
*/
|
||||||
set_interrupt(cpu, irq);
|
set_interrupt(cpu, irq);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
@ -307,10 +387,10 @@ unlock:
|
|||||||
* The first operation the Launcher does must be a write. All writes
|
* The first operation the Launcher does must be a write. All writes
|
||||||
* start with an unsigned long number: for the first write this must be
|
* start with an unsigned long number: for the first write this must be
|
||||||
* LHREQ_INITIALIZE to set up the Guest. After that the Launcher can use
|
* LHREQ_INITIALIZE to set up the Guest. After that the Launcher can use
|
||||||
* writes of other values to send interrupts.
|
* writes of other values to send interrupts or set up receipt of notifications.
|
||||||
*
|
*
|
||||||
* Note that we overload the "offset" in the /dev/lguest file to indicate what
|
* Note that we overload the "offset" in the /dev/lguest file to indicate what
|
||||||
* CPU number we're dealing with. Currently this is always 0, since we only
|
* CPU number we're dealing with. Currently this is always 0 since we only
|
||||||
* support uniprocessor Guests, but you can see the beginnings of SMP support
|
* support uniprocessor Guests, but you can see the beginnings of SMP support
|
||||||
* here.
|
* here.
|
||||||
*/
|
*/
|
||||||
|
@ -29,10 +29,10 @@
|
|||||||
/*H:300
|
/*H:300
|
||||||
* The Page Table Code
|
* The Page Table Code
|
||||||
*
|
*
|
||||||
* We use two-level page tables for the Guest. If you're not entirely
|
* We use two-level page tables for the Guest, or three-level with PAE. If
|
||||||
* comfortable with virtual addresses, physical addresses and page tables then
|
* you're not entirely comfortable with virtual addresses, physical addresses
|
||||||
* I recommend you review arch/x86/lguest/boot.c's "Page Table Handling" (with
|
* and page tables then I recommend you review arch/x86/lguest/boot.c's "Page
|
||||||
* diagrams!).
|
* Table Handling" (with diagrams!).
|
||||||
*
|
*
|
||||||
* The Guest keeps page tables, but we maintain the actual ones here: these are
|
* The Guest keeps page tables, but we maintain the actual ones here: these are
|
||||||
* called "shadow" page tables. Which is a very Guest-centric name: these are
|
* called "shadow" page tables. Which is a very Guest-centric name: these are
|
||||||
@ -52,9 +52,8 @@
|
|||||||
:*/
|
:*/
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* 1024 entries in a page table page maps 1024 pages: 4MB. The Switcher is
|
* The Switcher uses the complete top PTE page. That's 1024 PTE entries (4MB)
|
||||||
* conveniently placed at the top 4MB, so it uses a separate, complete PTE
|
* or 512 PTE entries with PAE (2MB).
|
||||||
* page.
|
|
||||||
*/
|
*/
|
||||||
#define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1)
|
#define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1)
|
||||||
|
|
||||||
@ -81,7 +80,8 @@ static DEFINE_PER_CPU(pte_t *, switcher_pte_pages);
|
|||||||
|
|
||||||
/*H:320
|
/*H:320
|
||||||
* The page table code is curly enough to need helper functions to keep it
|
* The page table code is curly enough to need helper functions to keep it
|
||||||
* clear and clean.
|
* clear and clean. The kernel itself provides many of them; one advantage
|
||||||
|
* of insisting that the Guest and Host use the same CONFIG_PAE setting.
|
||||||
*
|
*
|
||||||
* There are two functions which return pointers to the shadow (aka "real")
|
* There are two functions which return pointers to the shadow (aka "real")
|
||||||
* page tables.
|
* page tables.
|
||||||
@ -155,7 +155,7 @@ static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
|
|||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* These two functions just like the above two, except they access the Guest
|
* These functions are just like the above two, except they access the Guest
|
||||||
* page tables. Hence they return a Guest address.
|
* page tables. Hence they return a Guest address.
|
||||||
*/
|
*/
|
||||||
static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr)
|
static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr)
|
||||||
@ -165,6 +165,7 @@ static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr)
|
|||||||
}
|
}
|
||||||
|
|
||||||
#ifdef CONFIG_X86_PAE
|
#ifdef CONFIG_X86_PAE
|
||||||
|
/* Follow the PGD to the PMD. */
|
||||||
static unsigned long gpmd_addr(pgd_t gpgd, unsigned long vaddr)
|
static unsigned long gpmd_addr(pgd_t gpgd, unsigned long vaddr)
|
||||||
{
|
{
|
||||||
unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
|
unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
|
||||||
@ -172,6 +173,7 @@ static unsigned long gpmd_addr(pgd_t gpgd, unsigned long vaddr)
|
|||||||
return gpage + pmd_index(vaddr) * sizeof(pmd_t);
|
return gpage + pmd_index(vaddr) * sizeof(pmd_t);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Follow the PMD to the PTE. */
|
||||||
static unsigned long gpte_addr(struct lg_cpu *cpu,
|
static unsigned long gpte_addr(struct lg_cpu *cpu,
|
||||||
pmd_t gpmd, unsigned long vaddr)
|
pmd_t gpmd, unsigned long vaddr)
|
||||||
{
|
{
|
||||||
@ -181,6 +183,7 @@ static unsigned long gpte_addr(struct lg_cpu *cpu,
|
|||||||
return gpage + pte_index(vaddr) * sizeof(pte_t);
|
return gpage + pte_index(vaddr) * sizeof(pte_t);
|
||||||
}
|
}
|
||||||
#else
|
#else
|
||||||
|
/* Follow the PGD to the PTE (no mid-level for !PAE). */
|
||||||
static unsigned long gpte_addr(struct lg_cpu *cpu,
|
static unsigned long gpte_addr(struct lg_cpu *cpu,
|
||||||
pgd_t gpgd, unsigned long vaddr)
|
pgd_t gpgd, unsigned long vaddr)
|
||||||
{
|
{
|
||||||
@ -314,6 +317,7 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
|
|||||||
pte_t gpte;
|
pte_t gpte;
|
||||||
pte_t *spte;
|
pte_t *spte;
|
||||||
|
|
||||||
|
/* Mid level for PAE. */
|
||||||
#ifdef CONFIG_X86_PAE
|
#ifdef CONFIG_X86_PAE
|
||||||
pmd_t *spmd;
|
pmd_t *spmd;
|
||||||
pmd_t gpmd;
|
pmd_t gpmd;
|
||||||
@ -391,6 +395,8 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
|
|||||||
*/
|
*/
|
||||||
gpte_ptr = gpte_addr(cpu, gpgd, vaddr);
|
gpte_ptr = gpte_addr(cpu, gpgd, vaddr);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
/* Read the actual PTE value. */
|
||||||
gpte = lgread(cpu, gpte_ptr, pte_t);
|
gpte = lgread(cpu, gpte_ptr, pte_t);
|
||||||
|
|
||||||
/* If this page isn't in the Guest page tables, we can't page it in. */
|
/* If this page isn't in the Guest page tables, we can't page it in. */
|
||||||
@ -507,6 +513,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
|
|||||||
if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2))
|
if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2))
|
||||||
kill_guest(cpu, "bad stack page %#lx", vaddr);
|
kill_guest(cpu, "bad stack page %#lx", vaddr);
|
||||||
}
|
}
|
||||||
|
/*:*/
|
||||||
|
|
||||||
#ifdef CONFIG_X86_PAE
|
#ifdef CONFIG_X86_PAE
|
||||||
static void release_pmd(pmd_t *spmd)
|
static void release_pmd(pmd_t *spmd)
|
||||||
@ -543,7 +550,11 @@ static void release_pgd(pgd_t *spgd)
|
|||||||
}
|
}
|
||||||
|
|
||||||
#else /* !CONFIG_X86_PAE */
|
#else /* !CONFIG_X86_PAE */
|
||||||
/*H:450 If we chase down the release_pgd() code, it looks like this: */
|
/*H:450
|
||||||
|
* If we chase down the release_pgd() code, the non-PAE version looks like
|
||||||
|
* this. The PAE version is almost identical, but instead of calling
|
||||||
|
* release_pte it calls release_pmd(), which looks much like this.
|
||||||
|
*/
|
||||||
static void release_pgd(pgd_t *spgd)
|
static void release_pgd(pgd_t *spgd)
|
||||||
{
|
{
|
||||||
/* If the entry's not present, there's nothing to release. */
|
/* If the entry's not present, there's nothing to release. */
|
||||||
@@ -898,17 +909,21 @@ void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx)
 		/* ... throw it away. */
 		release_pgd(lg->pgdirs[pgdir].pgdir + idx);
 	}
 
 #ifdef CONFIG_X86_PAE
+/* For setting a mid-level, we just throw everything away.  It's easy. */
 void guest_set_pmd(struct lguest *lg, unsigned long pmdp, u32 idx)
 {
 	guest_pagetable_clear_all(&lg->cpus[0]);
 }
 #endif
 
-/*
- * Once we know how much memory we have we can construct simple identity (which
+/*H:505
+ * To get through boot, we construct simple identity page mappings (which
  * set virtual == physical) and linear mappings which will get the Guest far
- * enough into the boot to create its own.
+ * enough into the boot to create its own.  The linear mapping means we
+ * simplify the Guest boot, but it makes assumptions about their PAGE_OFFSET,
+ * as you'll see.
  *
  * We lay them out of the way, just below the initrd (which is why we need to
  * know its size here).
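Concretely (an illustrative toy model, not code from this commit): "identity"
means virtual page N maps to physical page N, while "linear" means virtual
PAGE_OFFSET + x maps to physical x, so the same RAM shows up at two virtual
addresses during early boot.  The Guest PAGE_OFFSET of 0xC0000000 is an
assumption, as the FIXME further down admits:

#define DEMO_GUEST_PAGE_OFFSET	0xC0000000UL
#define DEMO_PAGE_SHIFT		12

/* Record which physical page each virtual page maps to. */
static void demo_identity_and_linear(unsigned long *vpage_to_ppage,
				     unsigned long mapped_pages)
{
	unsigned long i;
	unsigned long linear_first = DEMO_GUEST_PAGE_OFFSET >> DEMO_PAGE_SHIFT;

	for (i = 0; i < mapped_pages; i++) {
		vpage_to_ppage[i] = i;			/* identity: v == p */
		vpage_to_ppage[linear_first + i] = i;	/* linear: off+x -> x */
	}
}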
@@ -944,6 +959,10 @@ static unsigned long setup_pagetables(struct lguest *lg,
 	linear = (void *)pgdir - linear_pages * PAGE_SIZE;
 
 #ifdef CONFIG_X86_PAE
+	/*
+	 * And the single mid page goes below that.  We only use one, but
+	 * that's enough to map 1G, which definitely gets us through boot.
+	 */
 	pmds = (void *)linear - PAGE_SIZE;
 #endif
 	/*
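Why a single page of PMDs is enough to map 1G: under PAE a PMD page holds 512
entries, each pointing at a page of 512 PTEs, and each PTE maps a 4KB page.
A back-of-envelope check in plain C (constants only, nothing from the tree):

#include <stdio.h>

int main(void)
{
	unsigned long ptrs_per_pmd = 512, ptrs_per_pte = 512, page_size = 4096;

	/* 512 * 512 * 4096 bytes = 2^30 bytes = 1GB per PMD page. */
	printf("%lu MB\n", (ptrs_per_pmd * ptrs_per_pte * page_size) >> 20);
	return 0;
}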
@@ -957,13 +976,14 @@ static unsigned long setup_pagetables(struct lguest *lg,
 			return -EFAULT;
 	}
 
-	/*
-	 * The top level points to the linear page table pages above.
-	 * We setup the identity and linear mappings here.
-	 */
 #ifdef CONFIG_X86_PAE
+	/*
+	 * Make the Guest PMD entries point to the corresponding place in the
+	 * linear mapping (up to one page worth of PMD).
+	 */
 	for (i = j = 0; i < mapped_pages && j < PTRS_PER_PMD;
 	     i += PTRS_PER_PTE, j++) {
+		/* FIXME: native_set_pmd is overkill here. */
 		native_set_pmd(&pmd, __pmd(((unsigned long)(linear + i)
 		    - mem_base) | _PAGE_PRESENT | _PAGE_RW | _PAGE_USER));
 
@@ -971,18 +991,36 @@ static unsigned long setup_pagetables(struct lguest *lg,
 			return -EFAULT;
 	}
 
+	/* One PGD entry, pointing to that PMD page. */
 	set_pgd(&pgd, __pgd(((u32)pmds - mem_base) | _PAGE_PRESENT));
+	/* Copy it in as the first PGD entry (ie. addresses 0-1G). */
 	if (copy_to_user(&pgdir[0], &pgd, sizeof(pgd)) != 0)
 		return -EFAULT;
+	/*
+	 * And the third PGD entry (ie. addresses 3G-4G).
+	 *
+	 * FIXME: This assumes that PAGE_OFFSET for the Guest is 0xC0000000.
+	 */
 	if (copy_to_user(&pgdir[3], &pgd, sizeof(pgd)) != 0)
 		return -EFAULT;
 #else
+	/*
+	 * The top level points to the linear page table pages above.
+	 * We setup the identity and linear mappings here.
+	 */
 	phys_linear = (unsigned long)linear - mem_base;
 	for (i = 0; i < mapped_pages; i += PTRS_PER_PTE) {
 		pgd_t pgd;
+		/*
+		 * Create a PGD entry which points to the right part of the
+		 * linear PTE pages.
+		 */
 		pgd = __pgd((phys_linear + i * sizeof(pte_t)) |
 			    (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER));
 
+		/*
+		 * Copy it into the PGD page at 0 and PAGE_OFFSET.
+		 */
 		if (copy_to_user(&pgdir[i / PTRS_PER_PTE], &pgd, sizeof(pgd))
 		    || copy_to_user(&pgdir[pgd_index(PAGE_OFFSET)
 					   + i / PTRS_PER_PTE],
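The hard-coded indices fall out of simple shifts.  Under PAE there are 4
top-level entries, each covering 1GB, so 0-1G is entry 0 and the 3G-4G window
is entry 3; without PAE there are 1024 entries of 4MB each, so
pgd_index(PAGE_OFFSET) is 768.  A checkable sketch (assumes the Guest's
PAGE_OFFSET is 0xC0000000, exactly the assumption the FIXME warns about):

#include <stdio.h>

int main(void)
{
	unsigned long page_offset = 0xC0000000UL; /* assumed Guest PAGE_OFFSET */

	/* PAE: 2 index bits at the top, 1GB per entry. */
	printf("PAE top-level index:     %lu\n", page_offset >> 30);	/* 3 */

	/* Non-PAE: 10 index bits, 4MB per entry. */
	printf("non-PAE top-level index: %lu\n", page_offset >> 22);	/* 768 */
	return 0;
}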
@@ -992,8 +1030,8 @@ static unsigned long setup_pagetables(struct lguest *lg,
 #endif
 
 	/*
-	 * We return the top level (guest-physical) address: remember where
-	 * this is.
+	 * We return the top level (guest-physical) address: we remember where
+	 * this is to write it into lguest_data when the Guest initializes.
 	 */
 	return (unsigned long)pgdir - mem_base;
 }
@@ -1031,7 +1069,9 @@ int init_guest_pagetable(struct lguest *lg)
 	lg->pgdirs[0].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL);
 	if (!lg->pgdirs[0].pgdir)
 		return -ENOMEM;
+
 #ifdef CONFIG_X86_PAE
+	/* For PAE, we also create the initial mid-level. */
 	pgd = lg->pgdirs[0].pgdir;
 	pmd_table = (pmd_t *) get_zeroed_page(GFP_KERNEL);
 	if (!pmd_table)
@@ -1040,11 +1080,13 @@ int init_guest_pagetable(struct lguest *lg)
 	set_pgd(pgd + SWITCHER_PGD_INDEX,
 		__pgd(__pa(pmd_table) | _PAGE_PRESENT));
 #endif
+
+	/* This is the current page table. */
 	lg->cpus[0].cpu_pgd = 0;
 	return 0;
 }
 
-/* When the Guest calls LHCALL_LGUEST_INIT we do more setup. */
+/*H:508 When the Guest calls LHCALL_LGUEST_INIT we do more setup. */
 void page_table_guest_data_init(struct lg_cpu *cpu)
 {
 	/* We get the kernel address: above this is all kernel memory. */
@@ -1105,12 +1147,16 @@ void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
 	pmd_t switcher_pmd;
 	pmd_t *pmd_table;
 
+	/* FIXME: native_set_pmd is overkill here. */
 	native_set_pmd(&switcher_pmd, pfn_pmd(__pa(switcher_pte_page) >>
 		       PAGE_SHIFT, PAGE_KERNEL_EXEC));
 
+	/* Figure out where the pmd page is, by reading the PGD, and converting
+	 * it to a virtual address. */
 	pmd_table = __va(pgd_pfn(cpu->lg->
 			pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX])
 							<< PAGE_SHIFT);
+	/* Now write it into the shadow page table. */
 	native_set_pmd(&pmd_table[SWITCHER_PMD_INDEX], switcher_pmd);
 #else
 	pgd_t switcher_pgd;
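The __va(pgd_pfn(...) << PAGE_SHIFT) idiom above unpacks a table entry: strip
the flag bits to recover a page-frame number, shift it back into a physical
address, then add the Host's direct-map offset to get a pointer we can
dereference.  Hand-rolled stand-ins to show the idea (the real kernel macros
differ in detail; the direct-map base here is illustrative):

#define DEMO_PAGE_SHIFT		12
#define DEMO_DIRECT_MAP_BASE	0xC0000000UL	/* illustrative host offset */

/* Flags live in the low 12 bits of an entry; the pfn sits above them. */
static unsigned long demo_pgd_pfn(unsigned long entry)
{
	return entry >> DEMO_PAGE_SHIFT;
}

/* Physical-to-virtual within the Host's direct ("linear") mapping. */
static void *demo_va(unsigned long phys)
{
	return (void *)(phys + DEMO_DIRECT_MAP_BASE);
}

/* Usage: pmd_table = demo_va(demo_pgd_pfn(entry) << DEMO_PAGE_SHIFT); */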
@@ -187,7 +187,7 @@ static void run_guest_once(struct lg_cpu *cpu, struct lguest_pages *pages)
  * also simplify copy_in_guest_info().  Note that we'd still need to restore
  * things when we exit to Launcher userspace, but that's fairly easy.
  *
- * We could also try using this hooks for PGE, but that might be too expensive.
+ * We could also try using these hooks for PGE, but that might be too expensive.
  *
  * The hooks were designed for KVM, but we can also put them to good use.
 :*/
@@ -1,7 +1,7 @@
 /*P:900
- * This is the Switcher: code which sits at 0xFFC00000 astride both the
- * Host and Guest to do the low-level Guest<->Host switch.  It is as simple as
- * it can be made, but it's naturally very specific to x86.
+ * This is the Switcher: code which sits at 0xFFC00000 (or 0xFFE00000) astride
+ * both the Host and Guest to do the low-level Guest<->Host switch.  It is as
+ * simple as it can be made, but it's naturally very specific to x86.
  *
  * You have now completed Preparation.  If this has whet your appetite; if you
  * are feeling invigorated and refreshed then the next, more challenging stage