Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull second round of block driver updates from Jens Axboe:
 "As mentioned in the original pull request, the bcache bits were pulled
  because of their dependency on the immutable bio vecs.  Kent re-did
  this part and resubmitted it, so here's the 2nd round of (mostly)
  driver updates for 3.13.  It contains:

 - The bcache work from Kent.

 - Conversion of virtio-blk to blk-mq.  This removes the bio and request
   paths and substitutes the blk-mq path instead.  The end result is
   almost 200 deleted lines.  The patch is acked by Asias and Christoph,
   who both did a bunch of testing; a sketch of the resulting blk-mq
   driver shape follows the quoted message.

 - Removal of a bootmem.h include from Grygorii Strashko, part of a
   larger series of his that kills the dependency on that header file.

 - Removal of __cpuinit from blk-mq from Paul Gortmaker"
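
As context for the virtio-blk bullet above: a condensed, hedged sketch of the
3.13-era blk-mq driver shape the conversion below adopts. The blk_mq_* names
are the real interface exactly as it appears in the diff; everything prefixed
my_* is a hypothetical stand-in, not virtio-blk code.

    #include <linux/blk-mq.h>

    struct my_cmd {                 /* per-request context; blk-mq */
        int status;                 /* allocates it via .cmd_size  */
    };

    /* Hypothetical submit helper standing in for real hardware. */
    static bool my_hw_submit(struct request *rq)
    {
        return true;
    }

    static int my_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
    {
        struct my_cmd *cmd = rq->special;

        cmd->status = 0;
        if (!my_hw_submit(rq)) {
            /* Out of resources: stop the queue and restart it
             * later with blk_mq_start_stopped_hw_queues(). */
            blk_mq_stop_hw_queue(hctx);
            return BLK_MQ_RQ_QUEUE_BUSY;
        }
        return BLK_MQ_RQ_QUEUE_OK;  /* complete later via blk_mq_end_io() */
    }

    static struct blk_mq_ops my_mq_ops = {
        .queue_rq   = my_queue_rq,
        .map_queue  = blk_mq_map_queue,
        .alloc_hctx = blk_mq_alloc_single_hw_queue,
        .free_hctx  = blk_mq_free_single_hw_queue,
    };

    static struct blk_mq_reg my_mq_reg = {
        .ops          = &my_mq_ops,
        .nr_hw_queues = 1,
        .queue_depth  = 64,
        .cmd_size     = sizeof(struct my_cmd),
        .numa_node    = NUMA_NO_NODE,
        .flags        = BLK_MQ_F_SHOULD_MERGE,
    };

    /* In probe: q = blk_mq_init_queue(&my_mq_reg, driver_data); */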

* 'for-linus' of git://git.kernel.dk/linux-block: (56 commits)
  virtio_blk: blk-mq support
  blk-mq: remove newly added instances of __cpuinit
  bcache: defensively handle format strings
  bcache: Bypass torture test
  bcache: Delete some slower inline asm
  bcache: Use ida for bcache block dev minor
  bcache: Fix sysfs splat on shutdown with flash only devs
  bcache: Better full stripe scanning
  bcache: Have btree_split() insert into parent directly
  bcache: Move spinlock into struct time_stats
  bcache: Kill sequential_merge option
  bcache: Kill bch_next_recurse_key()
  bcache: Avoid deadlocking in garbage collection
  bcache: Incremental gc
  bcache: Add make_btree_freeing_key()
  bcache: Add btree_node_write_sync()
  bcache: PRECEDING_KEY()
  bcache: bch_(btree|extent)_ptr_invalid()
  bcache: Don't bother with bucket refcount for btree node allocations
  bcache: Debug code improvements
  ...
Author: Linus Torvalds
Date:   2013-11-15 16:33:41 -08:00
Commit: f412f2c60b

31 changed files with 3274 additions and 3485 deletions

block/blk-ioc.c

@@ -6,7 +6,6 @@
 #include <linux/init.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
-#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
 #include <linux/slab.h>
 
 #include "blk.h"

block/blk-mq-cpu.c

@@ -13,8 +13,8 @@
 static LIST_HEAD(blk_mq_cpu_notify_list);
 static DEFINE_SPINLOCK(blk_mq_cpu_notify_lock);
 
-static int __cpuinit blk_mq_main_cpu_notify(struct notifier_block *self,
+static int blk_mq_main_cpu_notify(struct notifier_block *self,
                                   unsigned long action, void *hcpu)
 {
     unsigned int cpu = (unsigned long) hcpu;
     struct blk_mq_cpu_notifier *notify;
@@ -28,8 +28,8 @@ static int __cpuinit blk_mq_main_cpu_notify(struct notifier_block *self,
     return NOTIFY_OK;
 }
 
-static void __cpuinit blk_mq_cpu_notify(void *data, unsigned long action,
+static void blk_mq_cpu_notify(void *data, unsigned long action,
                               unsigned int cpu)
 {
     if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
         /*

block/blk-mq.c

@@ -1444,7 +1444,7 @@ void blk_mq_free_queue(struct request_queue *q)
 EXPORT_SYMBOL(blk_mq_free_queue);
 
 /* Basically redo blk_mq_init_queue with queue frozen */
-static void __cpuinit blk_mq_queue_reinit(struct request_queue *q)
+static void blk_mq_queue_reinit(struct request_queue *q)
 {
     blk_mq_freeze_queue(q);
 
@@ -1461,8 +1461,8 @@ static void __cpuinit blk_mq_queue_reinit(struct request_queue *q)
     blk_mq_unfreeze_queue(q);
 }
 
-static int __cpuinit blk_mq_queue_reinit_notify(struct notifier_block *nb,
+static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
                                       unsigned long action, void *hcpu)
 {
     struct request_queue *q;

drivers/block/virtio_blk.c

@@ -11,12 +11,11 @@
 #include <linux/string_helpers.h>
 #include <scsi/scsi_cmnd.h>
 #include <linux/idr.h>
+#include <linux/blk-mq.h>
+#include <linux/numa.h>
 
 #define PART_BITS 4
 
-static bool use_bio;
-module_param(use_bio, bool, S_IRUGO);
-
 static int major;
 static DEFINE_IDA(vd_index_ida);
 
@@ -26,13 +25,11 @@ struct virtio_blk
 {
     struct virtio_device *vdev;
     struct virtqueue *vq;
-    wait_queue_head_t queue_wait;
+    spinlock_t vq_lock;
 
     /* The disk structure for the kernel. */
     struct gendisk *disk;
 
-    mempool_t *pool;
-
     /* Process context for config space updates */
     struct work_struct config_work;
 
@@ -47,31 +44,17 @@ struct virtio_blk
     /* Ida index - used to track minor number allocations. */
     int index;
-
-    /* Scatterlist: can be too big for stack. */
-    struct scatterlist sg[/*sg_elems*/];
 };
 
 struct virtblk_req
 {
     struct request *req;
-    struct bio *bio;
     struct virtio_blk_outhdr out_hdr;
     struct virtio_scsi_inhdr in_hdr;
-    struct work_struct work;
-    struct virtio_blk *vblk;
-    int flags;
     u8 status;
     struct scatterlist sg[];
 };
 
-enum {
-    VBLK_IS_FLUSH   = 1,
-    VBLK_REQ_FLUSH  = 2,
-    VBLK_REQ_DATA   = 4,
-    VBLK_REQ_FUA    = 8,
-};
-
 static inline int virtblk_result(struct virtblk_req *vbr)
 {
     switch (vbr->status) {
@@ -84,22 +67,6 @@ static inline int virtblk_result(struct virtblk_req *vbr)
     }
 }
 
-static inline struct virtblk_req *virtblk_alloc_req(struct virtio_blk *vblk,
-                                                    gfp_t gfp_mask)
-{
-    struct virtblk_req *vbr;
-
-    vbr = mempool_alloc(vblk->pool, gfp_mask);
-    if (!vbr)
-        return NULL;
-
-    vbr->vblk = vblk;
-    if (use_bio)
-        sg_init_table(vbr->sg, vblk->sg_elems);
-
-    return vbr;
-}
-
 static int __virtblk_add_req(struct virtqueue *vq,
                              struct virtblk_req *vbr,
                              struct scatterlist *data_sg,
@@ -143,83 +110,8 @@ static int __virtblk_add_req(struct virtqueue *vq,
     return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
 }
 
-static void virtblk_add_req(struct virtblk_req *vbr, bool have_data)
-{
-    struct virtio_blk *vblk = vbr->vblk;
-    DEFINE_WAIT(wait);
-    int ret;
-
-    spin_lock_irq(vblk->disk->queue->queue_lock);
-    while (unlikely((ret = __virtblk_add_req(vblk->vq, vbr, vbr->sg,
-                                             have_data)) < 0)) {
-        prepare_to_wait_exclusive(&vblk->queue_wait, &wait,
-                                  TASK_UNINTERRUPTIBLE);
-
-        spin_unlock_irq(vblk->disk->queue->queue_lock);
-        io_schedule();
-        spin_lock_irq(vblk->disk->queue->queue_lock);
-
-        finish_wait(&vblk->queue_wait, &wait);
-    }
-
-    virtqueue_kick(vblk->vq);
-    spin_unlock_irq(vblk->disk->queue->queue_lock);
-}
-
-static void virtblk_bio_send_flush(struct virtblk_req *vbr)
-{
-    vbr->flags |= VBLK_IS_FLUSH;
-    vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
-    vbr->out_hdr.sector = 0;
-    vbr->out_hdr.ioprio = 0;
-
-    virtblk_add_req(vbr, false);
-}
-
-static void virtblk_bio_send_data(struct virtblk_req *vbr)
-{
-    struct virtio_blk *vblk = vbr->vblk;
-    struct bio *bio = vbr->bio;
-    bool have_data;
-
-    vbr->flags &= ~VBLK_IS_FLUSH;
-    vbr->out_hdr.type = 0;
-    vbr->out_hdr.sector = bio->bi_sector;
-    vbr->out_hdr.ioprio = bio_prio(bio);
-
-    if (blk_bio_map_sg(vblk->disk->queue, bio, vbr->sg)) {
-        have_data = true;
-        if (bio->bi_rw & REQ_WRITE)
-            vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
-        else
-            vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
-    } else
-        have_data = false;
-
-    virtblk_add_req(vbr, have_data);
-}
-
-static void virtblk_bio_send_data_work(struct work_struct *work)
-{
-    struct virtblk_req *vbr;
-
-    vbr = container_of(work, struct virtblk_req, work);
-
-    virtblk_bio_send_data(vbr);
-}
-
-static void virtblk_bio_send_flush_work(struct work_struct *work)
-{
-    struct virtblk_req *vbr;
-
-    vbr = container_of(work, struct virtblk_req, work);
-
-    virtblk_bio_send_flush(vbr);
-}
-
 static inline void virtblk_request_done(struct virtblk_req *vbr)
 {
-    struct virtio_blk *vblk = vbr->vblk;
     struct request *req = vbr->req;
     int error = virtblk_result(vbr);
 
@@ -231,92 +123,45 @@ static inline void virtblk_request_done(struct virtblk_req *vbr)
         req->errors = (error != 0);
     }
 
-    __blk_end_request_all(req, error);
-    mempool_free(vbr, vblk->pool);
-}
-
-static inline void virtblk_bio_flush_done(struct virtblk_req *vbr)
-{
-    struct virtio_blk *vblk = vbr->vblk;
-
-    if (vbr->flags & VBLK_REQ_DATA) {
-        /* Send out the actual write data */
-        INIT_WORK(&vbr->work, virtblk_bio_send_data_work);
-        queue_work(virtblk_wq, &vbr->work);
-    } else {
-        bio_endio(vbr->bio, virtblk_result(vbr));
-        mempool_free(vbr, vblk->pool);
-    }
-}
-
-static inline void virtblk_bio_data_done(struct virtblk_req *vbr)
-{
-    struct virtio_blk *vblk = vbr->vblk;
-
-    if (unlikely(vbr->flags & VBLK_REQ_FUA)) {
-        /* Send out a flush before end the bio */
-        vbr->flags &= ~VBLK_REQ_DATA;
-        INIT_WORK(&vbr->work, virtblk_bio_send_flush_work);
-        queue_work(virtblk_wq, &vbr->work);
-    } else {
-        bio_endio(vbr->bio, virtblk_result(vbr));
-        mempool_free(vbr, vblk->pool);
-    }
-}
-
-static inline void virtblk_bio_done(struct virtblk_req *vbr)
-{
-    if (unlikely(vbr->flags & VBLK_IS_FLUSH))
-        virtblk_bio_flush_done(vbr);
-    else
-        virtblk_bio_data_done(vbr);
+    blk_mq_end_io(req, error);
 }
 
 static void virtblk_done(struct virtqueue *vq)
 {
     struct virtio_blk *vblk = vq->vdev->priv;
-    bool bio_done = false, req_done = false;
+    bool req_done = false;
     struct virtblk_req *vbr;
     unsigned long flags;
     unsigned int len;
 
-    spin_lock_irqsave(vblk->disk->queue->queue_lock, flags);
+    spin_lock_irqsave(&vblk->vq_lock, flags);
     do {
         virtqueue_disable_cb(vq);
         while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) {
-            if (vbr->bio) {
-                virtblk_bio_done(vbr);
-                bio_done = true;
-            } else {
-                virtblk_request_done(vbr);
-                req_done = true;
-            }
+            virtblk_request_done(vbr);
+            req_done = true;
         }
         if (unlikely(virtqueue_is_broken(vq)))
             break;
     } while (!virtqueue_enable_cb(vq));
+    spin_unlock_irqrestore(&vblk->vq_lock, flags);
 
     /* In case queue is stopped waiting for more buffers. */
     if (req_done)
-        blk_start_queue(vblk->disk->queue);
-    spin_unlock_irqrestore(vblk->disk->queue->queue_lock, flags);
-
-    if (bio_done)
-        wake_up(&vblk->queue_wait);
+        blk_mq_start_stopped_hw_queues(vblk->disk->queue);
 }
 
-static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
-                   struct request *req)
+static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
 {
+    struct virtio_blk *vblk = hctx->queue->queuedata;
+    struct virtblk_req *vbr = req->special;
+    unsigned long flags;
     unsigned int num;
-    struct virtblk_req *vbr;
+    const bool last = (req->cmd_flags & REQ_END) != 0;
 
-    vbr = virtblk_alloc_req(vblk, GFP_ATOMIC);
-    if (!vbr)
-        /* When another request finishes we'll try again. */
-        return false;
+    BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
 
     vbr->req = req;
-    vbr->bio = NULL;
     if (req->cmd_flags & REQ_FLUSH) {
         vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
         vbr->out_hdr.sector = 0;
@@ -344,7 +189,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
         }
     }
 
-    num = blk_rq_map_sg(q, vbr->req, vblk->sg);
+    num = blk_rq_map_sg(hctx->queue, vbr->req, vbr->sg);
     if (num) {
         if (rq_data_dir(vbr->req) == WRITE)
             vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
@@ -352,63 +197,18 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
             vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
     }
 
-    if (__virtblk_add_req(vblk->vq, vbr, vblk->sg, num) < 0) {
-        mempool_free(vbr, vblk->pool);
-        return false;
-    }
-
-    return true;
-}
-
-static void virtblk_request(struct request_queue *q)
-{
-    struct virtio_blk *vblk = q->queuedata;
-    struct request *req;
-    unsigned int issued = 0;
-
-    while ((req = blk_peek_request(q)) != NULL) {
-        BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
-
-        /* If this request fails, stop queue and wait for something to
-           finish to restart it. */
-        if (!do_req(q, vblk, req)) {
-            blk_stop_queue(q);
-            break;
-        }
-
-        blk_start_request(req);
-        issued++;
-    }
-
-    if (issued)
-        virtqueue_kick(vblk->vq);
-}
-
-static void virtblk_make_request(struct request_queue *q, struct bio *bio)
-{
-    struct virtio_blk *vblk = q->queuedata;
-    struct virtblk_req *vbr;
-
-    BUG_ON(bio->bi_phys_segments + 2 > vblk->sg_elems);
-
-    vbr = virtblk_alloc_req(vblk, GFP_NOIO);
-    if (!vbr) {
-        bio_endio(bio, -ENOMEM);
-        return;
+    spin_lock_irqsave(&vblk->vq_lock, flags);
+    if (__virtblk_add_req(vblk->vq, vbr, vbr->sg, num) < 0) {
+        spin_unlock_irqrestore(&vblk->vq_lock, flags);
+        blk_mq_stop_hw_queue(hctx);
+        virtqueue_kick(vblk->vq);
+        return BLK_MQ_RQ_QUEUE_BUSY;
     }
+    spin_unlock_irqrestore(&vblk->vq_lock, flags);
 
-    vbr->bio = bio;
-    vbr->flags = 0;
-    if (bio->bi_rw & REQ_FLUSH)
-        vbr->flags |= VBLK_REQ_FLUSH;
-    if (bio->bi_rw & REQ_FUA)
-        vbr->flags |= VBLK_REQ_FUA;
-    if (bio->bi_size)
-        vbr->flags |= VBLK_REQ_DATA;
-
-    if (unlikely(vbr->flags & VBLK_REQ_FLUSH))
-        virtblk_bio_send_flush(vbr);
-    else
-        virtblk_bio_send_data(vbr);
+    if (last)
+        virtqueue_kick(vblk->vq);
+    return BLK_MQ_RQ_QUEUE_OK;
 }
 
 /* return id (s/n) string for *disk to *id_str
@@ -673,12 +473,35 @@ static const struct device_attribute dev_attr_cache_type_rw =
     __ATTR(cache_type, S_IRUGO|S_IWUSR,
            virtblk_cache_type_show, virtblk_cache_type_store);
 
+static struct blk_mq_ops virtio_mq_ops = {
+    .queue_rq   = virtio_queue_rq,
+    .map_queue  = blk_mq_map_queue,
+    .alloc_hctx = blk_mq_alloc_single_hw_queue,
+    .free_hctx  = blk_mq_free_single_hw_queue,
+};
+
+static struct blk_mq_reg virtio_mq_reg = {
+    .ops          = &virtio_mq_ops,
+    .nr_hw_queues = 1,
+    .queue_depth  = 64,
+    .numa_node    = NUMA_NO_NODE,
+    .flags        = BLK_MQ_F_SHOULD_MERGE,
+};
+
+static void virtblk_init_vbr(void *data, struct blk_mq_hw_ctx *hctx,
+                             struct request *rq, unsigned int nr)
+{
+    struct virtio_blk *vblk = data;
+    struct virtblk_req *vbr = rq->special;
+
+    sg_init_table(vbr->sg, vblk->sg_elems);
+}
+
 static int virtblk_probe(struct virtio_device *vdev)
 {
     struct virtio_blk *vblk;
     struct request_queue *q;
     int err, index;
-    int pool_size;
 
     u64 cap;
     u32 v, blk_size, sg_elems, opt_io_size;
@@ -702,17 +525,14 @@ static int virtblk_probe(struct virtio_device *vdev)
     /* We need an extra sg elements at head and tail. */
     sg_elems += 2;
-    vdev->priv = vblk = kmalloc(sizeof(*vblk) +
-                                sizeof(vblk->sg[0]) * sg_elems, GFP_KERNEL);
+    vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
     if (!vblk) {
         err = -ENOMEM;
         goto out_free_index;
     }
 
-    init_waitqueue_head(&vblk->queue_wait);
     vblk->vdev = vdev;
     vblk->sg_elems = sg_elems;
-    sg_init_table(vblk->sg, vblk->sg_elems);
     mutex_init(&vblk->config_lock);
 
     INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
@@ -721,31 +541,27 @@ static int virtblk_probe(struct virtio_device *vdev)
     err = init_vq(vblk);
     if (err)
         goto out_free_vblk;
+    spin_lock_init(&vblk->vq_lock);
 
-    pool_size = sizeof(struct virtblk_req);
-    if (use_bio)
-        pool_size += sizeof(struct scatterlist) * sg_elems;
-    vblk->pool = mempool_create_kmalloc_pool(1, pool_size);
-    if (!vblk->pool) {
-        err = -ENOMEM;
-        goto out_free_vq;
-    }
 
     /* FIXME: How many partitions?  How long is a piece of string? */
     vblk->disk = alloc_disk(1 << PART_BITS);
     if (!vblk->disk) {
         err = -ENOMEM;
-        goto out_mempool;
+        goto out_free_vq;
     }
 
-    q = vblk->disk->queue = blk_init_queue(virtblk_request, NULL);
+    virtio_mq_reg.cmd_size =
+        sizeof(struct virtblk_req) +
+        sizeof(struct scatterlist) * sg_elems;
+
+    q = vblk->disk->queue = blk_mq_init_queue(&virtio_mq_reg, vblk);
     if (!q) {
         err = -ENOMEM;
         goto out_put_disk;
     }
 
-    if (use_bio)
-        blk_queue_make_request(q, virtblk_make_request);
+    blk_mq_init_commands(q, virtblk_init_vbr, vblk);
+
     q->queuedata = vblk;
 
     virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
@@ -848,8 +664,6 @@ out_del_disk:
     blk_cleanup_queue(vblk->disk->queue);
 out_put_disk:
     put_disk(vblk->disk);
-out_mempool:
-    mempool_destroy(vblk->pool);
 out_free_vq:
     vdev->config->del_vqs(vdev);
 out_free_vblk:
@@ -881,7 +695,6 @@ static void virtblk_remove(struct virtio_device *vdev)
     refc = atomic_read(&disk_to_dev(vblk->disk)->kobj.kref.refcount);
     put_disk(vblk->disk);
-    mempool_destroy(vblk->pool);
     vdev->config->del_vqs(vdev);
     kfree(vblk);
 
@@ -905,10 +718,7 @@ static int virtblk_freeze(struct virtio_device *vdev)
     flush_work(&vblk->config_work);
 
-    spin_lock_irq(vblk->disk->queue->queue_lock);
-    blk_stop_queue(vblk->disk->queue);
-    spin_unlock_irq(vblk->disk->queue->queue_lock);
-    blk_sync_queue(vblk->disk->queue);
+    blk_mq_stop_hw_queues(vblk->disk->queue);
 
     vdev->config->del_vqs(vdev);
     return 0;
@@ -921,11 +731,9 @@ static int virtblk_restore(struct virtio_device *vdev)
     vblk->config_enable = true;
     ret = init_vq(vdev->priv);
-    if (!ret) {
-        spin_lock_irq(vblk->disk->queue->queue_lock);
-        blk_start_queue(vblk->disk->queue);
-        spin_unlock_irq(vblk->disk->queue->queue_lock);
-    }
+    if (!ret)
+        blk_mq_start_stopped_hw_queues(vblk->disk->queue);
 
     return ret;
 }
 #endif

drivers/md/bcache/Kconfig

@@ -13,15 +13,8 @@ config BCACHE_DEBUG
     ---help---
     Don't select this option unless you're a developer
 
-    Enables extra debugging tools (primarily a fuzz tester)
-
-config BCACHE_EDEBUG
-    bool "Extended runtime checks"
-    depends on BCACHE
-    ---help---
-    Don't select this option unless you're a developer
-
-    Enables extra runtime checks which significantly affect performance
+    Enables extra debugging tools, allows expensive runtime checks to be
+    turned on.
 
 config BCACHE_CLOSURES_DEBUG
     bool "Debug closures"

drivers/md/bcache/alloc.c

@@ -63,13 +63,12 @@
 #include "bcache.h"
 #include "btree.h"
 
+#include <linux/blkdev.h>
 #include <linux/freezer.h>
 #include <linux/kthread.h>
 #include <linux/random.h>
 #include <trace/events/bcache.h>
 
-#define MAX_IN_FLIGHT_DISCARDS		8U
-
 /* Bucket heap / gen */
 
 uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)
@@ -121,75 +120,6 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
     mutex_unlock(&c->bucket_lock);
 }
 
-/* Discard/TRIM */
-
-struct discard {
-    struct list_head	list;
-    struct work_struct	work;
-    struct cache	*ca;
-    long		bucket;
-
-    struct bio		bio;
-    struct bio_vec	bv;
-};
-
-static void discard_finish(struct work_struct *w)
-{
-    struct discard *d = container_of(w, struct discard, work);
-    struct cache *ca = d->ca;
-    char buf[BDEVNAME_SIZE];
-
-    if (!test_bit(BIO_UPTODATE, &d->bio.bi_flags)) {
-        pr_notice("discard error on %s, disabling",
-                  bdevname(ca->bdev, buf));
-        d->ca->discard = 0;
-    }
-
-    mutex_lock(&ca->set->bucket_lock);
-
-    fifo_push(&ca->free, d->bucket);
-    list_add(&d->list, &ca->discards);
-    atomic_dec(&ca->discards_in_flight);
-
-    mutex_unlock(&ca->set->bucket_lock);
-
-    closure_wake_up(&ca->set->bucket_wait);
-    wake_up_process(ca->alloc_thread);
-
-    closure_put(&ca->set->cl);
-}
-
-static void discard_endio(struct bio *bio, int error)
-{
-    struct discard *d = container_of(bio, struct discard, bio);
-    schedule_work(&d->work);
-}
-
-static void do_discard(struct cache *ca, long bucket)
-{
-    struct discard *d = list_first_entry(&ca->discards,
-                                         struct discard, list);
-
-    list_del(&d->list);
-    d->bucket = bucket;
-
-    atomic_inc(&ca->discards_in_flight);
-    closure_get(&ca->set->cl);
-
-    bio_init(&d->bio);
-
-    d->bio.bi_sector	= bucket_to_sector(ca->set, d->bucket);
-    d->bio.bi_bdev	= ca->bdev;
-    d->bio.bi_rw	= REQ_WRITE|REQ_DISCARD;
-    d->bio.bi_max_vecs	= 1;
-    d->bio.bi_io_vec	= d->bio.bi_inline_vecs;
-    d->bio.bi_size	= bucket_bytes(ca);
-    d->bio.bi_end_io	= discard_endio;
-
-    bio_set_prio(&d->bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
-    submit_bio(0, &d->bio);
-}
-
 /* Allocation */
 
 static inline bool can_inc_bucket_gen(struct bucket *b)
@@ -280,7 +210,7 @@ static void invalidate_buckets_lru(struct cache *ca)
              * multiple times when it can't do anything
              */
            ca->invalidate_needs_gc = 1;
-           bch_queue_gc(ca->set);
+           wake_up_gc(ca->set);
            return;
         }
 
@@ -305,7 +235,7 @@ static void invalidate_buckets_fifo(struct cache *ca)
 
         if (++checked >= ca->sb.nbuckets) {
             ca->invalidate_needs_gc = 1;
-            bch_queue_gc(ca->set);
+            wake_up_gc(ca->set);
             return;
         }
     }
@@ -330,7 +260,7 @@ static void invalidate_buckets_random(struct cache *ca)
 
         if (++checked >= ca->sb.nbuckets / 2) {
             ca->invalidate_needs_gc = 1;
-            bch_queue_gc(ca->set);
+            wake_up_gc(ca->set);
             return;
         }
     }
@@ -398,16 +328,18 @@ static int bch_allocator_thread(void *arg)
            else
                break;
 
-           allocator_wait(ca, (int) fifo_free(&ca->free) >
-                          atomic_read(&ca->discards_in_flight));
-
            if (ca->discard) {
-               allocator_wait(ca, !list_empty(&ca->discards));
-               do_discard(ca, bucket);
-           } else {
-               fifo_push(&ca->free, bucket);
-               closure_wake_up(&ca->set->bucket_wait);
+               mutex_unlock(&ca->set->bucket_lock);
+               blkdev_issue_discard(ca->bdev,
+                       bucket_to_sector(ca->set, bucket),
+                       ca->sb.block_size, GFP_KERNEL, 0);
+               mutex_lock(&ca->set->bucket_lock);
            }
+
+           allocator_wait(ca, !fifo_full(&ca->free));
+
+           fifo_push(&ca->free, bucket);
+           wake_up(&ca->set->bucket_wait);
        }
 
        /*
@@ -433,16 +365,40 @@ static int bch_allocator_thread(void *arg)
    }
 }
 
-long bch_bucket_alloc(struct cache *ca, unsigned watermark, struct closure *cl)
+long bch_bucket_alloc(struct cache *ca, unsigned watermark, bool wait)
 {
-   long r = -1;
-again:
+   DEFINE_WAIT(w);
+   struct bucket *b;
+   long r;
+
+   /* fastpath */
+   if (fifo_used(&ca->free) > ca->watermark[watermark]) {
+       fifo_pop(&ca->free, r);
+       goto out;
+   }
+
+   if (!wait)
+       return -1;
+
+   while (1) {
+       if (fifo_used(&ca->free) > ca->watermark[watermark]) {
+           fifo_pop(&ca->free, r);
+           break;
+       }
+
+       prepare_to_wait(&ca->set->bucket_wait, &w,
+                       TASK_UNINTERRUPTIBLE);
+
+       mutex_unlock(&ca->set->bucket_lock);
+       schedule();
+       mutex_lock(&ca->set->bucket_lock);
+   }
+
+   finish_wait(&ca->set->bucket_wait, &w);
+out:
    wake_up_process(ca->alloc_thread);
 
-   if (fifo_used(&ca->free) > ca->watermark[watermark] &&
-       fifo_pop(&ca->free, r)) {
-       struct bucket *b = ca->buckets + r;
-#ifdef CONFIG_BCACHE_EDEBUG
+   if (expensive_debug_checks(ca->set)) {
        size_t iter;
        long i;
 
@@ -455,36 +411,23 @@ again:
            BUG_ON(i == r);
        fifo_for_each(i, &ca->unused, iter)
            BUG_ON(i == r);
-#endif
-       BUG_ON(atomic_read(&b->pin) != 1);
-
-       SET_GC_SECTORS_USED(b, ca->sb.bucket_size);
-
-       if (watermark <= WATERMARK_METADATA) {
-           SET_GC_MARK(b, GC_MARK_METADATA);
-           b->prio = BTREE_PRIO;
-       } else {
-           SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
-           b->prio = INITIAL_PRIO;
-       }
-
-       return r;
    }
 
-   trace_bcache_alloc_fail(ca);
+   b = ca->buckets + r;
 
-   if (cl) {
-       closure_wait(&ca->set->bucket_wait, cl);
+   BUG_ON(atomic_read(&b->pin) != 1);
 
-       if (closure_blocking(cl)) {
-           mutex_unlock(&ca->set->bucket_lock);
-           closure_sync(cl);
-           mutex_lock(&ca->set->bucket_lock);
-           goto again;
-       }
+   SET_GC_SECTORS_USED(b, ca->sb.bucket_size);
+
+   if (watermark <= WATERMARK_METADATA) {
+       SET_GC_MARK(b, GC_MARK_METADATA);
+       b->prio = BTREE_PRIO;
+   } else {
+       SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
+       b->prio = INITIAL_PRIO;
    }
 
-   return -1;
+   return r;
 }
 
 void bch_bucket_free(struct cache_set *c, struct bkey *k)
@@ -501,7 +444,7 @@ void bch_bucket_free(struct cache_set *c, struct bkey *k)
 }
 
 int __bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
-                           struct bkey *k, int n, struct closure *cl)
+                           struct bkey *k, int n, bool wait)
 {
    int i;
 
@@ -514,7 +457,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
 
    for (i = 0; i < n; i++) {
        struct cache *ca = c->cache_by_alloc[i];
-       long b = bch_bucket_alloc(ca, watermark, cl);
+       long b = bch_bucket_alloc(ca, watermark, wait);
 
        if (b == -1)
            goto err;
@@ -529,22 +472,202 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
    return 0;
 err:
    bch_bucket_free(c, k);
-   __bkey_put(c, k);
+   bkey_put(c, k);
    return -1;
 }
 
 int bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
-                         struct bkey *k, int n, struct closure *cl)
+                         struct bkey *k, int n, bool wait)
 {
    int ret;
    mutex_lock(&c->bucket_lock);
-   ret = __bch_bucket_alloc_set(c, watermark, k, n, cl);
+   ret = __bch_bucket_alloc_set(c, watermark, k, n, wait);
    mutex_unlock(&c->bucket_lock);
    return ret;
 }
 
+/* Sector allocator */
+
+struct open_bucket {
+   struct list_head	list;
+   unsigned		last_write_point;
+   unsigned		sectors_free;
+   BKEY_PADDED(key);
+};
+
+/*
+ * We keep multiple buckets open for writes, and try to segregate different
+ * write streams for better cache utilization: first we look for a bucket where
+ * the last write to it was sequential with the current write, and failing that
+ * we look for a bucket that was last used by the same task.
+ *
+ * The idea is that if you've got multiple tasks pulling data into the cache at
+ * the same time, you'll get better cache utilization if you try to segregate
+ * their data and preserve locality.
+ *
+ * For example, say you're starting Firefox at the same time you're copying a
+ * bunch of files. Firefox will likely end up being fairly hot and stay in the
+ * cache awhile, but the data you copied might not be; if you wrote all that
+ * data to the same buckets it'd get invalidated at the same time.
+ *
+ * Both of those tasks will be doing fairly random IO so we can't rely on
+ * detecting sequential IO to segregate their data, but going off of the task
+ * should be a sane heuristic.
+ */
+static struct open_bucket *pick_data_bucket(struct cache_set *c,
+                                            const struct bkey *search,
+                                            unsigned write_point,
+                                            struct bkey *alloc)
+{
+   struct open_bucket *ret, *ret_task = NULL;
+
+   list_for_each_entry_reverse(ret, &c->data_buckets, list)
+       if (!bkey_cmp(&ret->key, search))
+           goto found;
+       else if (ret->last_write_point == write_point)
+           ret_task = ret;
+
+   ret = ret_task ?: list_first_entry(&c->data_buckets,
+                                      struct open_bucket, list);
+found:
+   if (!ret->sectors_free && KEY_PTRS(alloc)) {
+       ret->sectors_free = c->sb.bucket_size;
+       bkey_copy(&ret->key, alloc);
+       bkey_init(alloc);
+   }
+
+   if (!ret->sectors_free)
+       ret = NULL;
+
+   return ret;
+}
+
+/*
+ * Allocates some space in the cache to write to, and sets k to point to the
+ * newly allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point
+ * to the end of the newly allocated space).
+ *
+ * May allocate fewer sectors than @sectors, KEY_SIZE(k) indicates how many
+ * sectors were actually allocated.
+ *
+ * If s->writeback is true, will not fail.
+ */
+bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
+                       unsigned write_point, unsigned write_prio, bool wait)
+{
+   struct open_bucket *b;
+   BKEY_PADDED(key) alloc;
+   unsigned i;
+
+   /*
+    * We might have to allocate a new bucket, which we can't do with a
+    * spinlock held. So if we have to allocate, we drop the lock, allocate
+    * and then retry. KEY_PTRS() indicates whether alloc points to
+    * allocated bucket(s).
+    */
+   bkey_init(&alloc.key);
+   spin_lock(&c->data_bucket_lock);
+
+   while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) {
+       unsigned watermark = write_prio
+           ? WATERMARK_MOVINGGC
+           : WATERMARK_NONE;
+
+       spin_unlock(&c->data_bucket_lock);
+
+       if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, wait))
+           return false;
+
+       spin_lock(&c->data_bucket_lock);
+   }
+
+   /*
+    * If we had to allocate, we might race and not need to allocate the
+    * second time we call find_data_bucket(). If we allocated a bucket but
+    * didn't use it, drop the refcount bch_bucket_alloc_set() took:
+    */
+   if (KEY_PTRS(&alloc.key))
+       bkey_put(c, &alloc.key);
+
+   for (i = 0; i < KEY_PTRS(&b->key); i++)
+       EBUG_ON(ptr_stale(c, &b->key, i));
+
+   /* Set up the pointer to the space we're allocating: */
+
+   for (i = 0; i < KEY_PTRS(&b->key); i++)
+       k->ptr[i] = b->key.ptr[i];
+
+   sectors = min(sectors, b->sectors_free);
+
+   SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
+   SET_KEY_SIZE(k, sectors);
+   SET_KEY_PTRS(k, KEY_PTRS(&b->key));
+
+   /*
+    * Move b to the end of the lru, and keep track of what this bucket was
+    * last used for:
+    */
+   list_move_tail(&b->list, &c->data_buckets);
+   bkey_copy_key(&b->key, k);
+   b->last_write_point = write_point;
+
+   b->sectors_free	-= sectors;
+
+   for (i = 0; i < KEY_PTRS(&b->key); i++) {
+       SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);
+
+       atomic_long_add(sectors,
+                       &PTR_CACHE(c, &b->key, i)->sectors_written);
+   }
+
+   if (b->sectors_free < c->sb.block_size)
+       b->sectors_free = 0;
+
+   /*
+    * k takes refcounts on the buckets it points to until it's inserted
+    * into the btree, but if we're done with this bucket we just transfer
+    * get_data_bucket()'s refcount.
+    */
+   if (b->sectors_free)
+       for (i = 0; i < KEY_PTRS(&b->key); i++)
+           atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);
+
+   spin_unlock(&c->data_bucket_lock);
+   return true;
+}
+
 /* Init */
 
+void bch_open_buckets_free(struct cache_set *c)
+{
+   struct open_bucket *b;
+
+   while (!list_empty(&c->data_buckets)) {
+       b = list_first_entry(&c->data_buckets,
+                            struct open_bucket, list);
+       list_del(&b->list);
+       kfree(b);
+   }
+}
+
+int bch_open_buckets_alloc(struct cache_set *c)
+{
+   int i;
+
+   spin_lock_init(&c->data_bucket_lock);
+
+   for (i = 0; i < 6; i++) {
+       struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);
+       if (!b)
+           return -ENOMEM;
+
+       list_add(&b->list, &c->data_buckets);
+   }
+
+   return 0;
+}
+
 int bch_cache_allocator_start(struct cache *ca)
 {
    struct task_struct *k = kthread_run(bch_allocator_thread,
@@ -556,22 +679,8 @@ int bch_cache_allocator_start(struct cache *ca)
    return 0;
 }
 
-void bch_cache_allocator_exit(struct cache *ca)
-{
-   struct discard *d;
-
-   while (!list_empty(&ca->discards)) {
-       d = list_first_entry(&ca->discards, struct discard, list);
-       cancel_work_sync(&d->work);
-       list_del(&d->list);
-       kfree(d);
-   }
-}
-
 int bch_cache_allocator_init(struct cache *ca)
 {
-   unsigned i;
-
    /*
     * Reserve:
     * Prio/gen writes first
@@ -589,15 +698,5 @@ int bch_cache_allocator_init(struct cache *ca)
    ca->watermark[WATERMARK_NONE] = ca->free.size / 2 +
        ca->watermark[WATERMARK_MOVINGGC];
 
-   for (i = 0; i < MAX_IN_FLIGHT_DISCARDS; i++) {
-       struct discard *d = kzalloc(sizeof(*d), GFP_KERNEL);
-       if (!d)
-           return -ENOMEM;
-
-       d->ca = ca;
-       INIT_WORK(&d->work, discard_finish);
-       list_add(&d->list, &ca->discards);
-   }
-
    return 0;
 }
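
The struct discard machinery deleted above (do_discard(), discard_endio(), the
preallocated bio list) collapses into one synchronous call: the allocator
thread drops the bucket lock, calls blkdev_issue_discard(), and retakes the
lock before pushing the bucket onto the free fifo. A hedged, generic sketch of
that pattern; all names here are illustrative, not bcache's:

    #include <linux/blkdev.h>
    #include <linux/mutex.h>

    static void discard_then_free(struct block_device *bdev,
                                  struct mutex *lock,
                                  sector_t start, sector_t nr_sects)
    {
        mutex_unlock(lock);     /* blkdev_issue_discard() may sleep */
        blkdev_issue_discard(bdev, start, nr_sects, GFP_KERNEL, 0);
        mutex_lock(lock);
        /* ...then fifo_push() the bucket and wake_up() any waiters,
         * as bch_allocator_thread() does in the hunk above. */
    }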

drivers/md/bcache/bcache.h

@@ -177,6 +177,7 @@
 
 #define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__
 
+#include <linux/bcache.h>
 #include <linux/bio.h>
 #include <linux/kobject.h>
 #include <linux/list.h>
@@ -210,168 +211,6 @@ BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
 #define GC_MARK_METADATA	2
 BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 14);
 
-struct bkey {
-   uint64_t	high;
-   uint64_t	low;
-   uint64_t	ptr[];
-};
-
-/* Enough for a key with 6 pointers */
-#define BKEY_PAD 8
-
-#define BKEY_PADDED(key)					\
-   union { struct bkey key; uint64_t key ## _pad[BKEY_PAD]; }
-
-/* Version 0: Cache device
- * Version 1: Backing device
- * Version 2: Seed pointer into btree node checksum
- * Version 3: Cache device with new UUID format
- * Version 4: Backing device with data offset
- */
-#define BCACHE_SB_VERSION_CDEV			0
-#define BCACHE_SB_VERSION_BDEV			1
-#define BCACHE_SB_VERSION_CDEV_WITH_UUID	3
-#define BCACHE_SB_VERSION_BDEV_WITH_OFFSET	4
-#define BCACHE_SB_MAX_VERSION			4
-
-#define SB_SECTOR		8
-#define SB_SIZE			4096
-#define SB_LABEL_SIZE		32
-#define SB_JOURNAL_BUCKETS	256U
-/* SB_JOURNAL_BUCKETS must be divisible by BITS_PER_LONG */
-#define MAX_CACHES_PER_SET	8
-
-#define BDEV_DATA_START_DEFAULT	16	/* sectors */
-
-struct cache_sb {
-   uint64_t	csum;
-   uint64_t	offset;	/* sector where this sb was written */
-   uint64_t	version;
-
-   uint8_t	magic[16];
-
-   uint8_t	uuid[16];
-   union {
-       uint8_t		set_uuid[16];
-       uint64_t	set_magic;
-   };
-   uint8_t	label[SB_LABEL_SIZE];
-
-   uint64_t	flags;
-   uint64_t	seq;
-   uint64_t	pad[8];
-
-   union {
-   struct {
-       /* Cache devices */
-       uint64_t	nbuckets;	/* device size */
-
-       uint16_t	block_size;	/* sectors */
-       uint16_t	bucket_size;	/* sectors */
-
-       uint16_t	nr_in_set;
-       uint16_t	nr_this_dev;
-   };
-   struct {
-       /* Backing devices */
-       uint64_t	data_offset;
-
-       /*
-        * block_size from the cache device section is still used by
-        * backing devices, so don't add anything here until we fix
-        * things to not need it for backing devices anymore
-        */
-   };
-   };
-
-   uint32_t	last_mount;	/* time_t */
-
-   uint16_t	first_bucket;
-   union {
-       uint16_t	njournal_buckets;
-       uint16_t	keys;
-   };
-   uint64_t	d[SB_JOURNAL_BUCKETS];	/* journal buckets */
-};
-
-BITMASK(CACHE_SYNC,		struct cache_sb, flags, 0, 1);
-BITMASK(CACHE_DISCARD,		struct cache_sb, flags, 1, 1);
-BITMASK(CACHE_REPLACEMENT,	struct cache_sb, flags, 2, 3);
-#define CACHE_REPLACEMENT_LRU	0U
-#define CACHE_REPLACEMENT_FIFO	1U
-#define CACHE_REPLACEMENT_RANDOM 2U
-
-BITMASK(BDEV_CACHE_MODE,	struct cache_sb, flags, 0, 4);
-#define CACHE_MODE_WRITETHROUGH	0U
-#define CACHE_MODE_WRITEBACK	1U
-#define CACHE_MODE_WRITEAROUND	2U
-#define CACHE_MODE_NONE		3U
-BITMASK(BDEV_STATE,		struct cache_sb, flags, 61, 2);
-#define BDEV_STATE_NONE		0U
-#define BDEV_STATE_CLEAN	1U
-#define BDEV_STATE_DIRTY	2U
-#define BDEV_STATE_STALE	3U
-
-/* Version 1: Seed pointer into btree node checksum
- */
-#define BCACHE_BSET_VERSION	1
-
-/*
- * This is the on disk format for btree nodes - a btree node on disk is a list
- * of these; within each set the keys are sorted
- */
-struct bset {
-   uint64_t	csum;
-   uint64_t	magic;
-   uint64_t	seq;
-   uint32_t	version;
-   uint32_t	keys;
-
-   union {
-       struct bkey	start[0];
-       uint64_t	d[0];
-   };
-};
-
-/*
- * On disk format for priorities and gens - see super.c near prio_write() for
- * more.
- */
-struct prio_set {
-   uint64_t	csum;
-   uint64_t	magic;
-   uint64_t	seq;
-   uint32_t	version;
-   uint32_t	pad;
-
-   uint64_t	next_bucket;
-
-   struct bucket_disk {
-       uint16_t	prio;
-       uint8_t		gen;
-   } __attribute((packed)) data[];
-};
-
-struct uuid_entry {
-   union {
-       struct {
-           uint8_t		uuid[16];
-           uint8_t		label[32];
-           uint32_t	first_reg;
-           uint32_t	last_reg;
-           uint32_t	invalidated;
-
-           uint32_t	flags;
-           /* Size of flash only volumes */
-           uint64_t	sectors;
-       };
-
-       uint8_t	pad[128];
-   };
-};
-
-BITMASK(UUID_FLASH_ONLY,	struct uuid_entry, flags, 0, 1);
-
 #include "journal.h"
 #include "stats.h"
 struct search;
@@ -384,8 +223,6 @@ struct keybuf_key {
    void		*private;
 };
 
-typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *);
-
 struct keybuf {
    struct bkey	last_scanned;
    spinlock_t	lock;
@@ -400,7 +237,7 @@ struct keybuf {
 
    struct rb_root	keys;
 
-#define KEYBUF_NR	100
+#define KEYBUF_NR	500
    DECLARE_ARRAY_ALLOCATOR(struct keybuf_key, freelist, KEYBUF_NR);
 };
 
@@ -429,16 +266,15 @@ struct bcache_device {
    struct gendisk	*disk;
 
-   /* If nonzero, we're closing */
-   atomic_t	closing;
-
-   /* If nonzero, we're detaching/unregistering from cache set */
-   atomic_t	detaching;
-   int		flush_done;
-
-   uint64_t	nr_stripes;
-   unsigned	stripe_size_bits;
+   unsigned long	flags;
+#define BCACHE_DEV_CLOSING	0
+#define BCACHE_DEV_DETACHING	1
+#define BCACHE_DEV_UNLINK_DONE	2
+
+   unsigned	nr_stripes;
+   unsigned	stripe_size;
    atomic_t	*stripe_sectors_dirty;
+   unsigned long	*full_dirty_stripes;
 
    unsigned long	sectors_dirty_last;
    long		sectors_dirty_derivative;
@@ -509,7 +345,7 @@ struct cached_dev {
    /* Limit number of writeback bios in flight */
    struct semaphore	in_flight;
-   struct closure_with_timer writeback;
+   struct task_struct	*writeback_thread;
 
    struct keybuf	writeback_keys;
 
@@ -527,8 +363,8 @@ struct cached_dev {
    unsigned	sequential_cutoff;
    unsigned	readahead;
 
-   unsigned	sequential_merge:1;
    unsigned	verify:1;
+   unsigned	bypass_torture_test:1;
 
    unsigned	partial_stripes_expensive:1;
    unsigned	writeback_metadata:1;
@@ -620,15 +456,6 @@ struct cache {
 
    bool		discard; /* Get rid of? */
 
-   /*
-    * We preallocate structs for issuing discards to buckets, and keep them
-    * on this list when they're not in use; do_discard() issues discards
-    * whenever there's work to do and is called by free_some_buckets() and
-    * when a discard finishes.
-    */
-   atomic_t		discards_in_flight;
-   struct list_head	discards;
-
    struct journal_device	journal;
 
    /* The rest of this all shows up in sysfs */
@@ -649,7 +476,6 @@ struct gc_stat {
    size_t		nkeys;
    uint64_t	data;	/* sectors */
-   uint64_t	dirty;	/* sectors */
    unsigned	in_use; /* percent */
 };
 
@@ -744,8 +570,8 @@ struct cache_set {
     * basically a lock for this that we can wait on asynchronously. The
     * btree_root() macro releases the lock when it returns.
     */
-   struct closure		*try_harder;
-   struct closure_waitlist	try_wait;
+   struct task_struct	*try_harder;
+   wait_queue_head_t	try_wait;
    uint64_t		try_harder_start;
 
    /*
@@ -759,7 +585,7 @@ struct cache_set {
     * written.
     */
    atomic_t		prio_blocked;
-   struct closure_waitlist	bucket_wait;
+   wait_queue_head_t	bucket_wait;
 
    /*
    * For any bio we don't skip we subtract the number of sectors from
@@ -782,7 +608,7 @@ struct cache_set {
    struct gc_stat		gc_stats;
    size_t			nbuckets;
 
-   struct closure_with_waitlist gc;
+   struct task_struct	*gc_thread;
    /* Where in the btree gc currently is */
    struct bkey		gc_done;
 
@@ -795,11 +621,10 @@ struct cache_set {
    /* Counts how many sectors bio_insert has added to the cache */
    atomic_t		sectors_to_gc;
 
-   struct closure		moving_gc;
-   struct closure_waitlist	moving_gc_wait;
+   wait_queue_head_t	moving_gc_wait;
    struct keybuf		moving_gc_keys;
    /* Number of moving GC bios in flight */
-   atomic_t		in_flight;
+   struct semaphore	moving_in_flight;
 
    struct btree		*root;
 
@@ -841,22 +666,27 @@ struct cache_set {
    unsigned		congested_read_threshold_us;
    unsigned		congested_write_threshold_us;
 
-   spinlock_t		sort_time_lock;
    struct time_stats	sort_time;
    struct time_stats	btree_gc_time;
    struct time_stats	btree_split_time;
-   spinlock_t		btree_read_time_lock;
    struct time_stats	btree_read_time;
    struct time_stats	try_harder_time;
 
    atomic_long_t		cache_read_races;
    atomic_long_t		writeback_keys_done;
    atomic_long_t		writeback_keys_failed;
+
+   enum			{
+       ON_ERROR_UNREGISTER,
+       ON_ERROR_PANIC,
+   }			on_error;
    unsigned		error_limit;
    unsigned		error_decay;
+
    unsigned short		journal_delay_ms;
    unsigned		verify:1;
    unsigned		key_merging_disabled:1;
+   unsigned		expensive_debug_checks:1;
    unsigned		gc_always_rewrite:1;
    unsigned		shrinker_disabled:1;
    unsigned		copy_gc_enabled:1;
@@ -865,21 +695,6 @@ struct cache_set {
    struct hlist_head	bucket_hash[1 << BUCKET_HASH_BITS];
 };
 
-static inline bool key_merging_disabled(struct cache_set *c)
-{
-#ifdef CONFIG_BCACHE_DEBUG
-   return c->key_merging_disabled;
-#else
-   return 0;
-#endif
-}
-
-static inline bool SB_IS_BDEV(const struct cache_sb *sb)
-{
-   return sb->version == BCACHE_SB_VERSION_BDEV
-       || sb->version == BCACHE_SB_VERSION_BDEV_WITH_OFFSET;
-}
-
 struct bbio {
    unsigned	submit_time_us;
    union {
@@ -933,59 +748,6 @@ static inline unsigned local_clock_us(void)
 #define prio_buckets(c)					\
    DIV_ROUND_UP((size_t) (c)->sb.nbuckets, prios_per_bucket(c))
 
-#define JSET_MAGIC	0x245235c1a3625032ULL
-#define PSET_MAGIC	0x6750e15f87337f91ULL
-#define BSET_MAGIC	0x90135c78b99e07f5ULL
-
-#define jset_magic(c)	((c)->sb.set_magic ^ JSET_MAGIC)
-#define pset_magic(c)	((c)->sb.set_magic ^ PSET_MAGIC)
-#define bset_magic(c)	((c)->sb.set_magic ^ BSET_MAGIC)
-
-/* Bkey fields: all units are in sectors */
-
-#define KEY_FIELD(name, field, offset, size)			\
-   BITMASK(name, struct bkey, field, offset, size)
-
-#define PTR_FIELD(name, offset, size)				\
-   static inline uint64_t name(const struct bkey *k, unsigned i)	\
-   { return (k->ptr[i] >> offset) & ~(((uint64_t) ~0) << size); }	\
-								\
-   static inline void SET_##name(struct bkey *k, unsigned i, uint64_t v)\
-   {								\
-       k->ptr[i] &= ~(~((uint64_t) ~0 << size) << offset);	\
-       k->ptr[i] |= v << offset;				\
-   }
-
-KEY_FIELD(KEY_PTRS,	high, 60, 3)
-KEY_FIELD(HEADER_SIZE,	high, 58, 2)
-KEY_FIELD(KEY_CSUM,	high, 56, 2)
-KEY_FIELD(KEY_PINNED,	high, 55, 1)
-KEY_FIELD(KEY_DIRTY,	high, 36, 1)
-
-KEY_FIELD(KEY_SIZE,	high, 20, 16)
-KEY_FIELD(KEY_INODE,	high, 0,  20)
-
-/* Next time I change the on disk format, KEY_OFFSET() won't be 64 bits */
-
-static inline uint64_t KEY_OFFSET(const struct bkey *k)
-{
-   return k->low;
-}
-
-static inline void SET_KEY_OFFSET(struct bkey *k, uint64_t v)
-{
-   k->low = v;
-}
-
-PTR_FIELD(PTR_DEV,	51, 12)
-PTR_FIELD(PTR_OFFSET,	8,  43)
-PTR_FIELD(PTR_GEN,	0,  8)
-
-#define PTR_CHECK_DEV	((1 << 12) - 1)
-
-#define PTR(gen, offset, dev)					\
-   ((((uint64_t) dev) << 51) | ((uint64_t) offset) << 8 | gen)
-
 static inline size_t sector_to_bucket(struct cache_set *c, sector_t s)
 {
    return s >> c->bucket_bits;
@@ -1024,27 +786,11 @@ static inline struct bucket *PTR_BUCKET(struct cache_set *c,
 
 /* Btree key macros */
 
-/*
- * The high bit being set is a relic from when we used it to do binary
- * searches - it told you where a key started. It's not used anymore,
- * and can probably be safely dropped.
- */
-#define KEY(dev, sector, len)					\
-((struct bkey) {						\
-   .high = (1ULL << 63) | ((uint64_t) (len) << 20) | (dev),	\
-   .low = (sector)						\
-})
-
 static inline void bkey_init(struct bkey *k)
 {
-   *k = KEY(0, 0, 0);
+   *k = ZERO_KEY;
 }
 
-#define KEY_START(k)	(KEY_OFFSET(k) - KEY_SIZE(k))
-#define START_KEY(k)	KEY(KEY_INODE(k), KEY_START(k), 0)
-#define MAX_KEY		KEY(~(~0 << 20), ((uint64_t) ~0) >> 1, 0)
-#define ZERO_KEY	KEY(0, 0, 0)
-
 /*
  * This is used for various on disk data structures - cache_sb, prio_set, bset,
  * jset: The checksum is _always_ the first 8 bytes of these structs
@@ -1094,14 +840,6 @@ do {								\
    for (b = (ca)->buckets + (ca)->sb.first_bucket;	\
         b < (ca)->buckets + (ca)->sb.nbuckets; b++)
 
-static inline void __bkey_put(struct cache_set *c, struct bkey *k)
-{
-   unsigned i;
-
-   for (i = 0; i < KEY_PTRS(k); i++)
-       atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
-}
-
 static inline void cached_dev_put(struct cached_dev *dc)
 {
    if (atomic_dec_and_test(&dc->count))
@@ -1173,13 +911,15 @@ uint8_t bch_inc_gen(struct cache *, struct bucket *);
 void bch_rescale_priorities(struct cache_set *, int);
 bool bch_bucket_add_unused(struct cache *, struct bucket *);
 
-long bch_bucket_alloc(struct cache *, unsigned, struct closure *);
+long bch_bucket_alloc(struct cache *, unsigned, bool);
 void bch_bucket_free(struct cache_set *, struct bkey *);
 
 int __bch_bucket_alloc_set(struct cache_set *, unsigned,
-                           struct bkey *, int, struct closure *);
+                           struct bkey *, int, bool);
 int bch_bucket_alloc_set(struct cache_set *, unsigned,
-                         struct bkey *, int, struct closure *);
+                         struct bkey *, int, bool);
+bool bch_alloc_sectors(struct cache_set *, struct bkey *, unsigned,
+                       unsigned, unsigned, bool);
 
 __printf(2, 3)
 bool bch_cache_set_error(struct cache_set *, const char *, ...);
@@ -1187,7 +927,7 @@ bool bch_cache_set_error(struct cache_set *, const char *, ...);
 void bch_prio_write(struct cache *);
 void bch_write_bdev_super(struct cached_dev *, struct closure *);
 
-extern struct workqueue_struct *bcache_wq, *bch_gc_wq;
+extern struct workqueue_struct *bcache_wq;
 extern const char * const bch_cache_modes[];
 extern struct mutex bch_register_lock;
 extern struct list_head bch_cache_sets;
@@ -1220,15 +960,14 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *);
 void bch_btree_cache_free(struct cache_set *);
 int bch_btree_cache_alloc(struct cache_set *);
 void bch_moving_init_cache_set(struct cache_set *);
+int bch_open_buckets_alloc(struct cache_set *);
+void bch_open_buckets_free(struct cache_set *);
 
 int bch_cache_allocator_start(struct cache *ca);
-void bch_cache_allocator_exit(struct cache *ca);
 int bch_cache_allocator_init(struct cache *ca);
 
 void bch_debug_exit(void);
 int bch_debug_init(struct kobject *);
-void bch_writeback_exit(void);
-int bch_writeback_init(void);
 void bch_request_exit(void);
 int bch_request_init(void);
 void bch_btree_exit(void);
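
A recurring theme in the bcache.h hunks above is the retreat from closures to
stock kernel primitives: closure_waitlist becomes wait_queue_head_t, and the
gc and writeback closures become plain kthreads. An illustrative sketch of
that idiom; the demo_* names are hypothetical, not bcache code:

    #include <linux/kthread.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(demo_wait);
    static int demo_available;

    static int demo_thread(void *arg)
    {
        while (!kthread_should_stop()) {
            demo_available = 1;     /* produce a resource */
            wake_up(&demo_wait);    /* wake blocked allocators */
            schedule_timeout_interruptible(HZ);
        }
        return 0;
    }

    static void demo_alloc_wait(void)
    {
        /* like bch_bucket_alloc(..., wait = true) in alloc.c above */
        wait_event(demo_wait, demo_available != 0);
        demo_available = 0;
    }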

drivers/md/bcache/bset.c

@ -14,22 +14,12 @@
/* Keylists */ /* Keylists */
void bch_keylist_copy(struct keylist *dest, struct keylist *src)
{
*dest = *src;
if (src->list == src->d) {
size_t n = (uint64_t *) src->top - src->d;
dest->top = (struct bkey *) &dest->d[n];
dest->list = dest->d;
}
}
int bch_keylist_realloc(struct keylist *l, int nptrs, struct cache_set *c) int bch_keylist_realloc(struct keylist *l, int nptrs, struct cache_set *c)
{ {
unsigned oldsize = (uint64_t *) l->top - l->list; size_t oldsize = bch_keylist_nkeys(l);
unsigned newsize = oldsize + 2 + nptrs; size_t newsize = oldsize + 2 + nptrs;
uint64_t *new; uint64_t *old_keys = l->keys_p == l->inline_keys ? NULL : l->keys_p;
uint64_t *new_keys;
/* The journalling code doesn't handle the case where the keys to insert /* The journalling code doesn't handle the case where the keys to insert
* is bigger than an empty write: If we just return -ENOMEM here, * is bigger than an empty write: If we just return -ENOMEM here,
@ -45,24 +35,23 @@ int bch_keylist_realloc(struct keylist *l, int nptrs, struct cache_set *c)
roundup_pow_of_two(oldsize) == newsize) roundup_pow_of_two(oldsize) == newsize)
return 0; return 0;
new = krealloc(l->list == l->d ? NULL : l->list, new_keys = krealloc(old_keys, sizeof(uint64_t) * newsize, GFP_NOIO);
sizeof(uint64_t) * newsize, GFP_NOIO);
if (!new) if (!new_keys)
return -ENOMEM; return -ENOMEM;
if (l->list == l->d) if (!old_keys)
memcpy(new, l->list, sizeof(uint64_t) * KEYLIST_INLINE); memcpy(new_keys, l->inline_keys, sizeof(uint64_t) * oldsize);
l->list = new; l->keys_p = new_keys;
l->top = (struct bkey *) (&l->list[oldsize]); l->top_p = new_keys + oldsize;
return 0; return 0;
} }
struct bkey *bch_keylist_pop(struct keylist *l) struct bkey *bch_keylist_pop(struct keylist *l)
{ {
struct bkey *k = l->bottom; struct bkey *k = l->keys;
if (k == l->top) if (k == l->top)
return NULL; return NULL;
@ -73,21 +62,20 @@ struct bkey *bch_keylist_pop(struct keylist *l)
return l->top = k; return l->top = k;
} }
void bch_keylist_pop_front(struct keylist *l)
{
l->top_p -= bkey_u64s(l->keys);
memmove(l->keys,
bkey_next(l->keys),
bch_keylist_bytes(l));
}
/* Pointer validation */ /* Pointer validation */
bool __bch_ptr_invalid(struct cache_set *c, int level, const struct bkey *k) static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
{ {
unsigned i; unsigned i;
char buf[80];
if (level && (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k)))
goto bad;
if (!level && KEY_SIZE(k) > KEY_OFFSET(k))
goto bad;
if (!KEY_SIZE(k))
return true;
for (i = 0; i < KEY_PTRS(k); i++) for (i = 0; i < KEY_PTRS(k); i++)
if (ptr_available(c, k, i)) { if (ptr_available(c, k, i)) {
@ -98,13 +86,83 @@ bool __bch_ptr_invalid(struct cache_set *c, int level, const struct bkey *k)
if (KEY_SIZE(k) + r > c->sb.bucket_size || if (KEY_SIZE(k) + r > c->sb.bucket_size ||
bucket < ca->sb.first_bucket || bucket < ca->sb.first_bucket ||
bucket >= ca->sb.nbuckets) bucket >= ca->sb.nbuckets)
goto bad; return true;
} }
return false;
}
bool bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k)
{
char buf[80];
if (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k))
goto bad;
if (__ptr_invalid(c, k))
goto bad;
return false; return false;
bad: bad:
bch_bkey_to_text(buf, sizeof(buf), k); bch_bkey_to_text(buf, sizeof(buf), k);
cache_bug(c, "spotted bad key %s: %s", buf, bch_ptr_status(c, k)); cache_bug(c, "spotted btree ptr %s: %s", buf, bch_ptr_status(c, k));
return true;
}
bool bch_extent_ptr_invalid(struct cache_set *c, const struct bkey *k)
{
char buf[80];
if (!KEY_SIZE(k))
return true;
if (KEY_SIZE(k) > KEY_OFFSET(k))
goto bad;
if (__ptr_invalid(c, k))
goto bad;
return false;
bad:
bch_bkey_to_text(buf, sizeof(buf), k);
cache_bug(c, "spotted extent %s: %s", buf, bch_ptr_status(c, k));
return true;
}
static bool ptr_bad_expensive_checks(struct btree *b, const struct bkey *k,
unsigned ptr)
{
struct bucket *g = PTR_BUCKET(b->c, k, ptr);
char buf[80];
if (mutex_trylock(&b->c->bucket_lock)) {
if (b->level) {
if (KEY_DIRTY(k) ||
g->prio != BTREE_PRIO ||
(b->c->gc_mark_valid &&
GC_MARK(g) != GC_MARK_METADATA))
goto err;
} else {
if (g->prio == BTREE_PRIO)
goto err;
if (KEY_DIRTY(k) &&
b->c->gc_mark_valid &&
GC_MARK(g) != GC_MARK_DIRTY)
goto err;
}
mutex_unlock(&b->c->bucket_lock);
}
return false;
err:
mutex_unlock(&b->c->bucket_lock);
bch_bkey_to_text(buf, sizeof(buf), k);
btree_bug(b,
"inconsistent pointer %s: bucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin),
g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
return true; return true;
} }
@ -118,64 +176,29 @@ bool bch_ptr_bad(struct btree *b, const struct bkey *k)
bch_ptr_invalid(b, k)) bch_ptr_invalid(b, k))
return true; return true;
if (KEY_PTRS(k) && PTR_DEV(k, 0) == PTR_CHECK_DEV) for (i = 0; i < KEY_PTRS(k); i++) {
return true; if (!ptr_available(b->c, k, i))
return true;
for (i = 0; i < KEY_PTRS(k); i++) g = PTR_BUCKET(b->c, k, i);
if (ptr_available(b->c, k, i)) { stale = ptr_stale(b->c, k, i);
g = PTR_BUCKET(b->c, k, i);
stale = ptr_stale(b->c, k, i);
btree_bug_on(stale > 96, b, btree_bug_on(stale > 96, b,
"key too stale: %i, need_gc %u", "key too stale: %i, need_gc %u",
stale, b->c->need_gc); stale, b->c->need_gc);
btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k), btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k),
b, "stale dirty pointer"); b, "stale dirty pointer");
if (stale) if (stale)
return true; return true;
#ifdef CONFIG_BCACHE_EDEBUG if (expensive_debug_checks(b->c) &&
if (!mutex_trylock(&b->c->bucket_lock)) ptr_bad_expensive_checks(b, k, i))
continue; return true;
}
if (b->level) {
if (KEY_DIRTY(k) ||
g->prio != BTREE_PRIO ||
(b->c->gc_mark_valid &&
GC_MARK(g) != GC_MARK_METADATA))
goto bug;
} else {
if (g->prio == BTREE_PRIO)
goto bug;
if (KEY_DIRTY(k) &&
b->c->gc_mark_valid &&
GC_MARK(g) != GC_MARK_DIRTY)
goto bug;
}
mutex_unlock(&b->c->bucket_lock);
#endif
}
return false; return false;
#ifdef CONFIG_BCACHE_EDEBUG
bug:
mutex_unlock(&b->c->bucket_lock);
{
char buf[80];
bch_bkey_to_text(buf, sizeof(buf), k);
btree_bug(b,
"inconsistent pointer %s: bucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
}
return true;
#endif
} }
/* Key/pointer manipulation */ /* Key/pointer manipulation */
@ -458,16 +481,8 @@ static struct bkey *table_to_bkey(struct bset_tree *t, unsigned cacheline)
static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift) static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift)
{ {
#ifdef CONFIG_X86_64
asm("shrd %[shift],%[high],%[low]"
: [low] "+Rm" (low)
: [high] "R" (high),
[shift] "ci" (shift)
: "cc");
#else
low >>= shift; low >>= shift;
low |= (high << 1) << (63U - shift); low |= (high << 1) << (63U - shift);
#endif
return low; return low;
} }
@ -686,7 +701,7 @@ void bch_bset_init_next(struct btree *b)
} else } else
get_random_bytes(&i->seq, sizeof(uint64_t)); get_random_bytes(&i->seq, sizeof(uint64_t));
i->magic = bset_magic(b->c); i->magic = bset_magic(&b->c->sb);
i->version = 0; i->version = 0;
i->keys = 0; i->keys = 0;
@ -824,16 +839,16 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
	} else
		i = bset_search_write_set(b, t, search);

#ifdef CONFIG_BCACHE_EDEBUG
	if (expensive_debug_checks(b->c)) {
		BUG_ON(bset_written(b, t) &&
		       i.l != t->data->start &&
		       bkey_cmp(tree_to_prev_bkey(t,
			  inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
				search) > 0);

		BUG_ON(i.r != end(t->data) &&
		       bkey_cmp(i.r, search) <= 0);
#endif
	}

	while (likely(i.l != i.r) &&
	       bkey_cmp(i.l, search) <= 0)
@ -844,6 +859,13 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
/* Btree iterator */
/*
* Returns true if l > r - unless l == r, in which case returns true if l is
* older than r.
*
* Necessary for btree_sort_fixup() - if there are multiple keys that compare
* equal in different sets, we have to process them newest to oldest.
*/
static inline bool btree_iter_cmp(struct btree_iter_set l,
				  struct btree_iter_set r)
{
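The hunk only shows the comparator's new comment and signature. For reference, a comparator with exactly these semantics can be sketched as follows, assuming (as in bcache) that sets within a node are laid out oldest-first, so pointer order can stand in for age; this is a sketch, not the patch's actual body:

static inline bool btree_iter_cmp_sketch(struct btree_iter_set l,
					 struct btree_iter_set r)
{
	int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k));

	/* Equal keys: the one at the lower address lives in an older
	 * set and must sort *after* the newer one, so the heap pops
	 * duplicates newest-to-oldest. */
	return c ? c > 0 : l.k < r.k;
}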
@ -867,12 +889,16 @@ void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
}

struct bkey *__bch_btree_iter_init(struct btree *b, struct btree_iter *iter,
				   struct bkey *search, struct bset_tree *start)
{
	struct bkey *ret = NULL;

	iter->size = ARRAY_SIZE(iter->data);
	iter->used = 0;

#ifdef CONFIG_BCACHE_DEBUG
	iter->b = b;
#endif

	for (; start <= &b->sets[b->nsets]; start++) {
		ret = bch_bset_search(b, start, search);
		bch_btree_iter_push(iter, ret, end(start->data));
@ -887,6 +913,8 @@ struct bkey *bch_btree_iter_next(struct btree_iter *iter)
	struct bkey *ret = NULL;

	if (!btree_iter_end(iter)) {
		bch_btree_iter_next_check(iter);

		ret = iter->data->k;
		iter->data->k = bkey_next(iter->data->k);

@ -916,14 +944,6 @@ struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
	return ret;
}
struct bkey *bch_next_recurse_key(struct btree *b, struct bkey *search)
{
struct btree_iter iter;
bch_btree_iter_init(b, &iter, search);
return bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
}
/* Mergesort */
static void sort_key_next(struct btree_iter *iter, static void sort_key_next(struct btree_iter *iter,
@ -998,7 +1018,6 @@ static void btree_mergesort(struct btree *b, struct bset *out,
	out->keys = last ? (uint64_t *) bkey_next(last) - out->d : 0;

	pr_debug("sorted %i keys", out->keys);

	bch_check_key_order(b, out);
}
static void __btree_sort(struct btree *b, struct btree_iter *iter,

@ -1029,7 +1048,7 @@ static void __btree_sort(struct btree *b, struct btree_iter *iter,
	 * memcpy()
	 */

	out->magic	= bset_magic(b->c);
	out->magic	= bset_magic(&b->c->sb);
	out->seq	= b->sets[0].data->seq;
	out->version	= b->sets[0].data->version;
	swap(out, b->sets[0].data);
@ -1050,24 +1069,21 @@ static void __btree_sort(struct btree *b, struct btree_iter *iter,
	if (b->written)
		bset_build_written_tree(b);

	if (!start) {
		spin_lock(&b->c->sort_time_lock);
		bch_time_stats_update(&b->c->sort_time, start_time);
		spin_unlock(&b->c->sort_time_lock);
	}

	if (!start)
		bch_time_stats_update(&b->c->sort_time, start_time);
}
void bch_btree_sort_partial(struct btree *b, unsigned start)
{
	size_t oldsize = 0, order = b->page_order, keys = 0;
	size_t order = b->page_order, keys = 0;
	struct btree_iter iter;
	int oldsize = bch_count_data(b);

	__bch_btree_iter_init(b, &iter, NULL, &b->sets[start]);

	BUG_ON(b->sets[b->nsets].data == write_block(b) &&
	       (b->sets[b->nsets].size || b->nsets));

	if (b->written)
		oldsize = bch_count_data(b);

	if (start) {
		unsigned i;

@ -1083,7 +1099,7 @@ void bch_btree_sort_partial(struct btree *b, unsigned start)
	__btree_sort(b, &iter, start, order, false);

	EBUG_ON(b->written && bch_count_data(b) != oldsize);
	EBUG_ON(b->written && oldsize >= 0 && bch_count_data(b) != oldsize);
}
void bch_btree_sort_and_fix_extents(struct btree *b, struct btree_iter *iter)

@ -1101,9 +1117,7 @@ void bch_btree_sort_into(struct btree *b, struct btree *new)
	btree_mergesort(b, new->sets->data, &iter, false, true);

	spin_lock(&b->c->sort_time_lock);
	bch_time_stats_update(&b->c->sort_time, start_time);
	spin_unlock(&b->c->sort_time_lock);

	bkey_copy_key(&new->key, &b->key);
	new->sets->size = 0;
@ -1148,16 +1162,16 @@ out:
/* Sysfs stuff */

struct bset_stats {
	struct btree_op op;
	size_t nodes;
	size_t sets_written, sets_unwritten;
	size_t bytes_written, bytes_unwritten;
	size_t floats, failed;
};

static int bch_btree_bset_stats(struct btree *b, struct btree_op *op,
				struct bset_stats *stats)
static int btree_bset_stats(struct btree_op *op, struct btree *b)
{
	struct bkey *k;
	struct bset_stats *stats = container_of(op, struct bset_stats, op);
	unsigned i;

	stats->nodes++;

@ -1182,30 +1196,19 @@ static int bch_btree_bset_stats(struct btree *b, struct btree_op *op,
		}
	}

	if (b->level) {
		struct btree_iter iter;

		for_each_key_filter(b, k, &iter, bch_ptr_bad) {
			int ret = btree(bset_stats, k, b, op, stats);
			if (ret)
				return ret;
		}
	}

	return 0;
	return MAP_CONTINUE;
}

int bch_bset_print_stats(struct cache_set *c, char *buf)
{
	struct btree_op op;
	struct bset_stats t;
	int ret;

	bch_btree_op_init_stack(&op);
	memset(&t, 0, sizeof(struct bset_stats));
	bch_btree_op_init(&t.op, -1);

	ret = btree_root(bset_stats, c, &op, &t);
	ret = bch_btree_map_nodes(&t.op, c, &ZERO_KEY, btree_bset_stats);
	if (ret)
	if (ret < 0)
		return ret;

	return snprintf(buf, PAGE_SIZE,

View File

@ -148,6 +148,9 @@
struct btree_iter {
	size_t size, used;
#ifdef CONFIG_BCACHE_DEBUG
	struct btree *b;
#endif
	struct btree_iter_set {
		struct bkey *k, *end;
	} data[MAX_BSETS];
@ -193,54 +196,26 @@ static __always_inline int64_t bkey_cmp(const struct bkey *l,
		: (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r);
}
static inline size_t bkey_u64s(const struct bkey *k)
{
BUG_ON(KEY_CSUM(k) > 1);
return 2 + KEY_PTRS(k) + (KEY_CSUM(k) ? 1 : 0);
}
static inline size_t bkey_bytes(const struct bkey *k)
{
return bkey_u64s(k) * sizeof(uint64_t);
}
static inline void bkey_copy(struct bkey *dest, const struct bkey *src)
{
memcpy(dest, src, bkey_bytes(src));
}
static inline void bkey_copy_key(struct bkey *dest, const struct bkey *src)
{
if (!src)
src = &KEY(0, 0, 0);
SET_KEY_INODE(dest, KEY_INODE(src));
SET_KEY_OFFSET(dest, KEY_OFFSET(src));
}
static inline struct bkey *bkey_next(const struct bkey *k)
{
uint64_t *d = (void *) k;
return (struct bkey *) (d + bkey_u64s(k));
}
/* Keylists */

struct keylist {
	struct bkey *top;
	union {
		uint64_t *list;
		struct bkey *bottom;
	};

struct keylist {
	union {
		struct bkey *keys;
		uint64_t *keys_p;
	};
	union {
		struct bkey *top;
		uint64_t *top_p;
	};

	/* Enough room for btree_split's keys without realloc */
#define KEYLIST_INLINE 16
	uint64_t d[KEYLIST_INLINE];
	uint64_t inline_keys[KEYLIST_INLINE];
};

static inline void bch_keylist_init(struct keylist *l)
{
	l->top = (void *) (l->list = l->d);
	l->top_p = l->keys_p = l->inline_keys;
}
static inline void bch_keylist_push(struct keylist *l)

@ -256,17 +231,32 @@ static inline void bch_keylist_add(struct keylist *l, struct bkey *k)
static inline bool bch_keylist_empty(struct keylist *l)
{
	return l->top == (void *) l->list;
	return l->top == l->keys;
}

static inline void bch_keylist_reset(struct keylist *l)
{
	l->top = l->keys;
}

static inline void bch_keylist_free(struct keylist *l)
{
	if (l->list != l->d)
		kfree(l->list);
	if (l->keys_p != l->inline_keys)
		kfree(l->keys_p);
}
static inline size_t bch_keylist_nkeys(struct keylist *l)
{
return l->top_p - l->keys_p;
}
static inline size_t bch_keylist_bytes(struct keylist *l)
{
return bch_keylist_nkeys(l) * sizeof(uint64_t);
} }
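The anonymous unions give every keylist field both a struct bkey * and a raw uint64_t * view of the same buffer, so the size helpers above can do their bookkeeping in u64 units while insertion still deals in whole keys. A hypothetical usage sketch under these definitions (some_key and use_key() are stand-ins, not from the patch):

	struct keylist l;
	struct bkey *k;

	bch_keylist_init(&l);		/* top_p == keys_p == inline_keys */

	bkey_copy(l.top, some_key);	/* stage a key at the tail... */
	bch_keylist_push(&l);		/* ...then advance top past it */

	pr_debug("%zu u64s / %zu bytes queued",
		 bch_keylist_nkeys(&l), bch_keylist_bytes(&l));

	while ((k = bch_keylist_pop(&l)))	/* drain from the tail */
		use_key(k);

	bch_keylist_free(&l);		/* no-op unless realloc'd */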
void bch_keylist_copy(struct keylist *, struct keylist *);
struct bkey *bch_keylist_pop(struct keylist *);
void bch_keylist_pop_front(struct keylist *);
int bch_keylist_realloc(struct keylist *, int, struct cache_set *);
void bch_bkey_copy_single_ptr(struct bkey *, const struct bkey *,
@ -287,7 +277,9 @@ static inline bool bch_cut_back(const struct bkey *where, struct bkey *k)
}

const char *bch_ptr_status(struct cache_set *, const struct bkey *);
bool __bch_ptr_invalid(struct cache_set *, int level, const struct bkey *);
bool bch_btree_ptr_invalid(struct cache_set *, const struct bkey *);
bool bch_extent_ptr_invalid(struct cache_set *, const struct bkey *);

bool bch_ptr_bad(struct btree *, const struct bkey *);

static inline uint8_t gen_after(uint8_t a, uint8_t b)
@ -311,7 +303,6 @@ static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
typedef bool (*ptr_filter_fn)(struct btree *, const struct bkey *);

struct bkey *bch_next_recurse_key(struct btree *, struct bkey *);
struct bkey *bch_btree_iter_next(struct btree_iter *);
struct bkey *bch_btree_iter_next_filter(struct btree_iter *,
					struct btree *, ptr_filter_fn);
@ -361,12 +352,30 @@ void bch_bset_fix_lookup_table(struct btree *, struct bkey *);
struct bkey *__bch_bset_search(struct btree *, struct bset_tree *,
			       const struct bkey *);

/*
 * Returns the first key that is strictly greater than search
 */
static inline struct bkey *bch_bset_search(struct btree *b, struct bset_tree *t,
					   const struct bkey *search)
{
	return search ? __bch_bset_search(b, t, search) : t->data->start;
}
#define PRECEDING_KEY(_k) \
({ \
struct bkey *_ret = NULL; \
\
if (KEY_INODE(_k) || KEY_OFFSET(_k)) { \
_ret = &KEY(KEY_INODE(_k), KEY_OFFSET(_k), 0); \
\
if (!_ret->low) \
_ret->high--; \
_ret->low--; \
} \
\
_ret; \
})
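PRECEDING_KEY() treats the packed (inode, offset) pair as one 128-bit integer split across the key's high and low words and decrements it, borrowing from high when low is zero; the all-zero key has no predecessor, so the macro yields NULL for it. A standalone analogue of just the borrow logic (illustrative userspace C, not from the patch):

#include <assert.h>
#include <stdint.h>

struct u128 { uint64_t high, low; };

/* Decrement a 128-bit value, borrowing from the high word when the
 * low word is zero -- the same borrow PRECEDING_KEY() performs on
 * the packed (inode, offset) pair. */
static struct u128 dec128(struct u128 v)
{
	if (!v.low)
		v.high--;
	v.low--;
	return v;
}

int main(void)
{
	struct u128 v = dec128((struct u128){ .high = 1, .low = 0 });
	assert(v.high == 0 && v.low == UINT64_MAX);	/* borrow case */

	v = dec128((struct u128){ .high = 0, .low = 5 });
	assert(v.high == 0 && v.low == 4);		/* plain decrement */
	return 0;
}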
bool bch_bkey_try_merge(struct btree *, struct bkey *, struct bkey *);
void bch_btree_sort_lazy(struct btree *);
void bch_btree_sort_into(struct btree *, struct btree *);

File diff suppressed because it is too large

View File

@ -125,6 +125,7 @@ struct btree {
	unsigned long seq;
	struct rw_semaphore lock;
	struct cache_set *c;
	struct btree *parent;

	unsigned long flags;
	uint16_t written;	/* would be nice to kill */
@ -200,12 +201,7 @@ static inline bool bkey_written(struct btree *b, struct bkey *k)
static inline void set_gc_sectors(struct cache_set *c)
{
	atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 8);
	atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 16);
}

static inline bool bch_ptr_invalid(struct btree *b, const struct bkey *k)
{
	return __bch_ptr_invalid(b->c, b->level, k);
}
static inline struct bkey *bch_btree_iter_init(struct btree *b,

@ -215,6 +211,16 @@ static inline struct bkey *bch_btree_iter_init(struct btree *b,
	return __bch_btree_iter_init(b, iter, search, b->sets);
}
static inline bool bch_ptr_invalid(struct btree *b, const struct bkey *k)
{
if (b->level)
return bch_btree_ptr_invalid(b->c, k);
else
return bch_extent_ptr_invalid(b->c, k);
}
void bkey_put(struct cache_set *c, struct bkey *k);
/* Looping macros */

#define for_each_cached_btree(b, c, iter)			\
@ -234,51 +240,17 @@ static inline struct bkey *bch_btree_iter_init(struct btree *b,
/* Recursing down the btree */

struct btree_op {
struct closure cl;
struct cache_set *c;
/* Journal entry we have a refcount on */
atomic_t *journal;
/* Bio to be inserted into the cache */
struct bio *cache_bio;
unsigned inode;
uint16_t write_prio;
	/* Btree level at which we start taking write locks */
	short lock;
/* Btree insertion type */
enum {
BTREE_INSERT,
BTREE_REPLACE
} type:8;
unsigned csum:1;
unsigned skip:1;
unsigned flush_journal:1;
unsigned insert_data_done:1;
unsigned lookup_done:1;
	unsigned insert_collision:1;
/* Anything after this point won't get zeroed in do_bio_hook() */
/* Keys to be inserted */
struct keylist keys;
BKEY_PADDED(replace);
};

enum {
	BTREE_INSERT_STATUS_INSERT,
	BTREE_INSERT_STATUS_BACK_MERGE,
	BTREE_INSERT_STATUS_OVERWROTE,
	BTREE_INSERT_STATUS_FRONT_MERGE,
};

void bch_btree_op_init_stack(struct btree_op *);

static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
{
	memset(op, 0, sizeof(struct btree_op));
	op->lock = write_lock_level;
}
static inline void rw_lock(bool w, struct btree *b, int level)
{

@ -290,108 +262,71 @@ static inline void rw_lock(bool w, struct btree *b, int level)
static inline void rw_unlock(bool w, struct btree *b)
{
#ifdef CONFIG_BCACHE_EDEBUG
unsigned i;
if (w && b->key.ptr[0])
for (i = 0; i <= b->nsets; i++)
bch_check_key_order(b, b->sets[i].data);
#endif
	if (w)
		b->seq++;

	(w ? up_write : up_read)(&b->lock);
}
#define insert_lock(s, b) ((b)->level <= (s)->lock)
/*
* These macros are for recursing down the btree - they handle the details of
* locking and looking up nodes in the cache for you. They're best treated as
* mere syntax when reading code that uses them.
*
* op->lock determines whether we take a read or a write lock at a given depth.
* If you've got a read lock and find that you need a write lock (i.e. you're
* going to have to split), set op->lock and return -EINTR; btree_root() will
* call you again and you'll have the correct lock.
*/
/**
* btree - recurse down the btree on a specified key
* @fn: function to call, which will be passed the child node
* @key: key to recurse on
* @b: parent btree node
* @op: pointer to struct btree_op
*/
#define btree(fn, key, b, op, ...) \
({ \
int _r, l = (b)->level - 1; \
bool _w = l <= (op)->lock; \
struct btree *_b = bch_btree_node_get((b)->c, key, l, op); \
if (!IS_ERR(_b)) { \
_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \
rw_unlock(_w, _b); \
} else \
_r = PTR_ERR(_b); \
_r; \
})
/**
* btree_root - call a function on the root of the btree
* @fn: function to call, which will be passed the child node
* @c: cache set
* @op: pointer to struct btree_op
*/
#define btree_root(fn, c, op, ...) \
({ \
int _r = -EINTR; \
do { \
struct btree *_b = (c)->root; \
bool _w = insert_lock(op, _b); \
rw_lock(_w, _b, _b->level); \
if (_b == (c)->root && \
_w == insert_lock(op, _b)) \
_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \
rw_unlock(_w, _b); \
bch_cannibalize_unlock(c, &(op)->cl); \
} while (_r == -EINTR); \
\
_r; \
})
static inline bool should_split(struct btree *b)
{
struct bset *i = write_block(b);
return b->written >= btree_blocks(b) ||
(i->seq == b->sets[0].data->seq &&
b->written + __set_blocks(i, i->keys + 15, b->c)
> btree_blocks(b));
}
void bch_btree_node_read(struct btree *);
void bch_btree_node_write(struct btree *, struct closure *);

void bch_cannibalize_unlock(struct cache_set *, struct closure *);
void bch_btree_set_root(struct btree *);

struct btree *bch_btree_node_alloc(struct cache_set *, int, struct closure *);
struct btree *bch_btree_node_get(struct cache_set *, struct bkey *,
				 int, struct btree_op *);

struct btree *bch_btree_node_alloc(struct cache_set *, int, bool);
struct btree *bch_btree_node_get(struct cache_set *, struct bkey *, int, bool);

bool bch_btree_insert_check_key(struct btree *, struct btree_op *,
				struct bio *);
int bch_btree_insert(struct btree_op *, struct cache_set *);

int bch_btree_insert_check_key(struct btree *, struct btree_op *,
			       struct bkey *);
int bch_btree_insert(struct cache_set *, struct keylist *,
		     atomic_t *, struct bkey *);

int bch_btree_search_recurse(struct btree *, struct btree_op *);
void bch_queue_gc(struct cache_set *);

int bch_gc_thread_start(struct cache_set *);
size_t bch_btree_gc_finish(struct cache_set *);
void bch_moving_gc(struct closure *);
void bch_moving_gc(struct cache_set *);
int bch_btree_check(struct cache_set *, struct btree_op *);
int bch_btree_check(struct cache_set *);
uint8_t __bch_btree_mark_key(struct cache_set *, int, struct bkey *);
static inline void wake_up_gc(struct cache_set *c)
{
if (c->gc_thread)
wake_up_process(c->gc_thread);
}
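Garbage collection moves here from a closure driven off a workqueue to a dedicated kthread that wake_up_gc() nudges. The thread's loop itself is not shown in this header; a sketch of the shape this API implies, assuming the usual TASK_INTERRUPTIBLE park-and-wake kthread pattern (the real loop lives in btree.c and does more bookkeeping):

static int bch_gc_thread_sketch(void *arg)
{
	struct cache_set *c = arg;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();		/* sleeps until wake_up_gc(c) */

		if (kthread_should_stop())
			break;

		/* ...run one incremental gc pass over the btree... */
	}
	return 0;
}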
#define MAP_DONE 0
#define MAP_CONTINUE 1
#define MAP_ALL_NODES 0
#define MAP_LEAF_NODES 1
#define MAP_END_KEY 1
typedef int (btree_map_nodes_fn)(struct btree_op *, struct btree *);
int __bch_btree_map_nodes(struct btree_op *, struct cache_set *,
struct bkey *, btree_map_nodes_fn *, int);
static inline int bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
struct bkey *from, btree_map_nodes_fn *fn)
{
return __bch_btree_map_nodes(op, c, from, fn, MAP_ALL_NODES);
}
static inline int bch_btree_map_leaf_nodes(struct btree_op *op,
struct cache_set *c,
struct bkey *from,
btree_map_nodes_fn *fn)
{
return __bch_btree_map_nodes(op, c, from, fn, MAP_LEAF_NODES);
}
typedef int (btree_map_keys_fn)(struct btree_op *, struct btree *,
struct bkey *);
int bch_btree_map_keys(struct btree_op *, struct cache_set *,
struct bkey *, btree_map_keys_fn *, int);
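These map functions are the replacement for the recursive btree()/btree_root() macros deleted above: a caller embeds a struct btree_op in its own state struct, recovers that struct with container_of() in the callback, and returns MAP_DONE or MAP_CONTINUE to stop or keep walking. A hypothetical user, modelled directly on the btree_bset_stats() conversion earlier in this diff (the node-counting names are stand-ins):

struct node_count {
	struct btree_op	op;
	size_t		nodes;
};

static int count_one_node(struct btree_op *op, struct btree *b)
{
	struct node_count *s = container_of(op, struct node_count, op);

	s->nodes++;
	return MAP_CONTINUE;	/* MAP_DONE would stop the walk early */
}

static size_t count_btree_nodes(struct cache_set *c)
{
	struct node_count s;

	memset(&s, 0, sizeof(s));
	bch_btree_op_init(&s.op, -1);	/* -1: read locks all the way down */

	bch_btree_map_nodes(&s.op, c, &ZERO_KEY, count_one_node);
	return s.nodes;
}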
typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *);
void bch_keybuf_init(struct keybuf *);
void bch_refill_keybuf(struct cache_set *, struct keybuf *, struct bkey *,
		       keybuf_pred_fn *);
bool bch_keybuf_check_overlapping(struct keybuf *, struct bkey *,
				  struct bkey *);
void bch_keybuf_del(struct keybuf *, struct keybuf_key *);

View File

@ -11,17 +11,6 @@
#include "closure.h" #include "closure.h"
void closure_queue(struct closure *cl)
{
struct workqueue_struct *wq = cl->wq;
if (wq) {
INIT_WORK(&cl->work, cl->work.func);
BUG_ON(!queue_work(wq, &cl->work));
} else
cl->fn(cl);
}
EXPORT_SYMBOL_GPL(closure_queue);
#define CL_FIELD(type, field)					\
	case TYPE_ ## type:					\
	return &container_of(cl, struct type, cl)->field
@ -30,17 +19,6 @@ static struct closure_waitlist *closure_waitlist(struct closure *cl)
{
	switch (cl->type) {
		CL_FIELD(closure_with_waitlist, wait);
CL_FIELD(closure_with_waitlist_and_timer, wait);
default:
return NULL;
}
}
static struct timer_list *closure_timer(struct closure *cl)
{
switch (cl->type) {
CL_FIELD(closure_with_timer, timer);
CL_FIELD(closure_with_waitlist_and_timer, timer);
	default:
		return NULL;
	}
}
@ -51,7 +29,7 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
	int r = flags & CLOSURE_REMAINING_MASK;

	BUG_ON(flags & CLOSURE_GUARD_MASK);
	BUG_ON(!r && (flags & ~(CLOSURE_DESTRUCTOR|CLOSURE_BLOCKING)));
	BUG_ON(!r && (flags & ~CLOSURE_DESTRUCTOR));

	/* Must deliver precisely one wakeup */
	if (r == 1 && (flags & CLOSURE_SLEEPING))
@ -59,7 +37,6 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
	if (!r) {
		if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) {
			/* CLOSURE_BLOCKING might be set - clear it */
			atomic_set(&cl->remaining,
				   CLOSURE_REMAINING_INITIALIZER);
			closure_queue(cl);
@ -90,13 +67,13 @@ void closure_sub(struct closure *cl, int v)
{
	closure_put_after_sub(cl, atomic_sub_return(v, &cl->remaining));
}
EXPORT_SYMBOL_GPL(closure_sub);
EXPORT_SYMBOL(closure_sub);

void closure_put(struct closure *cl)
{
	closure_put_after_sub(cl, atomic_dec_return(&cl->remaining));
}
EXPORT_SYMBOL_GPL(closure_put);
EXPORT_SYMBOL(closure_put);

static void set_waiting(struct closure *cl, unsigned long f)
{

@ -133,7 +110,7 @@ void __closure_wake_up(struct closure_waitlist *wait_list)
		closure_sub(cl, CLOSURE_WAITING + 1);
	}
}
EXPORT_SYMBOL_GPL(__closure_wake_up);
EXPORT_SYMBOL(__closure_wake_up);

bool closure_wait(struct closure_waitlist *list, struct closure *cl)
{

@ -146,7 +123,7 @@ bool closure_wait(struct closure_waitlist *list, struct closure *cl)
	return true;
}
EXPORT_SYMBOL_GPL(closure_wait);
EXPORT_SYMBOL(closure_wait);
/**
 * closure_sync() - sleep until a closure has nothing left to wait on

@ -169,7 +146,7 @@ void closure_sync(struct closure *cl)
	__closure_end_sleep(cl);
}
EXPORT_SYMBOL_GPL(closure_sync);
EXPORT_SYMBOL(closure_sync);
/**
 * closure_trylock() - try to acquire the closure, without waiting

@ -183,17 +160,17 @@ bool closure_trylock(struct closure *cl, struct closure *parent)
			CLOSURE_REMAINING_INITIALIZER) != -1)
		return false;

	closure_set_ret_ip(cl);

	smp_mb();

	cl->parent = parent;
	if (parent)
		closure_get(parent);

	closure_set_ret_ip(cl);
	closure_debug_create(cl);

	return true;
}
EXPORT_SYMBOL_GPL(closure_trylock);
EXPORT_SYMBOL(closure_trylock);
void __closure_lock(struct closure *cl, struct closure *parent,
		    struct closure_waitlist *wait_list)

@ -205,57 +182,11 @@ void __closure_lock(struct closure *cl, struct closure *parent,
		if (closure_trylock(cl, parent))
			return;

		closure_wait_event_sync(wait_list, &wait,
		closure_wait_event(wait_list, &wait,
				   atomic_read(&cl->remaining) == -1);
	}
}
EXPORT_SYMBOL_GPL(__closure_lock);
EXPORT_SYMBOL(__closure_lock);
static void closure_delay_timer_fn(unsigned long data)
{
struct closure *cl = (struct closure *) data;
closure_sub(cl, CLOSURE_TIMER + 1);
}
void do_closure_timer_init(struct closure *cl)
{
struct timer_list *timer = closure_timer(cl);
init_timer(timer);
timer->data = (unsigned long) cl;
timer->function = closure_delay_timer_fn;
}
EXPORT_SYMBOL_GPL(do_closure_timer_init);
bool __closure_delay(struct closure *cl, unsigned long delay,
struct timer_list *timer)
{
if (atomic_read(&cl->remaining) & CLOSURE_TIMER)
return false;
BUG_ON(timer_pending(timer));
timer->expires = jiffies + delay;
atomic_add(CLOSURE_TIMER + 1, &cl->remaining);
add_timer(timer);
return true;
}
EXPORT_SYMBOL_GPL(__closure_delay);
void __closure_flush(struct closure *cl, struct timer_list *timer)
{
if (del_timer(timer))
closure_sub(cl, CLOSURE_TIMER + 1);
}
EXPORT_SYMBOL_GPL(__closure_flush);
void __closure_flush_sync(struct closure *cl, struct timer_list *timer)
{
if (del_timer_sync(timer))
closure_sub(cl, CLOSURE_TIMER + 1);
}
EXPORT_SYMBOL_GPL(__closure_flush_sync);
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
@ -273,7 +204,7 @@ void closure_debug_create(struct closure *cl)
	list_add(&cl->all, &closure_list);
	spin_unlock_irqrestore(&closure_list_lock, flags);
}
EXPORT_SYMBOL_GPL(closure_debug_create);
EXPORT_SYMBOL(closure_debug_create);

void closure_debug_destroy(struct closure *cl)
{

@ -286,7 +217,7 @@ void closure_debug_destroy(struct closure *cl)
	list_del(&cl->all);
	spin_unlock_irqrestore(&closure_list_lock, flags);
}
EXPORT_SYMBOL_GPL(closure_debug_destroy);
EXPORT_SYMBOL(closure_debug_destroy);
static struct dentry *debug;

@ -304,14 +235,12 @@ static int debug_seq_show(struct seq_file *f, void *data)
		   cl, (void *) cl->ip, cl->fn, cl->parent,
		   r & CLOSURE_REMAINING_MASK);

	seq_printf(f, "%s%s%s%s%s%s\n",
	seq_printf(f, "%s%s%s%s\n",
		   test_bit(WORK_STRUCT_PENDING,
			    work_data_bits(&cl->work)) ? "Q" : "",
		   r & CLOSURE_RUNNING	? "R" : "",
		   r & CLOSURE_BLOCKING	? "B" : "",
		   r & CLOSURE_STACK	? "S" : "",
		   r & CLOSURE_SLEEPING	? "Sl" : "",
		   r & CLOSURE_SLEEPING	? "Sl" : "");
		   r & CLOSURE_TIMER	? "T" : "");

	if (r & CLOSURE_WAITING)
		seq_printf(f, " W %pF\n",

View File

@ -155,21 +155,6 @@
 * delayed_work embeds a work item and a timer_list. The important thing is, use
 * it exactly like you would a regular closure and closure_put() will magically
 * handle everything for you.
*
* We've got closures that embed timers, too. They're called, appropriately
* enough:
* struct closure_with_timer;
*
* This gives you access to closure_delay(). It takes a refcount for a specified
* number of jiffies - you could then call closure_sync() (for a slightly
* convoluted version of msleep()) or continue_at() - which gives you the same
* effect as using a delayed work item, except you can reuse the work_struct
* already embedded in struct closure.
*
* Lastly, there's struct closure_with_waitlist_and_timer. It does what you
* probably expect, if you happen to need the features of both. (You don't
* really want to know how all this is implemented, but if I've done my job
* right you shouldn't have to care).
 */
struct closure;

@ -182,16 +167,11 @@ struct closure_waitlist {
enum closure_type {
	TYPE_closure				= 0,
	TYPE_closure_with_waitlist		= 1,
	TYPE_closure_with_timer			= 2,
	TYPE_closure_with_waitlist_and_timer	= 3,
	MAX_CLOSURE_TYPE			= 3,
	MAX_CLOSURE_TYPE			= 1,
};
enum closure_state {
	/*
	 * CLOSURE_BLOCKING: Causes closure_wait_event() to block, instead of
	 * waiting asynchronously
	 *
	 * CLOSURE_WAITING: Set iff the closure is on a waitlist. Must be set by
	 * the thread that owns the closure, and cleared by the thread that's
	 * waking up the closure.
@ -200,10 +180,6 @@ enum closure_state {
	 * - indicates that cl->task is valid and closure_put() may wake it up.
	 * Only set or cleared by the thread that owns the closure.
	 *
	 * CLOSURE_TIMER: Analagous to CLOSURE_WAITING, indicates that a closure
	 * has an outstanding timer. Must be set by the thread that owns the
	 * closure, and cleared by the timer function when the timer goes off.
	 *
	 * The rest are for debugging and don't affect behaviour:
	 *
	 * CLOSURE_RUNNING: Set when a closure is running (i.e. by

@ -218,19 +194,17 @@ enum closure_state {
	 * closure with this flag set
	 */
	CLOSURE_BITS_START	= (1 << 19),
	CLOSURE_DESTRUCTOR	= (1 << 19),
	CLOSURE_BLOCKING	= (1 << 21),
	CLOSURE_WAITING		= (1 << 23),
	CLOSURE_SLEEPING	= (1 << 25),
	CLOSURE_TIMER		= (1 << 27),

	CLOSURE_BITS_START	= (1 << 23),
	CLOSURE_DESTRUCTOR	= (1 << 23),
	CLOSURE_WAITING		= (1 << 25),
	CLOSURE_SLEEPING	= (1 << 27),

	CLOSURE_RUNNING		= (1 << 29),
	CLOSURE_STACK		= (1 << 31),
};

#define CLOSURE_GUARD_MASK					\
	((CLOSURE_DESTRUCTOR|CLOSURE_BLOCKING|CLOSURE_WAITING|	\
	  CLOSURE_SLEEPING|CLOSURE_TIMER|CLOSURE_RUNNING|CLOSURE_STACK) << 1)

#define CLOSURE_GUARD_MASK					\
	((CLOSURE_DESTRUCTOR|CLOSURE_WAITING|CLOSURE_SLEEPING|	\
	  CLOSURE_RUNNING|CLOSURE_STACK) << 1)

#define CLOSURE_REMAINING_MASK		(CLOSURE_BITS_START - 1)
#define CLOSURE_REMAINING_INITIALIZER	(1|CLOSURE_RUNNING)
@ -268,17 +242,6 @@ struct closure_with_waitlist {
	struct closure_waitlist wait;
};
struct closure_with_timer {
struct closure cl;
struct timer_list timer;
};
struct closure_with_waitlist_and_timer {
struct closure cl;
struct closure_waitlist wait;
struct timer_list timer;
};
extern unsigned invalid_closure_type(void);

#define __CLOSURE_TYPE(cl, _t)					\

@ -289,14 +252,11 @@ extern unsigned invalid_closure_type(void);
	(							\
	__CLOSURE_TYPE(cl, closure)				\
	__CLOSURE_TYPE(cl, closure_with_waitlist)		\
	__CLOSURE_TYPE(cl, closure_with_timer)			\
	__CLOSURE_TYPE(cl, closure_with_waitlist_and_timer)	\
	invalid_closure_type()					\
	)
void closure_sub(struct closure *cl, int v);
void closure_put(struct closure *cl);
void closure_queue(struct closure *cl);
void __closure_wake_up(struct closure_waitlist *list);
bool closure_wait(struct closure_waitlist *list, struct closure *cl);
void closure_sync(struct closure *cl);
@ -305,12 +265,6 @@ bool closure_trylock(struct closure *cl, struct closure *parent);
void __closure_lock(struct closure *cl, struct closure *parent,
		    struct closure_waitlist *wait_list);
void do_closure_timer_init(struct closure *cl);
bool __closure_delay(struct closure *cl, unsigned long delay,
struct timer_list *timer);
void __closure_flush(struct closure *cl, struct timer_list *timer);
void __closure_flush_sync(struct closure *cl, struct timer_list *timer);
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
void closure_debug_init(void);

@ -354,11 +308,6 @@ static inline void closure_set_stopped(struct closure *cl)
	atomic_sub(CLOSURE_RUNNING, &cl->remaining);
}
static inline bool closure_is_stopped(struct closure *cl)
{
return !(atomic_read(&cl->remaining) & CLOSURE_RUNNING);
}
static inline bool closure_is_unlocked(struct closure *cl)
{
	return atomic_read(&cl->remaining) == -1;

@ -367,14 +316,6 @@ static inline bool closure_is_unlocked(struct closure *cl)
static inline void do_closure_init(struct closure *cl, struct closure *parent,
				   bool running)
{
switch (cl->type) {
case TYPE_closure_with_timer:
case TYPE_closure_with_waitlist_and_timer:
do_closure_timer_init(cl);
default:
break;
}
	cl->parent = parent;
	if (parent)
		closure_get(parent);
@ -429,8 +370,7 @@ do { \
static inline void closure_init_stack(struct closure *cl)
{
	memset(cl, 0, sizeof(struct closure));
	atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER|
		   CLOSURE_BLOCKING|CLOSURE_STACK);
	atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER|CLOSURE_STACK);
}
/**

@ -461,24 +401,6 @@ do { \
#define closure_lock(cl, parent)				\
	__closure_lock(__to_internal_closure(cl), parent, &(cl)->wait)
/**
* closure_delay() - delay some number of jiffies
* @cl: the closure that will sleep
* @delay: the delay in jiffies
*
* Takes a refcount on @cl which will be released after @delay jiffies; this may
* be used to have a function run after a delay with continue_at(), or
* closure_sync() may be used for a convoluted version of msleep().
*/
#define closure_delay(cl, delay) \
__closure_delay(__to_internal_closure(cl), delay, &(cl)->timer)
#define closure_flush(cl) \
__closure_flush(__to_internal_closure(cl), &(cl)->timer)
#define closure_flush_sync(cl) \
__closure_flush_sync(__to_internal_closure(cl), &(cl)->timer)
static inline void __closure_end_sleep(struct closure *cl)
{
	__set_current_state(TASK_RUNNING);

@ -497,40 +419,6 @@ static inline void __closure_start_sleep(struct closure *cl)
	atomic_add(CLOSURE_SLEEPING, &cl->remaining);
}
/**
* closure_blocking() - returns true if the closure is in blocking mode.
*
* If a closure is in blocking mode, closure_wait_event() will sleep until the
* condition is true instead of waiting asynchronously.
*/
static inline bool closure_blocking(struct closure *cl)
{
return atomic_read(&cl->remaining) & CLOSURE_BLOCKING;
}
/**
* set_closure_blocking() - put a closure in blocking mode.
*
* If a closure is in blocking mode, closure_wait_event() will sleep until the
* condition is true instead of waiting asynchronously.
*
* Not thread safe - can only be called by the thread running the closure.
*/
static inline void set_closure_blocking(struct closure *cl)
{
if (!closure_blocking(cl))
atomic_add(CLOSURE_BLOCKING, &cl->remaining);
}
/*
* Not thread safe - can only be called by the thread running the closure.
*/
static inline void clear_closure_blocking(struct closure *cl)
{
if (closure_blocking(cl))
atomic_sub(CLOSURE_BLOCKING, &cl->remaining);
}
/**
 * closure_wake_up() - wake up all closures on a wait list.
 */
@ -561,63 +449,36 @@ static inline void closure_wake_up(struct closure_waitlist *list)
 * refcount on our closure. If this was a stack allocated closure, that would be
 * bad.
 */
#define __closure_wait_event(list, cl, condition, _block)	\
({								\
	bool block = _block;					\
	typeof(condition) ret;					\
								\
	while (1) {						\
		ret = (condition);				\
		if (ret) {					\
			__closure_wake_up(list);		\
			if (block)				\
				closure_sync(cl);		\
								\
			break;					\
		}						\
								\
		if (block)					\
			__closure_start_sleep(cl);		\
								\
		if (!closure_wait(list, cl)) {			\
			if (!block)				\
				break;				\
								\
			schedule();				\
		}						\
	}							\
								\
	ret;							\
})

#define closure_wait_event(list, cl, condition)			\
({								\
	typeof(condition) ret;					\
								\
	while (1) {						\
		ret = (condition);				\
		if (ret) {					\
			__closure_wake_up(list);		\
			closure_sync(cl);			\
			break;					\
		}						\
								\
		__closure_start_sleep(cl);			\
								\
		if (!closure_wait(list, cl))			\
			schedule();				\
	}							\
								\
	ret;							\
})
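With CLOSURE_BLOCKING gone, closure_wait_event() is unconditionally synchronous: it parks the closure on the waitlist and schedule()s until the condition holds. A hypothetical call site (the waitlist and condition here are stand-ins for whatever the caller is waiting on):

	/* Sleep until the producer frees journal space; the producer
	 * must call closure_wake_up(&c->journal.wait) after changing
	 * the condition, or this sleeper is never re-evaluated. */
	closure_wait_event(&c->journal.wait, cl,
			   c->journal.blocks_free != 0);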
static inline void closure_queue(struct closure *cl)
{
	struct workqueue_struct *wq = cl->wq;

	if (wq) {
		INIT_WORK(&cl->work, cl->work.func);
		BUG_ON(!queue_work(wq, &cl->work));
	} else
		cl->fn(cl);
}

/**
 * closure_wait_event() - wait on a condition, synchronously or asynchronously.
 * @list: the wait list to wait on
 * @cl: the closure that is doing the waiting
 * @condition: a C expression for the event to wait for
 *
 * If the closure is in blocking mode, sleeps until the @condition evaluates to
 * true - exactly like wait_event().
 *
* If the closure is not in blocking mode, waits asynchronously; if the
* condition is currently false the @cl is put onto @list and returns. @list
* owns a refcount on @cl; closure_sync() or continue_at() may be used later to
* wait for another thread to wake up @list, which drops the refcount on @cl.
*
* Returns the value of @condition; @cl will be on @list iff @condition was
* false.
*
* closure_wake_up(@list) must be called after changing any variable that could
* cause @condition to become true.
*/
#define closure_wait_event(list, cl, condition) \
__closure_wait_event(list, cl, condition, closure_blocking(cl))
#define closure_wait_event_async(list, cl, condition) \
__closure_wait_event(list, cl, condition, false)
#define closure_wait_event_sync(list, cl, condition) \
__closure_wait_event(list, cl, condition, true)
static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
				  struct workqueue_struct *wq)

@ -642,7 +503,7 @@ do { \
#define continue_at_nobarrier(_cl, _fn, _wq)			\
do {								\
	set_closure_fn(_cl, _fn, _wq);				\
	closure_queue(cl);					\
	closure_queue(_cl);					\
	return;							\
} while (0)
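The one-character change in continue_at_nobarrier() is a macro-hygiene fix: the old body expanded to closure_queue(cl), capturing whatever cl happened to be in the caller's scope instead of the macro argument. A hypothetical caller showing the difference (struct foo is a stand-in):

static void foo_done(struct closure *cl);

static void kick_foo(struct foo *f)
{
	/* Old body: expands to closure_queue(cl) -- a compile error
	 * here, since no `cl` is in scope (or the wrong closure, if
	 * one happens to be).  Fixed body queues f->cl as intended. */
	continue_at_nobarrier(&f->cl, foo_done, system_wq);
}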

View File

@ -8,7 +8,6 @@
#include "bcache.h" #include "bcache.h"
#include "btree.h" #include "btree.h"
#include "debug.h" #include "debug.h"
#include "request.h"
#include <linux/console.h> #include <linux/console.h>
#include <linux/debugfs.h> #include <linux/debugfs.h>
@ -77,29 +76,17 @@ int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k)
	return out - buf;
}

int bch_btree_to_text(char *buf, size_t size, const struct btree *b)
{
return scnprintf(buf, size, "%zu level %i/%i",
PTR_BUCKET_NR(b->c, &b->key, 0),
b->level, b->c->root ? b->c->root->level : -1);
}
#if defined(CONFIG_BCACHE_DEBUG) || defined(CONFIG_BCACHE_EDEBUG)
static bool skipped_backwards(struct btree *b, struct bkey *k)
{
return bkey_cmp(k, (!b->level)
? &START_KEY(bkey_next(k))
: bkey_next(k)) > 0;
}
#ifdef CONFIG_BCACHE_DEBUG

static void dump_bset(struct btree *b, struct bset *i)
{
	struct bkey *k;
	struct bkey *k, *next;
	unsigned j;
	char buf[80];

	for (k = i->start; k < end(i); k = bkey_next(k)) {
	for (k = i->start; k < end(i); k = next) {
		next = bkey_next(k);

		bch_bkey_to_text(buf, sizeof(buf), k);
		printk(KERN_ERR "block %zu key %zi/%u: %s", index(i, b),
		       (uint64_t *) k - i->d, i->keys, buf);

@ -115,15 +102,21 @@ static void dump_bset(struct btree *b, struct bset *i)
			printk(" %s\n", bch_ptr_status(b->c, k));

		if (bkey_next(k) < end(i) &&
		    skipped_backwards(b, k))
		if (next < end(i) &&
		    bkey_cmp(k, !b->level ? &START_KEY(next) : next) > 0)
			printk(KERN_ERR "Key skipped backwards\n");
	}
}

#endif

static void bch_dump_bucket(struct btree *b)
{
	unsigned i;

	console_lock();
	for (i = 0; i <= b->nsets; i++)
		dump_bset(b, b->sets[i].data);
	console_unlock();
}
void bch_btree_verify(struct btree *b, struct bset *new)
{

@ -176,66 +169,44 @@ void bch_btree_verify(struct btree *b, struct bset *new)
	mutex_unlock(&b->c->verify_lock);
}
static void data_verify_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	closure_put(cl);
}

void bch_data_verify(struct search *s)
{
	char name[BDEVNAME_SIZE];
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	struct closure *cl = &s->cl;
	struct bio *check;
	struct bio_vec *bv;
	int i;

	if (!s->unaligned_bvec)
		bio_for_each_segment(bv, s->orig_bio, i)
			bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;

	check = bio_clone(s->orig_bio, GFP_NOIO);
	if (!check)
		return;

	if (bio_alloc_pages(check, GFP_NOIO))
		goto out_put;

	check->bi_rw		= READ_SYNC;
	check->bi_private	= cl;
	check->bi_end_io	= data_verify_endio;

	closure_bio_submit(check, cl, &dc->disk);
	closure_sync(cl);

	bio_for_each_segment(bv, s->orig_bio, i) {
		void *p1 = kmap(bv->bv_page);
		void *p2 = kmap(check->bi_io_vec[i].bv_page);

		if (memcmp(p1 + bv->bv_offset,
			   p2 + bv->bv_offset,
			   bv->bv_len))
			printk(KERN_ERR
			       "bcache (%s): verify failed at sector %llu\n",
			       bdevname(dc->bdev, name),
			       (uint64_t) s->orig_bio->bi_sector);

		kunmap(bv->bv_page);
		kunmap(check->bi_io_vec[i].bv_page);
	}

	__bio_for_each_segment(bv, check, i, 0)
		__free_page(bv->bv_page);
out_put:
	bio_put(check);
}

void bch_data_verify(struct cached_dev *dc, struct bio *bio)
{
	char name[BDEVNAME_SIZE];
	struct bio *check;
	struct bio_vec *bv;
	int i;

	check = bio_clone(bio, GFP_NOIO);
	if (!check)
		return;

	if (bio_alloc_pages(check, GFP_NOIO))
		goto out_put;

	submit_bio_wait(READ_SYNC, check);

	bio_for_each_segment(bv, bio, i) {
		void *p1 = kmap_atomic(bv->bv_page);
		void *p2 = page_address(check->bi_io_vec[i].bv_page);

		cache_set_err_on(memcmp(p1 + bv->bv_offset,
					p2 + bv->bv_offset,
					bv->bv_len),
				 dc->disk.c,
				 "verify failed at dev %s sector %llu",
				 bdevname(dc->bdev, name),
				 (uint64_t) bio->bi_sector);

		kunmap_atomic(p1);
	}

	bio_for_each_segment_all(bv, check, i)
		__free_page(bv->bv_page);
out_put:
	bio_put(check);
}
#endif

#ifdef CONFIG_BCACHE_EDEBUG

unsigned bch_count_data(struct btree *b)
int __bch_count_data(struct btree *b)
{
	unsigned ret = 0;
	struct btree_iter iter;

@ -247,72 +218,60 @@ unsigned bch_count_data(struct btree *b)
	return ret;
}
static void vdump_bucket_and_panic(struct btree *b, const char *fmt,
va_list args)
{
unsigned i;
char buf[80];
console_lock();
for (i = 0; i <= b->nsets; i++)
dump_bset(b, b->sets[i].data);
vprintk(fmt, args);
console_unlock();
bch_btree_to_text(buf, sizeof(buf), b);
panic("at %s\n", buf);
}
void bch_check_key_order_msg(struct btree *b, struct bset *i,
const char *fmt, ...)
{
struct bkey *k;
if (!i->keys)
return;
for (k = i->start; bkey_next(k) < end(i); k = bkey_next(k))
if (skipped_backwards(b, k)) {
va_list args;
va_start(args, fmt);
vdump_bucket_and_panic(b, fmt, args);
va_end(args);
}
}
void bch_check_keys(struct btree *b, const char *fmt, ...)
{
	va_list args;
	struct bkey *k, *p = NULL;
	struct btree_iter iter;

	if (b->level)
		return;

	for_each_key(b, k, &iter) {
		if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0) {
			printk(KERN_ERR "Keys out of order:\n");
			goto bug;
		}

		if (bch_ptr_invalid(b, k))
			continue;

		if (p && bkey_cmp(p, &START_KEY(k)) > 0) {
			printk(KERN_ERR "Overlapping keys:\n");
			goto bug;
		}
		p = k;
	}
	return;
bug:
	va_start(args, fmt);
	vdump_bucket_and_panic(b, fmt, args);
	va_end(args);
}

void __bch_check_keys(struct btree *b, const char *fmt, ...)
{
	va_list args;
	struct bkey *k, *p = NULL;
	struct btree_iter iter;
	const char *err;

	for_each_key(b, k, &iter) {
		if (!b->level) {
			err = "Keys out of order";
			if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0)
				goto bug;

			if (bch_ptr_invalid(b, k))
				continue;

			err = "Overlapping keys";
			if (p && bkey_cmp(p, &START_KEY(k)) > 0)
				goto bug;
		} else {
			if (bch_ptr_bad(b, k))
				continue;

			err = "Duplicate keys";
			if (p && !bkey_cmp(p, k))
				goto bug;
		}
		p = k;
	}

	err = "Key larger than btree node key";
	if (p && bkey_cmp(p, &b->key) > 0)
		goto bug;

	return;
bug:
	bch_dump_bucket(b);

	va_start(args, fmt);
	vprintk(fmt, args);
	va_end(args);

	panic("bcache error: %s:\n", err);
}
void bch_btree_iter_next_check(struct btree_iter *iter)
{
struct bkey *k = iter->data->k, *next = bkey_next(k);
if (next < iter->data->end &&
bkey_cmp(k, iter->b->level ? next : &START_KEY(next)) > 0) {
bch_dump_bucket(iter->b);
panic("Key skipped backwards\n");
}
} }
#endif

View File

@ -4,40 +4,44 @@
/* Btree/bkey debug printing */

int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k);
int bch_btree_to_text(char *buf, size_t size, const struct btree *b);
#ifdef CONFIG_BCACHE_EDEBUG
unsigned bch_count_data(struct btree *);
void bch_check_key_order_msg(struct btree *, struct bset *, const char *, ...);
void bch_check_keys(struct btree *, const char *, ...);
#define bch_check_key_order(b, i) \
bch_check_key_order_msg(b, i, "keys out of order")
#define EBUG_ON(cond) BUG_ON(cond)
#else /* EDEBUG */
#define bch_count_data(b) 0
#define bch_check_key_order(b, i) do {} while (0)
#define bch_check_key_order_msg(b, i, ...) do {} while (0)
#define bch_check_keys(b, ...) do {} while (0)
#define EBUG_ON(cond) do {} while (0)
#endif
#ifdef CONFIG_BCACHE_DEBUG

void bch_btree_verify(struct btree *, struct bset *);
void bch_data_verify(struct search *);
void bch_data_verify(struct cached_dev *, struct bio *);
int __bch_count_data(struct btree *);
void __bch_check_keys(struct btree *, const char *, ...);
void bch_btree_iter_next_check(struct btree_iter *);

#define EBUG_ON(cond)			BUG_ON(cond)
#define expensive_debug_checks(c)	((c)->expensive_debug_checks)
#define key_merging_disabled(c)		((c)->key_merging_disabled)
#define bypass_torture_test(d)		((d)->bypass_torture_test)

#else /* DEBUG */

static inline void bch_btree_verify(struct btree *b, struct bset *i) {}
static inline void bch_data_verify(struct search *s) {};
static inline void bch_data_verify(struct cached_dev *dc, struct bio *bio) {}
static inline int __bch_count_data(struct btree *b) { return -1; }
static inline void __bch_check_keys(struct btree *b, const char *fmt, ...) {}
static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}

#define EBUG_ON(cond)			do { if (cond); } while (0)
#define expensive_debug_checks(c)	0
#define key_merging_disabled(c)		0
#define bypass_torture_test(d)		0

#endif
#define bch_count_data(b) \
(expensive_debug_checks((b)->c) ? __bch_count_data(b) : -1)
#define bch_check_keys(b, ...) \
do { \
if (expensive_debug_checks((b)->c)) \
__bch_check_keys(b, __VA_ARGS__); \
} while (0)
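These wrappers are how the old compile-time EDEBUG checks become a per-cache-set runtime switch: with CONFIG_BCACHE_DEBUG=y they test expensive_debug_checks(c) at each call site, and with it off they collapse to constants. A typical call-site pattern, mirroring the bch_btree_sort_partial() conversion earlier in this diff (the message text is a stand-in):

	int oldsize = bch_count_data(b);	/* -1 unless checks are on */

	/* ...mutate the btree node... */

	bch_check_keys(b, "error after mutation");
	EBUG_ON(oldsize >= 0 && bch_count_data(b) != oldsize);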
#ifdef CONFIG_DEBUG_FS
void bch_debug_init_cache_set(struct cache_set *);
#else

View File

@ -7,7 +7,6 @@
#include "bcache.h" #include "bcache.h"
#include "btree.h" #include "btree.h"
#include "debug.h" #include "debug.h"
#include "request.h"
#include <trace/events/bcache.h> #include <trace/events/bcache.h>
@ -31,17 +30,20 @@ static void journal_read_endio(struct bio *bio, int error)
}

static int journal_read_bucket(struct cache *ca, struct list_head *list,
			       struct btree_op *op, unsigned bucket_index)
			       unsigned bucket_index)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->bio;

	struct journal_replay *i;
	struct jset *j, *data = ca->set->journal.w[0].data;
	struct closure cl;
	unsigned len, left, offset = 0;
	int ret = 0;
	sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);

	closure_init_stack(&cl);

	pr_debug("reading %llu", (uint64_t) bucket);
	while (offset < ca->sb.bucket_size) {

@ -55,11 +57,11 @@ reread:		left = ca->sb.bucket_size - offset;
		bio->bi_size	= len << 9;
		bio->bi_end_io	= journal_read_endio;
		bio->bi_private = &op->cl;
		bio->bi_private = &cl;
		bch_bio_map(bio, data);

		closure_bio_submit(bio, &op->cl, ca);
		closure_sync(&op->cl);
		closure_bio_submit(bio, &cl, ca);
		closure_sync(&cl);
		/* This function could be simpler now since we no longer write
		 * journal entries that overlap bucket boundaries; this means

@ -72,7 +74,7 @@ reread:		left = ca->sb.bucket_size - offset;
			struct list_head *where;
			size_t blocks, bytes = set_bytes(j);

			if (j->magic != jset_magic(ca->set))
			if (j->magic != jset_magic(&ca->sb))
				return ret;

			if (bytes > left << 9)
@ -129,12 +131,11 @@ next_set:
	return ret;
}

int bch_journal_read(struct cache_set *c, struct list_head *list,
		     struct btree_op *op)
int bch_journal_read(struct cache_set *c, struct list_head *list)
{
#define read_bucket(b)							\
	({								\
		int ret = journal_read_bucket(ca, list, op, b);		\
		int ret = journal_read_bucket(ca, list, b);		\
		__set_bit(b, bitmap);					\
		if (ret < 0)						\
			return ret;					\
@ -292,8 +293,7 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list)
	}
}

int bch_journal_replay(struct cache_set *s, struct list_head *list,
		       struct btree_op *op)
int bch_journal_replay(struct cache_set *s, struct list_head *list)
{
	int ret = 0, keys = 0, entries = 0;
	struct bkey *k;
@ -301,31 +301,30 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list,
		list_entry(list->prev, struct journal_replay, list);
	uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
	struct keylist keylist;

	bch_keylist_init(&keylist);

	list_for_each_entry(i, list, list) {
		BUG_ON(i->pin && atomic_read(i->pin) != 1);

		if (n != i->j.seq)
			pr_err(
		"journal entries %llu-%llu missing! (replaying %llu-%llu)\n",
			n, i->j.seq - 1, start, end);

		cache_set_err_on(n != i->j.seq, s,
"bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
				 n, i->j.seq - 1, start, end);

		for (k = i->j.start;
		     k < end(&i->j);
		     k = bkey_next(k)) {
			trace_bcache_journal_replay_key(k);

			bkey_copy(op->keys.top, k);
			bch_keylist_push(&op->keys);

			op->journal = i->pin;
			atomic_inc(op->journal);

			ret = bch_btree_insert(op, s);

			bkey_copy(keylist.top, k);
			bch_keylist_push(&keylist);

			ret = bch_btree_insert(s, &keylist, i->pin, NULL);
			if (ret)
				goto err;

			BUG_ON(!bch_keylist_empty(&op->keys));
			BUG_ON(!bch_keylist_empty(&keylist));
			keys++;

			cond_resched();
@ -339,14 +338,13 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list,
pr_info("journal replay done, %i keys in %i entries, seq %llu", pr_info("journal replay done, %i keys in %i entries, seq %llu",
keys, entries, end); keys, entries, end);
err:
while (!list_empty(list)) { while (!list_empty(list)) {
i = list_first_entry(list, struct journal_replay, list); i = list_first_entry(list, struct journal_replay, list);
list_del(&i->list); list_del(&i->list);
kfree(i); kfree(i);
} }
err:
closure_sync(&op->cl);
return ret; return ret;
} }
@ -358,48 +356,35 @@ static void btree_flush_write(struct cache_set *c)
	 * Try to find the btree node with that references the oldest journal
	 * entry, best is our current candidate and is locked if non NULL:
	 */
	struct btree *b, *best = NULL;
	unsigned iter;

	for_each_cached_btree(b, c, iter) {
		if (!down_write_trylock(&b->lock))
			continue;

		if (!btree_node_dirty(b) ||
		    !btree_current_write(b)->journal) {
			rw_unlock(true, b);
			continue;
		}

		if (!best)
			best = b;
		else if (journal_pin_cmp(c,
					 btree_current_write(best),
					 btree_current_write(b))) {
			rw_unlock(true, best);
			best = b;
		} else
			rw_unlock(true, b);
	}

	if (best)
		goto out;

	/* We can't find the best btree node, just pick the first */
	list_for_each_entry(b, &c->btree_cache, list)
		if (!b->level && btree_node_dirty(b)) {
			best = b;
			rw_lock(true, best, best->level);
			goto found;
		}
out:
	if (!best)
		return;
found:
	if (btree_node_dirty(best))
		bch_btree_node_write(best, NULL);
	rw_unlock(true, best);
}

	struct btree *b, *best;
	unsigned i;
retry:
	best = NULL;

	for_each_cached_btree(b, c, i)
		if (btree_current_write(b)->journal) {
			if (!best)
				best = b;
			else if (journal_pin_cmp(c,
					btree_current_write(best)->journal,
					btree_current_write(b)->journal))
				best = b;
		}

	b = best;
	if (b) {
		rw_lock(true, b, b->level);

		if (!btree_current_write(b)->journal) {
			rw_unlock(true, b);
			/* We raced */
			goto retry;
		}

		bch_btree_node_write(b, NULL);
		rw_unlock(true, b);
	}
}
#define last_seq(j)	((j)->seq - fifo_used(&(j)->pin) + 1)
@ -495,7 +480,7 @@ static void journal_reclaim(struct cache_set *c)
		do_journal_discard(ca);

	if (c->journal.blocks_free)
		return;
	if (c->journal.blocks_free)
		goto out;

	/*
	 * Allocate:

@ -521,7 +506,7 @@ static void journal_reclaim(struct cache_set *c)
	if (n)
		c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
out:
	if (!journal_full(&c->journal))
		__closure_wake_up(&c->journal.wait);
}
@ -554,32 +539,26 @@ static void journal_write_endio(struct bio *bio, int error)
	struct journal_write *w = bio->bi_private;

	cache_set_err_on(error, w->c, "journal io error");
	closure_put(&w->c->journal.io.cl);
	closure_put(&w->c->journal.io);
}
static void journal_write(struct closure *); static void journal_write(struct closure *);
static void journal_write_done(struct closure *cl) static void journal_write_done(struct closure *cl)
{ {
struct journal *j = container_of(cl, struct journal, io.cl); struct journal *j = container_of(cl, struct journal, io);
struct cache_set *c = container_of(j, struct cache_set, journal);
struct journal_write *w = (j->cur == j->w) struct journal_write *w = (j->cur == j->w)
? &j->w[1] ? &j->w[1]
: &j->w[0]; : &j->w[0];
__closure_wake_up(&w->wait); __closure_wake_up(&w->wait);
continue_at_nobarrier(cl, journal_write, system_wq);
if (c->journal_delay_ms)
closure_delay(&j->io, msecs_to_jiffies(c->journal_delay_ms));
continue_at(cl, journal_write, system_wq);
} }
static void journal_write_unlocked(struct closure *cl) static void journal_write_unlocked(struct closure *cl)
__releases(c->journal.lock) __releases(c->journal.lock)
{ {
struct cache_set *c = container_of(cl, struct cache_set, journal.io.cl); struct cache_set *c = container_of(cl, struct cache_set, journal.io);
struct cache *ca; struct cache *ca;
struct journal_write *w = c->journal.cur; struct journal_write *w = c->journal.cur;
struct bkey *k = &c->journal.key; struct bkey *k = &c->journal.key;
@ -617,7 +596,7 @@ static void journal_write_unlocked(struct closure *cl)
for_each_cache(ca, c, i) for_each_cache(ca, c, i)
w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0]; w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
w->data->magic = jset_magic(c); w->data->magic = jset_magic(&c->sb);
w->data->version = BCACHE_JSET_VERSION; w->data->version = BCACHE_JSET_VERSION;
w->data->last_seq = last_seq(&c->journal); w->data->last_seq = last_seq(&c->journal);
w->data->csum = csum_set(w->data); w->data->csum = csum_set(w->data);
@ -660,121 +639,134 @@ static void journal_write_unlocked(struct closure *cl)
static void journal_write(struct closure *cl) static void journal_write(struct closure *cl)
{ {
struct cache_set *c = container_of(cl, struct cache_set, journal.io.cl); struct cache_set *c = container_of(cl, struct cache_set, journal.io);
spin_lock(&c->journal.lock); spin_lock(&c->journal.lock);
journal_write_unlocked(cl); journal_write_unlocked(cl);
} }
static void __journal_try_write(struct cache_set *c, bool noflush) static void journal_try_write(struct cache_set *c)
__releases(c->journal.lock) __releases(c->journal.lock)
{ {
struct closure *cl = &c->journal.io.cl; struct closure *cl = &c->journal.io;
struct journal_write *w = c->journal.cur;
if (!closure_trylock(cl, &c->cl)) w->need_write = true;
spin_unlock(&c->journal.lock);
else if (noflush && journal_full(&c->journal)) { if (closure_trylock(cl, &c->cl))
spin_unlock(&c->journal.lock);
continue_at(cl, journal_write, system_wq);
} else
journal_write_unlocked(cl); journal_write_unlocked(cl);
else
spin_unlock(&c->journal.lock);
} }
#define journal_try_write(c) __journal_try_write(c, false) static struct journal_write *journal_wait_for_write(struct cache_set *c,
unsigned nkeys)
void bch_journal_meta(struct cache_set *c, struct closure *cl)
{ {
struct journal_write *w; size_t sectors;
struct closure cl;
if (CACHE_SYNC(&c->sb)) { closure_init_stack(&cl);
spin_lock(&c->journal.lock);
while (1) {
struct journal_write *w = c->journal.cur;
sectors = __set_blocks(w->data, w->data->keys + nkeys,
c) * c->sb.block_size;
if (sectors <= min_t(size_t,
c->journal.blocks_free * c->sb.block_size,
PAGE_SECTORS << JSET_BITS))
return w;
/* XXX: tracepoint */
if (!journal_full(&c->journal)) {
trace_bcache_journal_entry_full(c);
/*
* XXX: If we're inserting so many keys that they
* won't fit in an _empty_ journal write, we'll
* deadlock. For now, handle this in
* bch_keylist_realloc() - but something to think about.
*/
BUG_ON(!w->data->keys);
closure_wait(&w->wait, &cl);
journal_try_write(c); /* unlocks */
} else {
trace_bcache_journal_full(c);
closure_wait(&c->journal.wait, &cl);
journal_reclaim(c);
spin_unlock(&c->journal.lock);
btree_flush_write(c);
}
closure_sync(&cl);
spin_lock(&c->journal.lock); spin_lock(&c->journal.lock);
w = c->journal.cur;
w->need_write = true;
if (cl)
BUG_ON(!closure_wait(&w->wait, cl));
closure_flush(&c->journal.io);
__journal_try_write(c, true);
} }
} }
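Both wait cases in journal_wait_for_write() follow the same closure idiom: enqueue on a waitlist while still holding the lock, drop the lock, sleep, then retake the lock and re-check. A generic sketch of the pattern (waitlist stands for either w->wait or c->journal.wait; can_make_progress() is a placeholder for the space checks above):

        struct closure cl;

        closure_init_stack(&cl);

        spin_lock(&c->journal.lock);
        while (!can_make_progress(c)) {
                closure_wait(&waitlist, &cl);   /* enqueue before unlocking */
                spin_unlock(&c->journal.lock);

                closure_sync(&cl);              /* sleep until __closure_wake_up() */
                spin_lock(&c->journal.lock);    /* re-check under the lock */
        }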
static void journal_write_work(struct work_struct *work)
{
struct cache_set *c = container_of(to_delayed_work(work),
struct cache_set,
journal.work);
spin_lock(&c->journal.lock);
journal_try_write(c);
}
/* /*
* Entry point to the journalling code - bio_insert() and btree_invalidate() * Entry point to the journalling code - bio_insert() and btree_invalidate()
* pass bch_journal() a list of keys to be journalled, and then * pass bch_journal() a list of keys to be journalled, and then
* bch_journal() hands those same keys off to btree_insert_async() * bch_journal() hands those same keys off to btree_insert_async()
*/ */
void bch_journal(struct closure *cl) atomic_t *bch_journal(struct cache_set *c,
struct keylist *keys,
struct closure *parent)
{ {
struct btree_op *op = container_of(cl, struct btree_op, cl);
struct cache_set *c = op->c;
struct journal_write *w; struct journal_write *w;
size_t b, n = ((uint64_t *) op->keys.top) - op->keys.list; atomic_t *ret;
if (op->type != BTREE_INSERT || if (!CACHE_SYNC(&c->sb))
!CACHE_SYNC(&c->sb)) return NULL;
goto out;
/* w = journal_wait_for_write(c, bch_keylist_nkeys(keys));
* If we're looping because we errored, might already be waiting on
* another journal write:
*/
while (atomic_read(&cl->parent->remaining) & CLOSURE_WAITING)
closure_sync(cl->parent);
spin_lock(&c->journal.lock); memcpy(end(w->data), keys->keys, bch_keylist_bytes(keys));
w->data->keys += bch_keylist_nkeys(keys);
if (journal_full(&c->journal)) { ret = &fifo_back(&c->journal.pin);
trace_bcache_journal_full(c); atomic_inc(ret);
closure_wait(&c->journal.wait, cl);
journal_reclaim(c);
spin_unlock(&c->journal.lock);
btree_flush_write(c);
continue_at(cl, bch_journal, bcache_wq);
}
w = c->journal.cur;
w->need_write = true;
b = __set_blocks(w->data, w->data->keys + n, c);
if (b * c->sb.block_size > PAGE_SECTORS << JSET_BITS ||
b > c->journal.blocks_free) {
trace_bcache_journal_entry_full(c);
/*
* XXX: If we're inserting so many keys that they won't fit in
* an _empty_ journal write, we'll deadlock. For now, handle
* this in bch_keylist_realloc() - but something to think about.
*/
BUG_ON(!w->data->keys);
BUG_ON(!closure_wait(&w->wait, cl));
closure_flush(&c->journal.io);
if (parent) {
closure_wait(&w->wait, parent);
journal_try_write(c); journal_try_write(c);
continue_at(cl, bch_journal, bcache_wq); } else if (!w->need_write) {
schedule_delayed_work(&c->journal.work,
msecs_to_jiffies(c->journal_delay_ms));
spin_unlock(&c->journal.lock);
} else {
spin_unlock(&c->journal.lock);
} }
memcpy(end(w->data), op->keys.list, n * sizeof(uint64_t));
w->data->keys += n;
op->journal = &fifo_back(&c->journal.pin); return ret;
atomic_inc(op->journal); }
if (op->flush_journal) { void bch_journal_meta(struct cache_set *c, struct closure *cl)
closure_flush(&c->journal.io); {
closure_wait(&w->wait, cl->parent); struct keylist keys;
} atomic_t *ref;
journal_try_write(c); bch_keylist_init(&keys);
out:
bch_btree_insert_async(cl); ref = bch_journal(c, &keys, cl);
if (ref)
atomic_dec_bug(ref);
} }
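bch_journal() now hands the journal pin back to the caller (NULL when the cache set isn't synchronous) instead of stashing it in a btree_op. A hypothetical caller, showing the intended ownership - journal the keys, insert them, drop the pin - much as bch_journal_meta() does above with an empty keylist:

        static int journal_then_insert(struct cache_set *c, struct keylist *keys,
                                       struct closure *parent)
        {
                atomic_t *journal_ref;
                int ret;

                journal_ref = bch_journal(c, keys, parent); /* NULL if !CACHE_SYNC */

                ret = bch_btree_insert(c, keys, journal_ref, NULL);

                if (journal_ref)
                        atomic_dec_bug(journal_ref);    /* release the pin */

                return ret;
        }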
void bch_journal_free(struct cache_set *c) void bch_journal_free(struct cache_set *c)
@ -790,6 +782,7 @@ int bch_journal_alloc(struct cache_set *c)
closure_init_unlocked(&j->io); closure_init_unlocked(&j->io);
spin_lock_init(&j->lock); spin_lock_init(&j->lock);
INIT_DELAYED_WORK(&j->work, journal_write_work);
c->journal_delay_ms = 100; c->journal_delay_ms = 100;


@ -75,43 +75,6 @@
* nodes that are pinning the oldest journal entries first. * nodes that are pinning the oldest journal entries first.
*/ */
#define BCACHE_JSET_VERSION_UUIDv1 1
/* Always latest UUID format */
#define BCACHE_JSET_VERSION_UUID 1
#define BCACHE_JSET_VERSION 1
/*
* On disk format for a journal entry:
* seq is monotonically increasing; every journal entry has its own unique
* sequence number.
*
* last_seq is the oldest journal entry that still has keys the btree hasn't
* flushed to disk yet.
*
* version is for on disk format changes.
*/
struct jset {
uint64_t csum;
uint64_t magic;
uint64_t seq;
uint32_t version;
uint32_t keys;
uint64_t last_seq;
BKEY_PADDED(uuid_bucket);
BKEY_PADDED(btree_root);
uint16_t btree_level;
uint16_t pad[3];
uint64_t prio_bucket[MAX_CACHES_PER_SET];
union {
struct bkey start[0];
uint64_t d[0];
};
};
/* /*
* Only used for holding the journal entries we read in btree_journal_read() * Only used for holding the journal entries we read in btree_journal_read()
* during cache registration * during cache registration
@ -140,7 +103,8 @@ struct journal {
spinlock_t lock; spinlock_t lock;
/* used when waiting because the journal was full */ /* used when waiting because the journal was full */
struct closure_waitlist wait; struct closure_waitlist wait;
struct closure_with_timer io; struct closure io;
struct delayed_work work;
/* Number of blocks free in the bucket(s) we're currently writing to */ /* Number of blocks free in the bucket(s) we're currently writing to */
unsigned blocks_free; unsigned blocks_free;
@ -188,8 +152,7 @@ struct journal_device {
}; };
#define journal_pin_cmp(c, l, r) \ #define journal_pin_cmp(c, l, r) \
(fifo_idx(&(c)->journal.pin, (l)->journal) > \ (fifo_idx(&(c)->journal.pin, (l)) > fifo_idx(&(c)->journal.pin, (r)))
fifo_idx(&(c)->journal.pin, (r)->journal))
#define JOURNAL_PIN 20000 #define JOURNAL_PIN 20000
@ -199,15 +162,14 @@ struct journal_device {
struct closure; struct closure;
struct cache_set; struct cache_set;
struct btree_op; struct btree_op;
struct keylist;
void bch_journal(struct closure *); atomic_t *bch_journal(struct cache_set *, struct keylist *, struct closure *);
void bch_journal_next(struct journal *); void bch_journal_next(struct journal *);
void bch_journal_mark(struct cache_set *, struct list_head *); void bch_journal_mark(struct cache_set *, struct list_head *);
void bch_journal_meta(struct cache_set *, struct closure *); void bch_journal_meta(struct cache_set *, struct closure *);
int bch_journal_read(struct cache_set *, struct list_head *, int bch_journal_read(struct cache_set *, struct list_head *);
struct btree_op *); int bch_journal_replay(struct cache_set *, struct list_head *);
int bch_journal_replay(struct cache_set *, struct list_head *,
struct btree_op *);
void bch_journal_free(struct cache_set *); void bch_journal_free(struct cache_set *);
int bch_journal_alloc(struct cache_set *); int bch_journal_alloc(struct cache_set *);


@ -12,8 +12,9 @@
#include <trace/events/bcache.h> #include <trace/events/bcache.h>
struct moving_io { struct moving_io {
struct closure cl;
struct keybuf_key *w; struct keybuf_key *w;
struct search s; struct data_insert_op op;
struct bbio bio; struct bbio bio;
}; };
@ -38,13 +39,13 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)
static void moving_io_destructor(struct closure *cl) static void moving_io_destructor(struct closure *cl)
{ {
struct moving_io *io = container_of(cl, struct moving_io, s.cl); struct moving_io *io = container_of(cl, struct moving_io, cl);
kfree(io); kfree(io);
} }
static void write_moving_finish(struct closure *cl) static void write_moving_finish(struct closure *cl)
{ {
struct moving_io *io = container_of(cl, struct moving_io, s.cl); struct moving_io *io = container_of(cl, struct moving_io, cl);
struct bio *bio = &io->bio.bio; struct bio *bio = &io->bio.bio;
struct bio_vec *bv; struct bio_vec *bv;
int i; int i;
@ -52,13 +53,12 @@ static void write_moving_finish(struct closure *cl)
bio_for_each_segment_all(bv, bio, i) bio_for_each_segment_all(bv, bio, i)
__free_page(bv->bv_page); __free_page(bv->bv_page);
if (io->s.op.insert_collision) if (io->op.replace_collision)
trace_bcache_gc_copy_collision(&io->w->key); trace_bcache_gc_copy_collision(&io->w->key);
bch_keybuf_del(&io->s.op.c->moving_gc_keys, io->w); bch_keybuf_del(&io->op.c->moving_gc_keys, io->w);
atomic_dec_bug(&io->s.op.c->in_flight); up(&io->op.c->moving_in_flight);
closure_wake_up(&io->s.op.c->moving_gc_wait);
closure_return_with_destructor(cl, moving_io_destructor); closure_return_with_destructor(cl, moving_io_destructor);
} }
@ -66,12 +66,12 @@ static void write_moving_finish(struct closure *cl)
static void read_moving_endio(struct bio *bio, int error) static void read_moving_endio(struct bio *bio, int error)
{ {
struct moving_io *io = container_of(bio->bi_private, struct moving_io *io = container_of(bio->bi_private,
struct moving_io, s.cl); struct moving_io, cl);
if (error) if (error)
io->s.error = error; io->op.error = error;
bch_bbio_endio(io->s.op.c, bio, error, "reading data to move"); bch_bbio_endio(io->op.c, bio, error, "reading data to move");
} }
static void moving_init(struct moving_io *io) static void moving_init(struct moving_io *io)
@ -85,54 +85,53 @@ static void moving_init(struct moving_io *io)
bio->bi_size = KEY_SIZE(&io->w->key) << 9; bio->bi_size = KEY_SIZE(&io->w->key) << 9;
bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&io->w->key), bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&io->w->key),
PAGE_SECTORS); PAGE_SECTORS);
bio->bi_private = &io->s.cl; bio->bi_private = &io->cl;
bio->bi_io_vec = bio->bi_inline_vecs; bio->bi_io_vec = bio->bi_inline_vecs;
bch_bio_map(bio, NULL); bch_bio_map(bio, NULL);
} }
static void write_moving(struct closure *cl) static void write_moving(struct closure *cl)
{ {
struct search *s = container_of(cl, struct search, cl); struct moving_io *io = container_of(cl, struct moving_io, cl);
struct moving_io *io = container_of(s, struct moving_io, s); struct data_insert_op *op = &io->op;
if (!s->error) { if (!op->error) {
moving_init(io); moving_init(io);
io->bio.bio.bi_sector = KEY_START(&io->w->key); io->bio.bio.bi_sector = KEY_START(&io->w->key);
s->op.lock = -1; op->write_prio = 1;
s->op.write_prio = 1; op->bio = &io->bio.bio;
s->op.cache_bio = &io->bio.bio;
s->writeback = KEY_DIRTY(&io->w->key); op->writeback = KEY_DIRTY(&io->w->key);
s->op.csum = KEY_CSUM(&io->w->key); op->csum = KEY_CSUM(&io->w->key);
s->op.type = BTREE_REPLACE; bkey_copy(&op->replace_key, &io->w->key);
bkey_copy(&s->op.replace, &io->w->key); op->replace = true;
closure_init(&s->op.cl, cl); closure_call(&op->cl, bch_data_insert, NULL, cl);
bch_insert_data(&s->op.cl);
} }
continue_at(cl, write_moving_finish, NULL); continue_at(cl, write_moving_finish, system_wq);
} }
static void read_moving_submit(struct closure *cl) static void read_moving_submit(struct closure *cl)
{ {
struct search *s = container_of(cl, struct search, cl); struct moving_io *io = container_of(cl, struct moving_io, cl);
struct moving_io *io = container_of(s, struct moving_io, s);
struct bio *bio = &io->bio.bio; struct bio *bio = &io->bio.bio;
bch_submit_bbio(bio, s->op.c, &io->w->key, 0); bch_submit_bbio(bio, io->op.c, &io->w->key, 0);
continue_at(cl, write_moving, bch_gc_wq); continue_at(cl, write_moving, system_wq);
} }
static void read_moving(struct closure *cl) static void read_moving(struct cache_set *c)
{ {
struct cache_set *c = container_of(cl, struct cache_set, moving_gc);
struct keybuf_key *w; struct keybuf_key *w;
struct moving_io *io; struct moving_io *io;
struct bio *bio; struct bio *bio;
struct closure cl;
closure_init_stack(&cl);
/* XXX: if we error, background writeback could stall indefinitely */ /* XXX: if we error, background writeback could stall indefinitely */
@ -150,8 +149,8 @@ static void read_moving(struct closure *cl)
w->private = io; w->private = io;
io->w = w; io->w = w;
io->s.op.inode = KEY_INODE(&w->key); io->op.inode = KEY_INODE(&w->key);
io->s.op.c = c; io->op.c = c;
moving_init(io); moving_init(io);
bio = &io->bio.bio; bio = &io->bio.bio;
@ -164,13 +163,8 @@ static void read_moving(struct closure *cl)
trace_bcache_gc_copy(&w->key); trace_bcache_gc_copy(&w->key);
closure_call(&io->s.cl, read_moving_submit, NULL, &c->gc.cl); down(&c->moving_in_flight);
closure_call(&io->cl, read_moving_submit, NULL, &cl);
if (atomic_inc_return(&c->in_flight) >= 64) {
closure_wait_event(&c->moving_gc_wait, cl,
atomic_read(&c->in_flight) < 64);
continue_at(cl, read_moving, bch_gc_wq);
}
} }
if (0) { if (0) {
@ -180,7 +174,7 @@ err: if (!IS_ERR_OR_NULL(w->private))
bch_keybuf_del(&c->moving_gc_keys, w); bch_keybuf_del(&c->moving_gc_keys, w);
} }
closure_return(cl); closure_sync(&cl);
} }
static bool bucket_cmp(struct bucket *l, struct bucket *r) static bool bucket_cmp(struct bucket *l, struct bucket *r)
@ -193,15 +187,14 @@ static unsigned bucket_heap_top(struct cache *ca)
return GC_SECTORS_USED(heap_peek(&ca->heap)); return GC_SECTORS_USED(heap_peek(&ca->heap));
} }
void bch_moving_gc(struct closure *cl) void bch_moving_gc(struct cache_set *c)
{ {
struct cache_set *c = container_of(cl, struct cache_set, gc.cl);
struct cache *ca; struct cache *ca;
struct bucket *b; struct bucket *b;
unsigned i; unsigned i;
if (!c->copy_gc_enabled) if (!c->copy_gc_enabled)
closure_return(cl); return;
mutex_lock(&c->bucket_lock); mutex_lock(&c->bucket_lock);
@ -242,13 +235,11 @@ void bch_moving_gc(struct closure *cl)
c->moving_gc_keys.last_scanned = ZERO_KEY; c->moving_gc_keys.last_scanned = ZERO_KEY;
closure_init(&c->moving_gc, cl); read_moving(c);
read_moving(&c->moving_gc);
closure_return(cl);
} }
void bch_moving_init_cache_set(struct cache_set *c) void bch_moving_init_cache_set(struct cache_set *c)
{ {
bch_keybuf_init(&c->moving_gc_keys); bch_keybuf_init(&c->moving_gc_keys);
sema_init(&c->moving_in_flight, 64);
} }
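The old atomic in_flight counter and moving_gc_wait waitlist are replaced by the counting semaphore initialized to 64 above: the scanner sleeps in down() once 64 copies are outstanding, and each completion returns a slot with up(). In isolation (the wrapper names here are hypothetical):

        static void submit_one_move(struct cache_set *c, struct moving_io *io,
                                    struct closure *cl)
        {
                down(&c->moving_in_flight);     /* blocks while 64 IOs are in flight */
                closure_call(&io->cl, read_moving_submit, NULL, cl);
        }

        static void one_move_done(struct moving_io *io)
        {
                up(&io->op.c->moving_in_flight);        /* frees a slot */
        }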

File diff suppressed because it is too large


@ -3,40 +3,33 @@
#include <linux/cgroup.h> #include <linux/cgroup.h>
struct search { struct data_insert_op {
/* Stack frame for bio_complete */
struct closure cl; struct closure cl;
struct cache_set *c;
struct bio *bio;
struct bcache_device *d; unsigned inode;
struct task_struct *task; uint16_t write_point;
uint16_t write_prio;
struct bbio bio;
struct bio *orig_bio;
struct bio *cache_miss;
unsigned cache_bio_sectors;
unsigned recoverable:1;
unsigned unaligned_bvec:1;
unsigned write:1;
unsigned writeback:1;
/* IO error returned to s->bio */
short error; short error;
unsigned long start_time;
/* Anything past op->keys won't get zeroed in do_bio_hook */ unsigned bypass:1;
struct btree_op op; unsigned writeback:1;
unsigned flush_journal:1;
unsigned csum:1;
unsigned replace:1;
unsigned replace_collision:1;
unsigned insert_data_done:1;
/* Anything past this point won't get zeroed in search_alloc() */
struct keylist insert_keys;
BKEY_PADDED(replace_key);
}; };
void bch_cache_read_endio(struct bio *, int);
unsigned bch_get_congested(struct cache_set *); unsigned bch_get_congested(struct cache_set *);
void bch_insert_data(struct closure *cl); void bch_data_insert(struct closure *cl);
void bch_btree_insert_async(struct closure *);
void bch_cache_read_endio(struct bio *, int);
void bch_open_buckets_free(struct cache_set *);
int bch_open_buckets_alloc(struct cache_set *);
void bch_cached_dev_request_init(struct cached_dev *dc); void bch_cached_dev_request_init(struct cached_dev *dc);
void bch_flash_dev_request_init(struct bcache_device *d); void bch_flash_dev_request_init(struct bcache_device *d);


@ -7,7 +7,6 @@
#include "bcache.h" #include "bcache.h"
#include "stats.h" #include "stats.h"
#include "btree.h" #include "btree.h"
#include "request.h"
#include "sysfs.h" #include "sysfs.h"
/* /*
@ -196,35 +195,36 @@ static void mark_cache_stats(struct cache_stat_collector *stats,
atomic_inc(&stats->cache_bypass_misses); atomic_inc(&stats->cache_bypass_misses);
} }
void bch_mark_cache_accounting(struct search *s, bool hit, bool bypass) void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d,
bool hit, bool bypass)
{ {
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); struct cached_dev *dc = container_of(d, struct cached_dev, disk);
mark_cache_stats(&dc->accounting.collector, hit, bypass); mark_cache_stats(&dc->accounting.collector, hit, bypass);
mark_cache_stats(&s->op.c->accounting.collector, hit, bypass); mark_cache_stats(&c->accounting.collector, hit, bypass);
#ifdef CONFIG_CGROUP_BCACHE #ifdef CONFIG_CGROUP_BCACHE
mark_cache_stats(&(bch_bio_to_cgroup(s->orig_bio)->stats), hit, bypass); mark_cache_stats(&(bch_bio_to_cgroup(s->orig_bio)->stats), hit, bypass);
#endif #endif
} }
void bch_mark_cache_readahead(struct search *s) void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d)
{ {
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); struct cached_dev *dc = container_of(d, struct cached_dev, disk);
atomic_inc(&dc->accounting.collector.cache_readaheads); atomic_inc(&dc->accounting.collector.cache_readaheads);
atomic_inc(&s->op.c->accounting.collector.cache_readaheads); atomic_inc(&c->accounting.collector.cache_readaheads);
} }
void bch_mark_cache_miss_collision(struct search *s) void bch_mark_cache_miss_collision(struct cache_set *c, struct bcache_device *d)
{ {
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); struct cached_dev *dc = container_of(d, struct cached_dev, disk);
atomic_inc(&dc->accounting.collector.cache_miss_collisions); atomic_inc(&dc->accounting.collector.cache_miss_collisions);
atomic_inc(&s->op.c->accounting.collector.cache_miss_collisions); atomic_inc(&c->accounting.collector.cache_miss_collisions);
} }
void bch_mark_sectors_bypassed(struct search *s, int sectors) void bch_mark_sectors_bypassed(struct cache_set *c, struct cached_dev *dc,
int sectors)
{ {
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
atomic_add(sectors, &dc->accounting.collector.sectors_bypassed); atomic_add(sectors, &dc->accounting.collector.sectors_bypassed);
atomic_add(sectors, &s->op.c->accounting.collector.sectors_bypassed); atomic_add(sectors, &c->accounting.collector.sectors_bypassed);
} }
void bch_cache_accounting_init(struct cache_accounting *acc, void bch_cache_accounting_init(struct cache_accounting *acc,


@ -38,7 +38,9 @@ struct cache_accounting {
struct cache_stats day; struct cache_stats day;
}; };
struct search; struct cache_set;
struct cached_dev;
struct bcache_device;
void bch_cache_accounting_init(struct cache_accounting *acc, void bch_cache_accounting_init(struct cache_accounting *acc,
struct closure *parent); struct closure *parent);
@ -50,9 +52,10 @@ void bch_cache_accounting_clear(struct cache_accounting *acc);
void bch_cache_accounting_destroy(struct cache_accounting *acc); void bch_cache_accounting_destroy(struct cache_accounting *acc);
void bch_mark_cache_accounting(struct search *s, bool hit, bool bypass); void bch_mark_cache_accounting(struct cache_set *, struct bcache_device *,
void bch_mark_cache_readahead(struct search *s); bool, bool);
void bch_mark_cache_miss_collision(struct search *s); void bch_mark_cache_readahead(struct cache_set *, struct bcache_device *);
void bch_mark_sectors_bypassed(struct search *s, int sectors); void bch_mark_cache_miss_collision(struct cache_set *, struct bcache_device *);
void bch_mark_sectors_bypassed(struct cache_set *, struct cached_dev *, int);
#endif /* _BCACHE_STATS_H_ */ #endif /* _BCACHE_STATS_H_ */
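The helpers now take the cache set and device explicitly instead of a struct search, which is what lets stats.c drop its request.h include. A hypothetical call site under the new signatures:

        bch_mark_cache_accounting(c, d, hit, bypass);
        bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));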


@ -16,6 +16,7 @@
#include <linux/buffer_head.h> #include <linux/buffer_head.h>
#include <linux/debugfs.h> #include <linux/debugfs.h>
#include <linux/genhd.h> #include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/kthread.h> #include <linux/kthread.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/random.h> #include <linux/random.h>
@ -45,21 +46,13 @@ const char * const bch_cache_modes[] = {
NULL NULL
}; };
struct uuid_entry_v0 {
uint8_t uuid[16];
uint8_t label[32];
uint32_t first_reg;
uint32_t last_reg;
uint32_t invalidated;
uint32_t pad;
};
static struct kobject *bcache_kobj; static struct kobject *bcache_kobj;
struct mutex bch_register_lock; struct mutex bch_register_lock;
LIST_HEAD(bch_cache_sets); LIST_HEAD(bch_cache_sets);
static LIST_HEAD(uncached_devices); static LIST_HEAD(uncached_devices);
static int bcache_major, bcache_minor; static int bcache_major;
static DEFINE_IDA(bcache_minor);
static wait_queue_head_t unregister_wait; static wait_queue_head_t unregister_wait;
struct workqueue_struct *bcache_wq; struct workqueue_struct *bcache_wq;
@ -382,7 +375,7 @@ static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
{ {
struct bkey *k = &j->uuid_bucket; struct bkey *k = &j->uuid_bucket;
if (__bch_ptr_invalid(c, 1, k)) if (bch_btree_ptr_invalid(c, k))
return "bad uuid pointer"; return "bad uuid pointer";
bkey_copy(&c->uuid_bucket, k); bkey_copy(&c->uuid_bucket, k);
@ -427,7 +420,7 @@ static int __uuid_write(struct cache_set *c)
lockdep_assert_held(&bch_register_lock); lockdep_assert_held(&bch_register_lock);
if (bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, &cl)) if (bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, true))
return 1; return 1;
SET_KEY_SIZE(&k.key, c->sb.bucket_size); SET_KEY_SIZE(&k.key, c->sb.bucket_size);
@ -435,7 +428,7 @@ static int __uuid_write(struct cache_set *c)
closure_sync(&cl); closure_sync(&cl);
bkey_copy(&c->uuid_bucket, &k.key); bkey_copy(&c->uuid_bucket, &k.key);
__bkey_put(c, &k.key); bkey_put(c, &k.key);
return 0; return 0;
} }
@ -562,10 +555,10 @@ void bch_prio_write(struct cache *ca)
} }
p->next_bucket = ca->prio_buckets[i + 1]; p->next_bucket = ca->prio_buckets[i + 1];
p->magic = pset_magic(ca); p->magic = pset_magic(&ca->sb);
p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8); p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8);
bucket = bch_bucket_alloc(ca, WATERMARK_PRIO, &cl); bucket = bch_bucket_alloc(ca, WATERMARK_PRIO, true);
BUG_ON(bucket == -1); BUG_ON(bucket == -1);
mutex_unlock(&ca->set->bucket_lock); mutex_unlock(&ca->set->bucket_lock);
@ -613,7 +606,7 @@ static void prio_read(struct cache *ca, uint64_t bucket)
if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8)) if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
pr_warn("bad csum reading priorities"); pr_warn("bad csum reading priorities");
if (p->magic != pset_magic(ca)) if (p->magic != pset_magic(&ca->sb))
pr_warn("bad magic reading priorities"); pr_warn("bad magic reading priorities");
bucket = p->next_bucket; bucket = p->next_bucket;
@ -630,7 +623,7 @@ static void prio_read(struct cache *ca, uint64_t bucket)
static int open_dev(struct block_device *b, fmode_t mode) static int open_dev(struct block_device *b, fmode_t mode)
{ {
struct bcache_device *d = b->bd_disk->private_data; struct bcache_device *d = b->bd_disk->private_data;
if (atomic_read(&d->closing)) if (test_bit(BCACHE_DEV_CLOSING, &d->flags))
return -ENXIO; return -ENXIO;
closure_get(&d->cl); closure_get(&d->cl);
@ -659,20 +652,24 @@ static const struct block_device_operations bcache_ops = {
void bcache_device_stop(struct bcache_device *d) void bcache_device_stop(struct bcache_device *d)
{ {
if (!atomic_xchg(&d->closing, 1)) if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags))
closure_queue(&d->cl); closure_queue(&d->cl);
} }
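The per-device atomic_t counters become bits in d->flags. test_and_set_bit() returns the bit's previous value, so stop and detach each run at most once no matter how many callers race; sketched:

        /* run-once stop, as in bcache_device_stop() above */
        if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags))
                closure_queue(&d->cl);  /* only the first caller queues */

        /* detach, as in bch_cached_dev_detach() below: refuse while
         * closing, and make the transition itself one-shot */
        if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
                return;
        if (test_and_set_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
                return;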
static void bcache_device_unlink(struct bcache_device *d) static void bcache_device_unlink(struct bcache_device *d)
{ {
unsigned i; lockdep_assert_held(&bch_register_lock);
struct cache *ca;
sysfs_remove_link(&d->c->kobj, d->name); if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) {
sysfs_remove_link(&d->kobj, "cache"); unsigned i;
struct cache *ca;
for_each_cache(ca, d->c, i) sysfs_remove_link(&d->c->kobj, d->name);
bd_unlink_disk_holder(ca->bdev, d->disk); sysfs_remove_link(&d->kobj, "cache");
for_each_cache(ca, d->c, i)
bd_unlink_disk_holder(ca->bdev, d->disk);
}
} }
static void bcache_device_link(struct bcache_device *d, struct cache_set *c, static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
@ -696,19 +693,16 @@ static void bcache_device_detach(struct bcache_device *d)
{ {
lockdep_assert_held(&bch_register_lock); lockdep_assert_held(&bch_register_lock);
if (atomic_read(&d->detaching)) { if (test_bit(BCACHE_DEV_DETACHING, &d->flags)) {
struct uuid_entry *u = d->c->uuids + d->id; struct uuid_entry *u = d->c->uuids + d->id;
SET_UUID_FLASH_ONLY(u, 0); SET_UUID_FLASH_ONLY(u, 0);
memcpy(u->uuid, invalid_uuid, 16); memcpy(u->uuid, invalid_uuid, 16);
u->invalidated = cpu_to_le32(get_seconds()); u->invalidated = cpu_to_le32(get_seconds());
bch_uuid_write(d->c); bch_uuid_write(d->c);
atomic_set(&d->detaching, 0);
} }
if (!d->flush_done) bcache_device_unlink(d);
bcache_device_unlink(d);
d->c->devices[d->id] = NULL; d->c->devices[d->id] = NULL;
closure_put(&d->c->caching); closure_put(&d->c->caching);
@ -739,14 +733,20 @@ static void bcache_device_free(struct bcache_device *d)
del_gendisk(d->disk); del_gendisk(d->disk);
if (d->disk && d->disk->queue) if (d->disk && d->disk->queue)
blk_cleanup_queue(d->disk->queue); blk_cleanup_queue(d->disk->queue);
if (d->disk) if (d->disk) {
ida_simple_remove(&bcache_minor, d->disk->first_minor);
put_disk(d->disk); put_disk(d->disk);
}
bio_split_pool_free(&d->bio_split_hook); bio_split_pool_free(&d->bio_split_hook);
if (d->unaligned_bvec) if (d->unaligned_bvec)
mempool_destroy(d->unaligned_bvec); mempool_destroy(d->unaligned_bvec);
if (d->bio_split) if (d->bio_split)
bioset_free(d->bio_split); bioset_free(d->bio_split);
if (is_vmalloc_addr(d->full_dirty_stripes))
vfree(d->full_dirty_stripes);
else
kfree(d->full_dirty_stripes);
if (is_vmalloc_addr(d->stripe_sectors_dirty)) if (is_vmalloc_addr(d->stripe_sectors_dirty))
vfree(d->stripe_sectors_dirty); vfree(d->stripe_sectors_dirty);
else else
@ -760,15 +760,19 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
{ {
struct request_queue *q; struct request_queue *q;
size_t n; size_t n;
int minor;
if (!d->stripe_size_bits) if (!d->stripe_size)
d->stripe_size_bits = 31; d->stripe_size = 1 << 31;
d->nr_stripes = round_up(sectors, 1 << d->stripe_size_bits) >> d->nr_stripes = DIV_ROUND_UP_ULL(sectors, d->stripe_size);
d->stripe_size_bits;
if (!d->nr_stripes || d->nr_stripes > SIZE_MAX / sizeof(atomic_t)) if (!d->nr_stripes ||
d->nr_stripes > INT_MAX ||
d->nr_stripes > SIZE_MAX / sizeof(atomic_t)) {
pr_err("nr_stripes too large");
return -ENOMEM; return -ENOMEM;
}
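Dropping stripe_size_bits means the stripe size no longer needs to be a power of two; the stripe count is a plain rounded-up division. With hypothetical numbers:

        uint64_t sectors = 1000;        /* device size */
        uint64_t stripe_size = 64;      /* sectors per stripe */

        unsigned nr_stripes = DIV_ROUND_UP_ULL(sectors, stripe_size); /* == 16 */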
n = d->nr_stripes * sizeof(atomic_t); n = d->nr_stripes * sizeof(atomic_t);
d->stripe_sectors_dirty = n < PAGE_SIZE << 6 d->stripe_sectors_dirty = n < PAGE_SIZE << 6
@ -777,22 +781,38 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
if (!d->stripe_sectors_dirty) if (!d->stripe_sectors_dirty)
return -ENOMEM; return -ENOMEM;
n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long);
d->full_dirty_stripes = n < PAGE_SIZE << 6
? kzalloc(n, GFP_KERNEL)
: vzalloc(n);
if (!d->full_dirty_stripes)
return -ENOMEM;
minor = ida_simple_get(&bcache_minor, 0, MINORMASK + 1, GFP_KERNEL);
if (minor < 0)
return minor;
if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) || if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
!(d->unaligned_bvec = mempool_create_kmalloc_pool(1, !(d->unaligned_bvec = mempool_create_kmalloc_pool(1,
sizeof(struct bio_vec) * BIO_MAX_PAGES)) || sizeof(struct bio_vec) * BIO_MAX_PAGES)) ||
bio_split_pool_init(&d->bio_split_hook) || bio_split_pool_init(&d->bio_split_hook) ||
!(d->disk = alloc_disk(1)) || !(d->disk = alloc_disk(1))) {
!(q = blk_alloc_queue(GFP_KERNEL))) ida_simple_remove(&bcache_minor, minor);
return -ENOMEM; return -ENOMEM;
}
set_capacity(d->disk, sectors); set_capacity(d->disk, sectors);
snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", bcache_minor); snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", minor);
d->disk->major = bcache_major; d->disk->major = bcache_major;
d->disk->first_minor = bcache_minor++; d->disk->first_minor = minor;
d->disk->fops = &bcache_ops; d->disk->fops = &bcache_ops;
d->disk->private_data = d; d->disk->private_data = d;
q = blk_alloc_queue(GFP_KERNEL);
if (!q)
return -ENOMEM;
blk_queue_make_request(q, NULL); blk_queue_make_request(q, NULL);
d->disk->queue = q; d->disk->queue = q;
q->queuedata = d; q->queuedata = d;
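Minor numbers now come from an IDA rather than the old ever-increasing counter, so minors of deleted devices are reused and the MINORMASK bound is enforced. The allocate/release pair in isolation (the wrapper names are hypothetical):

        #include <linux/idr.h>

        static DEFINE_IDA(bcache_minor);

        static int bcache_minor_get(void)
        {
                /* a free id in [0, MINORMASK], or a negative errno */
                return ida_simple_get(&bcache_minor, 0, MINORMASK + 1, GFP_KERNEL);
        }

        static void bcache_minor_put(int minor)
        {
                ida_simple_remove(&bcache_minor, minor);
        }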
@ -874,7 +894,7 @@ static void cached_dev_detach_finish(struct work_struct *w)
struct closure cl; struct closure cl;
closure_init_stack(&cl); closure_init_stack(&cl);
BUG_ON(!atomic_read(&dc->disk.detaching)); BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
BUG_ON(atomic_read(&dc->count)); BUG_ON(atomic_read(&dc->count));
mutex_lock(&bch_register_lock); mutex_lock(&bch_register_lock);
@ -888,6 +908,8 @@ static void cached_dev_detach_finish(struct work_struct *w)
bcache_device_detach(&dc->disk); bcache_device_detach(&dc->disk);
list_move(&dc->list, &uncached_devices); list_move(&dc->list, &uncached_devices);
clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags);
mutex_unlock(&bch_register_lock); mutex_unlock(&bch_register_lock);
pr_info("Caching disabled for %s", bdevname(dc->bdev, buf)); pr_info("Caching disabled for %s", bdevname(dc->bdev, buf));
@ -900,10 +922,10 @@ void bch_cached_dev_detach(struct cached_dev *dc)
{ {
lockdep_assert_held(&bch_register_lock); lockdep_assert_held(&bch_register_lock);
if (atomic_read(&dc->disk.closing)) if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
return; return;
if (atomic_xchg(&dc->disk.detaching, 1)) if (test_and_set_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
return; return;
/* /*
@ -1030,6 +1052,7 @@ static void cached_dev_free(struct closure *cl)
struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
cancel_delayed_work_sync(&dc->writeback_rate_update); cancel_delayed_work_sync(&dc->writeback_rate_update);
kthread_stop(dc->writeback_thread);
mutex_lock(&bch_register_lock); mutex_lock(&bch_register_lock);
@ -1058,11 +1081,7 @@ static void cached_dev_flush(struct closure *cl)
struct bcache_device *d = &dc->disk; struct bcache_device *d = &dc->disk;
mutex_lock(&bch_register_lock); mutex_lock(&bch_register_lock);
d->flush_done = 1; bcache_device_unlink(d);
if (d->c)
bcache_device_unlink(d);
mutex_unlock(&bch_register_lock); mutex_unlock(&bch_register_lock);
bch_cache_accounting_destroy(&dc->accounting); bch_cache_accounting_destroy(&dc->accounting);
@ -1088,7 +1107,6 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
spin_lock_init(&dc->io_lock); spin_lock_init(&dc->io_lock);
bch_cache_accounting_init(&dc->accounting, &dc->disk.cl); bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
dc->sequential_merge = true;
dc->sequential_cutoff = 4 << 20; dc->sequential_cutoff = 4 << 20;
for (io = dc->io; io < dc->io + RECENT_IO; io++) { for (io = dc->io; io < dc->io + RECENT_IO; io++) {
@ -1260,7 +1278,8 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
{ {
va_list args; va_list args;
if (test_bit(CACHE_SET_STOPPING, &c->flags)) if (c->on_error != ON_ERROR_PANIC &&
test_bit(CACHE_SET_STOPPING, &c->flags))
return false; return false;
/* XXX: we can be called from atomic context /* XXX: we can be called from atomic context
@ -1275,6 +1294,9 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
printk(", disabling caching\n"); printk(", disabling caching\n");
if (c->on_error == ON_ERROR_PANIC)
panic("panic forced after error\n");
bch_cache_set_unregister(c); bch_cache_set_unregister(c);
return true; return true;
} }
@ -1339,6 +1361,9 @@ static void cache_set_flush(struct closure *cl)
kobject_put(&c->internal); kobject_put(&c->internal);
kobject_del(&c->kobj); kobject_del(&c->kobj);
if (c->gc_thread)
kthread_stop(c->gc_thread);
if (!IS_ERR_OR_NULL(c->root)) if (!IS_ERR_OR_NULL(c->root))
list_add(&c->root->list, &c->btree_cache); list_add(&c->root->list, &c->btree_cache);
@ -1433,12 +1458,19 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
c->sort_crit_factor = int_sqrt(c->btree_pages); c->sort_crit_factor = int_sqrt(c->btree_pages);
mutex_init(&c->bucket_lock);
mutex_init(&c->sort_lock);
spin_lock_init(&c->sort_time_lock);
closure_init_unlocked(&c->sb_write); closure_init_unlocked(&c->sb_write);
mutex_init(&c->bucket_lock);
init_waitqueue_head(&c->try_wait);
init_waitqueue_head(&c->bucket_wait);
closure_init_unlocked(&c->uuid_write); closure_init_unlocked(&c->uuid_write);
spin_lock_init(&c->btree_read_time_lock); mutex_init(&c->sort_lock);
spin_lock_init(&c->sort_time.lock);
spin_lock_init(&c->btree_gc_time.lock);
spin_lock_init(&c->btree_split_time.lock);
spin_lock_init(&c->btree_read_time.lock);
spin_lock_init(&c->try_harder_time.lock);
bch_moving_init_cache_set(c); bch_moving_init_cache_set(c);
INIT_LIST_HEAD(&c->list); INIT_LIST_HEAD(&c->list);
@ -1483,11 +1515,10 @@ static void run_cache_set(struct cache_set *c)
const char *err = "cannot allocate memory"; const char *err = "cannot allocate memory";
struct cached_dev *dc, *t; struct cached_dev *dc, *t;
struct cache *ca; struct cache *ca;
struct closure cl;
unsigned i; unsigned i;
struct btree_op op; closure_init_stack(&cl);
bch_btree_op_init_stack(&op);
op.lock = SHRT_MAX;
for_each_cache(ca, c, i) for_each_cache(ca, c, i)
c->nbuckets += ca->sb.nbuckets; c->nbuckets += ca->sb.nbuckets;
@ -1498,7 +1529,7 @@ static void run_cache_set(struct cache_set *c)
struct jset *j; struct jset *j;
err = "cannot allocate memory for journal"; err = "cannot allocate memory for journal";
if (bch_journal_read(c, &journal, &op)) if (bch_journal_read(c, &journal))
goto err; goto err;
pr_debug("btree_journal_read() done"); pr_debug("btree_journal_read() done");
@ -1522,23 +1553,23 @@ static void run_cache_set(struct cache_set *c)
k = &j->btree_root; k = &j->btree_root;
err = "bad btree root"; err = "bad btree root";
if (__bch_ptr_invalid(c, j->btree_level + 1, k)) if (bch_btree_ptr_invalid(c, k))
goto err; goto err;
err = "error reading btree root"; err = "error reading btree root";
c->root = bch_btree_node_get(c, k, j->btree_level, &op); c->root = bch_btree_node_get(c, k, j->btree_level, true);
if (IS_ERR_OR_NULL(c->root)) if (IS_ERR_OR_NULL(c->root))
goto err; goto err;
list_del_init(&c->root->list); list_del_init(&c->root->list);
rw_unlock(true, c->root); rw_unlock(true, c->root);
err = uuid_read(c, j, &op.cl); err = uuid_read(c, j, &cl);
if (err) if (err)
goto err; goto err;
err = "error in recovery"; err = "error in recovery";
if (bch_btree_check(c, &op)) if (bch_btree_check(c))
goto err; goto err;
bch_journal_mark(c, &journal); bch_journal_mark(c, &journal);
@ -1570,11 +1601,9 @@ static void run_cache_set(struct cache_set *c)
if (j->version < BCACHE_JSET_VERSION_UUID) if (j->version < BCACHE_JSET_VERSION_UUID)
__uuid_write(c); __uuid_write(c);
bch_journal_replay(c, &journal, &op); bch_journal_replay(c, &journal);
} else { } else {
pr_notice("invalidating existing data"); pr_notice("invalidating existing data");
/* Don't want invalidate_buckets() to queue a gc yet */
closure_lock(&c->gc, NULL);
for_each_cache(ca, c, i) { for_each_cache(ca, c, i) {
unsigned j; unsigned j;
@ -1600,15 +1629,15 @@ static void run_cache_set(struct cache_set *c)
err = "cannot allocate new UUID bucket"; err = "cannot allocate new UUID bucket";
if (__uuid_write(c)) if (__uuid_write(c))
goto err_unlock_gc; goto err;
err = "cannot allocate new btree root"; err = "cannot allocate new btree root";
c->root = bch_btree_node_alloc(c, 0, &op.cl); c->root = bch_btree_node_alloc(c, 0, true);
if (IS_ERR_OR_NULL(c->root)) if (IS_ERR_OR_NULL(c->root))
goto err_unlock_gc; goto err;
bkey_copy_key(&c->root->key, &MAX_KEY); bkey_copy_key(&c->root->key, &MAX_KEY);
bch_btree_node_write(c->root, &op.cl); bch_btree_node_write(c->root, &cl);
bch_btree_set_root(c->root); bch_btree_set_root(c->root);
rw_unlock(true, c->root); rw_unlock(true, c->root);
@ -1621,14 +1650,14 @@ static void run_cache_set(struct cache_set *c)
SET_CACHE_SYNC(&c->sb, true); SET_CACHE_SYNC(&c->sb, true);
bch_journal_next(&c->journal); bch_journal_next(&c->journal);
bch_journal_meta(c, &op.cl); bch_journal_meta(c, &cl);
/* Unlock */
closure_set_stopped(&c->gc.cl);
closure_put(&c->gc.cl);
} }
closure_sync(&op.cl); err = "error starting gc thread";
if (bch_gc_thread_start(c))
goto err;
closure_sync(&cl);
c->sb.last_mount = get_seconds(); c->sb.last_mount = get_seconds();
bcache_write_super(c); bcache_write_super(c);
@ -1638,13 +1667,10 @@ static void run_cache_set(struct cache_set *c)
flash_devs_run(c); flash_devs_run(c);
return; return;
err_unlock_gc:
closure_set_stopped(&c->gc.cl);
closure_put(&c->gc.cl);
err: err:
closure_sync(&op.cl); closure_sync(&cl);
/* XXX: test this, it's broken */ /* XXX: test this, it's broken */
bch_cache_set_error(c, err); bch_cache_set_error(c, "%s", err);
} }
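Note the error path: err is now passed through a "%s" format rather than being used as the format string itself - the 'defensively handle format strings' fix from this series. The general pattern:

        const char *err = "message that might contain %s or %n";

        bch_cache_set_error(c, "%s", err);      /* safe */
        /* bch_cache_set_error(c, err) would let any '%' sequences in err
         * be interpreted as conversions */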
static bool can_attach_cache(struct cache *ca, struct cache_set *c) static bool can_attach_cache(struct cache *ca, struct cache_set *c)
@ -1725,8 +1751,6 @@ void bch_cache_release(struct kobject *kobj)
if (ca->set) if (ca->set)
ca->set->cache[ca->sb.nr_this_dev] = NULL; ca->set->cache[ca->sb.nr_this_dev] = NULL;
bch_cache_allocator_exit(ca);
bio_split_pool_free(&ca->bio_split_hook); bio_split_pool_free(&ca->bio_split_hook);
free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca))); free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
@ -1758,8 +1782,6 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
__module_get(THIS_MODULE); __module_get(THIS_MODULE);
kobject_init(&ca->kobj, &bch_cache_ktype); kobject_init(&ca->kobj, &bch_cache_ktype);
INIT_LIST_HEAD(&ca->discards);
bio_init(&ca->journal.bio); bio_init(&ca->journal.bio);
ca->journal.bio.bi_max_vecs = 8; ca->journal.bio.bi_max_vecs = 8;
ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs; ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;
@ -2006,7 +2028,6 @@ static struct notifier_block reboot = {
static void bcache_exit(void) static void bcache_exit(void)
{ {
bch_debug_exit(); bch_debug_exit();
bch_writeback_exit();
bch_request_exit(); bch_request_exit();
bch_btree_exit(); bch_btree_exit();
if (bcache_kobj) if (bcache_kobj)
@ -2039,7 +2060,6 @@ static int __init bcache_init(void)
sysfs_create_files(bcache_kobj, files) || sysfs_create_files(bcache_kobj, files) ||
bch_btree_init() || bch_btree_init() ||
bch_request_init() || bch_request_init() ||
bch_writeback_init() ||
bch_debug_init(bcache_kobj)) bch_debug_init(bcache_kobj))
goto err; goto err;


@ -21,6 +21,12 @@ static const char * const cache_replacement_policies[] = {
NULL NULL
}; };
static const char * const error_actions[] = {
"unregister",
"panic",
NULL
};
write_attribute(attach); write_attribute(attach);
write_attribute(detach); write_attribute(detach);
write_attribute(unregister); write_attribute(unregister);
@ -66,7 +72,6 @@ rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us); rw_attribute(congested_write_threshold_us);
rw_attribute(sequential_cutoff); rw_attribute(sequential_cutoff);
rw_attribute(sequential_merge);
rw_attribute(data_csum); rw_attribute(data_csum);
rw_attribute(cache_mode); rw_attribute(cache_mode);
rw_attribute(writeback_metadata); rw_attribute(writeback_metadata);
@ -90,11 +95,14 @@ rw_attribute(discard);
rw_attribute(running); rw_attribute(running);
rw_attribute(label); rw_attribute(label);
rw_attribute(readahead); rw_attribute(readahead);
rw_attribute(errors);
rw_attribute(io_error_limit); rw_attribute(io_error_limit);
rw_attribute(io_error_halflife); rw_attribute(io_error_halflife);
rw_attribute(verify); rw_attribute(verify);
rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled); rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite); rw_attribute(gc_always_rewrite);
rw_attribute(expensive_debug_checks);
rw_attribute(freelist_percent); rw_attribute(freelist_percent);
rw_attribute(cache_replacement_policy); rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled); rw_attribute(btree_shrinker_disabled);
@ -116,6 +124,7 @@ SHOW(__bch_cached_dev)
sysfs_printf(data_csum, "%i", dc->disk.data_csum); sysfs_printf(data_csum, "%i", dc->disk.data_csum);
var_printf(verify, "%i"); var_printf(verify, "%i");
var_printf(bypass_torture_test, "%i");
var_printf(writeback_metadata, "%i"); var_printf(writeback_metadata, "%i");
var_printf(writeback_running, "%i"); var_printf(writeback_running, "%i");
var_print(writeback_delay); var_print(writeback_delay);
@ -150,10 +159,9 @@ SHOW(__bch_cached_dev)
sysfs_hprint(dirty_data, sysfs_hprint(dirty_data,
bcache_dev_sectors_dirty(&dc->disk) << 9); bcache_dev_sectors_dirty(&dc->disk) << 9);
sysfs_hprint(stripe_size, (1 << dc->disk.stripe_size_bits) << 9); sysfs_hprint(stripe_size, dc->disk.stripe_size << 9);
var_printf(partial_stripes_expensive, "%u"); var_printf(partial_stripes_expensive, "%u");
var_printf(sequential_merge, "%i");
var_hprint(sequential_cutoff); var_hprint(sequential_cutoff);
var_hprint(readahead); var_hprint(readahead);
@ -185,6 +193,7 @@ STORE(__cached_dev)
sysfs_strtoul(data_csum, dc->disk.data_csum); sysfs_strtoul(data_csum, dc->disk.data_csum);
d_strtoul(verify); d_strtoul(verify);
d_strtoul(bypass_torture_test);
d_strtoul(writeback_metadata); d_strtoul(writeback_metadata);
d_strtoul(writeback_running); d_strtoul(writeback_running);
d_strtoul(writeback_delay); d_strtoul(writeback_delay);
@ -199,7 +208,6 @@ STORE(__cached_dev)
dc->writeback_rate_p_term_inverse, 1, INT_MAX); dc->writeback_rate_p_term_inverse, 1, INT_MAX);
d_strtoul(writeback_rate_d_smooth); d_strtoul(writeback_rate_d_smooth);
d_strtoul(sequential_merge);
d_strtoi_h(sequential_cutoff); d_strtoi_h(sequential_cutoff);
d_strtoi_h(readahead); d_strtoi_h(readahead);
@ -311,7 +319,6 @@ static struct attribute *bch_cached_dev_files[] = {
&sysfs_stripe_size, &sysfs_stripe_size,
&sysfs_partial_stripes_expensive, &sysfs_partial_stripes_expensive,
&sysfs_sequential_cutoff, &sysfs_sequential_cutoff,
&sysfs_sequential_merge,
&sysfs_clear_stats, &sysfs_clear_stats,
&sysfs_running, &sysfs_running,
&sysfs_state, &sysfs_state,
@ -319,6 +326,7 @@ static struct attribute *bch_cached_dev_files[] = {
&sysfs_readahead, &sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG #ifdef CONFIG_BCACHE_DEBUG
&sysfs_verify, &sysfs_verify,
&sysfs_bypass_torture_test,
#endif #endif
NULL NULL
}; };
@ -366,7 +374,7 @@ STORE(__bch_flash_dev)
} }
if (attr == &sysfs_unregister) { if (attr == &sysfs_unregister) {
atomic_set(&d->detaching, 1); set_bit(BCACHE_DEV_DETACHING, &d->flags);
bcache_device_stop(d); bcache_device_stop(d);
} }
@ -481,7 +489,6 @@ lock_root:
sysfs_print(btree_used_percent, btree_used(c)); sysfs_print(btree_used_percent, btree_used(c));
sysfs_print(btree_nodes, c->gc_stats.nodes); sysfs_print(btree_nodes, c->gc_stats.nodes);
sysfs_hprint(dirty_data, c->gc_stats.dirty);
sysfs_hprint(average_key_size, average_key_size(c)); sysfs_hprint(average_key_size, average_key_size(c));
sysfs_print(cache_read_races, sysfs_print(cache_read_races,
@ -492,6 +499,10 @@ lock_root:
sysfs_print(writeback_keys_failed, sysfs_print(writeback_keys_failed,
atomic_long_read(&c->writeback_keys_failed)); atomic_long_read(&c->writeback_keys_failed));
if (attr == &sysfs_errors)
return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
c->on_error);
/* See count_io_errors for why 88 */ /* See count_io_errors for why 88 */
sysfs_print(io_error_halflife, c->error_decay * 88); sysfs_print(io_error_halflife, c->error_decay * 88);
sysfs_print(io_error_limit, c->error_limit >> IO_ERROR_SHIFT); sysfs_print(io_error_limit, c->error_limit >> IO_ERROR_SHIFT);
@ -506,6 +517,8 @@ lock_root:
sysfs_print(active_journal_entries, fifo_used(&c->journal.pin)); sysfs_print(active_journal_entries, fifo_used(&c->journal.pin));
sysfs_printf(verify, "%i", c->verify); sysfs_printf(verify, "%i", c->verify);
sysfs_printf(key_merging_disabled, "%i", c->key_merging_disabled); sysfs_printf(key_merging_disabled, "%i", c->key_merging_disabled);
sysfs_printf(expensive_debug_checks,
"%i", c->expensive_debug_checks);
sysfs_printf(gc_always_rewrite, "%i", c->gc_always_rewrite); sysfs_printf(gc_always_rewrite, "%i", c->gc_always_rewrite);
sysfs_printf(btree_shrinker_disabled, "%i", c->shrinker_disabled); sysfs_printf(btree_shrinker_disabled, "%i", c->shrinker_disabled);
sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled); sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);
@ -555,7 +568,7 @@ STORE(__bch_cache_set)
} }
if (attr == &sysfs_trigger_gc) if (attr == &sysfs_trigger_gc)
bch_queue_gc(c); wake_up_gc(c);
if (attr == &sysfs_prune_cache) { if (attr == &sysfs_prune_cache) {
struct shrink_control sc; struct shrink_control sc;
@ -569,6 +582,15 @@ STORE(__bch_cache_set)
sysfs_strtoul(congested_write_threshold_us, sysfs_strtoul(congested_write_threshold_us,
c->congested_write_threshold_us); c->congested_write_threshold_us);
if (attr == &sysfs_errors) {
ssize_t v = bch_read_string_list(buf, error_actions);
if (v < 0)
return v;
c->on_error = v;
}
if (attr == &sysfs_io_error_limit) if (attr == &sysfs_io_error_limit)
c->error_limit = strtoul_or_return(buf) << IO_ERROR_SHIFT; c->error_limit = strtoul_or_return(buf) << IO_ERROR_SHIFT;
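The errors attribute round-trips through the NULL-terminated error_actions table via the string-list helpers declared in util.h. Assuming those helpers behave as their names suggest, the two directions pair up as:

        /* store: maps "unregister" -> 0 and "panic" -> 1 (ON_ERROR_PANIC,
         * as tested in super.c); a negative return means no match */
        ssize_t v = bch_read_string_list(buf, error_actions);
        if (v < 0)
                return v;
        c->on_error = v;

        /* show: prints the table with the current choice marked */
        return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
                                       c->on_error);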
@ -579,6 +601,7 @@ STORE(__bch_cache_set)
sysfs_strtoul(journal_delay_ms, c->journal_delay_ms); sysfs_strtoul(journal_delay_ms, c->journal_delay_ms);
sysfs_strtoul(verify, c->verify); sysfs_strtoul(verify, c->verify);
sysfs_strtoul(key_merging_disabled, c->key_merging_disabled); sysfs_strtoul(key_merging_disabled, c->key_merging_disabled);
sysfs_strtoul(expensive_debug_checks, c->expensive_debug_checks);
sysfs_strtoul(gc_always_rewrite, c->gc_always_rewrite); sysfs_strtoul(gc_always_rewrite, c->gc_always_rewrite);
sysfs_strtoul(btree_shrinker_disabled, c->shrinker_disabled); sysfs_strtoul(btree_shrinker_disabled, c->shrinker_disabled);
sysfs_strtoul(copy_gc_enabled, c->copy_gc_enabled); sysfs_strtoul(copy_gc_enabled, c->copy_gc_enabled);
@ -618,8 +641,8 @@ static struct attribute *bch_cache_set_files[] = {
&sysfs_cache_available_percent, &sysfs_cache_available_percent,
&sysfs_average_key_size, &sysfs_average_key_size,
&sysfs_dirty_data,
&sysfs_errors,
&sysfs_io_error_limit, &sysfs_io_error_limit,
&sysfs_io_error_halflife, &sysfs_io_error_halflife,
&sysfs_congested, &sysfs_congested,
@ -653,6 +676,7 @@ static struct attribute *bch_cache_set_internal_files[] = {
#ifdef CONFIG_BCACHE_DEBUG #ifdef CONFIG_BCACHE_DEBUG
&sysfs_verify, &sysfs_verify,
&sysfs_key_merging_disabled, &sysfs_key_merging_disabled,
&sysfs_expensive_debug_checks,
#endif #endif
&sysfs_gc_always_rewrite, &sysfs_gc_always_rewrite,
&sysfs_btree_shrinker_disabled, &sysfs_btree_shrinker_disabled,


@ -1,6 +1,5 @@
#include "bcache.h" #include "bcache.h"
#include "btree.h" #include "btree.h"
#include "request.h"
#include <linux/blktrace_api.h> #include <linux/blktrace_api.h>
#include <linux/module.h> #include <linux/module.h>


@ -168,10 +168,14 @@ int bch_parse_uuid(const char *s, char *uuid)
void bch_time_stats_update(struct time_stats *stats, uint64_t start_time) void bch_time_stats_update(struct time_stats *stats, uint64_t start_time)
{ {
uint64_t now = local_clock(); uint64_t now, duration, last;
uint64_t duration = time_after64(now, start_time)
spin_lock(&stats->lock);
now = local_clock();
duration = time_after64(now, start_time)
? now - start_time : 0; ? now - start_time : 0;
uint64_t last = time_after64(now, stats->last) last = time_after64(now, stats->last)
? now - stats->last : 0; ? now - stats->last : 0;
stats->max_duration = max(stats->max_duration, duration); stats->max_duration = max(stats->max_duration, duration);
@ -188,6 +192,8 @@ void bch_time_stats_update(struct time_stats *stats, uint64_t start_time)
} }
stats->last = now ?: 1; stats->last = now ?: 1;
spin_unlock(&stats->lock);
} }
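With the lock moved into struct time_stats, updates serialize themselves; the flip side is that every instance must have its lock initialized, which is why bch_cache_set_alloc() gained the spin_lock_init() calls seen earlier. The new contract, sketched on a hypothetical instance:

        struct time_stats stats;
        uint64_t start;

        spin_lock_init(&stats.lock);            /* once, before first update */

        start = local_clock();
        /* ... timed operation ... */
        bch_time_stats_update(&stats, start);   /* takes stats.lock internally */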
/** /**


@ -15,28 +15,18 @@
struct closure; struct closure;
#ifdef CONFIG_BCACHE_EDEBUG #ifdef CONFIG_BCACHE_DEBUG
#define atomic_dec_bug(v) BUG_ON(atomic_dec_return(v) < 0) #define atomic_dec_bug(v) BUG_ON(atomic_dec_return(v) < 0)
#define atomic_inc_bug(v, i) BUG_ON(atomic_inc_return(v) <= i) #define atomic_inc_bug(v, i) BUG_ON(atomic_inc_return(v) <= i)
#else /* EDEBUG */ #else /* DEBUG */
#define atomic_dec_bug(v) atomic_dec(v) #define atomic_dec_bug(v) atomic_dec(v)
#define atomic_inc_bug(v, i) atomic_inc(v) #define atomic_inc_bug(v, i) atomic_inc(v)
#endif #endif
#define BITMASK(name, type, field, offset, size) \
static inline uint64_t name(const type *k) \
{ return (k->field >> offset) & ~(((uint64_t) ~0) << size); } \
\
static inline void SET_##name(type *k, uint64_t v) \
{ \
k->field &= ~(~((uint64_t) ~0 << size) << offset); \
k->field |= v << offset; \
}
#define DECLARE_HEAP(type, name) \ #define DECLARE_HEAP(type, name) \
struct { \ struct { \
size_t size, used; \ size_t size, used; \
@ -388,6 +378,7 @@ ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[
ssize_t bch_read_string_list(const char *buf, const char * const list[]); ssize_t bch_read_string_list(const char *buf, const char * const list[]);
struct time_stats { struct time_stats {
spinlock_t lock;
/* /*
* all fields are in nanoseconds, averages are ewmas stored left shifted * all fields are in nanoseconds, averages are ewmas stored left shifted
* by 8 * by 8


@ -11,18 +11,11 @@
#include "debug.h" #include "debug.h"
#include "writeback.h" #include "writeback.h"
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <trace/events/bcache.h> #include <trace/events/bcache.h>
static struct workqueue_struct *dirty_wq;
static void read_dirty(struct closure *);
struct dirty_io {
struct closure cl;
struct cached_dev *dc;
struct bio bio;
};
/* Rate limiting */ /* Rate limiting */
static void __update_writeback_rate(struct cached_dev *dc) static void __update_writeback_rate(struct cached_dev *dc)
@ -72,9 +65,6 @@ out:
dc->writeback_rate_derivative = derivative; dc->writeback_rate_derivative = derivative;
dc->writeback_rate_change = change; dc->writeback_rate_change = change;
dc->writeback_rate_target = target; dc->writeback_rate_target = target;
schedule_delayed_work(&dc->writeback_rate_update,
dc->writeback_rate_update_seconds * HZ);
} }
static void update_writeback_rate(struct work_struct *work) static void update_writeback_rate(struct work_struct *work)
@ -90,13 +80,16 @@ static void update_writeback_rate(struct work_struct *work)
__update_writeback_rate(dc); __update_writeback_rate(dc);
up_read(&dc->writeback_lock); up_read(&dc->writeback_lock);
schedule_delayed_work(&dc->writeback_rate_update,
dc->writeback_rate_update_seconds * HZ);
} }
static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors) static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
{ {
uint64_t ret; uint64_t ret;
if (atomic_read(&dc->disk.detaching) || if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
!dc->writeback_percent) !dc->writeback_percent)
return 0; return 0;
@ -105,37 +98,11 @@ static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
return min_t(uint64_t, ret, HZ); return min_t(uint64_t, ret, HZ);
} }
/* Background writeback */ struct dirty_io {
struct closure cl;
static bool dirty_pred(struct keybuf *buf, struct bkey *k) struct cached_dev *dc;
{ struct bio bio;
return KEY_DIRTY(k); };
}
static bool dirty_full_stripe_pred(struct keybuf *buf, struct bkey *k)
{
uint64_t stripe;
unsigned nr_sectors = KEY_SIZE(k);
struct cached_dev *dc = container_of(buf, struct cached_dev,
writeback_keys);
unsigned stripe_size = 1 << dc->disk.stripe_size_bits;
if (!KEY_DIRTY(k))
return false;
stripe = KEY_START(k) >> dc->disk.stripe_size_bits;
while (1) {
if (atomic_read(dc->disk.stripe_sectors_dirty + stripe) !=
stripe_size)
return false;
if (nr_sectors <= stripe_size)
return true;
nr_sectors -= stripe_size;
stripe++;
}
}
static void dirty_init(struct keybuf_key *w) static void dirty_init(struct keybuf_key *w)
{ {
@ -153,131 +120,6 @@ static void dirty_init(struct keybuf_key *w)
bch_bio_map(bio, NULL); bch_bio_map(bio, NULL);
} }
-static void refill_dirty(struct closure *cl)
-{
-	struct cached_dev *dc = container_of(cl, struct cached_dev,
-					     writeback.cl);
-	struct keybuf *buf = &dc->writeback_keys;
-	bool searched_from_start = false;
-	struct bkey end = MAX_KEY;
-	SET_KEY_INODE(&end, dc->disk.id);
-
-	if (!atomic_read(&dc->disk.detaching) &&
-	    !dc->writeback_running)
-		closure_return(cl);
-
-	down_write(&dc->writeback_lock);
-
-	if (!atomic_read(&dc->has_dirty)) {
-		SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
-		bch_write_bdev_super(dc, NULL);
-
-		up_write(&dc->writeback_lock);
-		closure_return(cl);
-	}
-
-	if (bkey_cmp(&buf->last_scanned, &end) >= 0) {
-		buf->last_scanned = KEY(dc->disk.id, 0, 0);
-		searched_from_start = true;
-	}
-
-	if (dc->partial_stripes_expensive) {
-		uint64_t i;
-
-		for (i = 0; i < dc->disk.nr_stripes; i++)
-			if (atomic_read(dc->disk.stripe_sectors_dirty + i) ==
-			    1 << dc->disk.stripe_size_bits)
-				goto full_stripes;
-
-		goto normal_refill;
-full_stripes:
-		bch_refill_keybuf(dc->disk.c, buf, &end,
-				  dirty_full_stripe_pred);
-	} else {
-normal_refill:
-		bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
-	}
-
-	if (bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start) {
-		/* Searched the entire btree - delay awhile */
-
-		if (RB_EMPTY_ROOT(&buf->keys)) {
-			atomic_set(&dc->has_dirty, 0);
-			cached_dev_put(dc);
-		}
-
-		if (!atomic_read(&dc->disk.detaching))
-			closure_delay(&dc->writeback, dc->writeback_delay * HZ);
-	}
-
-	up_write(&dc->writeback_lock);
-
-	bch_ratelimit_reset(&dc->writeback_rate);
-
-	/* Punt to workqueue only so we don't recurse and blow the stack */
-	continue_at(cl, read_dirty, dirty_wq);
-}
-
-void bch_writeback_queue(struct cached_dev *dc)
-{
-	if (closure_trylock(&dc->writeback.cl, &dc->disk.cl)) {
-		if (!atomic_read(&dc->disk.detaching))
-			closure_delay(&dc->writeback, dc->writeback_delay * HZ);
-
-		continue_at(&dc->writeback.cl, refill_dirty, dirty_wq);
-	}
-}
-
-void bch_writeback_add(struct cached_dev *dc)
-{
-	if (!atomic_read(&dc->has_dirty) &&
-	    !atomic_xchg(&dc->has_dirty, 1)) {
-		atomic_inc(&dc->count);
-
-		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
-			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
-			/* XXX: should do this synchronously */
-			bch_write_bdev_super(dc, NULL);
-		}
-
-		bch_writeback_queue(dc);
-
-		if (dc->writeback_percent)
-			schedule_delayed_work(&dc->writeback_rate_update,
-					      dc->writeback_rate_update_seconds * HZ);
-	}
-}
-
-void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
-				  uint64_t offset, int nr_sectors)
-{
-	struct bcache_device *d = c->devices[inode];
-	unsigned stripe_size, stripe_offset;
-	uint64_t stripe;
-
-	if (!d)
-		return;
-
-	stripe_size = 1 << d->stripe_size_bits;
-	stripe = offset >> d->stripe_size_bits;
-	stripe_offset = offset & (stripe_size - 1);
-
-	while (nr_sectors) {
-		int s = min_t(unsigned, abs(nr_sectors),
-			      stripe_size - stripe_offset);
-
-		if (nr_sectors < 0)
-			s = -s;
-
-		atomic_add(s, d->stripe_sectors_dirty + stripe);
-
-		nr_sectors -= s;
-		stripe_offset = 0;
-		stripe++;
-	}
-}
-
-/* Background writeback - IO loop */
 static void dirty_io_destructor(struct closure *cl)
 {
 	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
@@ -297,26 +139,25 @@ static void write_dirty_finish(struct closure *cl)
 	/* This is kind of a dumb way of signalling errors. */
 	if (KEY_DIRTY(&w->key)) {
+		int ret;
 		unsigned i;
-		struct btree_op op;
-		bch_btree_op_init_stack(&op);
+		struct keylist keys;

-		op.type = BTREE_REPLACE;
-		bkey_copy(&op.replace, &w->key);
+		bch_keylist_init(&keys);

-		SET_KEY_DIRTY(&w->key, false);
-		bch_keylist_add(&op.keys, &w->key);
+		bkey_copy(keys.top, &w->key);
+		SET_KEY_DIRTY(keys.top, false);
+		bch_keylist_push(&keys);

 		for (i = 0; i < KEY_PTRS(&w->key); i++)
 			atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);

-		bch_btree_insert(&op, dc->disk.c);
-		closure_sync(&op.cl);
+		ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key);

-		if (op.insert_collision)
+		if (ret)
 			trace_bcache_writeback_collision(&w->key);

-		atomic_long_inc(op.insert_collision
+		atomic_long_inc(ret
 				? &dc->disk.c->writeback_keys_failed
 				: &dc->disk.c->writeback_keys_done);
 	}
@@ -374,30 +215,33 @@ static void read_dirty_submit(struct closure *cl)
 	continue_at(cl, write_dirty, system_wq);
 }

-static void read_dirty(struct closure *cl)
+static void read_dirty(struct cached_dev *dc)
 {
-	struct cached_dev *dc = container_of(cl, struct cached_dev,
-					     writeback.cl);
-	unsigned delay = writeback_delay(dc, 0);
+	unsigned delay = 0;
 	struct keybuf_key *w;
 	struct dirty_io *io;
+	struct closure cl;
+
+	closure_init_stack(&cl);

 	/*
 	 * XXX: if we error, background writeback just spins. Should use some
 	 * mempools.
 	 */

-	while (1) {
+	while (!kthread_should_stop()) {
+		try_to_freeze();
+
 		w = bch_keybuf_next(&dc->writeback_keys);
 		if (!w)
 			break;

 		BUG_ON(ptr_stale(dc->disk.c, &w->key, 0));

-		if (delay > 0 &&
-		    (KEY_START(&w->key) != dc->last_read ||
-		     jiffies_to_msecs(delay) > 50))
-			delay = schedule_timeout_uninterruptible(delay);
+		if (KEY_START(&w->key) != dc->last_read ||
+		    jiffies_to_msecs(delay) > 50)
+			while (!kthread_should_stop() && delay)
+				delay = schedule_timeout_interruptible(delay);

 		dc->last_read = KEY_OFFSET(&w->key);
@@ -423,7 +267,7 @@ static void read_dirty(struct closure *cl)
 		trace_bcache_writeback(&w->key);

 		down(&dc->in_flight);
-		closure_call(&io->cl, read_dirty_submit, NULL, cl);
+		closure_call(&io->cl, read_dirty_submit, NULL, &cl);

 		delay = writeback_delay(dc, KEY_SIZE(&w->key));
 	}
@@ -439,52 +283,205 @@ err:
 	 * Wait for outstanding writeback IOs to finish (and keybuf slots to be
 	 * freed) before refilling again
 	 */
-	continue_at(cl, refill_dirty, dirty_wq);
+	closure_sync(&cl);
 }
-/* Init */
+/* Scan for dirty data */

-static int bch_btree_sectors_dirty_init(struct btree *b, struct btree_op *op,
-					struct cached_dev *dc)
-{
-	struct bkey *k;
-	struct btree_iter iter;
-
-	bch_btree_iter_init(b, &iter, &KEY(dc->disk.id, 0, 0));
-	while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad)))
-		if (!b->level) {
-			if (KEY_INODE(k) > dc->disk.id)
-				break;
-
-			if (KEY_DIRTY(k))
-				bcache_dev_sectors_dirty_add(b->c, dc->disk.id,
-							     KEY_START(k),
-							     KEY_SIZE(k));
-		} else {
-			btree(sectors_dirty_init, k, b, op, dc);
-			if (KEY_INODE(k) > dc->disk.id)
-				break;
-
-			cond_resched();
-		}
-
-	return 0;
-}
+void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
+				  uint64_t offset, int nr_sectors)
+{
+	struct bcache_device *d = c->devices[inode];
+	unsigned stripe_offset, stripe, sectors_dirty;
+
+	if (!d)
+		return;
+
+	stripe = offset_to_stripe(d, offset);
+	stripe_offset = offset & (d->stripe_size - 1);
+
+	while (nr_sectors) {
+		int s = min_t(unsigned, abs(nr_sectors),
+			      d->stripe_size - stripe_offset);
+
+		if (nr_sectors < 0)
+			s = -s;
+
+		if (stripe >= d->nr_stripes)
+			return;
+
+		sectors_dirty = atomic_add_return(s,
+					d->stripe_sectors_dirty + stripe);
+		if (sectors_dirty == d->stripe_size)
+			set_bit(stripe, d->full_dirty_stripes);
+		else
+			clear_bit(stripe, d->full_dirty_stripes);
+
+		nr_sectors -= s;
+		stripe_offset = 0;
+		stripe++;
+	}
+}
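/*
 * Editor's illustration (not part of the diff): with d->stripe_size =
 * 2048 sectors, a call marking nr_sectors = 3000 dirty at offset = 1000
 * adds 1048 sectors to stripe 0 and the remaining 1952 to stripe 1. A
 * stripe's bit in d->full_dirty_stripes is set only while its counter
 * equals d->stripe_size exactly; refill_full_stripes() below scans that
 * bitmap.
 */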
+static bool dirty_pred(struct keybuf *buf, struct bkey *k)
+{
+	return KEY_DIRTY(k);
+}
+
+static void refill_full_stripes(struct cached_dev *dc)
+{
+	struct keybuf *buf = &dc->writeback_keys;
+	unsigned start_stripe, stripe, next_stripe;
+	bool wrapped = false;
+
+	stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));
+
+	if (stripe >= dc->disk.nr_stripes)
+		stripe = 0;
+
+	start_stripe = stripe;
+
+	while (1) {
+		stripe = find_next_bit(dc->disk.full_dirty_stripes,
+				       dc->disk.nr_stripes, stripe);
+
+		if (stripe == dc->disk.nr_stripes)
+			goto next;
+
+		next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes,
+						 dc->disk.nr_stripes, stripe);
+
+		buf->last_scanned = KEY(dc->disk.id,
+					stripe * dc->disk.stripe_size, 0);
+
+		bch_refill_keybuf(dc->disk.c, buf,
+				  &KEY(dc->disk.id,
+				       next_stripe * dc->disk.stripe_size, 0),
+				  dirty_pred);
+
+		if (array_freelist_empty(&buf->freelist))
+			return;
+
+		stripe = next_stripe;
+next:
+		if (wrapped && stripe > start_stripe)
+			return;
+
+		if (stripe == dc->disk.nr_stripes) {
+			stripe = 0;
+			wrapped = true;
+		}
+	}
+}
+
+static bool refill_dirty(struct cached_dev *dc)
+{
+	struct keybuf *buf = &dc->writeback_keys;
+	struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
+	bool searched_from_start = false;
+
+	if (dc->partial_stripes_expensive) {
+		refill_full_stripes(dc);
+		if (array_freelist_empty(&buf->freelist))
+			return false;
+	}
+
+	if (bkey_cmp(&buf->last_scanned, &end) >= 0) {
+		buf->last_scanned = KEY(dc->disk.id, 0, 0);
+		searched_from_start = true;
+	}
+
+	bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
+
+	return bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start;
+}
+
+static int bch_writeback_thread(void *arg)
+{
+	struct cached_dev *dc = arg;
+	bool searched_full_index;
+
+	while (!kthread_should_stop()) {
+		down_write(&dc->writeback_lock);
+		if (!atomic_read(&dc->has_dirty) ||
+		    (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
+		     !dc->writeback_running)) {
+			up_write(&dc->writeback_lock);
+			set_current_state(TASK_INTERRUPTIBLE);
+
+			if (kthread_should_stop())
+				return 0;
+
+			try_to_freeze();
+			schedule();
+			continue;
+		}
+
+		searched_full_index = refill_dirty(dc);
+
+		if (searched_full_index &&
+		    RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
+			atomic_set(&dc->has_dirty, 0);
+			cached_dev_put(dc);
+			SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
+			bch_write_bdev_super(dc, NULL);
+		}
+
+		up_write(&dc->writeback_lock);
+
+		bch_ratelimit_reset(&dc->writeback_rate);
+		read_dirty(dc);
+
+		if (searched_full_index) {
+			unsigned delay = dc->writeback_delay * HZ;
+
+			while (delay &&
+			       !kthread_should_stop() &&
+			       !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
+				delay = schedule_timeout_interruptible(delay);
+		}
+	}
+
+	return 0;
+}
-void bch_sectors_dirty_init(struct cached_dev *dc)
-{
-	struct btree_op op;
-
-	bch_btree_op_init_stack(&op);
-	btree_root(sectors_dirty_init, dc->disk.c, &op, dc);
-}
+/* Init */
+
+struct sectors_dirty_init {
+	struct btree_op	op;
+	unsigned	inode;
+};
+
+static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
+				 struct bkey *k)
+{
+	struct sectors_dirty_init *op = container_of(_op,
+						struct sectors_dirty_init, op);
+	if (KEY_INODE(k) > op->inode)
+		return MAP_DONE;
+
+	if (KEY_DIRTY(k))
+		bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
+					     KEY_START(k), KEY_SIZE(k));
+
+	return MAP_CONTINUE;
+}
+
+void bch_sectors_dirty_init(struct cached_dev *dc)
+{
+	struct sectors_dirty_init op;
+
+	bch_btree_op_init(&op.op, -1);
+	op.inode = dc->disk.id;
+
+	bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0),
+			   sectors_dirty_init_fn, 0);
+}

-void bch_cached_dev_writeback_init(struct cached_dev *dc)
+int bch_cached_dev_writeback_init(struct cached_dev *dc)
 {
 	sema_init(&dc->in_flight, 64);
-	closure_init_unlocked(&dc->writeback);
 	init_rwsem(&dc->writeback_lock);

 	bch_keybuf_init(&dc->writeback_keys);

 	dc->writeback_metadata = true;
@@ -498,22 +495,16 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc)
 	dc->writeback_rate_p_term_inverse = 64;
 	dc->writeback_rate_d_smooth = 8;

+	dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
+					      "bcache_writeback");
+	if (IS_ERR(dc->writeback_thread))
+		return PTR_ERR(dc->writeback_thread);
+
+	set_task_state(dc->writeback_thread, TASK_INTERRUPTIBLE);
+
 	INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
 	schedule_delayed_work(&dc->writeback_rate_update,
 			      dc->writeback_rate_update_seconds * HZ);
-}
-
-void bch_writeback_exit(void)
-{
-	if (dirty_wq)
-		destroy_workqueue(dirty_wq);
-}
-
-int __init bch_writeback_init(void)
-{
-	dirty_wq = create_workqueue("bcache_writeback");
-	if (!dirty_wq)
-		return -ENOMEM;
-
 	return 0;
 }

drivers/md/bcache/writeback.h

@@ -14,20 +14,27 @@ static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
 	return ret;
 }

+static inline unsigned offset_to_stripe(struct bcache_device *d,
+					uint64_t offset)
+{
+	do_div(offset, d->stripe_size);
+	return offset;
+}
+
-static inline bool bcache_dev_stripe_dirty(struct bcache_device *d,
+static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
 					   uint64_t offset,
 					   unsigned nr_sectors)
 {
-	uint64_t stripe = offset >> d->stripe_size_bits;
+	unsigned stripe = offset_to_stripe(&dc->disk, offset);

 	while (1) {
-		if (atomic_read(d->stripe_sectors_dirty + stripe))
+		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
 			return true;

-		if (nr_sectors <= 1 << d->stripe_size_bits)
+		if (nr_sectors <= dc->disk.stripe_size)
 			return false;

-		nr_sectors -= 1 << d->stripe_size_bits;
+		nr_sectors -= dc->disk.stripe_size;
 		stripe++;
 	}
 }

@@ -38,12 +45,12 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
 	unsigned in_use = dc->disk.c->gc_stats.in_use;

 	if (cache_mode != CACHE_MODE_WRITEBACK ||
-	    atomic_read(&dc->disk.detaching) ||
+	    test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
 	    in_use > CUTOFF_WRITEBACK_SYNC)
 		return false;

 	if (dc->partial_stripes_expensive &&
-	    bcache_dev_stripe_dirty(&dc->disk, bio->bi_sector,
+	    bcache_dev_stripe_dirty(dc, bio->bi_sector,
 				    bio_sectors(bio)))
 		return true;

@@ -54,11 +61,30 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
 	    in_use <= CUTOFF_WRITEBACK;
 }

+static inline void bch_writeback_queue(struct cached_dev *dc)
+{
+	wake_up_process(dc->writeback_thread);
+}
+
+static inline void bch_writeback_add(struct cached_dev *dc)
+{
+	if (!atomic_read(&dc->has_dirty) &&
+	    !atomic_xchg(&dc->has_dirty, 1)) {
+		atomic_inc(&dc->count);
+
+		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
+			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
+			/* XXX: should do this synchronously */
+			bch_write_bdev_super(dc, NULL);
+		}
+
+		bch_writeback_queue(dc);
+	}
+}
+
 void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);
-void bch_writeback_queue(struct cached_dev *);
-void bch_writeback_add(struct cached_dev *);

 void bch_sectors_dirty_init(struct cached_dev *dc);
-void bch_cached_dev_writeback_init(struct cached_dev *);
+int bch_cached_dev_writeback_init(struct cached_dev *);

 #endif

include/trace/events/bcache.h

@@ -6,11 +6,9 @@

 #include <linux/tracepoint.h>

-struct search;
-
 DECLARE_EVENT_CLASS(bcache_request,
-	TP_PROTO(struct search *s, struct bio *bio),
-	TP_ARGS(s, bio),
+	TP_PROTO(struct bcache_device *d, struct bio *bio),
+	TP_ARGS(d, bio),

 	TP_STRUCT__entry(
 		__field(dev_t, dev )
@@ -24,8 +22,8 @@ DECLARE_EVENT_CLASS(bcache_request,

 	TP_fast_assign(
 		__entry->dev = bio->bi_bdev->bd_dev;
-		__entry->orig_major = s->d->disk->major;
-		__entry->orig_minor = s->d->disk->first_minor;
+		__entry->orig_major = d->disk->major;
+		__entry->orig_minor = d->disk->first_minor;
 		__entry->sector = bio->bi_sector;
 		__entry->orig_sector = bio->bi_sector - 16;
 		__entry->nr_sector = bio->bi_size >> 9;
@@ -79,13 +77,13 @@ DECLARE_EVENT_CLASS(btree_node,

 /* request.c */

 DEFINE_EVENT(bcache_request, bcache_request_start,
-	TP_PROTO(struct search *s, struct bio *bio),
-	TP_ARGS(s, bio)
+	TP_PROTO(struct bcache_device *d, struct bio *bio),
+	TP_ARGS(d, bio)
 );

 DEFINE_EVENT(bcache_request, bcache_request_end,
-	TP_PROTO(struct search *s, struct bio *bio),
-	TP_ARGS(s, bio)
+	TP_PROTO(struct bcache_device *d, struct bio *bio),
+	TP_ARGS(d, bio)
 );

 DECLARE_EVENT_CLASS(bcache_bio,
@@ -370,6 +368,35 @@ DEFINE_EVENT(btree_node, bcache_btree_set_root,
 	TP_ARGS(b)
 );

+TRACE_EVENT(bcache_keyscan,
+	TP_PROTO(unsigned nr_found,
+		 unsigned start_inode, uint64_t start_offset,
+		 unsigned end_inode, uint64_t end_offset),
+	TP_ARGS(nr_found,
+		start_inode, start_offset,
+		end_inode, end_offset),
+
+	TP_STRUCT__entry(
+		__field(__u32, nr_found )
+		__field(__u32, start_inode )
+		__field(__u64, start_offset )
+		__field(__u32, end_inode )
+		__field(__u64, end_offset )
+	),
+
+	TP_fast_assign(
+		__entry->nr_found = nr_found;
+		__entry->start_inode = start_inode;
+		__entry->start_offset = start_offset;
+		__entry->end_inode = end_inode;
+		__entry->end_offset = end_offset;
+	),
+
+	TP_printk("found %u keys from %u:%llu to %u:%llu", __entry->nr_found,
+		  __entry->start_inode, __entry->start_offset,
+		  __entry->end_inode, __entry->end_offset)
+);
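/*
 * Editor's illustration (not part of the diff): given the TP_printk()
 * format above, a bcache_keyscan event that found 12 keys between
 * inode 1, offset 2048 and inode 1, offset 40960 renders in the trace
 * buffer as: found 12 keys from 1:2048 to 1:40960
 */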
 /* Allocator */

 TRACE_EVENT(bcache_alloc_invalidate,

include/uapi/linux/bcache.h (new file, 373 lines)

@@ -0,0 +1,373 @@
#ifndef _LINUX_BCACHE_H
#define _LINUX_BCACHE_H
/*
* Bcache on disk data structures
*/
#include <asm/types.h>
#define BITMASK(name, type, field, offset, size) \
static inline __u64 name(const type *k) \
{ return (k->field >> offset) & ~(~0ULL << size); } \
\
static inline void SET_##name(type *k, __u64 v) \
{ \
k->field &= ~(~(~0ULL << size) << offset); \
k->field |= (v & ~(~0ULL << size)) << offset; \
}
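/*
 * Editor's illustration (not part of the header): expanding
 * BITMASK(CACHE_SYNC, struct cache_sb, flags, 0, 1), as used further
 * down, produces accessors equivalent to:
 *
 *	static inline __u64 CACHE_SYNC(const struct cache_sb *k)
 *	{ return (k->flags >> 0) & ~(~0ULL << 1); }
 *
 *	static inline void SET_CACHE_SYNC(struct cache_sb *k, __u64 v)
 *	{
 *		k->flags &= ~(~(~0ULL << 1) << 0);
 *		k->flags |= (v & ~(~0ULL << 1)) << 0;
 *	}
 *
 * i.e. a getter and a setter for a 1-bit field at bit 0 of ->flags.
 */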
/* Btree keys - all units are in sectors */
struct bkey {
__u64 high;
__u64 low;
__u64 ptr[];
};
#define KEY_FIELD(name, field, offset, size) \
BITMASK(name, struct bkey, field, offset, size)
#define PTR_FIELD(name, offset, size) \
static inline __u64 name(const struct bkey *k, unsigned i) \
{ return (k->ptr[i] >> offset) & ~(~0ULL << size); } \
\
static inline void SET_##name(struct bkey *k, unsigned i, __u64 v) \
{ \
k->ptr[i] &= ~(~(~0ULL << size) << offset); \
k->ptr[i] |= (v & ~(~0ULL << size)) << offset; \
}
#define KEY_SIZE_BITS 16
KEY_FIELD(KEY_PTRS, high, 60, 3)
KEY_FIELD(HEADER_SIZE, high, 58, 2)
KEY_FIELD(KEY_CSUM, high, 56, 2)
KEY_FIELD(KEY_PINNED, high, 55, 1)
KEY_FIELD(KEY_DIRTY, high, 36, 1)
KEY_FIELD(KEY_SIZE, high, 20, KEY_SIZE_BITS)
KEY_FIELD(KEY_INODE, high, 0, 20)
/* Next time I change the on disk format, KEY_OFFSET() won't be 64 bits */
static inline __u64 KEY_OFFSET(const struct bkey *k)
{
return k->low;
}
static inline void SET_KEY_OFFSET(struct bkey *k, __u64 v)
{
k->low = v;
}
/*
* The high bit being set is a relic from when we used it to do binary
* searches - it told you where a key started. It's not used anymore,
* and can probably be safely dropped.
*/
#define KEY(inode, offset, size) \
((struct bkey) { \
.high = (1ULL << 63) | ((__u64) (size) << 20) | (inode), \
.low = (offset) \
})
#define ZERO_KEY KEY(0, 0, 0)
#define MAX_KEY_INODE (~(~0 << 20))
#define MAX_KEY_OFFSET (~0ULL >> 1)
#define MAX_KEY KEY(MAX_KEY_INODE, MAX_KEY_OFFSET, 0)
#define KEY_START(k) (KEY_OFFSET(k) - KEY_SIZE(k))
#define START_KEY(k) KEY(KEY_INODE(k), KEY_START(k), 0)
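/*
 * Editor's worked example (not part of the header): KEY(1, 128, 8)
 * describes an 8-sector extent of inode 1 whose offset field names the
 * *end* of the extent, so it covers sectors 120..127. KEY_START()
 * yields 120 and START_KEY() yields KEY(1, 120, 0); this is why
 * KEY_START subtracts KEY_SIZE from KEY_OFFSET.
 */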
#define PTR_DEV_BITS 12
PTR_FIELD(PTR_DEV, 51, PTR_DEV_BITS)
PTR_FIELD(PTR_OFFSET, 8, 43)
PTR_FIELD(PTR_GEN, 0, 8)
#define PTR_CHECK_DEV ((1 << PTR_DEV_BITS) - 1)
#define PTR(gen, offset, dev) \
((((__u64) dev) << 51) | ((__u64) offset) << 8 | gen)
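/*
 * Editor's illustration (not part of the header): PTR(gen, offset, dev)
 * packs a pointer as dev in bits 62..51, offset in bits 50..8 and gen in
 * bits 7..0, so PTR(3, 2048, 1) == (1ULL << 51) | (2048ULL << 8) | 3.
 * The PTR_DEV(), PTR_OFFSET() and PTR_GEN() accessors recover 1, 2048
 * and 3 from a key carrying that value in ptr[i].
 */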
/* Bkey utility code */
static inline unsigned long bkey_u64s(const struct bkey *k)
{
return (sizeof(struct bkey) / sizeof(__u64)) + KEY_PTRS(k);
}
static inline unsigned long bkey_bytes(const struct bkey *k)
{
return bkey_u64s(k) * sizeof(__u64);
}
#define bkey_copy(_dest, _src) memcpy(_dest, _src, bkey_bytes(_src))
static inline void bkey_copy_key(struct bkey *dest, const struct bkey *src)
{
SET_KEY_INODE(dest, KEY_INODE(src));
SET_KEY_OFFSET(dest, KEY_OFFSET(src));
}
static inline struct bkey *bkey_next(const struct bkey *k)
{
__u64 *d = (void *) k;
return (struct bkey *) (d + bkey_u64s(k));
}
static inline struct bkey *bkey_last(const struct bkey *k, unsigned nr_keys)
{
__u64 *d = (void *) k;
return (struct bkey *) (d + nr_keys);
}
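/*
 * Editor's usage sketch (not part of the header): because keys are
 * variable length, walking the keys of a struct bset uses bkey_next()
 * rather than array indexing; note that bset->keys counts __u64s of key
 * data, not bkeys, so bkey_last() is the matching end marker:
 *
 *	struct bkey *k;
 *
 *	for (k = i->start;
 *	     k < bkey_last(i->start, i->keys);
 *	     k = bkey_next(k))
 *		process_key(k);		// process_key() is a placeholder
 */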
/* Enough for a key with 6 pointers */
#define BKEY_PAD 8
#define BKEY_PADDED(key) \
union { struct bkey key; __u64 key ## _pad[BKEY_PAD]; }
/* Superblock */
/* Version 0: Cache device
* Version 1: Backing device
* Version 2: Seed pointer into btree node checksum
* Version 3: Cache device with new UUID format
* Version 4: Backing device with data offset
*/
#define BCACHE_SB_VERSION_CDEV 0
#define BCACHE_SB_VERSION_BDEV 1
#define BCACHE_SB_VERSION_CDEV_WITH_UUID 3
#define BCACHE_SB_VERSION_BDEV_WITH_OFFSET 4
#define BCACHE_SB_MAX_VERSION 4
#define SB_SECTOR 8
#define SB_SIZE 4096
#define SB_LABEL_SIZE 32
#define SB_JOURNAL_BUCKETS 256U
/* SB_JOURNAL_BUCKETS must be divisible by BITS_PER_LONG */
#define MAX_CACHES_PER_SET 8
#define BDEV_DATA_START_DEFAULT 16 /* sectors */
struct cache_sb {
__u64 csum;
__u64 offset; /* sector where this sb was written */
__u64 version;
__u8 magic[16];
__u8 uuid[16];
union {
__u8 set_uuid[16];
__u64 set_magic;
};
__u8 label[SB_LABEL_SIZE];
__u64 flags;
__u64 seq;
__u64 pad[8];
union {
struct {
/* Cache devices */
__u64 nbuckets; /* device size */
__u16 block_size; /* sectors */
__u16 bucket_size; /* sectors */
__u16 nr_in_set;
__u16 nr_this_dev;
};
struct {
/* Backing devices */
__u64 data_offset;
/*
* block_size from the cache device section is still used by
* backing devices, so don't add anything here until we fix
* things to not need it for backing devices anymore
*/
};
};
__u32 last_mount; /* time_t */
__u16 first_bucket;
union {
__u16 njournal_buckets;
__u16 keys;
};
__u64 d[SB_JOURNAL_BUCKETS]; /* journal buckets */
};
static inline _Bool SB_IS_BDEV(const struct cache_sb *sb)
{
return sb->version == BCACHE_SB_VERSION_BDEV
|| sb->version == BCACHE_SB_VERSION_BDEV_WITH_OFFSET;
}
BITMASK(CACHE_SYNC, struct cache_sb, flags, 0, 1);
BITMASK(CACHE_DISCARD, struct cache_sb, flags, 1, 1);
BITMASK(CACHE_REPLACEMENT, struct cache_sb, flags, 2, 3);
#define CACHE_REPLACEMENT_LRU 0U
#define CACHE_REPLACEMENT_FIFO 1U
#define CACHE_REPLACEMENT_RANDOM 2U
BITMASK(BDEV_CACHE_MODE, struct cache_sb, flags, 0, 4);
#define CACHE_MODE_WRITETHROUGH 0U
#define CACHE_MODE_WRITEBACK 1U
#define CACHE_MODE_WRITEAROUND 2U
#define CACHE_MODE_NONE 3U
BITMASK(BDEV_STATE, struct cache_sb, flags, 61, 2);
#define BDEV_STATE_NONE 0U
#define BDEV_STATE_CLEAN 1U
#define BDEV_STATE_DIRTY 2U
#define BDEV_STATE_STALE 3U
/*
* Magic numbers
*
* The various other data structures have their own magic numbers, which are
* xored with the first part of the cache set's UUID
*/
#define JSET_MAGIC 0x245235c1a3625032ULL
#define PSET_MAGIC 0x6750e15f87337f91ULL
#define BSET_MAGIC 0x90135c78b99e07f5ULL
static inline __u64 jset_magic(struct cache_sb *sb)
{
return sb->set_magic ^ JSET_MAGIC;
}
static inline __u64 pset_magic(struct cache_sb *sb)
{
return sb->set_magic ^ PSET_MAGIC;
}
static inline __u64 bset_magic(struct cache_sb *sb)
{
return sb->set_magic ^ BSET_MAGIC;
}
/*
* Journal
*
* On disk format for a journal entry:
* seq is monotonically increasing; every journal entry has its own unique
* sequence number.
*
* last_seq is the oldest journal entry that still has keys the btree hasn't
* flushed to disk yet.
*
* version is for on disk format changes.
*/
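/*
 * Editor's illustration (not part of the header): if entries with seq
 * 10..15 are present in the journal and the keys of entries 10 and 11
 * have all been flushed by the btree, the newest entry is written with
 * seq == 15 and last_seq == 12, so replay after a crash can begin at
 * sequence number 12.
 */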
#define BCACHE_JSET_VERSION_UUIDv1 1
#define BCACHE_JSET_VERSION_UUID 1 /* Always latest UUID format */
#define BCACHE_JSET_VERSION 1
struct jset {
__u64 csum;
__u64 magic;
__u64 seq;
__u32 version;
__u32 keys;
__u64 last_seq;
BKEY_PADDED(uuid_bucket);
BKEY_PADDED(btree_root);
__u16 btree_level;
__u16 pad[3];
__u64 prio_bucket[MAX_CACHES_PER_SET];
union {
struct bkey start[0];
__u64 d[0];
};
};
/* Bucket prios/gens */
struct prio_set {
__u64 csum;
__u64 magic;
__u64 seq;
__u32 version;
__u32 pad;
__u64 next_bucket;
struct bucket_disk {
__u16 prio;
__u8 gen;
} __attribute((packed)) data[];
};
/* UUIDS - per backing device/flash only volume metadata */
struct uuid_entry {
union {
struct {
__u8 uuid[16];
__u8 label[32];
__u32 first_reg;
__u32 last_reg;
__u32 invalidated;
__u32 flags;
/* Size of flash only volumes */
__u64 sectors;
};
__u8 pad[128];
};
};
BITMASK(UUID_FLASH_ONLY, struct uuid_entry, flags, 0, 1);
/* Btree nodes */
/* Version 1: Seed pointer into btree node checksum
*/
#define BCACHE_BSET_CSUM 1
#define BCACHE_BSET_VERSION 1
/*
* Btree nodes
*
* On disk a btree node is a list/log of these; within each set the keys are
* sorted
*/
struct bset {
__u64 csum;
__u64 magic;
__u64 seq;
__u32 version;
__u32 keys;
union {
struct bkey start[0];
__u64 d[0];
};
};
/* OBSOLETE */
/* UUIDS - per backing device/flash only volume metadata */
struct uuid_entry_v0 {
__u8 uuid[16];
__u8 label[32];
__u32 first_reg;
__u32 last_reg;
__u32 invalidated;
__u32 pad;
};
#endif /* _LINUX_BCACHE_H */