virtiofs: add logic to free up a memory range
Add logic to free up a busy memory range. A freed memory range will be returned to the free pool. Add a worker which can be started to select and free some busy memory ranges. A process can also steal one of its own busy dax ranges if a free range is not available; I will refer to this as direct reclaim. If no free range is available and nothing can be stolen from the same inode, the caller waits on a waitq for a free range to become available.

For reclaiming a range, as of now we need to hold the following locks in the specified order:

	down_write(&fi->i_mmap_sem);
	down_write(&fi->dax->sem);

We look for a free range in the following order:

A. Try to get a free range.
B. If not available, try direct reclaim.
C. If that fails too, wait for a memory range to become free.

Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Liu Bo <bo.liu@linux.alibaba.com>
Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
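In short, the allocation path introduced by this patch (alloc_dax_mapping_reclaim() in the diff below) boils down to the following simplified sketch; the fi->dax->nr deadlock-avoidance check and some error handling are reduced for clarity:

	/* Simplified sketch of the A/B/C allocation order described above. */
	static struct fuse_dax_mapping *
	alloc_range_sketch(struct fuse_conn_dax *fcd, struct inode *inode)
	{
		struct fuse_dax_mapping *dmap;
		bool retry;

		for (;;) {
			retry = false;

			/* A. Try the free pool first. */
			dmap = alloc_dax_mapping(fcd);
			if (dmap)
				return dmap;

			/* B. Direct reclaim: steal one of this inode's own busy
			 * ranges. Internally this takes down_write(&fi->i_mmap_sem)
			 * followed by down_write(&fi->dax->sem), in that order.
			 */
			dmap = inode_inline_reclaim_one_dmap(fcd, inode, &retry);
			if (dmap)
				return dmap;	/* may also be an ERR_PTR() */
			if (retry)
				continue;	/* transient failure, try again */

			/* C. Nothing to steal from this inode: wait for the worker
			 * to return a range to the free pool and wake us up.
			 */
			if (wait_event_killable_exclusive(fcd->range_waitq,
							  fcd->nr_free_ranges > 0))
				return ERR_PTR(-EINTR);
		}
	}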
This commit is contained in:
parent d0cfb9dcbc
commit 9a752d18c8

fs/fuse/dax.c | 523
@@ -6,6 +6,7 @@

#include "fuse_i.h"

#include <linux/delay.h>
#include <linux/dax.h>
#include <linux/uio.h>
#include <linux/pfn_t.h>

@@ -20,8 +21,21 @@
#define FUSE_DAX_SZ (1 << FUSE_DAX_SHIFT)
#define FUSE_DAX_PAGES (FUSE_DAX_SZ / PAGE_SIZE)

/* Number of ranges reclaimer will try to free in one invocation */
#define FUSE_DAX_RECLAIM_CHUNK (10)

/*
 * Dax memory reclaim threshold in percentage of total ranges. When the
 * number of free ranges drops below this threshold, reclaim can trigger.
 * Default is 20%.
 */
#define FUSE_DAX_RECLAIM_THRESHOLD (20)

/** Translation information for file offsets to DAX window offsets */
struct fuse_dax_mapping {
	/* Pointer to inode where this memory range is mapped */
	struct inode *inode;

	/* Will connect in fcd->free_ranges to keep track of free memory */
	struct list_head list;

@@ -39,6 +53,9 @@ struct fuse_dax_mapping {

	/* Is this mapping read-only or read-write */
	bool writable;

	/* reference count when the mapping is used by dax iomap. */
	refcount_t refcnt;
};

/* Per-inode dax map */

@@ -62,9 +79,17 @@ struct fuse_conn_dax {
	unsigned long nr_busy_ranges;
	struct list_head busy_ranges;

	/* Worker to free up memory ranges */
	struct delayed_work free_work;

	/* Wait queue for a dax range to become free */
	wait_queue_head_t range_waitq;

	/* DAX Window Free Ranges */
	long nr_free_ranges;
	struct list_head free_ranges;

	unsigned long nr_ranges;
};

static inline struct fuse_dax_mapping *

@@ -76,6 +101,30 @@ node_to_dmap(struct interval_tree_node *node)
	return container_of(node, struct fuse_dax_mapping, itn);
}

static struct fuse_dax_mapping *
alloc_dax_mapping_reclaim(struct fuse_conn_dax *fcd, struct inode *inode);

static void
__kick_dmap_free_worker(struct fuse_conn_dax *fcd, unsigned long delay_ms)
{
	unsigned long free_threshold;

	/* If the number of free ranges is below threshold, start reclaim */
	free_threshold = max_t(unsigned long, fcd->nr_ranges * FUSE_DAX_RECLAIM_THRESHOLD / 100,
			       1);
	if (fcd->nr_free_ranges < free_threshold)
		queue_delayed_work(system_long_wq, &fcd->free_work,
				   msecs_to_jiffies(delay_ms));
}

static void kick_dmap_free_worker(struct fuse_conn_dax *fcd,
				  unsigned long delay_ms)
{
	spin_lock(&fcd->lock);
	__kick_dmap_free_worker(fcd, delay_ms);
	spin_unlock(&fcd->lock);
}

static struct fuse_dax_mapping *alloc_dax_mapping(struct fuse_conn_dax *fcd)
{
	struct fuse_dax_mapping *dmap;

@@ -89,6 +138,8 @@ static struct fuse_dax_mapping *alloc_dax_mapping(struct fuse_conn_dax *fcd)
		fcd->nr_free_ranges--;
	}
	spin_unlock(&fcd->lock);

	kick_dmap_free_worker(fcd, 0);
	return dmap;
}

@@ -101,12 +152,21 @@ static void __dmap_remove_busy_list(struct fuse_conn_dax *fcd,
	fcd->nr_busy_ranges--;
}

static void dmap_remove_busy_list(struct fuse_conn_dax *fcd,
				  struct fuse_dax_mapping *dmap)
{
	spin_lock(&fcd->lock);
	__dmap_remove_busy_list(fcd, dmap);
	spin_unlock(&fcd->lock);
}

/* This assumes fcd->lock is held */
static void __dmap_add_to_free_pool(struct fuse_conn_dax *fcd,
				    struct fuse_dax_mapping *dmap)
{
	list_add_tail(&dmap->list, &fcd->free_ranges);
	fcd->nr_free_ranges++;
	wake_up(&fcd->range_waitq);
}

static void dmap_add_to_free_pool(struct fuse_conn_dax *fcd,

@@ -151,6 +211,12 @@ static int fuse_setup_one_mapping(struct inode *inode, unsigned long start_idx,
		return err;
	dmap->writable = writable;
	if (!upgrade) {
		/*
		 * We don't take a reference on inode. inode is valid right now
		 * and when inode is going away, cleanup logic should first
		 * cleanup dmap entries.
		 */
		dmap->inode = inode;
		dmap->itn.start = dmap->itn.last = start_idx;
		/* Protected by fi->dax->sem */
		interval_tree_insert(&dmap->itn, &fi->dax->tree);

@@ -228,6 +294,7 @@ static void dmap_reinit_add_to_free_pool(struct fuse_conn_dax *fcd,
		 dmap->itn.start, dmap->itn.last, dmap->window_offset,
		 dmap->length);
	__dmap_remove_busy_list(fcd, dmap);
	dmap->inode = NULL;
	dmap->itn.start = dmap->itn.last = 0;
	__dmap_add_to_free_pool(fcd, dmap);
}

@@ -256,6 +323,8 @@ static void inode_reclaim_dmap_range(struct fuse_conn_dax *fcd,
		if (!node)
			break;
		dmap = node_to_dmap(node);
		/* inode is going away. There should not be any users of dmap */
		WARN_ON(refcount_read(&dmap->refcnt) > 1);
		interval_tree_remove(&dmap->itn, &fi->dax->tree);
		num++;
		list_add(&dmap->list, &to_remove);

@@ -280,6 +349,21 @@
	spin_unlock(&fcd->lock);
}

static int dmap_removemapping_one(struct inode *inode,
				  struct fuse_dax_mapping *dmap)
{
	struct fuse_removemapping_one forget_one;
	struct fuse_removemapping_in inarg;

	memset(&inarg, 0, sizeof(inarg));
	inarg.count = 1;
	memset(&forget_one, 0, sizeof(forget_one));
	forget_one.moffset = dmap->window_offset;
	forget_one.len = dmap->length;

	return fuse_send_removemapping(inode, &inarg, &forget_one);
}

/*
 * It is called from evict_inode() and by that time inode is going away. So
 * this function does not take any locks like fi->dax->sem for traversing

@@ -327,6 +411,16 @@ static void fuse_fill_iomap(struct inode *inode, loff_t pos, loff_t length,
		if (flags & IOMAP_FAULT)
			iomap->length = ALIGN(len, PAGE_SIZE);
		iomap->type = IOMAP_MAPPED;
		/*
		 * increase refcnt so that reclaim code knows this dmap is in
		 * use. This assumes the fi->dax->sem lock is held either
		 * shared or exclusive.
		 */
		refcount_inc(&dmap->refcnt);

		/* iomap->private should be NULL */
		WARN_ON_ONCE(iomap->private);
		iomap->private = dmap;
	} else {
		/* Mapping beyond end of file is hole */
		fuse_fill_iomap_hole(iomap, length);

@@ -346,8 +440,28 @@ static int fuse_setup_new_dax_mapping(struct inode *inode, loff_t pos,
	unsigned long start_idx = pos >> FUSE_DAX_SHIFT;
	struct interval_tree_node *node;

-	alloc_dmap = alloc_dax_mapping(fcd);
-	if (!alloc_dmap)
+	/*
+	 * Can't do inline reclaim in fault path. We call
+	 * dax_layout_busy_page() before we free a range. And
+	 * fuse_wait_dax_page() drops fi->i_mmap_sem lock and requires it.
+	 * In fault path we enter with fi->i_mmap_sem held and can't drop
+	 * it. Also in fault path we hold fi->i_mmap_sem shared and not
+	 * exclusive, so that creates further issues with fuse_wait_dax_page().
+	 * Hence return -EAGAIN and fuse_dax_fault() will wait for a memory
+	 * range to become free and retry.
+	 */
+	if (flags & IOMAP_FAULT) {
+		alloc_dmap = alloc_dax_mapping(fcd);
+		if (!alloc_dmap)
+			return -EAGAIN;
+	} else {
+		alloc_dmap = alloc_dax_mapping_reclaim(fcd, inode);
+		if (IS_ERR(alloc_dmap))
+			return PTR_ERR(alloc_dmap);
+	}

	/* If we are here, we should have memory allocated */
	if (WARN_ON(!alloc_dmap))
		return -EIO;

	/*
@@ -399,9 +513,10 @@ static int fuse_upgrade_dax_mapping(struct inode *inode, loff_t pos,
	node = interval_tree_iter_first(&fi->dax->tree, idx, idx);

	/* We are holding either inode lock or i_mmap_sem, and that should
-	 * ensure that dmap can't reclaimed or truncated and it should still
-	 * be there in tree despite the fact we dropped and re-acquired the
-	 * lock.
+	 * ensure that dmap can't be truncated. We are holding a reference
+	 * on dmap and that should make sure it can't be reclaimed. So dmap
+	 * should still be there in tree despite the fact we dropped and
+	 * re-acquired the fi->dax->sem lock.
	 */
	ret = -EIO;
	if (WARN_ON(!node))

@@ -409,6 +524,17 @@

	dmap = node_to_dmap(node);

	/* We took an extra reference on dmap to make sure it's not reclaimed.
	 * Now we hold the fi->dax->sem lock and that reference is not needed
	 * anymore. Drop it.
	 */
	if (refcount_dec_and_test(&dmap->refcnt)) {
		/* refcount should not hit 0. This object only goes
		 * away when fuse connection goes away
		 */
		WARN_ON_ONCE(1);
	}

	/* Maybe another thread already upgraded mapping while we were not
	 * holding lock.
	 */
@@ -468,7 +594,11 @@ static int fuse_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
		 * two threads trying to do this simultaneously
		 * for same dmap. So drop shared lock and acquire
		 * exclusive lock.
		 *
		 * Before dropping fi->dax->sem lock, take reference
		 * on dmap so that it's not freed by range reclaim.
		 */
		refcount_inc(&dmap->refcnt);
		up_read(&fi->dax->sem);
		pr_debug("%s: Upgrading mapping at offset 0x%llx length 0x%llx\n",
			 __func__, pos, length);

@@ -505,6 +635,17 @@ static int fuse_iomap_end(struct inode *inode, loff_t pos, loff_t length,
			  ssize_t written, unsigned int flags,
			  struct iomap *iomap)
{
	struct fuse_dax_mapping *dmap = iomap->private;

	if (dmap) {
		if (refcount_dec_and_test(&dmap->refcnt)) {
			/* refcount should not hit 0. This object only goes
			 * away when fuse connection goes away
			 */
			WARN_ON_ONCE(1);
		}
	}

	/* DAX writes beyond end-of-file aren't handled using iomap, so the
	 * file size is unchanged and there is nothing to do here.
	 */
@@ -654,9 +795,16 @@ static vm_fault_t __fuse_dax_fault(struct vm_fault *vmf,
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct super_block *sb = inode->i_sb;
	pfn_t pfn;
	int error = 0;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_conn_dax *fcd = fc->dax;
	bool retry = false;

	if (write)
		sb_start_pagefault(sb);
retry:
	if (retry && !(fcd->nr_free_ranges > 0))
		wait_event(fcd->range_waitq, (fcd->nr_free_ranges > 0));

	/*
	 * We need to serialize against not only truncate but also against

@@ -665,7 +813,13 @@
	 * to populate page cache or access memory we are trying to free.
	 */
	down_read(&get_fuse_inode(inode)->i_mmap_sem);
-	ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &fuse_iomap_ops);
+	ret = dax_iomap_fault(vmf, pe_size, &pfn, &error, &fuse_iomap_ops);
+	if ((ret & VM_FAULT_ERROR) && error == -EAGAIN) {
+		error = 0;
+		retry = true;
+		up_read(&get_fuse_inode(inode)->i_mmap_sem);
+		goto retry;
+	}

	if (ret & VM_FAULT_NEEDDSYNC)
		ret = dax_finish_sync_fault(vmf, pe_size, pfn);

@@ -714,6 +868,348 @@ int fuse_dax_mmap(struct file *file, struct vm_area_struct *vma)
	return 0;
}

static int dmap_writeback_invalidate(struct inode *inode,
				     struct fuse_dax_mapping *dmap)
{
	int ret;
	loff_t start_pos = dmap->itn.start << FUSE_DAX_SHIFT;
	loff_t end_pos = (start_pos + FUSE_DAX_SZ - 1);

	ret = filemap_fdatawrite_range(inode->i_mapping, start_pos, end_pos);
	if (ret) {
		pr_debug("fuse: filemap_fdatawrite_range() failed. err=%d start_pos=0x%llx, end_pos=0x%llx\n",
			 ret, start_pos, end_pos);
		return ret;
	}

	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    start_pos >> PAGE_SHIFT,
					    end_pos >> PAGE_SHIFT);
	if (ret)
		pr_debug("fuse: invalidate_inode_pages2_range() failed err=%d\n",
			 ret);

	return ret;
}

static int reclaim_one_dmap_locked(struct inode *inode,
				   struct fuse_dax_mapping *dmap)
{
	int ret;
	struct fuse_inode *fi = get_fuse_inode(inode);

	/*
	 * igrab() was done to make sure inode won't go under us, and this
	 * further avoids the race with evict().
	 */
	ret = dmap_writeback_invalidate(inode, dmap);
	if (ret)
		return ret;

	/* Remove dax mapping from inode interval tree now */
	interval_tree_remove(&dmap->itn, &fi->dax->tree);
	fi->dax->nr--;

	/* It is possible that umount/shutdown has killed the fuse connection
	 * and worker thread is trying to reclaim memory in parallel. Don't
	 * warn in that case.
	 */
	ret = dmap_removemapping_one(inode, dmap);
	if (ret && ret != -ENOTCONN) {
		pr_warn("Failed to remove mapping. offset=0x%llx len=0x%llx ret=%d\n",
			dmap->window_offset, dmap->length, ret);
	}
	return 0;
}

/* Find first mapped dmap for an inode and return file offset. Caller needs
 * to hold fi->dax->sem lock either shared or exclusive.
 */
static struct fuse_dax_mapping *inode_lookup_first_dmap(struct inode *inode)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_dax_mapping *dmap;
	struct interval_tree_node *node;

	for (node = interval_tree_iter_first(&fi->dax->tree, 0, -1); node;
	     node = interval_tree_iter_next(node, 0, -1)) {
		dmap = node_to_dmap(node);
		/* still in use. */
		if (refcount_read(&dmap->refcnt) > 1)
			continue;

		return dmap;
	}

	return NULL;
}

/*
 * Find first mapping in the tree and free it and return it. Do not add
 * it back to free pool.
 */
static struct fuse_dax_mapping *
inode_inline_reclaim_one_dmap(struct fuse_conn_dax *fcd, struct inode *inode,
			      bool *retry)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_dax_mapping *dmap;
	u64 dmap_start, dmap_end;
	unsigned long start_idx;
	int ret;
	struct interval_tree_node *node;

	down_write(&fi->i_mmap_sem);

	/* Lookup a dmap and corresponding file offset to reclaim. */
	down_read(&fi->dax->sem);
	dmap = inode_lookup_first_dmap(inode);
	if (dmap) {
		start_idx = dmap->itn.start;
		dmap_start = start_idx << FUSE_DAX_SHIFT;
		dmap_end = dmap_start + FUSE_DAX_SZ - 1;
	}
	up_read(&fi->dax->sem);

	if (!dmap)
		goto out_mmap_sem;
	/*
	 * Make sure there are no references to inode pages using
	 * get_user_pages()
	 */
	ret = fuse_dax_break_layouts(inode, dmap_start, dmap_end);
	if (ret) {
		pr_debug("fuse: fuse_dax_break_layouts() failed. err=%d\n",
			 ret);
		dmap = ERR_PTR(ret);
		goto out_mmap_sem;
	}

	down_write(&fi->dax->sem);
	node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
	/* Range already got reclaimed by somebody else */
	if (!node) {
		if (retry)
			*retry = true;
		goto out_write_dmap_sem;
	}

	dmap = node_to_dmap(node);
	/* still in use. */
	if (refcount_read(&dmap->refcnt) > 1) {
		dmap = NULL;
		if (retry)
			*retry = true;
		goto out_write_dmap_sem;
	}

	ret = reclaim_one_dmap_locked(inode, dmap);
	if (ret < 0) {
		dmap = ERR_PTR(ret);
		goto out_write_dmap_sem;
	}

	/* Clean up dmap. Do not add back to free list */
	dmap_remove_busy_list(fcd, dmap);
	dmap->inode = NULL;
	dmap->itn.start = dmap->itn.last = 0;

	pr_debug("fuse: %s: inline reclaimed memory range. inode=%p, window_offset=0x%llx, length=0x%llx\n",
		 __func__, inode, dmap->window_offset, dmap->length);

out_write_dmap_sem:
	up_write(&fi->dax->sem);
out_mmap_sem:
	up_write(&fi->i_mmap_sem);
	return dmap;
}

static struct fuse_dax_mapping *
alloc_dax_mapping_reclaim(struct fuse_conn_dax *fcd, struct inode *inode)
{
	struct fuse_dax_mapping *dmap;
	struct fuse_inode *fi = get_fuse_inode(inode);

	while (1) {
		bool retry = false;

		dmap = alloc_dax_mapping(fcd);
		if (dmap)
			return dmap;

		dmap = inode_inline_reclaim_one_dmap(fcd, inode, &retry);
		/*
		 * Either we got a mapping or it is an error, return in both
		 * the cases.
		 */
		if (dmap)
			return dmap;

		/* If we could not reclaim a mapping because it
		 * had a reference or some other temporary failure,
		 * try again. We want to give up inline reclaim only
		 * if there is no range assigned to this node. Otherwise
		 * a deadlock is possible if we sleep with fi->i_mmap_sem
		 * held and the worker to free memory can't make progress
		 * due to unavailability of the fi->i_mmap_sem lock. So
		 * sleep only if fi->dax->nr == 0.
		 */
		if (retry)
			continue;
		/*
		 * There are no mappings which can be reclaimed. Wait for one.
		 * We are not holding fi->dax->sem. So it is possible
		 * that range gets added now. But as we are not holding
		 * fi->i_mmap_sem, worker should still be able to free up
		 * a range and wake us up.
		 */
		if (!fi->dax->nr && !(fcd->nr_free_ranges > 0)) {
			if (wait_event_killable_exclusive(fcd->range_waitq,
					(fcd->nr_free_ranges > 0))) {
				return ERR_PTR(-EINTR);
			}
		}
	}
}

static int lookup_and_reclaim_dmap_locked(struct fuse_conn_dax *fcd,
					  struct inode *inode,
					  unsigned long start_idx)
{
	int ret;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_dax_mapping *dmap;
	struct interval_tree_node *node;

	/* Find fuse dax mapping at this file offset for the inode. */
	node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);

	/* Range already got cleaned up by somebody else */
	if (!node)
		return 0;
	dmap = node_to_dmap(node);

	/* still in use. */
	if (refcount_read(&dmap->refcnt) > 1)
		return 0;

	ret = reclaim_one_dmap_locked(inode, dmap);
	if (ret < 0)
		return ret;

	/* Cleanup dmap entry and add back to free list */
	spin_lock(&fcd->lock);
	dmap_reinit_add_to_free_pool(fcd, dmap);
	spin_unlock(&fcd->lock);
	return ret;
}

/*
 * Free a range of memory.
 * Locking:
 * 1. Take fi->i_mmap_sem to block dax faults.
 * 2. Take fi->dax->sem to protect interval tree and also to make sure
 *    read/write can not reuse a dmap which we might be freeing.
 */
static int lookup_and_reclaim_dmap(struct fuse_conn_dax *fcd,
				   struct inode *inode,
				   unsigned long start_idx,
				   unsigned long end_idx)
{
	int ret;
	struct fuse_inode *fi = get_fuse_inode(inode);
	loff_t dmap_start = start_idx << FUSE_DAX_SHIFT;
	loff_t dmap_end = (dmap_start + FUSE_DAX_SZ) - 1;

	down_write(&fi->i_mmap_sem);
	ret = fuse_dax_break_layouts(inode, dmap_start, dmap_end);
	if (ret) {
		pr_debug("virtio_fs: fuse_dax_break_layouts() failed. err=%d\n",
			 ret);
		goto out_mmap_sem;
	}

	down_write(&fi->dax->sem);
	ret = lookup_and_reclaim_dmap_locked(fcd, inode, start_idx);
	up_write(&fi->dax->sem);
out_mmap_sem:
	up_write(&fi->i_mmap_sem);
	return ret;
}

static int try_to_free_dmap_chunks(struct fuse_conn_dax *fcd,
				   unsigned long nr_to_free)
{
	struct fuse_dax_mapping *dmap, *pos, *temp;
	int ret, nr_freed = 0;
	unsigned long start_idx = 0, end_idx = 0;
	struct inode *inode = NULL;

	/* Pick first busy range and free it for now */
	while (1) {
		if (nr_freed >= nr_to_free)
			break;

		dmap = NULL;
		spin_lock(&fcd->lock);

		if (!fcd->nr_busy_ranges) {
			spin_unlock(&fcd->lock);
			return 0;
		}

		list_for_each_entry_safe(pos, temp, &fcd->busy_ranges,
					 busy_list) {
			/* skip this range if it's in use. */
			if (refcount_read(&pos->refcnt) > 1)
				continue;

			inode = igrab(pos->inode);
			/*
			 * This inode is going away. That will free
			 * up all the ranges anyway, continue to
			 * next range.
			 */
			if (!inode)
				continue;
			/*
			 * Take this element off the list and add it to the
			 * tail. If this element can't be freed, it will help
			 * with selecting a new element in the next iteration
			 * of the loop.
			 */
			dmap = pos;
			list_move_tail(&dmap->busy_list, &fcd->busy_ranges);
			start_idx = end_idx = dmap->itn.start;
			break;
		}
		spin_unlock(&fcd->lock);
		if (!dmap)
			return 0;

		ret = lookup_and_reclaim_dmap(fcd, inode, start_idx, end_idx);
		iput(inode);
		if (ret)
			return ret;
		nr_freed++;
	}
	return 0;
}

static void fuse_dax_free_mem_worker(struct work_struct *work)
{
	int ret;
	struct fuse_conn_dax *fcd = container_of(work, struct fuse_conn_dax,
						 free_work.work);
	ret = try_to_free_dmap_chunks(fcd, FUSE_DAX_RECLAIM_CHUNK);
	if (ret) {
		pr_debug("fuse: try_to_free_dmap_chunks() failed with err=%d\n",
			 ret);
	}

	/* If number of free ranges is still below threshold, requeue */
	kick_dmap_free_worker(fcd, 1);
}

static void fuse_free_dax_mem_ranges(struct list_head *mem_list)
{
	struct fuse_dax_mapping *range, *temp;

@@ -745,8 +1241,11 @@ static int fuse_dax_mem_range_init(struct fuse_conn_dax *fcd)
	size_t dax_size = -1;
	unsigned long i;

	init_waitqueue_head(&fcd->range_waitq);
	INIT_LIST_HEAD(&fcd->free_ranges);
	INIT_LIST_HEAD(&fcd->busy_ranges);
	INIT_DELAYED_WORK(&fcd->free_work, fuse_dax_free_mem_worker);

	id = dax_read_lock();
	nr_pages = dax_direct_access(fcd->dev, 0, PHYS_PFN(dax_size), &kaddr,
				     &pfn);

@@ -773,10 +1272,12 @@
		range->window_offset = i * FUSE_DAX_SZ;
		range->length = FUSE_DAX_SZ;
		INIT_LIST_HEAD(&range->busy_list);
		refcount_set(&range->refcnt, 1);
		list_add_tail(&range->list, &fcd->free_ranges);
	}

	fcd->nr_free_ranges = nr_ranges;
	fcd->nr_ranges = nr_ranges;
	return 0;
out_err:
	/* Free all allocated elements */

@@ -852,3 +1353,13 @@ bool fuse_dax_check_alignment(struct fuse_conn *fc, unsigned int map_alignment)
	}
	return true;
}

void fuse_dax_cancel_work(struct fuse_conn *fc)
{
	struct fuse_conn_dax *fcd = fc->dax;

	if (fcd)
		cancel_delayed_work_sync(&fcd->free_work);
}
EXPORT_SYMBOL_GPL(fuse_dax_cancel_work);

fs/fuse/fuse_i.h:

@@ -1130,5 +1130,6 @@ bool fuse_dax_inode_alloc(struct super_block *sb, struct fuse_inode *fi);
void fuse_dax_inode_init(struct inode *inode);
void fuse_dax_inode_cleanup(struct inode *inode);
bool fuse_dax_check_alignment(struct fuse_conn *fc, unsigned int map_alignment);
void fuse_dax_cancel_work(struct fuse_conn *fc);

#endif /* _FS_FUSE_I_H */

fs/fuse/virtio_fs.c:

@@ -1341,6 +1341,12 @@ static void virtio_kill_sb(struct super_block *sb)
	vfs = fc->iq.priv;
	fsvq = &vfs->vqs[VQ_HIPRIO];

	/* Stop dax worker. Soon evict_inodes() will be called which will
	 * free all memory ranges belonging to all inodes.
	 */
	if (IS_ENABLED(CONFIG_FUSE_DAX))
		fuse_dax_cancel_work(fc);

	/* Stop forget queue. Soon destroy will be sent */
	spin_lock(&fsvq->lock);
	fsvq->connected = false;