Merge branch 'for-3.20/bdi' of git://git.kernel.dk/linux-block
Pull backing device changes from Jens Axboe:
 "This contains a cleanup of how the backing device is handled, in
  preparation for a rework of the lifetime rules. In this part, the most
  important change is to split the unrelated nommu mmap flags from it,
  but it also removes the backing_dev_info pointer from the
  address_space (and inode), and cleans up various other minor bits.

  Christoph did all the work here; I just fixed an oops with pages that
  have a swap backing. Arnd fixed a missing export, and Oleg killed the
  lustre backing_dev_info from staging. The last patch was from Al,
  unexporting parts that are no longer needed outside"

* 'for-3.20/bdi' of git://git.kernel.dk/linux-block:
  Make super_blocks and sb_lock static
  mtd: export new mtd_mmap_capabilities
  fs: make inode_to_bdi() handle NULL inode
  staging/lustre/llite: get rid of backing_dev_info
  fs: remove default_backing_dev_info
  fs: don't reassign dirty inodes to default_backing_dev_info
  nfs: don't call bdi_unregister
  ceph: remove call to bdi_unregister
  fs: remove mapping->backing_dev_info
  fs: export inode_to_bdi and use it in favor of mapping->backing_dev_info
  nilfs2: set up s_bdi like the generic mount_bdev code
  block_dev: get bdev inode bdi directly from the block device
  block_dev: only write bdev inode on close
  fs: introduce f_op->mmap_capabilities for nommu mmap support
  fs: kill BDI_CAP_SWAP_BACKED
  fs: deduplicate noop_backing_dev_info
commit 6bec003528
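Nearly every hunk below follows the same mechanical conversion: readers of mapping->backing_dev_info switch to the newly exported inode_to_bdi() helper, and per-filesystem BDI capability flags either disappear or become NOMMU_MAP_* mmap flags. As a quick orientation, here is a minimal sketch of the call-site shape before and after — the surrounding function is illustrative only, not a hunk from this series:

        #include <linux/backing-dev.h>
        #include <linux/fs.h>

        /* Hypothetical caller, for illustration only. */
        static void example_check_congestion(struct inode *inode)
        {
                /*
                 * Before: the bdi pointer was cached in struct address_space:
                 *     struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;
                 * After: it is derived from the inode (its superblock, or the
                 * block device's request queue for blkdev inodes):
                 */
                struct backing_dev_info *bdi = inode_to_bdi(inode);

                if (bdi_write_congested(bdi))
                        return;         /* back off; the device is congested */
        }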
@@ -43,12 +43,12 @@ and it's also much more restricted in the latter case:
      even if this was created by another process.

  - If possible, the file mapping will be directly on the backing device
-   if the backing device has the BDI_CAP_MAP_DIRECT capability and
+   if the backing device has the NOMMU_MAP_DIRECT capability and
    appropriate mapping protection capabilities. Ramfs, romfs, cramfs
    and mtd might all permit this.

  - If the backing device device can't or won't permit direct sharing,
-   but does have the BDI_CAP_MAP_COPY capability, then a copy of the
+   but does have the NOMMU_MAP_COPY capability, then a copy of the
    appropriate bit of the file will be read into a contiguous bit of
    memory and any extraneous space beyond the EOF will be cleared

@@ -220,7 +220,7 @@ directly (can't be copied).

 The file->f_op->mmap() operation will be called to actually inaugurate the
 mapping. It can be rejected at that point. Returning the ENOSYS error will
-cause the mapping to be copied instead if BDI_CAP_MAP_COPY is specified.
+cause the mapping to be copied instead if NOMMU_MAP_COPY is specified.

 The vm_ops->close() routine will be invoked when the last mapping on a chardev
 is removed. An existing mapping will be shared, partially or not, if possible
@@ -232,7 +232,7 @@ want to handle it, despite the fact it's got an operation. For instance, it
 might try directing the call to a secondary driver which turns out not to
 implement it. Such is the case for the framebuffer driver which attempts to
 direct the call to the device-specific driver. Under such circumstances, the
-mapping request will be rejected if BDI_CAP_MAP_COPY is not specified, and a
+mapping request will be rejected if NOMMU_MAP_COPY is not specified, and a
 copy mapped otherwise.

 IMPORTANT NOTE:
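The documentation change above reflects the new nommu contract: instead of BDI capability bits, the nommu mmap path asks the open file itself via f_op->mmap_capabilities. A hedged sketch of what a ROM-like character driver might return under the new scheme — the driver name is hypothetical, while the flags and hook are the ones this series introduces (see the mem.c and mtdchar.c hunks below):

        #include <linux/fs.h>
        #include <linux/module.h>

        /* Hypothetical read-only, directly mappable device. */
        static unsigned romlike_mmap_capabilities(struct file *file)
        {
                /* Shared mappings may sit directly on the backing store for
                 * read/exec; private mappings fall back to a copy. */
                return NOMMU_MAP_DIRECT | NOMMU_MAP_READ |
                       NOMMU_MAP_EXEC | NOMMU_MAP_COPY;
        }

        static const struct file_operations romlike_fops = {
                .owner  = THIS_MODULE,
        #ifndef CONFIG_MMU
                /* Consulted by the nommu mmap path instead of bdi->capabilities. */
                .mmap_capabilities = romlike_mmap_capabilities,
        #endif
        };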
@@ -607,7 +607,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
        q->backing_dev_info.ra_pages =
                        (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
        q->backing_dev_info.state = 0;
-       q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
+       q->backing_dev_info.capabilities = 0;
        q->backing_dev_info.name = "block";
        q->node = node_id;
@@ -287,13 +287,24 @@ static unsigned long get_unmapped_area_mem(struct file *file,
        return pgoff << PAGE_SHIFT;
 }

+/* permit direct mmap, for read, write or exec */
+static unsigned memory_mmap_capabilities(struct file *file)
+{
+       return NOMMU_MAP_DIRECT |
+               NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
+}
+
+static unsigned zero_mmap_capabilities(struct file *file)
+{
+       return NOMMU_MAP_COPY;
+}
+
 /* can't do an in-place private mapping if there's no MMU */
 static inline int private_mapping_ok(struct vm_area_struct *vma)
 {
        return vma->vm_flags & VM_MAYSHARE;
 }
 #else
 #define get_unmapped_area_mem  NULL

 static inline int private_mapping_ok(struct vm_area_struct *vma)
 {
@@ -721,7 +732,10 @@ static const struct file_operations mem_fops = {
        .write          = write_mem,
        .mmap           = mmap_mem,
        .open           = open_mem,
+#ifndef CONFIG_MMU
        .get_unmapped_area = get_unmapped_area_mem,
+       .mmap_capabilities = memory_mmap_capabilities,
+#endif
 };

 #ifdef CONFIG_DEVKMEM
@@ -731,7 +745,10 @@ static const struct file_operations kmem_fops = {
        .write          = write_kmem,
        .mmap           = mmap_kmem,
        .open           = open_kmem,
+#ifndef CONFIG_MMU
        .get_unmapped_area = get_unmapped_area_mem,
+       .mmap_capabilities = memory_mmap_capabilities,
+#endif
 };
 #endif

@@ -760,16 +777,9 @@ static const struct file_operations zero_fops = {
        .read_iter      = read_iter_zero,
        .aio_write      = aio_write_zero,
        .mmap           = mmap_zero,
-};
-
-/*
- * capabilities for /dev/zero
- * - permits private mappings, "copies" are taken of the source of zeros
- * - no writeback happens
- */
-static struct backing_dev_info zero_bdi = {
-       .name           = "char/mem",
-       .capabilities   = BDI_CAP_MAP_COPY | BDI_CAP_NO_ACCT_AND_WRITEBACK,
+#ifndef CONFIG_MMU
+       .mmap_capabilities = zero_mmap_capabilities,
+#endif
 };

 static const struct file_operations full_fops = {
@@ -783,22 +793,22 @@ static const struct memdev {
        const char *name;
        umode_t mode;
        const struct file_operations *fops;
-       struct backing_dev_info *dev_info;
+       fmode_t fmode;
 } devlist[] = {
-        [1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi },
+        [1] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
 #ifdef CONFIG_DEVKMEM
-        [2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi },
+        [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET },
 #endif
-        [3] = { "null", 0666, &null_fops, NULL },
+        [3] = { "null", 0666, &null_fops, 0 },
 #ifdef CONFIG_DEVPORT
-        [4] = { "port", 0, &port_fops, NULL },
+        [4] = { "port", 0, &port_fops, 0 },
 #endif
-        [5] = { "zero", 0666, &zero_fops, &zero_bdi },
-        [7] = { "full", 0666, &full_fops, NULL },
-        [8] = { "random", 0666, &random_fops, NULL },
-        [9] = { "urandom", 0666, &urandom_fops, NULL },
+        [5] = { "zero", 0666, &zero_fops, 0 },
+        [7] = { "full", 0666, &full_fops, 0 },
+        [8] = { "random", 0666, &random_fops, 0 },
+        [9] = { "urandom", 0666, &urandom_fops, 0 },
 #ifdef CONFIG_PRINTK
-       [11] = { "kmsg", 0644, &kmsg_fops, NULL },
+       [11] = { "kmsg", 0644, &kmsg_fops, 0 },
 #endif
 };

@@ -816,12 +826,7 @@ static int memory_open(struct inode *inode, struct file *filp)
                return -ENXIO;

        filp->f_op = dev->fops;
-       if (dev->dev_info)
-               filp->f_mapping->backing_dev_info = dev->dev_info;
-
-       /* Is /dev/mem or /dev/kmem ? */
-       if (dev->dev_info == &directly_mappable_cdev_bdi)
-               filp->f_mode |= FMODE_UNSIGNED_OFFSET;
+       filp->f_mode |= dev->fmode;

        if (dev->fops->open)
                return dev->fops->open(inode, filp);
@@ -846,11 +851,6 @@ static struct class *mem_class;
 static int __init chr_dev_init(void)
 {
        int minor;
-       int err;
-
-       err = bdi_init(&zero_bdi);
-       if (err)
-               return err;

        if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
                printk("unable to get major %d for memory devs\n", MEM_MAJOR);
@@ -104,11 +104,9 @@ static int raw_release(struct inode *inode, struct file *filp)

        mutex_lock(&raw_mutex);
        bdev = raw_devices[minor].binding;
-       if (--raw_devices[minor].inuse == 0) {
+       if (--raw_devices[minor].inuse == 0)
                /* Here  inode->i_mapping == bdev->bd_inode->i_mapping  */
                inode->i_mapping = &inode->i_data;
-               inode->i_mapping->backing_dev_info = &default_backing_dev_info;
-       }
        mutex_unlock(&raw_mutex);

        blkdev_put(bdev, filp->f_mode | FMODE_EXCL);
@@ -49,7 +49,6 @@ static DEFINE_MUTEX(mtd_mutex);
  */
 struct mtd_file_info {
        struct mtd_info *mtd;
-       struct inode *ino;
        enum mtd_file_modes mode;
 };

@@ -59,10 +58,6 @@ static loff_t mtdchar_lseek(struct file *file, loff_t offset, int orig)
        return fixed_size_llseek(file, offset, orig, mfi->mtd->size);
 }

-static int count;
-static struct vfsmount *mnt;
-static struct file_system_type mtd_inodefs_type;
-
 static int mtdchar_open(struct inode *inode, struct file *file)
 {
        int minor = iminor(inode);
@@ -70,7 +65,6 @@ static int mtdchar_open(struct inode *inode, struct file *file)
        int ret = 0;
        struct mtd_info *mtd;
        struct mtd_file_info *mfi;
-       struct inode *mtd_ino;

        pr_debug("MTD_open\n");

@@ -78,10 +72,6 @@ static int mtdchar_open(struct inode *inode, struct file *file)
        if ((file->f_mode & FMODE_WRITE) && (minor & 1))
                return -EACCES;

-       ret = simple_pin_fs(&mtd_inodefs_type, &mnt, &count);
-       if (ret)
-               return ret;
-
        mutex_lock(&mtd_mutex);
        mtd = get_mtd_device(NULL, devnum);

@@ -95,43 +85,26 @@ static int mtdchar_open(struct inode *inode, struct file *file)
                goto out1;
        }

-       mtd_ino = iget_locked(mnt->mnt_sb, devnum);
-       if (!mtd_ino) {
-               ret = -ENOMEM;
-               goto out1;
-       }
-       if (mtd_ino->i_state & I_NEW) {
-               mtd_ino->i_private = mtd;
-               mtd_ino->i_mode = S_IFCHR;
-               mtd_ino->i_data.backing_dev_info = mtd->backing_dev_info;
-               unlock_new_inode(mtd_ino);
-       }
-       file->f_mapping = mtd_ino->i_mapping;
-
        /* You can't open it RW if it's not a writeable device */
        if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
                ret = -EACCES;
-               goto out2;
+               goto out1;
        }

        mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
        if (!mfi) {
                ret = -ENOMEM;
-               goto out2;
+               goto out1;
        }
-       mfi->ino = mtd_ino;
        mfi->mtd = mtd;
        file->private_data = mfi;
        mutex_unlock(&mtd_mutex);
        return 0;

-out2:
-       iput(mtd_ino);
 out1:
        put_mtd_device(mtd);
 out:
        mutex_unlock(&mtd_mutex);
-       simple_release_fs(&mnt, &count);
        return ret;
 } /* mtdchar_open */

@@ -148,12 +121,9 @@ static int mtdchar_close(struct inode *inode, struct file *file)
        if ((file->f_mode & FMODE_WRITE))
                mtd_sync(mtd);

-       iput(mfi->ino);
-
        put_mtd_device(mtd);
        file->private_data = NULL;
        kfree(mfi);
-       simple_release_fs(&mnt, &count);

        return 0;
 } /* mtdchar_close */
@@ -1117,6 +1087,13 @@ static unsigned long mtdchar_get_unmapped_area(struct file *file,
        ret = mtd_get_unmapped_area(mtd, len, offset, flags);
        return ret == -EOPNOTSUPP ? -ENODEV : ret;
 }
+
+static unsigned mtdchar_mmap_capabilities(struct file *file)
+{
+       struct mtd_file_info *mfi = file->private_data;
+
+       return mtd_mmap_capabilities(mfi->mtd);
+}
 #endif

@@ -1160,27 +1137,10 @@ static const struct file_operations mtd_fops = {
        .mmap           = mtdchar_mmap,
 #ifndef CONFIG_MMU
        .get_unmapped_area = mtdchar_get_unmapped_area,
+       .mmap_capabilities = mtdchar_mmap_capabilities,
 #endif
 };

-static const struct super_operations mtd_ops = {
-       .drop_inode = generic_delete_inode,
-       .statfs = simple_statfs,
-};
-
-static struct dentry *mtd_inodefs_mount(struct file_system_type *fs_type,
-                               int flags, const char *dev_name, void *data)
-{
-       return mount_pseudo(fs_type, "mtd_inode:", &mtd_ops, NULL, MTD_INODE_FS_MAGIC);
-}
-
-static struct file_system_type mtd_inodefs_type = {
-       .name = "mtd_inodefs",
-       .mount = mtd_inodefs_mount,
-       .kill_sb = kill_anon_super,
-};
-MODULE_ALIAS_FS("mtd_inodefs");
-
 int __init init_mtdchar(void)
 {
        int ret;
@@ -1193,23 +1153,11 @@ int __init init_mtdchar(void)
                return ret;
        }

-       ret = register_filesystem(&mtd_inodefs_type);
-       if (ret) {
-               pr_err("Can't register mtd_inodefs filesystem, error %d\n",
-                      ret);
-               goto err_unregister_chdev;
-       }
-
        return ret;
-
-err_unregister_chdev:
-       __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
-       return ret;
 }

 void __exit cleanup_mtdchar(void)
 {
-       unregister_filesystem(&mtd_inodefs_type);
        __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
 }
@@ -732,8 +732,6 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],  /* subdevices to c

        concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;

-       concat->mtd.backing_dev_info = subdev[0]->backing_dev_info;
-
        concat->subdev[0] = subdev[0];

        for (i = 1; i < num_devs; i++) {
@@ -761,14 +759,6 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],  /* subdevices to c
                                subdev[i]->flags & MTD_WRITEABLE;
                }

-               /* only permit direct mapping if the BDIs are all the same
-                * - copy-mapping is still permitted
-                */
-               if (concat->mtd.backing_dev_info !=
-                   subdev[i]->backing_dev_info)
-                       concat->mtd.backing_dev_info =
-                               &default_backing_dev_info;
-
                concat->mtd.size += subdev[i]->size;
                concat->mtd.ecc_stats.badblocks +=
                        subdev[i]->ecc_stats.badblocks;
@@ -43,33 +43,7 @@

 #include "mtdcore.h"

-/*
- * backing device capabilities for non-mappable devices (such as NAND flash)
- * - permits private mappings, copies are taken of the data
- */
-static struct backing_dev_info mtd_bdi_unmappable = {
-       .capabilities   = BDI_CAP_MAP_COPY,
-};
-
-/*
- * backing device capabilities for R/O mappable devices (such as ROM)
- * - permits private mappings, copies are taken of the data
- * - permits non-writable shared mappings
- */
-static struct backing_dev_info mtd_bdi_ro_mappable = {
-       .capabilities   = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
-                          BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP),
-};
-
-/*
- * backing device capabilities for writable mappable devices (such as RAM)
- * - permits private mappings, copies are taken of the data
- * - permits non-writable shared mappings
- */
-static struct backing_dev_info mtd_bdi_rw_mappable = {
-       .capabilities   = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
-                          BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP |
-                          BDI_CAP_WRITE_MAP),
+static struct backing_dev_info mtd_bdi = {
 };

 static int mtd_cls_suspend(struct device *dev, pm_message_t state);
@@ -365,6 +339,23 @@ static struct device_type mtd_devtype = {
        .release        = mtd_release,
 };

+#ifndef CONFIG_MMU
+unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
+{
+       switch (mtd->type) {
+       case MTD_RAM:
+               return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
+                       NOMMU_MAP_READ | NOMMU_MAP_WRITE;
+       case MTD_ROM:
+               return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
+                       NOMMU_MAP_READ;
+       default:
+               return NOMMU_MAP_COPY;
+       }
+}
+EXPORT_SYMBOL_GPL(mtd_mmap_capabilities);
+#endif
+
 /**
  *     add_mtd_device - register an MTD device
  *     @mtd: pointer to new MTD device info structure
@@ -380,19 +371,7 @@ int add_mtd_device(struct mtd_info *mtd)
        struct mtd_notifier *not;
        int i, error;

-       if (!mtd->backing_dev_info) {
-               switch (mtd->type) {
-               case MTD_RAM:
-                       mtd->backing_dev_info = &mtd_bdi_rw_mappable;
-                       break;
-               case MTD_ROM:
-                       mtd->backing_dev_info = &mtd_bdi_ro_mappable;
-                       break;
-               default:
-                       mtd->backing_dev_info = &mtd_bdi_unmappable;
-                       break;
-               }
-       }
+       mtd->backing_dev_info = &mtd_bdi;

        BUG_ON(mtd->writesize == 0);
        mutex_lock(&mtd_table_mutex);
@@ -1237,17 +1216,9 @@ static int __init init_mtd(void)
        if (ret)
                goto err_reg;

-       ret = mtd_bdi_init(&mtd_bdi_unmappable, "mtd-unmap");
+       ret = mtd_bdi_init(&mtd_bdi, "mtd");
        if (ret)
-               goto err_bdi1;
-
-       ret = mtd_bdi_init(&mtd_bdi_ro_mappable, "mtd-romap");
-       if (ret)
-               goto err_bdi2;
-
-       ret = mtd_bdi_init(&mtd_bdi_rw_mappable, "mtd-rwmap");
-       if (ret)
-               goto err_bdi3;
+               goto err_bdi;

        proc_mtd = proc_create("mtd", 0, NULL, &mtd_proc_ops);

@@ -1260,11 +1231,7 @@ static int __init init_mtd(void)
 out_procfs:
        if (proc_mtd)
                remove_proc_entry("mtd", NULL);
-err_bdi3:
-       bdi_destroy(&mtd_bdi_ro_mappable);
-err_bdi2:
-       bdi_destroy(&mtd_bdi_unmappable);
-err_bdi1:
+err_bdi:
        class_unregister(&mtd_class);
 err_reg:
        pr_err("Error registering mtd class or bdi: %d\n", ret);
@@ -1277,9 +1244,7 @@ static void __exit cleanup_mtd(void)
        if (proc_mtd)
                remove_proc_entry("mtd", NULL);
        class_unregister(&mtd_class);
-       bdi_destroy(&mtd_bdi_unmappable);
-       bdi_destroy(&mtd_bdi_ro_mappable);
-       bdi_destroy(&mtd_bdi_rw_mappable);
+       bdi_destroy(&mtd_bdi);
 }

 module_init(init_mtd);
@@ -378,7 +378,6 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,

        slave->mtd.name = name;
        slave->mtd.owner = master->owner;
-       slave->mtd.backing_dev_info = master->backing_dev_info;

        /* NOTE: we don't arrange MTDs as a tree; it'd be error-prone
         * to have the same data be in two different partitions.
@@ -987,7 +987,7 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt)
        if (err)
                goto out_free;
        lsi->lsi_flags |= LSI_BDI_INITIALIZED;
-       lsi->lsi_bdi.capabilities = BDI_CAP_MAP_COPY;
+       lsi->lsi_bdi.capabilities = 0;
        err = ll_bdi_register(&lsi->lsi_bdi);
        if (err)
                goto out_free;
@@ -1812,10 +1812,6 @@ void ll_read_inode2(struct inode *inode, void *opaque)

        /* OIDEBUG(inode); */

-       /* initializing backing dev info. */
-       inode->i_mapping->backing_dev_info = &s2lsi(inode->i_sb)->lsi_bdi;
-
-
        if (S_ISREG(inode->i_mode)) {
                struct ll_sb_info *sbi = ll_i2sbi(inode);

@@ -335,7 +335,7 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
        }
        init_rwsem(&v9ses->rename_sem);

-       rc = bdi_setup_and_register(&v9ses->bdi, "9p", BDI_CAP_MAP_COPY);
+       rc = bdi_setup_and_register(&v9ses->bdi, "9p");
        if (rc) {
                kfree(v9ses->aname);
                kfree(v9ses->uname);
|
||||
volume->cell = params->cell;
|
||||
volume->vid = vlocation->vldb.vid[params->type];
|
||||
|
||||
ret = bdi_setup_and_register(&volume->bdi, "afs", BDI_CAP_MAP_COPY);
|
||||
ret = bdi_setup_and_register(&volume->bdi, "afs");
|
||||
if (ret)
|
||||
goto error_bdi;
|
||||
|
||||
|
fs/aio.c
@@ -165,15 +165,6 @@ static struct vfsmount *aio_mnt;
 static const struct file_operations aio_ring_fops;
 static const struct address_space_operations aio_ctx_aops;

-/* Backing dev info for aio fs.
- * -no dirty page accounting or writeback happens
- */
-static struct backing_dev_info aio_fs_backing_dev_info = {
-       .name           = "aiofs",
-       .state          = 0,
-       .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_MAP_COPY,
-};
-
 static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
 {
        struct qstr this = QSTR_INIT("[aio]", 5);
@@ -185,7 +176,6 @@ static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)

        inode->i_mapping->a_ops = &aio_ctx_aops;
        inode->i_mapping->private_data = ctx;
-       inode->i_mapping->backing_dev_info = &aio_fs_backing_dev_info;
        inode->i_size = PAGE_SIZE * nr_pages;

        path.dentry = d_alloc_pseudo(aio_mnt->mnt_sb, &this);
@@ -230,9 +220,6 @@ static int __init aio_setup(void)
        if (IS_ERR(aio_mnt))
                panic("Failed to create aio fs mount.");

-       if (bdi_init(&aio_fs_backing_dev_info))
-               panic("Failed to init aio fs backing dev info.");
-
        kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
        kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);

@@ -49,23 +49,15 @@ inline struct block_device *I_BDEV(struct inode *inode)
 }
 EXPORT_SYMBOL(I_BDEV);

-/*
- * Move the inode from its current bdi to a new bdi. Make sure the inode
- * is clean before moving so that it doesn't linger on the old bdi.
- */
-static void bdev_inode_switch_bdi(struct inode *inode,
-                       struct backing_dev_info *dst)
+static void bdev_write_inode(struct inode *inode)
 {
-       while (true) {
-               spin_lock(&inode->i_lock);
-               if (!(inode->i_state & I_DIRTY)) {
-                       inode->i_data.backing_dev_info = dst;
-                       spin_unlock(&inode->i_lock);
-                       return;
-               }
+       spin_lock(&inode->i_lock);
+       while (inode->i_state & I_DIRTY) {
                spin_unlock(&inode->i_lock);
                WARN_ON_ONCE(write_inode_now(inode, true));
+               spin_lock(&inode->i_lock);
        }
+       spin_unlock(&inode->i_lock);
 }

 /* Kill _all_ buffers and pagecache , dirty or not.. */
@@ -584,7 +576,6 @@ struct block_device *bdget(dev_t dev)
                inode->i_bdev = bdev;
                inode->i_data.a_ops = &def_blk_aops;
                mapping_set_gfp_mask(&inode->i_data, GFP_USER);
-               inode->i_data.backing_dev_info = &default_backing_dev_info;
                spin_lock(&bdev_lock);
                list_add(&bdev->bd_list, &all_bdevs);
                spin_unlock(&bdev_lock);
@@ -1145,8 +1136,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
                bdev->bd_queue = disk->queue;
                bdev->bd_contains = bdev;
                if (!partno) {
-                       struct backing_dev_info *bdi;
-
                        ret = -ENXIO;
                        bdev->bd_part = disk_get_part(disk, partno);
                        if (!bdev->bd_part)
@@ -1172,11 +1161,8 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
                                }
                        }

-                       if (!ret) {
+                       if (!ret)
                                bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
-                               bdi = blk_get_backing_dev_info(bdev);
-                               bdev_inode_switch_bdi(bdev->bd_inode, bdi);
-                       }

                        /*
                         * If the device is invalidated, rescan partition
@@ -1203,8 +1189,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
                        if (ret)
                                goto out_clear;
                        bdev->bd_contains = whole;
-                       bdev_inode_switch_bdi(bdev->bd_inode,
-                               whole->bd_inode->i_data.backing_dev_info);
                        bdev->bd_part = disk_get_part(disk, partno);
                        if (!(disk->flags & GENHD_FL_UP) ||
                            !bdev->bd_part || !bdev->bd_part->nr_sects) {
@@ -1244,7 +1228,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
        bdev->bd_disk = NULL;
        bdev->bd_part = NULL;
        bdev->bd_queue = NULL;
-       bdev_inode_switch_bdi(bdev->bd_inode, &default_backing_dev_info);
        if (bdev != bdev->bd_contains)
                __blkdev_put(bdev->bd_contains, mode, 1);
        bdev->bd_contains = NULL;
@@ -1464,11 +1447,11 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
                WARN_ON_ONCE(bdev->bd_holders);
                sync_blockdev(bdev);
                kill_bdev(bdev);
-               /* ->release can cause the old bdi to disappear,
-                * so must switch it out first
+               /*
+                * ->release can cause the queue to disappear, so flush all
+                * dirty data before.
                 */
-               bdev_inode_switch_bdi(bdev->bd_inode,
-                                       &default_backing_dev_info);
+               bdev_write_inode(bdev->bd_inode);
        }
        if (bdev->bd_contains == bdev) {
                if (disk->fops->release)
@@ -1715,12 +1715,11 @@ static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
 {
        int err;

-       bdi->capabilities = BDI_CAP_MAP_COPY;
-       err = bdi_setup_and_register(bdi, "btrfs", BDI_CAP_MAP_COPY);
+       err = bdi_setup_and_register(bdi, "btrfs");
        if (err)
                return err;

-       bdi->ra_pages = default_backing_dev_info.ra_pages;
+       bdi->ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE;
        bdi->congested_fn = btrfs_congested_fn;
        bdi->congested_data = info;
        return 0;
@@ -2319,7 +2318,6 @@ int open_ctree(struct super_block *sb,
         */
        fs_info->btree_inode->i_size = OFFSET_MAX;
        fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
-       fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;

        RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
        extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
@@ -1746,7 +1746,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,

        mutex_lock(&inode->i_mutex);

-       current->backing_dev_info = inode->i_mapping->backing_dev_info;
+       current->backing_dev_info = inode_to_bdi(inode);
        err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
        if (err) {
                mutex_unlock(&inode->i_mutex);
@@ -3608,7 +3608,6 @@ cache_acl:
        switch (inode->i_mode & S_IFMT) {
        case S_IFREG:
                inode->i_mapping->a_ops = &btrfs_aops;
-               inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
                BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
                inode->i_fop = &btrfs_file_operations;
                inode->i_op = &btrfs_file_inode_operations;
@@ -3623,7 +3622,6 @@ cache_acl:
        case S_IFLNK:
                inode->i_op = &btrfs_symlink_inode_operations;
                inode->i_mapping->a_ops = &btrfs_symlink_aops;
-               inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
                break;
        default:
                inode->i_op = &btrfs_special_inode_operations;
@@ -6088,7 +6086,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
        inode->i_fop = &btrfs_file_operations;
        inode->i_op = &btrfs_file_inode_operations;
        inode->i_mapping->a_ops = &btrfs_aops;
-       inode->i_mapping->backing_dev_info = &root->fs_info->bdi;

        err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
        if (err)
@@ -9203,7 +9200,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
        inode->i_fop = &btrfs_file_operations;
        inode->i_op = &btrfs_file_inode_operations;
        inode->i_mapping->a_ops = &btrfs_aops;
-       inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
        BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;

        err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
@@ -9247,7 +9243,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,

        inode->i_op = &btrfs_symlink_inode_operations;
        inode->i_mapping->a_ops = &btrfs_symlink_aops;
-       inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
        inode_set_bytes(inode, name_len);
        btrfs_i_size_write(inode, name_len);
        err = btrfs_update_inode(trans, root, inode);
@@ -9459,7 +9454,6 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
        inode->i_op = &btrfs_file_inode_operations;

        inode->i_mapping->a_ops = &btrfs_aops;
-       inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
        BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;

        ret = btrfs_init_inode_security(trans, inode, dir, NULL);
@@ -945,7 +945,7 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
        mutex_lock(&inode->i_mutex);

        /* We can write back this queue in page reclaim */
-       current->backing_dev_info = file->f_mapping->backing_dev_info;
+       current->backing_dev_info = inode_to_bdi(inode);

        err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
        if (err)
@@ -783,8 +783,6 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
        }

        inode->i_mapping->a_ops = &ceph_aops;
-       inode->i_mapping->backing_dev_info =
-               &ceph_sb_to_client(inode->i_sb)->backing_dev_info;

        switch (inode->i_mode & S_IFMT) {
        case S_IFIFO:
@@ -40,17 +40,6 @@ static void ceph_put_super(struct super_block *s)

        dout("put_super\n");
        ceph_mdsc_close_sessions(fsc->mdsc);
-
-       /*
-        * ensure we release the bdi before put_anon_super releases
-        * the device name.
-        */
-       if (s->s_bdi == &fsc->backing_dev_info) {
-               bdi_unregister(&fsc->backing_dev_info);
-               s->s_bdi = NULL;
-       }
-
        return;
 }

 static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
@@ -910,7 +899,7 @@ static int ceph_register_bdi(struct super_block *sb,
                        >> PAGE_SHIFT;
        else
                fsc->backing_dev_info.ra_pages =
-                       default_backing_dev_info.ra_pages;
+                       VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE;

        err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
                           atomic_long_inc_return(&bdi_seq));
@@ -1002,11 +991,16 @@ out_final:
 static void ceph_kill_sb(struct super_block *s)
 {
        struct ceph_fs_client *fsc = ceph_sb_to_client(s);
+       dev_t dev = s->s_dev;
+
        dout("kill_sb %p\n", s);
+
        ceph_mdsc_pre_umount(fsc->mdsc);
-       kill_anon_super(s);     /* will call put_super after sb is r/o */
+       generic_shutdown_super(s);
        ceph_mdsc_destroy(fsc);
+
        destroy_fs_client(fsc);
+       free_anon_bdev(dev);
 }

 static struct file_system_type ceph_fs_type = {
@@ -24,27 +24,6 @@

 #include "internal.h"

-/*
- * capabilities for /dev/mem, /dev/kmem and similar directly mappable character
- * devices
- * - permits shared-mmap for read, write and/or exec
- * - does not permit private mmap in NOMMU mode (can't do COW)
- * - no readahead or I/O queue unplugging required
- */
-struct backing_dev_info directly_mappable_cdev_bdi = {
-       .name = "char",
-       .capabilities   = (
-#ifdef CONFIG_MMU
-               /* permit private copies of the data to be taken */
-               BDI_CAP_MAP_COPY |
-#endif
-               /* permit direct mmap, for read, write or exec */
-               BDI_CAP_MAP_DIRECT |
-               BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP |
-               /* no writeback happens */
-               BDI_CAP_NO_ACCT_AND_WRITEBACK),
-};
-
 static struct kobj_map *cdev_map;

 static DEFINE_MUTEX(chrdevs_lock);
@@ -575,8 +554,6 @@ static struct kobject *base_probe(dev_t dev, int *part, void *data)
 void __init chrdev_init(void)
 {
        cdev_map = kobj_map_init(base_probe, &chrdevs_lock);
-       if (bdi_init(&directly_mappable_cdev_bdi))
-               panic("Failed to init directly mappable cdev bdi");
 }


@@ -590,4 +567,3 @@ EXPORT_SYMBOL(cdev_del);
 EXPORT_SYMBOL(cdev_add);
 EXPORT_SYMBOL(__register_chrdev);
 EXPORT_SYMBOL(__unregister_chrdev);
-EXPORT_SYMBOL(directly_mappable_cdev_bdi);
@@ -3446,7 +3446,7 @@ cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
        int referral_walks_count = 0;
 #endif

-       rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY);
+       rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs");
        if (rc)
                return rc;

@@ -937,8 +937,6 @@ retry_iget5_locked:
                        inode->i_flags |= S_NOATIME | S_NOCMTIME;
                if (inode->i_state & I_NEW) {
                        inode->i_ino = hash;
-                       if (S_ISREG(inode->i_mode))
-                               inode->i_data.backing_dev_info = sb->s_bdi;
 #ifdef CONFIG_CIFS_FSCACHE
                        /* initialize per-inode cache cookie pointer */
                        CIFS_I(inode)->fscache = NULL;
@@ -183,7 +183,7 @@ static int coda_fill_super(struct super_block *sb, void *data, int silent)
                goto unlock_out;
        }

-       error = bdi_setup_and_register(&vc->bdi, "coda", BDI_CAP_MAP_COPY);
+       error = bdi_setup_and_register(&vc->bdi, "coda");
        if (error)
                goto unlock_out;

@@ -70,8 +70,6 @@ extern int configfs_is_root(struct config_item *item);

 extern struct inode * configfs_new_inode(umode_t mode, struct configfs_dirent *, struct super_block *);
 extern int configfs_create(struct dentry *, umode_t mode, int (*init)(struct inode *));
-extern int configfs_inode_init(void);
-extern void configfs_inode_exit(void);

 extern int configfs_create_file(struct config_item *, const struct configfs_attribute *);
 extern int configfs_make_dirent(struct configfs_dirent *,
@@ -50,12 +50,6 @@ static const struct address_space_operations configfs_aops = {
        .write_end      = simple_write_end,
 };

-static struct backing_dev_info configfs_backing_dev_info = {
-       .name           = "configfs",
-       .ra_pages       = 0,    /* No readahead */
-       .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK,
-};
-
 static const struct inode_operations configfs_inode_operations ={
        .setattr        = configfs_setattr,
 };
@@ -137,7 +131,6 @@ struct inode *configfs_new_inode(umode_t mode, struct configfs_dirent *sd,
        if (inode) {
                inode->i_ino = get_next_ino();
                inode->i_mapping->a_ops = &configfs_aops;
-               inode->i_mapping->backing_dev_info = &configfs_backing_dev_info;
                inode->i_op = &configfs_inode_operations;

                if (sd->s_iattr) {
@@ -283,13 +276,3 @@ void configfs_hash_and_remove(struct dentry * dir, const char * name)
        }
        mutex_unlock(&dir->d_inode->i_mutex);
 }
-
-int __init configfs_inode_init(void)
-{
-       return bdi_init(&configfs_backing_dev_info);
-}
-
-void configfs_inode_exit(void)
-{
-       bdi_destroy(&configfs_backing_dev_info);
-}
@@ -145,19 +145,13 @@ static int __init configfs_init(void)
        if (!config_kobj)
                goto out2;

-       err = configfs_inode_init();
+       err = register_filesystem(&configfs_fs_type);
        if (err)
                goto out3;

-       err = register_filesystem(&configfs_fs_type);
-       if (err)
-               goto out4;
-
        return 0;
-out4:
-       pr_err("Unable to register filesystem!\n");
-       configfs_inode_exit();
 out3:
+       pr_err("Unable to register filesystem!\n");
        kobject_put(config_kobj);
 out2:
        kmem_cache_destroy(configfs_dir_cachep);
@@ -172,7 +166,6 @@ static void __exit configfs_exit(void)
        kobject_put(config_kobj);
        kmem_cache_destroy(configfs_dir_cachep);
        configfs_dir_cachep = NULL;
-       configfs_inode_exit();
 }

 MODULE_AUTHOR("Oracle");
@@ -67,7 +67,6 @@ static int ecryptfs_inode_set(struct inode *inode, void *opaque)
        inode->i_ino = lower_inode->i_ino;
        inode->i_version++;
        inode->i_mapping->a_ops = &ecryptfs_aops;
-       inode->i_mapping->backing_dev_info = inode->i_sb->s_bdi;

        if (S_ISLNK(inode->i_mode))
                inode->i_op = &ecryptfs_symlink_iops;
@@ -520,7 +520,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
                goto out;
        }

-       rc = bdi_setup_and_register(&sbi->bdi, "ecryptfs", BDI_CAP_MAP_COPY);
+       rc = bdi_setup_and_register(&sbi->bdi, "ecryptfs");
        if (rc)
                goto out1;

@@ -1214,7 +1214,6 @@ struct inode *exofs_iget(struct super_block *sb, unsigned long ino)
                memcpy(oi->i_data, fcb.i_data, sizeof(fcb.i_data));
        }

-       inode->i_mapping->backing_dev_info = sb->s_bdi;
        if (S_ISREG(inode->i_mode)) {
                inode->i_op = &exofs_file_inode_operations;
                inode->i_fop = &exofs_file_operations;
@@ -1314,7 +1313,6 @@ struct inode *exofs_new_inode(struct inode *dir, umode_t mode)

        set_obj_2bcreated(oi);

-       inode->i_mapping->backing_dev_info = sb->s_bdi;
        inode_init_owner(inode, dir, mode);
        inode->i_ino = sbi->s_nextid++;
        inode->i_blkbits = EXOFS_BLKSHIFT;
@@ -836,7 +836,7 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
                goto free_sbi;
        }

-       ret = bdi_setup_and_register(&sbi->bdi, "exofs", BDI_CAP_MAP_COPY);
+       ret = bdi_setup_and_register(&sbi->bdi, "exofs");
        if (ret) {
                EXOFS_DBGMSG("Failed to bdi_setup_and_register\n");
                dput(sb->s_root);
@@ -170,7 +170,7 @@ static void ext2_preread_inode(struct inode *inode)
        struct ext2_group_desc * gdp;
        struct backing_dev_info *bdi;

-       bdi = inode->i_mapping->backing_dev_info;
+       bdi = inode_to_bdi(inode);
        if (bdi_read_congested(bdi))
                return;
        if (bdi_write_congested(bdi))
@@ -334,7 +334,7 @@ static void save_error_info(struct super_block *sb, const char *func,
 static int block_device_ejected(struct super_block *sb)
 {
        struct inode *bd_inode = sb->s_bdev->bd_inode;
-       struct backing_dev_info *bdi = bd_inode->i_mapping->backing_dev_info;
+       struct backing_dev_info *bdi = inode_to_bdi(bd_inode);

        return bdi->dev == NULL;
 }
@@ -66,15 +66,21 @@ int writeback_in_progress(struct backing_dev_info *bdi)
 }
 EXPORT_SYMBOL(writeback_in_progress);

-static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
+struct backing_dev_info *inode_to_bdi(struct inode *inode)
 {
-       struct super_block *sb = inode->i_sb;
+       struct super_block *sb;
+
+       if (!inode)
+               return &noop_backing_dev_info;
+
+       sb = inode->i_sb;
 #ifdef CONFIG_BLOCK
        if (sb_is_blkdev_sb(sb))
-               return inode->i_mapping->backing_dev_info;
+               return blk_get_backing_dev_info(I_BDEV(inode));
 #endif
        return sb->s_bdi;
 }
+EXPORT_SYMBOL_GPL(inode_to_bdi);

 static inline struct inode *wb_inode(struct list_head *head)
 {
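The NULL check added to inode_to_bdi() above is the swap-backing oops fix mentioned in the pull message: writeback accounting can run against pages whose address_space has no host inode. A sketch of the caller pattern this makes safe (illustrative fragment, not a hunk from this series; the NFS hunks below use the same shape):

        /* page_file_mapping(page)->host may be NULL for swap-backed pages;
         * inode_to_bdi() now degrades to noop_backing_dev_info instead of
         * dereferencing a NULL inode. */
        struct backing_dev_info *bdi =
                inode_to_bdi(page_file_mapping(page)->host);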
@@ -1159,7 +1159,7 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
        mutex_lock(&inode->i_mutex);

        /* We can write back this queue in page reclaim */
-       current->backing_dev_info = mapping->backing_dev_info;
+       current->backing_dev_info = inode_to_bdi(inode);

        err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
        if (err)
@@ -1464,7 +1464,7 @@ static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
 {
        struct inode *inode = req->inode;
        struct fuse_inode *fi = get_fuse_inode(inode);
-       struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;
+       struct backing_dev_info *bdi = inode_to_bdi(inode);
        int i;

        list_del(&req->writepages_entry);
@@ -1658,7 +1658,7 @@ static int fuse_writepage_locked(struct page *page)
        req->end = fuse_writepage_end;
        req->inode = inode;

-       inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK);
+       inc_bdi_stat(inode_to_bdi(inode), BDI_WRITEBACK);
        inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);

        spin_lock(&fc->lock);
@@ -1768,7 +1768,7 @@ static bool fuse_writepage_in_flight(struct fuse_req *new_req,

        if (old_req->num_pages == 1 && (old_req->state == FUSE_REQ_INIT ||
                                        old_req->state == FUSE_REQ_PENDING)) {
-               struct backing_dev_info *bdi = page->mapping->backing_dev_info;
+               struct backing_dev_info *bdi = inode_to_bdi(page->mapping->host);

                copy_highpage(old_req->pages[0], page);
                spin_unlock(&fc->lock);
@@ -1872,7 +1872,7 @@ static int fuse_writepages_fill(struct page *page,
        req->page_descs[req->num_pages].offset = 0;
        req->page_descs[req->num_pages].length = PAGE_SIZE;

-       inc_bdi_stat(page->mapping->backing_dev_info, BDI_WRITEBACK);
+       inc_bdi_stat(inode_to_bdi(inode), BDI_WRITEBACK);
        inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);

        err = 0;
@@ -308,7 +308,6 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
                if (!fc->writeback_cache || !S_ISREG(attr->mode))
                        inode->i_flags |= S_NOCMTIME;
                inode->i_generation = generation;
-               inode->i_data.backing_dev_info = &fc->bdi;
                fuse_init_inode(inode, attr);
                unlock_new_inode(inode);
        } else if ((inode->i_mode ^ attr->mode) & S_IFMT) {
@@ -289,7 +289,7 @@ continue_unlock:
                        if (!clear_page_dirty_for_io(page))
                                goto continue_unlock;

-                       trace_wbc_writepage(wbc, mapping->backing_dev_info);
+                       trace_wbc_writepage(wbc, inode_to_bdi(inode));

                        ret = __gfs2_jdata_writepage(page, wbc);
                        if (unlikely(ret)) {
@@ -768,7 +768,6 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
                mapping->flags = 0;
                mapping_set_gfp_mask(mapping, GFP_NOFS);
                mapping->private_data = NULL;
-               mapping->backing_dev_info = s->s_bdi;
                mapping->writeback_index = 0;
        }

@@ -112,7 +112,6 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
        mapping->flags = 0;
        mapping_set_gfp_mask(mapping, GFP_NOFS);
        mapping->private_data = NULL;
-       mapping->backing_dev_info = sb->s_bdi;
        mapping->writeback_index = 0;

        spin_lock_init(&sdp->sd_log_lock);
@@ -743,7 +743,7 @@ static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct address_space *metamapping = gfs2_glock2aspace(ip->i_gl);
-       struct backing_dev_info *bdi = metamapping->backing_dev_info;
+       struct backing_dev_info *bdi = inode_to_bdi(metamapping->host);
        int ret = 0;

        if (wbc->sync_mode == WB_SYNC_ALL)
@@ -62,12 +62,6 @@ static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
        return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
 }

-static struct backing_dev_info hugetlbfs_backing_dev_info = {
-       .name           = "hugetlbfs",
-       .ra_pages       = 0,    /* No readahead */
-       .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK,
-};
-
 int sysctl_hugetlb_shm_group;

 enum {
@@ -498,7 +492,6 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
                lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
                                &hugetlbfs_i_mmap_rwsem_key);
                inode->i_mapping->a_ops = &hugetlbfs_aops;
-               inode->i_mapping->backing_dev_info = &hugetlbfs_backing_dev_info;
                inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
                inode->i_mapping->private_data = resv_map;
                info = HUGETLBFS_I(inode);
@@ -1032,10 +1025,6 @@ static int __init init_hugetlbfs_fs(void)
                return -ENOTSUPP;
        }

-       error = bdi_init(&hugetlbfs_backing_dev_info);
-       if (error)
-               return error;
-
        error = -ENOMEM;
        hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
                                        sizeof(struct hugetlbfs_inode_info),
@@ -1071,7 +1060,6 @@ static int __init init_hugetlbfs_fs(void)
  out:
        kmem_cache_destroy(hugetlbfs_inode_cachep);
  out2:
-       bdi_destroy(&hugetlbfs_backing_dev_info);
        return error;
 }

@@ -1091,7 +1079,6 @@ static void __exit exit_hugetlbfs_fs(void)
        for_each_hstate(h)
                kern_unmount(hugetlbfs_vfsmount[i++]);
        unregister_filesystem(&hugetlbfs_fs_type);
-       bdi_destroy(&hugetlbfs_backing_dev_info);
 }

 module_init(init_hugetlbfs_fs)
fs/inode.c
@@ -170,20 +170,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
        atomic_set(&mapping->i_mmap_writable, 0);
        mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
        mapping->private_data = NULL;
-       mapping->backing_dev_info = &default_backing_dev_info;
        mapping->writeback_index = 0;
-
-       /*
-        * If the block_device provides a backing_dev_info for client
-        * inodes then use that. Otherwise the inode share the bdev's
-        * backing_dev_info.
-        */
-       if (sb->s_bdev) {
-               struct backing_dev_info *bdi;
-
-               bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
-               mapping->backing_dev_info = bdi;
-       }
        inode->i_private = NULL;
        inode->i_mapping = mapping;
        INIT_HLIST_HEAD(&inode->i_dentry);      /* buggered by rcu freeing */
@@ -24,12 +24,6 @@ static const struct address_space_operations kernfs_aops = {
        .write_end      = simple_write_end,
 };

-static struct backing_dev_info kernfs_bdi = {
-       .name           = "kernfs",
-       .ra_pages       = 0,    /* No readahead */
-       .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK,
-};
-
 static const struct inode_operations kernfs_iops = {
        .permission     = kernfs_iop_permission,
        .setattr        = kernfs_iop_setattr,
@@ -40,12 +34,6 @@ static const struct inode_operations kernfs_iops = {
        .listxattr      = kernfs_iop_listxattr,
 };

-void __init kernfs_inode_init(void)
-{
-       if (bdi_init(&kernfs_bdi))
-               panic("failed to init kernfs_bdi");
-}
-
 static struct kernfs_iattrs *kernfs_iattrs(struct kernfs_node *kn)
 {
        static DEFINE_MUTEX(iattr_mutex);
@@ -298,7 +286,6 @@ static void kernfs_init_inode(struct kernfs_node *kn, struct inode *inode)
        kernfs_get(kn);
        inode->i_private = kn;
        inode->i_mapping->a_ops = &kernfs_aops;
-       inode->i_mapping->backing_dev_info = &kernfs_bdi;
        inode->i_op = &kernfs_iops;

        set_default_inode_attr(inode, kn->mode);
@@ -88,7 +88,6 @@ int kernfs_iop_removexattr(struct dentry *dentry, const char *name);
 ssize_t kernfs_iop_getxattr(struct dentry *dentry, const char *name, void *buf,
                            size_t size);
 ssize_t kernfs_iop_listxattr(struct dentry *dentry, char *buf, size_t size);
-void kernfs_inode_init(void);

 /*
  * dir.c
|
||||
kernfs_node_cache = kmem_cache_create("kernfs_node_cache",
|
||||
sizeof(struct kernfs_node),
|
||||
0, SLAB_PANIC, NULL);
|
||||
kernfs_inode_init();
|
||||
}
|
||||
|
@@ -267,7 +267,6 @@ ncp_iget(struct super_block *sb, struct ncp_entry_info *info)
        if (inode) {
                atomic_set(&NCP_FINFO(inode)->opened, info->opened);

-               inode->i_mapping->backing_dev_info = sb->s_bdi;
                inode->i_ino = info->ino;
                ncp_set_attr(inode, info);
                if (S_ISREG(inode->i_mode)) {
@@ -560,7 +559,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
        server = NCP_SBP(sb);
        memset(server, 0, sizeof(*server));

-       error = bdi_setup_and_register(&server->bdi, "ncpfs", BDI_CAP_MAP_COPY);
+       error = bdi_setup_and_register(&server->bdi, "ncpfs");
        if (error)
                goto out_fput;

@@ -1002,7 +1002,7 @@ mds_commit:
        spin_unlock(cinfo->lock);
        if (!cinfo->dreq) {
                inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-               inc_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info,
+               inc_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host),
                             BDI_RECLAIMABLE);
                __mark_inode_dirty(req->wb_context->dentry->d_inode,
                                   I_DIRTY_DATASYNC);
@@ -1366,7 +1366,7 @@ ff_layout_mark_request_commit(struct nfs_page *req,
        spin_unlock(cinfo->lock);
        if (!cinfo->dreq) {
                inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-               inc_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info,
+               inc_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host),
                             BDI_RECLAIMABLE);
                __mark_inode_dirty(req->wb_context->dentry->d_inode,
                                   I_DIRTY_DATASYNC);
@@ -388,7 +388,6 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
                if (S_ISREG(inode->i_mode)) {
                        inode->i_fop = NFS_SB(sb)->nfs_client->rpc_ops->file_ops;
                        inode->i_data.a_ops = &nfs_file_aops;
-                       inode->i_data.backing_dev_info = &NFS_SB(sb)->backing_dev_info;
                } else if (S_ISDIR(inode->i_mode)) {
                        inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->dir_inode_ops;
                        inode->i_fop = &nfs_dir_operations;
@@ -430,7 +430,6 @@ int nfs_show_options(struct seq_file *, struct dentry *);
 int nfs_show_devname(struct seq_file *, struct dentry *);
 int nfs_show_path(struct seq_file *, struct dentry *);
 int nfs_show_stats(struct seq_file *, struct dentry *);
-void nfs_put_super(struct super_block *);
 int nfs_remount(struct super_block *sb, int *flags, char *raw_data);

 /* write.c */
@@ -53,7 +53,6 @@ static const struct super_operations nfs4_sops = {
        .destroy_inode  = nfs_destroy_inode,
        .write_inode    = nfs4_write_inode,
        .drop_inode     = nfs_drop_inode,
-       .put_super      = nfs_put_super,
        .statfs         = nfs_statfs,
        .evict_inode    = nfs4_evict_inode,
        .umount_begin   = nfs_umount_begin,
@@ -311,7 +311,6 @@ const struct super_operations nfs_sops = {
        .destroy_inode  = nfs_destroy_inode,
        .write_inode    = nfs_write_inode,
        .drop_inode     = nfs_drop_inode,
-       .put_super      = nfs_put_super,
        .statfs         = nfs_statfs,
        .evict_inode    = nfs_evict_inode,
        .umount_begin   = nfs_umount_begin,
@@ -2572,7 +2571,7 @@ struct dentry *nfs_fs_mount_common(struct nfs_server *server,
                error = nfs_bdi_register(server);
                if (error) {
                        mntroot = ERR_PTR(error);
-                       goto error_splat_bdi;
+                       goto error_splat_super;
                }
                server->super = s;
        }
@@ -2604,9 +2603,6 @@ error_splat_root:
        dput(mntroot);
        mntroot = ERR_PTR(error);
 error_splat_super:
-       if (server && !s->s_root)
-               bdi_unregister(&server->backing_dev_info);
-error_splat_bdi:
        deactivate_locked_super(s);
        goto out;
 }
@@ -2653,28 +2649,20 @@ out:
 }
 EXPORT_SYMBOL_GPL(nfs_fs_mount);

-/*
- * Ensure that we unregister the bdi before kill_anon_super
- * releases the device name
- */
-void nfs_put_super(struct super_block *s)
-{
-       struct nfs_server *server = NFS_SB(s);
-
-       bdi_unregister(&server->backing_dev_info);
-}
-EXPORT_SYMBOL_GPL(nfs_put_super);
-
 /*
  * Destroy an NFS2/3 superblock
  */
 void nfs_kill_super(struct super_block *s)
 {
        struct nfs_server *server = NFS_SB(s);
+       dev_t dev = s->s_dev;
+
+       generic_shutdown_super(s);

-       kill_anon_super(s);
        nfs_fscache_release_super_cookie(s);
+
        nfs_free_server(server);
+       free_anon_bdev(dev);
 }
 EXPORT_SYMBOL_GPL(nfs_kill_super);

@@ -791,7 +791,7 @@ nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst,
        spin_unlock(cinfo->lock);
        if (!cinfo->dreq) {
                inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-               inc_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info,
+               inc_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host),
                             BDI_RECLAIMABLE);
                __mark_inode_dirty(req->wb_context->dentry->d_inode,
                                   I_DIRTY_DATASYNC);
@@ -858,7 +858,7 @@ static void
 nfs_clear_page_commit(struct page *page)
 {
        dec_zone_page_state(page, NR_UNSTABLE_NFS);
-       dec_bdi_stat(page_file_mapping(page)->backing_dev_info, BDI_RECLAIMABLE);
+       dec_bdi_stat(inode_to_bdi(page_file_mapping(page)->host), BDI_RECLAIMABLE);
 }

 /* Called holding inode (/cinfo) lock */
@@ -1607,7 +1607,7 @@ void nfs_retry_commit(struct list_head *page_list,
                nfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx);
                if (!cinfo->dreq) {
                        dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-                       dec_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info,
+                       dec_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host),
                                     BDI_RECLAIMABLE);
                }
                nfs_unlock_and_release_request(req);
@@ -172,7 +172,6 @@ int nilfs_init_gcinode(struct inode *inode)
        inode->i_mode = S_IFREG;
        mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
        inode->i_mapping->a_ops = &empty_aops;
-       inode->i_mapping->backing_dev_info = inode->i_sb->s_bdi;

        ii->i_flags = 0;
        nilfs_bmap_init_gc(ii->i_bmap);
@@ -429,7 +429,6 @@ int nilfs_mdt_init(struct inode *inode, gfp_t gfp_mask, size_t objsz)

        inode->i_mode = S_IFREG;
        mapping_set_gfp_mask(inode->i_mapping, gfp_mask);
-       inode->i_mapping->backing_dev_info = inode->i_sb->s_bdi;

        inode->i_op = &def_mdt_iops;
        inode->i_fop = &def_mdt_fops;
@@ -457,13 +456,12 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode,
                               struct nilfs_shadow_map *shadow)
 {
        struct nilfs_mdt_info *mi = NILFS_MDT(inode);
-       struct backing_dev_info *bdi = inode->i_sb->s_bdi;

        INIT_LIST_HEAD(&shadow->frozen_buffers);
        address_space_init_once(&shadow->frozen_data);
-       nilfs_mapping_init(&shadow->frozen_data, inode, bdi);
+       nilfs_mapping_init(&shadow->frozen_data, inode);
        address_space_init_once(&shadow->frozen_btnodes);
-       nilfs_mapping_init(&shadow->frozen_btnodes, inode, bdi);
+       nilfs_mapping_init(&shadow->frozen_btnodes, inode);
        mi->mi_shadow = shadow;
        return 0;
 }
@@ -461,14 +461,12 @@ unsigned nilfs_page_count_clean_buffers(struct page *page,
        return nc;
 }

-void nilfs_mapping_init(struct address_space *mapping, struct inode *inode,
-                       struct backing_dev_info *bdi)
+void nilfs_mapping_init(struct address_space *mapping, struct inode *inode)
 {
        mapping->host = inode;
        mapping->flags = 0;
        mapping_set_gfp_mask(mapping, GFP_NOFS);
        mapping->private_data = NULL;
-       mapping->backing_dev_info = bdi;
        mapping->a_ops = &empty_aops;
 }

||||
|
@ -57,8 +57,7 @@ int nilfs_copy_dirty_pages(struct address_space *, struct address_space *);
|
||||
void nilfs_copy_back_pages(struct address_space *, struct address_space *);
|
||||
void nilfs_clear_dirty_page(struct page *, bool);
|
||||
void nilfs_clear_dirty_pages(struct address_space *, bool);
|
||||
void nilfs_mapping_init(struct address_space *mapping, struct inode *inode,
|
||||
struct backing_dev_info *bdi);
|
||||
void nilfs_mapping_init(struct address_space *mapping, struct inode *inode);
|
||||
unsigned nilfs_page_count_clean_buffers(struct page *, unsigned, unsigned);
|
||||
unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
|
||||
sector_t start_blk,
|
||||
|
@@ -166,7 +166,7 @@ struct inode *nilfs_alloc_inode(struct super_block *sb)
    ii->i_state = 0;
    ii->i_cno = 0;
    ii->vfs_inode.i_version = 1;
    nilfs_mapping_init(&ii->i_btnode_cache, &ii->vfs_inode, sb->s_bdi);
    nilfs_mapping_init(&ii->i_btnode_cache, &ii->vfs_inode);
    return &ii->vfs_inode;
}

@@ -1057,7 +1057,6 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent)
{
    struct the_nilfs *nilfs;
    struct nilfs_root *fsroot;
    struct backing_dev_info *bdi;
    __u64 cno;
    int err;

@@ -1077,8 +1076,7 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent)
    sb->s_time_gran = 1;
    sb->s_max_links = NILFS_LINK_MAX;

    bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
    sb->s_bdi = bdi ? : &default_backing_dev_info;
    sb->s_bdi = &bdev_get_queue(sb->s_bdev)->backing_dev_info;

    err = load_nilfs(nilfs, sb);
    if (err)
@@ -19,6 +19,7 @@
 * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>

@@ -2091,7 +2092,7 @@ static ssize_t ntfs_file_aio_write_nolock(struct kiocb *iocb,
    count = iov_length(iov, nr_segs);
    pos = *ppos;
    /* We can write back this queue in page reclaim. */
    current->backing_dev_info = mapping->backing_dev_info;
    current->backing_dev_info = inode_to_bdi(inode);
    written = 0;
    err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
    if (err)
@@ -390,12 +390,6 @@ clear_fields:
    ip->ip_conn = NULL;
}

static struct backing_dev_info dlmfs_backing_dev_info = {
    .name = "ocfs2-dlmfs",
    .ra_pages = 0, /* No readahead */
    .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};

static struct inode *dlmfs_get_root_inode(struct super_block *sb)
{
    struct inode *inode = new_inode(sb);

@@ -404,7 +398,6 @@ static struct inode *dlmfs_get_root_inode(struct super_block *sb)
    if (inode) {
        inode->i_ino = get_next_ino();
        inode_init_owner(inode, NULL, mode);
        inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info;
        inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        inc_nlink(inode);

@@ -428,7 +421,6 @@ static struct inode *dlmfs_get_inode(struct inode *parent,

    inode->i_ino = get_next_ino();
    inode_init_owner(inode, parent, mode);
    inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info;
    inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;

    ip = DLMFS_I(inode);

@@ -643,10 +635,6 @@ static int __init init_dlmfs_fs(void)
    int status;
    int cleanup_inode = 0, cleanup_worker = 0;

    status = bdi_init(&dlmfs_backing_dev_info);
    if (status)
        return status;

    dlmfs_inode_cache = kmem_cache_create("dlmfs_inode_cache",
                sizeof(struct dlmfs_inode_private),
                0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|

@@ -673,7 +661,6 @@ bail:
        kmem_cache_destroy(dlmfs_inode_cache);
        if (cleanup_worker)
            destroy_workqueue(user_dlm_worker);
        bdi_destroy(&dlmfs_backing_dev_info);
    } else
        printk("OCFS2 User DLM kernel interface loaded\n");
    return status;

@@ -693,7 +680,6 @@ static void __exit exit_dlmfs_fs(void)
    rcu_barrier();
    kmem_cache_destroy(dlmfs_inode_cache);

    bdi_destroy(&dlmfs_backing_dev_info);
}

MODULE_AUTHOR("Oracle");
@@ -2363,7 +2363,7 @@ relock:
            goto out_dio;
        }
    } else {
        current->backing_dev_info = file->f_mapping->backing_dev_info;
        current->backing_dev_info = inode_to_bdi(inode);
        written = generic_perform_write(file, from, *ppos);
        if (likely(written >= 0))
            iocb->ki_pos = *ppos + written;
@@ -34,7 +34,14 @@ static unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
                        unsigned long flags);
static int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma);

static unsigned ramfs_mmap_capabilities(struct file *file)
{
    return NOMMU_MAP_DIRECT | NOMMU_MAP_COPY | NOMMU_MAP_READ |
        NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
}

const struct file_operations ramfs_file_operations = {
    .mmap_capabilities = ramfs_mmap_capabilities,
    .mmap = ramfs_nommu_mmap,
    .get_unmapped_area = ramfs_nommu_get_unmapped_area,
    .read = new_sync_read,
@@ -50,14 +50,6 @@ static const struct address_space_operations ramfs_aops = {
    .set_page_dirty = __set_page_dirty_no_writeback,
};

static struct backing_dev_info ramfs_backing_dev_info = {
    .name = "ramfs",
    .ra_pages = 0, /* No readahead */
    .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK |
            BDI_CAP_MAP_DIRECT | BDI_CAP_MAP_COPY |
            BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP,
};

struct inode *ramfs_get_inode(struct super_block *sb,
                const struct inode *dir, umode_t mode, dev_t dev)
{

@@ -67,7 +59,6 @@ struct inode *ramfs_get_inode(struct super_block *sb,
    inode->i_ino = get_next_ino();
    inode_init_owner(inode, dir, mode);
    inode->i_mapping->a_ops = &ramfs_aops;
    inode->i_mapping->backing_dev_info = &ramfs_backing_dev_info;
    mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
    mapping_set_unevictable(inode->i_mapping);
    inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;

@@ -267,19 +258,9 @@ static struct file_system_type ramfs_fs_type = {
int __init init_ramfs_fs(void)
{
    static unsigned long once;
    int err;

    if (test_and_set_bit(0, &once))
        return 0;

    err = bdi_init(&ramfs_backing_dev_info);
    if (err)
        return err;

    err = register_filesystem(&ramfs_fs_type);
    if (err)
        bdi_destroy(&ramfs_backing_dev_info);

    return err;
    return register_filesystem(&ramfs_fs_type);
}
fs_initcall(init_ramfs_fs);
@@ -70,6 +70,15 @@ static int romfs_mmap(struct file *file, struct vm_area_struct *vma)
    return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -ENOSYS;
}

static unsigned romfs_mmap_capabilities(struct file *file)
{
    struct mtd_info *mtd = file_inode(file)->i_sb->s_mtd;

    if (!mtd)
        return NOMMU_MAP_COPY;
    return mtd_mmap_capabilities(mtd);
}

const struct file_operations romfs_ro_fops = {
    .llseek = generic_file_llseek,
    .read = new_sync_read,

@@ -77,4 +86,5 @@ const struct file_operations romfs_ro_fops = {
    .splice_read = generic_file_splice_read,
    .mmap = romfs_mmap,
    .get_unmapped_area = romfs_get_unmapped_area,
    .mmap_capabilities = romfs_mmap_capabilities,
};
@@ -355,9 +355,6 @@ static struct inode *romfs_iget(struct super_block *sb, unsigned long pos)
    case ROMFH_REG:
        i->i_fop = &romfs_ro_fops;
        i->i_data.a_ops = &romfs_aops;
        if (i->i_sb->s_mtd)
            i->i_data.backing_dev_info =
                i->i_sb->s_mtd->backing_dev_info;
        if (nextfh & ROMFH_EXEC)
            mode |= S_IXUGO;
        break;

fs/super.c
@@ -36,8 +36,8 @@
#include "internal.h"

LIST_HEAD(super_blocks);
DEFINE_SPINLOCK(sb_lock);
static LIST_HEAD(super_blocks);
static DEFINE_SPINLOCK(sb_lock);

static char *sb_writers_name[SB_FREEZE_LEVELS] = {
    "sb_writers",

@@ -185,8 +185,8 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
    }
    init_waitqueue_head(&s->s_writers.wait);
    init_waitqueue_head(&s->s_writers.wait_unfrozen);
    s->s_bdi = &noop_backing_dev_info;
    s->s_flags = flags;
    s->s_bdi = &default_backing_dev_info;
    INIT_HLIST_NODE(&s->s_instances);
    INIT_HLIST_BL_HEAD(&s->s_anon);
    INIT_LIST_HEAD(&s->s_inodes);

@@ -863,10 +863,7 @@ EXPORT_SYMBOL(free_anon_bdev);

int set_anon_super(struct super_block *s, void *data)
{
    int error = get_anon_bdev(&s->s_dev);
    if (!error)
        s->s_bdi = &noop_backing_dev_info;
    return error;
    return get_anon_bdev(&s->s_dev);
}

EXPORT_SYMBOL(set_anon_super);

@@ -1111,7 +1108,6 @@ mount_fs(struct file_system_type *type, int flags, const char *name, void *data)
    sb = root->d_sb;
    BUG_ON(!sb);
    WARN_ON(!sb->s_bdi);
    WARN_ON(sb->s_bdi == &default_backing_dev_info);
    sb->s_flags |= MS_BORN;

    error = security_sb_kern_mount(sb, flags, secdata);
@@ -108,8 +108,6 @@ struct inode *ubifs_new_inode(struct ubifs_info *c, const struct inode *dir,
    inode->i_mtime = inode->i_atime = inode->i_ctime =
            ubifs_current_time(inode);
    inode->i_mapping->nrpages = 0;
    /* Disable readahead */
    inode->i_mapping->backing_dev_info = &c->bdi;

    switch (mode & S_IFMT) {
    case S_IFREG:

@@ -156,9 +156,6 @@ struct inode *ubifs_iget(struct super_block *sb, unsigned long inum)
    if (err)
        goto out_invalid;

    /* Disable read-ahead */
    inode->i_mapping->backing_dev_info = &c->bdi;

    switch (inode->i_mode & S_IFMT) {
    case S_IFREG:
        inode->i_mapping->a_ops = &ubifs_file_address_operations;

@@ -2017,7 +2014,7 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
     * Read-ahead will be disabled because @c->bdi.ra_pages is 0.
     */
    c->bdi.name = "ubifs",
    c->bdi.capabilities = BDI_CAP_MAP_COPY;
    c->bdi.capabilities = 0;
    err = bdi_init(&c->bdi);
    if (err)
        goto out_close;
@@ -735,7 +735,7 @@ xfs_file_buffered_aio_write(

    iov_iter_truncate(from, count);
    /* We can write back this queue in page reclaim */
    current->backing_dev_info = mapping->backing_dev_info;
    current->backing_dev_info = inode_to_bdi(inode);

write_retry:
    trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0);
@@ -106,6 +106,8 @@ struct backing_dev_info {
#endif
};

struct backing_dev_info *inode_to_bdi(struct inode *inode);

int __must_check bdi_init(struct backing_dev_info *bdi);
void bdi_destroy(struct backing_dev_info *bdi);

@@ -114,7 +116,7 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
        const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
void bdi_unregister(struct backing_dev_info *bdi);
int __must_check bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int);
int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
            enum wb_reason reason);
void bdi_start_background_writeback(struct backing_dev_info *bdi);

@@ -228,46 +230,17 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
 * BDI_CAP_NO_ACCT_DIRTY: Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK: Don't write pages back
 * BDI_CAP_NO_ACCT_WB: Don't automatically account writeback pages
 *
 * These flags let !MMU mmap() govern direct device mapping vs immediate
 * copying more easily for MAP_PRIVATE, especially for ROM filesystems.
 *
 * BDI_CAP_MAP_COPY: Copy can be mapped (MAP_PRIVATE)
 * BDI_CAP_MAP_DIRECT: Can be mapped directly (MAP_SHARED)
 * BDI_CAP_READ_MAP: Can be mapped for reading
 * BDI_CAP_WRITE_MAP: Can be mapped for writing
 * BDI_CAP_EXEC_MAP: Can be mapped for execution
 *
 * BDI_CAP_SWAP_BACKED: Count shmem/tmpfs objects as swap-backed.
 *
 * BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold.
 */
#define BDI_CAP_NO_ACCT_DIRTY 0x00000001
#define BDI_CAP_NO_WRITEBACK 0x00000002
#define BDI_CAP_MAP_COPY 0x00000004
#define BDI_CAP_MAP_DIRECT 0x00000008
#define BDI_CAP_READ_MAP 0x00000010
#define BDI_CAP_WRITE_MAP 0x00000020
#define BDI_CAP_EXEC_MAP 0x00000040
#define BDI_CAP_NO_ACCT_WB 0x00000080
#define BDI_CAP_SWAP_BACKED 0x00000100
#define BDI_CAP_STABLE_WRITES 0x00000200
#define BDI_CAP_STRICTLIMIT 0x00000400

#define BDI_CAP_VMFLAGS \
    (BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP)
#define BDI_CAP_NO_ACCT_WB 0x00000004
#define BDI_CAP_STABLE_WRITES 0x00000008
#define BDI_CAP_STRICTLIMIT 0x00000010

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
    (BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)

#if defined(VM_MAYREAD) && \
    (BDI_CAP_READ_MAP != VM_MAYREAD || \
    BDI_CAP_WRITE_MAP != VM_MAYWRITE || \
    BDI_CAP_EXEC_MAP != VM_MAYEXEC)
#error please change backing_dev_info::capabilities flags
#endif

extern struct backing_dev_info default_backing_dev_info;
extern struct backing_dev_info noop_backing_dev_info;

int writeback_in_progress(struct backing_dev_info *bdi);

@@ -329,24 +302,14 @@ static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
                BDI_CAP_NO_WRITEBACK));
}

static inline bool bdi_cap_swap_backed(struct backing_dev_info *bdi)
{
    return bdi->capabilities & BDI_CAP_SWAP_BACKED;
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
    return bdi_cap_writeback_dirty(mapping->backing_dev_info);
    return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
    return bdi_cap_account_dirty(mapping->backing_dev_info);
}

static inline bool mapping_cap_swap_backed(struct address_space *mapping)
{
    return bdi_cap_swap_backed(mapping->backing_dev_info);
    return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
}

static inline int bdi_sched_wait(void *word)
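As a rough illustration (not part of the commit) of the conversion pattern this series applies at every call site: with the backing_dev_info pointer gone from struct address_space, callers derive the bdi from the owning inode instead. The helper name below is hypothetical.

/* Sketch only: how a call site migrates once address_space loses its
 * bdi pointer. inode_to_bdi() resolves the bdi from the inode's
 * superblock (or, for block-device inodes, from the block device's
 * request queue).
 */
static inline struct backing_dev_info *
example_mapping_bdi(struct address_space *mapping)
{
    /* before this series: return mapping->backing_dev_info; */
    return inode_to_bdi(mapping->host);
}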
@@ -30,6 +30,4 @@ void cdev_del(struct cdev *);

void cd_forget(struct inode *);

extern struct backing_dev_info directly_mappable_cdev_bdi;

#endif
@@ -34,6 +34,7 @@
#include <asm/byteorder.h>
#include <uapi/linux/fs.h>

struct backing_dev_info;
struct export_operations;
struct hd_geometry;
struct iovec;

@@ -394,7 +395,6 @@ int pagecache_write_end(struct file *, struct address_space *mapping,
                loff_t pos, unsigned len, unsigned copied,
                struct page *page, void *fsdata);

struct backing_dev_info;
struct address_space {
    struct inode *host; /* owner: inode, block_device */
    struct radix_tree_root page_tree; /* radix tree of all pages */

@@ -408,7 +408,6 @@ struct address_space {
    pgoff_t writeback_index;/* writeback starts here */
    const struct address_space_operations *a_ops; /* methods */
    unsigned long flags; /* error bits/gfp mask */
    struct backing_dev_info *backing_dev_info; /* device readahead, etc */
    spinlock_t private_lock; /* for use by the address_space */
    struct list_head private_list; /* ditto */
    void *private_data; /* ditto */

@@ -1201,8 +1200,6 @@ struct mm_struct;
#define UMOUNT_NOFOLLOW 0x00000008 /* Don't follow symlink on umount */
#define UMOUNT_UNUSED 0x80000000 /* Flag guaranteed to be unused */

extern struct list_head super_blocks;
extern spinlock_t sb_lock;

/* Possible states of 'frozen' field */
enum {

@@ -1519,6 +1516,26 @@ struct block_device_operations;
#define HAVE_COMPAT_IOCTL 1
#define HAVE_UNLOCKED_IOCTL 1

/*
 * These flags let !MMU mmap() govern direct device mapping vs immediate
 * copying more easily for MAP_PRIVATE, especially for ROM filesystems.
 *
 * NOMMU_MAP_COPY: Copy can be mapped (MAP_PRIVATE)
 * NOMMU_MAP_DIRECT: Can be mapped directly (MAP_SHARED)
 * NOMMU_MAP_READ: Can be mapped for reading
 * NOMMU_MAP_WRITE: Can be mapped for writing
 * NOMMU_MAP_EXEC: Can be mapped for execution
 */
#define NOMMU_MAP_COPY 0x00000001
#define NOMMU_MAP_DIRECT 0x00000008
#define NOMMU_MAP_READ VM_MAYREAD
#define NOMMU_MAP_WRITE VM_MAYWRITE
#define NOMMU_MAP_EXEC VM_MAYEXEC

#define NOMMU_VMFLAGS \
    (NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC)


struct iov_iter;

struct file_operations {

@@ -1553,6 +1570,9 @@ struct file_operations {
    long (*fallocate)(struct file *file, int mode, loff_t offset,
              loff_t len);
    void (*show_fdinfo)(struct seq_file *m, struct file *f);
#ifndef CONFIG_MMU
    unsigned (*mmap_capabilities)(struct file *);
#endif
};

struct inode_operations {
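A minimal sketch of how a nommu driver might fill in the new hook. Everything named exdev_* is hypothetical; only the .mmap_capabilities member (compiled on !CONFIG_MMU) and the NOMMU_MAP_* flags come from this commit.

/* Hypothetical chardev that can be mapped directly, read-only;
 * exdev_mmap and exdev_get_unmapped_area are assumed to exist.
 */
static unsigned exdev_mmap_capabilities(struct file *file)
{
    return NOMMU_MAP_DIRECT | NOMMU_MAP_READ;
}

static const struct file_operations exdev_fops = {
    .owner              = THIS_MODULE,
    .mmap               = exdev_mmap,
    .get_unmapped_area  = exdev_get_unmapped_area,
    .mmap_capabilities  = exdev_mmap_capabilities,
};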
@@ -408,4 +408,6 @@ static inline int mtd_is_bitflip_or_eccerr(int err) {
    return mtd_is_bitflip(err) || mtd_is_eccerr(err);
}

unsigned mtd_mmap_capabilities(struct mtd_info *mtd);

#endif /* __MTD_MTD_H__ */
@@ -47,7 +47,7 @@ TRACE_EVENT(writeback_dirty_page,

    TP_fast_assign(
        strncpy(__entry->name,
            mapping ? dev_name(mapping->backing_dev_info->dev) : "(unknown)", 32);
            mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)", 32);
        __entry->ino = mapping ? mapping->host->i_ino : 0;
        __entry->index = page->index;
    ),

@@ -72,7 +72,7 @@ DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
    ),

    TP_fast_assign(
        struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;
        struct backing_dev_info *bdi = inode_to_bdi(inode);

        /* may be called for files on pseudo FSes w/ unregistered bdi */
        strncpy(__entry->name,

@@ -116,7 +116,7 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template,

    TP_fast_assign(
        strncpy(__entry->name,
            dev_name(inode->i_mapping->backing_dev_info->dev), 32);
            dev_name(inode_to_bdi(inode)->dev), 32);
        __entry->ino = inode->i_ino;
        __entry->sync_mode = wbc->sync_mode;
    ),

@@ -156,10 +156,8 @@ DECLARE_EVENT_CLASS(writeback_work_class,
        __field(int, reason)
    ),
    TP_fast_assign(
        struct device *dev = bdi->dev;
        if (!dev)
            dev = default_backing_dev_info.dev;
        strncpy(__entry->name, dev_name(dev), 32);
        strncpy(__entry->name,
            bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
        __entry->nr_pages = work->nr_pages;
        __entry->sb_dev = work->sb ? work->sb->s_dev : 0;
        __entry->sync_mode = work->sync_mode;

mm/backing-dev.c
@@ -14,19 +14,10 @@

static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

struct backing_dev_info default_backing_dev_info = {
    .name = "default",
    .ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
    .state = 0,
    .capabilities = BDI_CAP_MAP_COPY,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);

struct backing_dev_info noop_backing_dev_info = {
    .name = "noop",
    .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
EXPORT_SYMBOL_GPL(noop_backing_dev_info);

static struct class *bdi_class;

@@ -40,17 +31,6 @@ LIST_HEAD(bdi_list);
/* bdi_wq serves all asynchronous writeback tasks */
struct workqueue_struct *bdi_wq;

static void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2)
{
    if (wb1 < wb2) {
        spin_lock(&wb1->list_lock);
        spin_lock_nested(&wb2->list_lock, 1);
    } else {
        spin_lock(&wb2->list_lock);
        spin_lock_nested(&wb1->list_lock, 1);
    }
}

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

@@ -264,9 +244,6 @@ static int __init default_bdi_init(void)
    if (!bdi_wq)
        return -ENOMEM;

    err = bdi_init(&default_backing_dev_info);
    if (!err)
        bdi_register(&default_backing_dev_info, NULL, "default");
    err = bdi_init(&noop_backing_dev_info);

    return err;

@@ -355,19 +332,19 @@ EXPORT_SYMBOL(bdi_register_dev);
 */
static void bdi_wb_shutdown(struct backing_dev_info *bdi)
{
    if (!bdi_cap_writeback_dirty(bdi))
    /* Make sure nobody queues further work */
    spin_lock_bh(&bdi->wb_lock);
    if (!test_and_clear_bit(BDI_registered, &bdi->state)) {
        spin_unlock_bh(&bdi->wb_lock);
        return;
    }
    spin_unlock_bh(&bdi->wb_lock);

    /*
     * Make sure nobody finds us on the bdi_list anymore
     */
    bdi_remove_from_list(bdi);

    /* Make sure nobody queues further work */
    spin_lock_bh(&bdi->wb_lock);
    clear_bit(BDI_registered, &bdi->state);
    spin_unlock_bh(&bdi->wb_lock);

    /*
     * Drain work list and shutdown the delayed_work. At this point,
     * @bdi->bdi_list is empty telling bdi_writeback_workfn() that @bdi

@@ -375,37 +352,22 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
     */
    mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
    flush_delayed_work(&bdi->wb.dwork);
    WARN_ON(!list_empty(&bdi->work_list));
    WARN_ON(delayed_work_pending(&bdi->wb.dwork));
}

/*
 * This bdi is going away now, make sure that no super_blocks point to it
 * Called when the device behind @bdi has been removed or ejected.
 *
 * We can't really do much here except for reducing the dirty ratio at
 * the moment. In the future we should be able to set a flag so that
 * the filesystem can handle errors at mark_inode_dirty time instead
 * of only at writeback time.
 */
static void bdi_prune_sb(struct backing_dev_info *bdi)
{
    struct super_block *sb;

    spin_lock(&sb_lock);
    list_for_each_entry(sb, &super_blocks, s_list) {
        if (sb->s_bdi == bdi)
            sb->s_bdi = &default_backing_dev_info;
    }
    spin_unlock(&sb_lock);
}

void bdi_unregister(struct backing_dev_info *bdi)
{
    if (bdi->dev) {
        bdi_set_min_ratio(bdi, 0);
        trace_writeback_bdi_unregister(bdi);
        bdi_prune_sb(bdi);
    if (WARN_ON_ONCE(!bdi->dev))
        return;

        bdi_wb_shutdown(bdi);
        bdi_debug_unregister(bdi);
        device_unregister(bdi->dev);
        bdi->dev = NULL;
    }
    bdi_set_min_ratio(bdi, 0);
}
EXPORT_SYMBOL(bdi_unregister);

@@ -474,37 +436,19 @@ void bdi_destroy(struct backing_dev_info *bdi)
{
    int i;

    /*
     * Splice our entries to the default_backing_dev_info. This
     * condition shouldn't happen. @wb must be empty at this point and
     * dirty inodes on it might cause other issues. This workaround is
     * added by ce5f8e779519 ("writeback: splice dirty inode entries to
     * default bdi on bdi_destroy()") without root-causing the issue.
     *
     * http://lkml.kernel.org/g/1253038617-30204-11-git-send-email-jens.axboe@oracle.com
     * http://thread.gmane.org/gmane.linux.file-systems/35341/focus=35350
     *
     * We should probably add WARN_ON() to find out whether it still
     * happens and track it down if so.
     */
    if (bdi_has_dirty_io(bdi)) {
        struct bdi_writeback *dst = &default_backing_dev_info.wb;

        bdi_lock_two(&bdi->wb, dst);
        list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
        list_splice(&bdi->wb.b_io, &dst->b_io);
        list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
        spin_unlock(&bdi->wb.list_lock);
        spin_unlock(&dst->list_lock);
    }

    bdi_unregister(bdi);
    bdi_wb_shutdown(bdi);

    WARN_ON(!list_empty(&bdi->work_list));
    WARN_ON(delayed_work_pending(&bdi->wb.dwork));

    if (bdi->dev) {
        bdi_debug_unregister(bdi);
        device_unregister(bdi->dev);
        bdi->dev = NULL;
    }

    for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
        percpu_counter_destroy(&bdi->bdi_stat[i]);

    fprop_local_destroy_percpu(&bdi->completions);
}
EXPORT_SYMBOL(bdi_destroy);

@@ -513,13 +457,12 @@ EXPORT_SYMBOL(bdi_destroy);
 * For use from filesystems to quickly init and register a bdi associated
 * with dirty writeback
 */
int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
            unsigned int cap)
int bdi_setup_and_register(struct backing_dev_info *bdi, char *name)
{
    int err;

    bdi->name = name;
    bdi->capabilities = cap;
    bdi->capabilities = 0;
    err = bdi_init(bdi);
    if (err)
        return err;
@@ -73,7 +73,7 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
    else
        endbyte--; /* inclusive */

    bdi = mapping->backing_dev_info;
    bdi = inode_to_bdi(mapping->host);

    switch (advice) {
    case POSIX_FADV_NORMAL:

@@ -113,7 +113,7 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
    case POSIX_FADV_NOREUSE:
        break;
    case POSIX_FADV_DONTNEED:
        if (!bdi_write_congested(mapping->backing_dev_info))
        if (!bdi_write_congested(bdi))
            __filemap_fdatawrite_range(mapping, offset, endbyte,
                        WB_SYNC_NONE);

@@ -211,7 +211,7 @@ void __delete_from_page_cache(struct page *page, void *shadow)
     */
    if (PageDirty(page) && mapping_cap_account_dirty(mapping)) {
        dec_zone_page_state(page, NR_FILE_DIRTY);
        dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
        dec_bdi_stat(inode_to_bdi(mapping->host), BDI_RECLAIMABLE);
    }
}

@@ -2564,7 +2564,7 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
    size_t count = iov_iter_count(from);

    /* We can write back this queue in page reclaim */
    current->backing_dev_info = mapping->backing_dev_info;
    current->backing_dev_info = inode_to_bdi(inode);
    err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
    if (err)
        goto out;
@@ -9,6 +9,7 @@
 */

#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/pagemap.h>
#include <linux/export.h>
#include <linux/uio.h>

@@ -409,7 +410,7 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len,
    count = len;

    /* We can write back this queue in page reclaim */
    current->backing_dev_info = mapping->backing_dev_info;
    current->backing_dev_info = inode_to_bdi(inode);

    ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode));
    if (ret)

mm/madvise.c
@@ -222,19 +222,22 @@ static long madvise_willneed(struct vm_area_struct *vma,
    struct file *file = vma->vm_file;

#ifdef CONFIG_SWAP
    if (!file || mapping_cap_swap_backed(file->f_mapping)) {
    if (!file) {
        *prev = vma;
        if (!file)
            force_swapin_readahead(vma, start, end);
        else
            force_shm_swapin_readahead(vma, start, end,
                        file->f_mapping);
        force_swapin_readahead(vma, start, end);
        return 0;
    }
#endif

    if (shmem_mapping(file->f_mapping)) {
        *prev = vma;
        force_shm_swapin_readahead(vma, start, end,
                    file->f_mapping);
        return 0;
    }
#else
    if (!file)
        return -EBADF;
#endif

    if (file->f_mapping->a_ops->get_xip_mem) {
        /* no bad return value, but ignore advice */

mm/nommu.c
@@ -980,9 +980,6 @@ static int validate_mmap_request(struct file *file,
        return -EOVERFLOW;

    if (file) {
        /* validate file mapping requests */
        struct address_space *mapping;

        /* files must support mmap */
        if (!file->f_op->mmap)
            return -ENODEV;

@@ -991,28 +988,22 @@ static int validate_mmap_request(struct file *file,
         * - we support chardevs that provide their own "memory"
         * - we support files/blockdevs that are memory backed
         */
        mapping = file->f_mapping;
        if (!mapping)
            mapping = file_inode(file)->i_mapping;

        capabilities = 0;
        if (mapping && mapping->backing_dev_info)
            capabilities = mapping->backing_dev_info->capabilities;

        if (!capabilities) {
        if (file->f_op->mmap_capabilities) {
            capabilities = file->f_op->mmap_capabilities(file);
        } else {
            /* no explicit capabilities set, so assume some
             * defaults */
            switch (file_inode(file)->i_mode & S_IFMT) {
            case S_IFREG:
            case S_IFBLK:
                capabilities = BDI_CAP_MAP_COPY;
                capabilities = NOMMU_MAP_COPY;
                break;

            case S_IFCHR:
                capabilities =
                    BDI_CAP_MAP_DIRECT |
                    BDI_CAP_READ_MAP |
                    BDI_CAP_WRITE_MAP;
                    NOMMU_MAP_DIRECT |
                    NOMMU_MAP_READ |
                    NOMMU_MAP_WRITE;
                break;

            default:

@@ -1023,9 +1014,9 @@ static int validate_mmap_request(struct file *file,
        /* eliminate any capabilities that we can't support on this
         * device */
        if (!file->f_op->get_unmapped_area)
            capabilities &= ~BDI_CAP_MAP_DIRECT;
            capabilities &= ~NOMMU_MAP_DIRECT;
        if (!file->f_op->read)
            capabilities &= ~BDI_CAP_MAP_COPY;
            capabilities &= ~NOMMU_MAP_COPY;

        /* The file shall have been opened with read permission. */
        if (!(file->f_mode & FMODE_READ))

@@ -1044,29 +1035,29 @@ static int validate_mmap_request(struct file *file,
            if (locks_verify_locked(file))
                return -EAGAIN;

            if (!(capabilities & BDI_CAP_MAP_DIRECT))
            if (!(capabilities & NOMMU_MAP_DIRECT))
                return -ENODEV;

            /* we mustn't privatise shared mappings */
            capabilities &= ~BDI_CAP_MAP_COPY;
            capabilities &= ~NOMMU_MAP_COPY;
        } else {
            /* we're going to read the file into private memory we
             * allocate */
            if (!(capabilities & BDI_CAP_MAP_COPY))
            if (!(capabilities & NOMMU_MAP_COPY))
                return -ENODEV;

            /* we don't permit a private writable mapping to be
             * shared with the backing device */
            if (prot & PROT_WRITE)
                capabilities &= ~BDI_CAP_MAP_DIRECT;
                capabilities &= ~NOMMU_MAP_DIRECT;
        }

        if (capabilities & BDI_CAP_MAP_DIRECT) {
            if (((prot & PROT_READ) && !(capabilities & BDI_CAP_READ_MAP)) ||
                ((prot & PROT_WRITE) && !(capabilities & BDI_CAP_WRITE_MAP)) ||
                ((prot & PROT_EXEC) && !(capabilities & BDI_CAP_EXEC_MAP))
        if (capabilities & NOMMU_MAP_DIRECT) {
            if (((prot & PROT_READ) && !(capabilities & NOMMU_MAP_READ)) ||
                ((prot & PROT_WRITE) && !(capabilities & NOMMU_MAP_WRITE)) ||
                ((prot & PROT_EXEC) && !(capabilities & NOMMU_MAP_EXEC))
            ) {
                capabilities &= ~BDI_CAP_MAP_DIRECT;
                capabilities &= ~NOMMU_MAP_DIRECT;
                if (flags & MAP_SHARED) {
                    printk(KERN_WARNING
                        "MAP_SHARED not completely supported on !MMU\n");

@@ -1083,21 +1074,21 @@ static int validate_mmap_request(struct file *file,
        } else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
            /* handle implication of PROT_EXEC by PROT_READ */
            if (current->personality & READ_IMPLIES_EXEC) {
                if (capabilities & BDI_CAP_EXEC_MAP)
                if (capabilities & NOMMU_MAP_EXEC)
                    prot |= PROT_EXEC;
            }
        } else if ((prot & PROT_READ) &&
             (prot & PROT_EXEC) &&
             !(capabilities & BDI_CAP_EXEC_MAP)
             !(capabilities & NOMMU_MAP_EXEC)
             ) {
            /* backing file is not executable, try to copy */
            capabilities &= ~BDI_CAP_MAP_DIRECT;
            capabilities &= ~NOMMU_MAP_DIRECT;
        }
    } else {
        /* anonymous mappings are always memory backed and can be
         * privately mapped
         */
        capabilities = BDI_CAP_MAP_COPY;
        capabilities = NOMMU_MAP_COPY;

        /* handle PROT_EXEC implication by PROT_READ */
        if ((prot & PROT_READ) &&
@@ -1129,7 +1120,7 @@ static unsigned long determine_vm_flags(struct file *file,
    vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags);
    /* vm_flags |= mm->def_flags; */

    if (!(capabilities & BDI_CAP_MAP_DIRECT)) {
    if (!(capabilities & NOMMU_MAP_DIRECT)) {
        /* attempt to share read-only copies of mapped file chunks */
        vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
        if (file && !(prot & PROT_WRITE))

@@ -1138,7 +1129,7 @@ static unsigned long determine_vm_flags(struct file *file,
        /* overlay a shareable mapping on the backing device or inode
         * if possible - used for chardevs, ramfs/tmpfs/shmfs and
         * romfs/cramfs */
        vm_flags |= VM_MAYSHARE | (capabilities & BDI_CAP_VMFLAGS);
        vm_flags |= VM_MAYSHARE | (capabilities & NOMMU_VMFLAGS);
        if (flags & MAP_SHARED)
            vm_flags |= VM_SHARED;
    }

@@ -1191,7 +1182,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
     * shared mappings on devices or memory
     * - VM_MAYSHARE will be set if it may attempt to share
     */
    if (capabilities & BDI_CAP_MAP_DIRECT) {
    if (capabilities & NOMMU_MAP_DIRECT) {
        ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
        if (ret == 0) {
            /* shouldn't return success if we're not sharing */

@@ -1380,7 +1371,7 @@ unsigned long do_mmap_pgoff(struct file *file,
            if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
                !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
                /* new mapping is not a subset of the region */
                if (!(capabilities & BDI_CAP_MAP_DIRECT))
                if (!(capabilities & NOMMU_MAP_DIRECT))
                    goto sharing_violation;
                continue;
            }

@@ -1419,7 +1410,7 @@ unsigned long do_mmap_pgoff(struct file *file,
         * - this is the hook for quasi-memory character devices to
         *   tell us the location of a shared mapping
         */
        if (capabilities & BDI_CAP_MAP_DIRECT) {
        if (capabilities & NOMMU_MAP_DIRECT) {
            addr = file->f_op->get_unmapped_area(file, addr, len,
                                pgoff, flags);
            if (IS_ERR_VALUE(addr)) {

@@ -1431,10 +1422,10 @@ unsigned long do_mmap_pgoff(struct file *file,
                 * the mapping so we'll have to attempt to copy
                 * it */
                ret = -ENODEV;
                if (!(capabilities & BDI_CAP_MAP_COPY))
                if (!(capabilities & NOMMU_MAP_COPY))
                    goto error_just_free;

                capabilities &= ~BDI_CAP_MAP_DIRECT;
                capabilities &= ~NOMMU_MAP_DIRECT;
            } else {
                vma->vm_start = region->vm_start = addr;
                vma->vm_end = region->vm_end = addr + len;

@@ -1445,7 +1436,7 @@ unsigned long do_mmap_pgoff(struct file *file,
    vma->vm_region = region;

    /* set up the mapping
     * - the region is filled in if BDI_CAP_MAP_DIRECT is still set
     * - the region is filled in if NOMMU_MAP_DIRECT is still set
     */
    if (file && vma->vm_flags & VM_SHARED)
        ret = do_mmap_shared_file(vma);
@@ -1351,7 +1351,7 @@ static void balance_dirty_pages(struct address_space *mapping,
    unsigned long task_ratelimit;
    unsigned long dirty_ratelimit;
    unsigned long pos_ratio;
    struct backing_dev_info *bdi = mapping->backing_dev_info;
    struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
    bool strictlimit = bdi->capabilities & BDI_CAP_STRICTLIMIT;
    unsigned long start_time = jiffies;

@@ -1574,7 +1574,7 @@ DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;
 */
void balance_dirty_pages_ratelimited(struct address_space *mapping)
{
    struct backing_dev_info *bdi = mapping->backing_dev_info;
    struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
    int ratelimit;
    int *p;

@@ -1929,7 +1929,7 @@ continue_unlock:
            if (!clear_page_dirty_for_io(page))
                goto continue_unlock;

            trace_wbc_writepage(wbc, mapping->backing_dev_info);
            trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
            ret = (*writepage)(page, wbc, data);
            if (unlikely(ret)) {
                if (ret == AOP_WRITEPAGE_ACTIVATE) {

@@ -2094,10 +2094,12 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
    trace_writeback_dirty_page(page, mapping);

    if (mapping_cap_account_dirty(mapping)) {
        struct backing_dev_info *bdi = inode_to_bdi(mapping->host);

        __inc_zone_page_state(page, NR_FILE_DIRTY);
        __inc_zone_page_state(page, NR_DIRTIED);
        __inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
        __inc_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED);
        __inc_bdi_stat(bdi, BDI_RECLAIMABLE);
        __inc_bdi_stat(bdi, BDI_DIRTIED);
        task_io_account_write(PAGE_CACHE_SIZE);
        current->nr_dirtied++;
        this_cpu_inc(bdp_ratelimits);

@@ -2156,7 +2158,7 @@ void account_page_redirty(struct page *page)
    if (mapping && mapping_cap_account_dirty(mapping)) {
        current->nr_dirtied--;
        dec_zone_page_state(page, NR_DIRTIED);
        dec_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED);
        dec_bdi_stat(inode_to_bdi(mapping->host), BDI_DIRTIED);
    }
}
EXPORT_SYMBOL(account_page_redirty);

@@ -2298,7 +2300,7 @@ int clear_page_dirty_for_io(struct page *page)
     */
    if (TestClearPageDirty(page)) {
        dec_zone_page_state(page, NR_FILE_DIRTY);
        dec_bdi_stat(mapping->backing_dev_info,
        dec_bdi_stat(inode_to_bdi(mapping->host),
                BDI_RECLAIMABLE);
        return 1;
    }

@@ -2316,7 +2318,7 @@ int test_clear_page_writeback(struct page *page)

    memcg = mem_cgroup_begin_page_stat(page);
    if (mapping) {
        struct backing_dev_info *bdi = mapping->backing_dev_info;
        struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
        unsigned long flags;

        spin_lock_irqsave(&mapping->tree_lock, flags);

@@ -2351,7 +2353,7 @@ int __test_set_page_writeback(struct page *page, bool keep_write)

    memcg = mem_cgroup_begin_page_stat(page);
    if (mapping) {
        struct backing_dev_info *bdi = mapping->backing_dev_info;
        struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
        unsigned long flags;

        spin_lock_irqsave(&mapping->tree_lock, flags);

@@ -2405,12 +2407,7 @@ EXPORT_SYMBOL(mapping_tagged);
 */
void wait_for_stable_page(struct page *page)
{
    struct address_space *mapping = page_mapping(page);
    struct backing_dev_info *bdi = mapping->backing_dev_info;

    if (!bdi_cap_stable_pages_required(bdi))
        return;

    wait_on_page_writeback(page);
    if (bdi_cap_stable_pages_required(inode_to_bdi(page->mapping->host)))
        wait_on_page_writeback(page);
}
EXPORT_SYMBOL_GPL(wait_for_stable_page);
@@ -27,7 +27,7 @@
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
    ra->ra_pages = mapping->backing_dev_info->ra_pages;
    ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages;
    ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);

@@ -541,7 +541,7 @@ page_cache_async_readahead(struct address_space *mapping,
    /*
     * Defer asynchronous read-ahead on IO congestion.
     */
    if (bdi_read_congested(mapping->backing_dev_info))
    if (bdi_read_congested(inode_to_bdi(mapping->host)))
        return;

    /* do read-ahead */

mm/shmem.c
@@ -191,11 +191,6 @@ static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
    .ra_pages = 0, /* No readahead */
    .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

@@ -765,11 +760,11 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
        goto redirty;

    /*
     * shmem_backing_dev_info's capabilities prevent regular writeback or
     * sync from ever calling shmem_writepage; but a stacking filesystem
     * might use ->writepage of its underlying filesystem, in which case
     * tmpfs should write out to swap only in response to memory pressure,
     * and not for the writeback threads or sync.
     * Our capabilities prevent regular writeback or sync from ever calling
     * shmem_writepage; but a stacking filesystem might use ->writepage of
     * its underlying filesystem, in which case tmpfs should write out to
     * swap only in response to memory pressure, and not for the writeback
     * threads or sync.
     */
    if (!wbc->for_reclaim) {
        WARN_ON_ONCE(1); /* Still happens? Tell us about it! */

@@ -1415,7 +1410,6 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
        inode->i_ino = get_next_ino();
        inode_init_owner(inode, dir, mode);
        inode->i_blocks = 0;
        inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
        inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        inode->i_generation = get_seconds();
        info = SHMEM_I(inode);

@@ -1461,7 +1455,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode

bool shmem_mapping(struct address_space *mapping)
{
    return mapping->backing_dev_info == &shmem_backing_dev_info;
    return mapping->host->i_sb->s_op == &shmem_ops;
}

#ifdef CONFIG_TMPFS

@@ -3225,10 +3219,6 @@ int __init shmem_init(void)
    if (shmem_inode_cachep)
        return 0;

    error = bdi_init(&shmem_backing_dev_info);
    if (error)
        goto out4;

    error = shmem_init_inodecache();
    if (error)
        goto out3;

@@ -3252,8 +3242,6 @@ out1:
out2:
    shmem_destroy_inodecache();
out3:
    bdi_destroy(&shmem_backing_dev_info);
out4:
    shm_mnt = ERR_PTR(error);
    return error;
}
@@ -1138,8 +1138,6 @@ void __init swap_setup(void)
#ifdef CONFIG_SWAP
    int i;

    if (bdi_init(swapper_spaces[0].backing_dev_info))
        panic("Failed to init swap bdi");
    for (i = 0; i < MAX_SWAPFILES; i++)
        spin_lock_init(&swapper_spaces[i].tree_lock);
#endif
@@ -32,17 +32,11 @@ static const struct address_space_operations swap_aops = {
#endif
};

static struct backing_dev_info swap_backing_dev_info = {
    .name = "swap",
    .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};

struct address_space swapper_spaces[MAX_SWAPFILES] = {
    [0 ... MAX_SWAPFILES - 1] = {
        .page_tree = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
        .i_mmap_writable = ATOMIC_INIT(0),
        .a_ops = &swap_aops,
        .backing_dev_info = &swap_backing_dev_info,
    }
};
@@ -112,7 +112,7 @@ void cancel_dirty_page(struct page *page, unsigned int account_size)
    struct address_space *mapping = page->mapping;
    if (mapping && mapping_cap_account_dirty(mapping)) {
        dec_zone_page_state(page, NR_FILE_DIRTY);
        dec_bdi_stat(mapping->backing_dev_info,
        dec_bdi_stat(inode_to_bdi(mapping->host),
                BDI_RECLAIMABLE);
        if (account_size)
            task_io_account_cancelled_write(account_size);
@@ -500,7 +500,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
    }
    if (mapping->a_ops->writepage == NULL)
        return PAGE_ACTIVATE;
    if (!may_write_to_queue(mapping->backing_dev_info, sc))
    if (!may_write_to_queue(inode_to_bdi(mapping->host), sc))
        return PAGE_KEEP;

    if (clear_page_dirty_for_io(page)) {

@@ -879,7 +879,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
         */
        mapping = page_mapping(page);
        if (((dirty || writeback) && mapping &&
                bdi_write_congested(mapping->backing_dev_info)) ||
                bdi_write_congested(inode_to_bdi(mapping->host))) ||
            (writeback && PageReclaim(page)))
            nr_congested++;

@@ -726,16 +726,15 @@ static inline unsigned long mmap_prot(struct file *file, unsigned long prot)
        return prot | PROT_EXEC;
    /*
     * ditto if it's not on noexec mount, except that on !MMU we need
     * BDI_CAP_EXEC_MMAP (== VM_MAYEXEC) in this case
     * NOMMU_MAP_EXEC (== VM_MAYEXEC) in this case
     */
    if (!(file->f_path.mnt->mnt_flags & MNT_NOEXEC)) {
#ifndef CONFIG_MMU
        unsigned long caps = 0;
        struct address_space *mapping = file->f_mapping;
        if (mapping && mapping->backing_dev_info)
            caps = mapping->backing_dev_info->capabilities;
        if (!(caps & BDI_CAP_EXEC_MAP))
            return prot;
        if (file->f_op->mmap_capabilities) {
            unsigned caps = file->f_op->mmap_capabilities(file);
            if (!(caps & NOMMU_MAP_EXEC))
                return prot;
        }
#endif
        return prot | PROT_EXEC;
    }