/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */
2009-01-07 01:40:20 +03:00
# include <linux/fs.h>
# include <linux/init.h>
# include <linux/vfs.h>
# include <linux/mount.h>
2009-04-14 01:40:12 +04:00
# include <linux/pagemap.h>
2009-01-07 01:40:20 +03:00
# include <linux/file.h>
# include <linux/mm.h>
# include <linux/module.h>
# include <linux/swap.h>
/* The internal mount used for SysV SHM and shmem_file_setup() objects */
static struct vfsmount *shm_mnt;
# ifdef CONFIG_SHMEM
2005-04-17 02:20:36 +04:00
/*
* This virtual memory filesystem is heavily based on the ramfs . It
* extends ramfs by the ability to use swap and honor resource limits
* which makes it a completely usable filesystem .
*/
2006-09-29 13:01:35 +04:00
# include <linux/xattr.h>
2007-07-17 15:04:28 +04:00
# include <linux/exportfs.h>
2009-11-03 18:44:44 +03:00
# include <linux/posix_acl.h>
2006-09-29 13:01:35 +04:00
# include <linux/generic_acl.h>
2005-04-17 02:20:36 +04:00
# include <linux/mman.h>
# include <linux/string.h>
# include <linux/slab.h>
# include <linux/backing-dev.h>
# include <linux/shmem_fs.h>
# include <linux/writeback.h>
# include <linux/blkdev.h>
2011-08-04 03:21:21 +04:00
# include <linux/pagevec.h>
2011-08-04 03:21:21 +04:00
# include <linux/percpu_counter.h>
2011-07-26 04:12:32 +04:00
# include <linux/splice.h>
2005-04-17 02:20:36 +04:00
# include <linux/security.h>
# include <linux/swapops.h>
# include <linux/mempolicy.h>
# include <linux/namei.h>
2006-02-22 02:49:47 +03:00
# include <linux/ctype.h>
[PATCH] add migratepage address space op to shmem
Basic problem: pages of a shared memory segment can only be migrated once.
In 2.6.16 through 2.6.17-rc1, shared memory mappings do not have a
migratepage address space op. Therefore, migrate_pages() falls back to
default processing. In this path, it will try to pageout() dirty pages.
Once a shared memory page has been migrated it becomes dirty, so
migrate_pages() will try to page it out. However, because the page count
is 3 [cache + current + pte], pageout() will return PAGE_KEEP because
is_page_cache_freeable() returns false. This will abort all subsequent
migrations.
This patch adds a migratepage address space op to shared memory segments to
avoid taking the default path. We use the "migrate_page()" function
because it knows how to migrate dirty pages. This allows shared memory
segment pages to migrate, subject to other conditions such as # pte's
referencing the page [page_mapcount(page)], when requested.
I think this is safe. If we're migrating a shared memory page, then we
found the page via a page table, so it must be in memory.
Can be verified with memtoy and the shmem-mbind-test script, both
available at: http://free.linux.hp.com/~lts/Tools/
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Acked-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-04-22 13:35:48 +04:00
# include <linux/migrate.h>
2006-09-26 10:31:11 +04:00
# include <linux/highmem.h>
2008-02-08 15:21:48 +03:00
# include <linux/seq_file.h>
2008-10-07 22:00:12 +04:00
# include <linux/magic.h>
[PATCH] add migratepage address space op to shmem
Basic problem: pages of a shared memory segment can only be migrated once.
In 2.6.16 through 2.6.17-rc1, shared memory mappings do not have a
migratepage address space op. Therefore, migrate_pages() falls back to
default processing. In this path, it will try to pageout() dirty pages.
Once a shared memory page has been migrated it becomes dirty, so
migrate_pages() will try to page it out. However, because the page count
is 3 [cache + current + pte], pageout() will return PAGE_KEEP because
is_page_cache_freeable() returns false. This will abort all subsequent
migrations.
This patch adds a migratepage address space op to shared memory segments to
avoid taking the default path. We use the "migrate_page()" function
because it knows how to migrate dirty pages. This allows shared memory
segment pages to migrate, subject to other conditions such as # pte's
referencing the page [page_mapcount(page)], when requested.
I think this is safe. If we're migrating a shared memory page, then we
found the page via a page table, so it must be in memory.
Can be verified with memtoy and the shmem-mbind-test script, both
available at: http://free.linux.hp.com/~lts/Tools/
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Acked-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-04-22 13:35:48 +04:00
2005-04-17 02:20:36 +04:00
# include <asm/uaccess.h>
# include <asm/pgtable.h>
/* 512-byte blocks per page, as reported in inode->i_blocks */
#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
/* Pages to account against vm overcommit for a mapping of @size bytes */
#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128
tmpfs: implement generic xattr support
Implement generic xattrs for tmpfs filesystems. The Feodra project, while
trying to replace suid apps with file capabilities, realized that tmpfs,
which is used on the build systems, does not support file capabilities and
thus cannot be used to build packages which use file capabilities. Xattrs
are also needed for overlayfs.
The xattr interface is a bit odd. If a filesystem does not implement any
{get,set,list}xattr functions the VFS will call into some random LSM hooks
and the running LSM can then implement some method for handling xattrs.
SELinux for example provides a method to support security.selinux but no
other security.* xattrs.
As it stands today when one enables CONFIG_TMPFS_POSIX_ACL tmpfs will have
xattr handler routines specifically to handle acls. Because of this tmpfs
would loose the VFS/LSM helpers to support the running LSM. To make up
for that tmpfs had stub functions that did nothing but call into the LSM
hooks which implement the helpers.
This new patch does not use the LSM fallback functions and instead just
implements a native get/set/list xattr feature for the full security.* and
trusted.* namespace like a normal filesystem. This means that tmpfs can
now support both security.selinux and security.capability, which was not
previously possible.
The basic implementation is that I attach a:
struct shmem_xattr {
struct list_head list; /* anchored by shmem_inode_info->xattr_list */
char *name;
size_t size;
char value[0];
};
Into the struct shmem_inode_info for each xattr that is set. This
implementation could easily support the user.* namespace as well, except
some care needs to be taken to prevent large amounts of unswappable memory
being allocated for unprivileged users.
[mszeredi@suse.cz: new config option, suport trusted.*, support symlinks]
Signed-off-by: Eric Paris <eparis@redhat.com>
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Acked-by: Serge Hallyn <serge.hallyn@ubuntu.com>
Tested-by: Serge Hallyn <serge.hallyn@ubuntu.com>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Acked-by: Hugh Dickins <hughd@google.com>
Tested-by: Jordi Pujol <jordipujolp@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-05-25 04:12:39 +04:00
struct shmem_xattr {
struct list_head list ; /* anchored by shmem_inode_info->xattr_list */
char * name ; /* xattr name */
size_t size ;
char value [ 0 ] ;
} ;
tmpfs: demolish old swap vector support
The maximum size of a shmem/tmpfs file has been limited by the maximum
size of its triple-indirect swap vector. With 4kB page size, maximum
filesize was just over 2TB on a 32-bit kernel, but sadly one eighth of
that on a 64-bit kernel. (With 8kB page size, maximum filesize was just
over 4TB on a 64-bit kernel, but 16TB on a 32-bit kernel,
MAX_LFS_FILESIZE being then more restrictive than swap vector layout.)
It's a shame that tmpfs should be more restrictive than ramfs, and this
limitation has now been noticed. Add another level to the swap vector?
No, it became obscure and hard to maintain, once I complicated it to
make use of highmem pages nine years ago: better choose another way.
Surely, if 2.4 had had the radix tree pagecache introduced in 2.5, then
tmpfs would never have invented its own peculiar radix tree: we would
have fitted swap entries into the common radix tree instead, in much the
same way as we fit swap entries into page tables.
And why should each file have a separate radix tree for its pages and
for its swap entries? The swap entries are required precisely where and
when the pages are not. We want to put them together in a single radix
tree: which can then avoid much of the locking which was needed to
prevent them from being exchanged underneath us.
This also avoids the waste of memory devoted to swap vectors, first in
the shmem_inode itself, then at least two more pages once a file grew
beyond 16 data pages (pages accounted by df and du, but not by memcg).
Allocated upfront, to avoid allocation when under swapping pressure, but
pure waste when CONFIG_SWAP is not set - I have never spattered around
the ifdefs to prevent that, preferring this move to sharing the common
radix tree instead.
There are three downsides to sharing the radix tree. One, that it binds
tmpfs more tightly to the rest of mm, either requiring knowledge of swap
entries in radix tree there, or duplication of its code here in shmem.c.
I believe that the simplications and memory savings (and probable higher
performance, not yet measured) justify that.
Two, that on HIGHMEM systems with SWAP enabled, it's the lowmem radix
nodes that cannot be freed under memory pressure - whereas before it was
the less precious highmem swap vector pages that could not be freed.
I'm hoping that 64-bit has now been accessible for long enough, that the
highmem argument has grown much less persuasive.
Three, that swapoff is slower than it used to be on tmpfs files, since
it's using a simple generic mechanism not tailored to it: I find this
noticeable, and shall want to improve, but maybe nobody else will
notice.
So... now remove most of the old swap vector code from shmem.c. But,
for the moment, keep the simple i_direct vector of 16 pages, with simple
accessors shmem_put_swap() and shmem_get_swap(), as a toy implementation
to help mark where swap needs to be handled in subsequent patches.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
/* Flag allocation requirements to shmem_getpage */
enum sgp_type {
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_DIRTY,	/* like SGP_CACHE, but set new page dirty */
	SGP_WRITE,	/* may exceed i_size, may allocate page */
};
#ifdef CONFIG_TMPFS
/* Default block limit when none given at mount: half of physical RAM */
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages / 2;
}

/* Default inode limit when none given at mount: bounded by lowmem and RAM/2 */
static unsigned long shmem_default_max_inodes(void)
{
	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif
2008-02-08 15:21:48 +03:00
2011-07-26 04:12:34 +04:00
static int shmem_getpage_gfp ( struct inode * inode , pgoff_t index ,
struct page * * pagep , enum sgp_type sgp , gfp_t gfp , int * fault_type ) ;
static inline int shmem_getpage ( struct inode * inode , pgoff_t index ,
struct page * * pagep , enum sgp_type sgp , int * fault_type )
{
return shmem_getpage_gfp ( inode , index , pagep , sgp ,
mapping_gfp_mask ( inode - > i_mapping ) , fault_type ) ;
}
2005-04-17 02:20:36 +04:00
static inline struct shmem_sb_info * SHMEM_SB ( struct super_block * sb )
{
return sb - > s_fs_info ;
}
/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object:
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_kern(VM_ACCT(size));
}
/* Undo the pre-accounting done by shmem_acct_size() */
static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}
/*
* . . . whereas tmpfs objects are accounted incrementally as
* pages are allocated , in order to allow huge sparse files .
* shmem_getpage reports shmem_acct_block failure as - ENOSPC not - ENOMEM ,
* so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM .
*/
static inline int shmem_acct_block ( unsigned long flags )
{
2009-02-24 23:51:52 +03:00
return ( flags & VM_NORESERVE ) ?
security_vm_enough_memory_kern ( VM_ACCT ( PAGE_CACHE_SIZE ) ) : 0 ;
2005-04-17 02:20:36 +04:00
}
static inline void shmem_unacct_blocks ( unsigned long flags , long pages )
{
2009-02-24 23:51:52 +03:00
if ( flags & VM_NORESERVE )
2005-04-17 02:20:36 +04:00
vm_unacct_memory ( pages * VM_ACCT ( PAGE_CACHE_SIZE ) ) ;
}
2007-03-05 11:30:28 +03:00
static const struct super_operations shmem_ops ;
2006-06-28 15:26:44 +04:00
static const struct address_space_operations shmem_aops ;
2006-12-07 07:40:36 +03:00
static const struct file_operations shmem_file_operations ;
2007-02-12 11:55:39 +03:00
static const struct inode_operations shmem_inode_operations ;
static const struct inode_operations shmem_dir_inode_operations ;
static const struct inode_operations shmem_special_inode_operations ;
2009-09-27 22:29:37 +04:00
static const struct vm_operations_struct shmem_vm_ops ;
2005-04-17 02:20:36 +04:00
2005-09-07 02:17:45 +04:00
static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
2005-04-17 02:20:36 +04:00
. ra_pages = 0 , /* No readahead */
2008-10-19 07:26:32 +04:00
. capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED ,
2005-04-17 02:20:36 +04:00
} ;
/* Inodes with swapped-out pages, protected by shmem_swaplist_mutex */
static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);
2005-04-17 02:20:36 +04:00
2008-02-05 09:28:47 +03:00
static int shmem_reserve_inode ( struct super_block * sb )
{
struct shmem_sb_info * sbinfo = SHMEM_SB ( sb ) ;
if ( sbinfo - > max_inodes ) {
spin_lock ( & sbinfo - > stat_lock ) ;
if ( ! sbinfo - > free_inodes ) {
spin_unlock ( & sbinfo - > stat_lock ) ;
return - ENOSPC ;
}
sbinfo - > free_inodes - - ;
spin_unlock ( & sbinfo - > stat_lock ) ;
}
return 0 ;
}
static void shmem_free_inode ( struct super_block * sb )
{
struct shmem_sb_info * sbinfo = SHMEM_SB ( sb ) ;
if ( sbinfo - > max_inodes ) {
spin_lock ( & sbinfo - > stat_lock ) ;
sbinfo - > free_inodes + + ;
spin_unlock ( & sbinfo - > stat_lock ) ;
}
}
2008-03-20 03:00:41 +03:00
/**
2011-08-04 03:21:21 +04:00
* shmem_recalc_inode - recalculate the block usage of an inode
2005-04-17 02:20:36 +04:00
* @ inode : inode to recalc
*
* We have to calculate the free blocks since the mm can drop
* undirtied hole pages behind our back .
*
* But normally info - > alloced = = inode - > i_mapping - > nrpages + info - > swapped
* So mm freed is info - > alloced - ( inode - > i_mapping - > nrpages + info - > swapped )
*
* It has to be called with the spinlock held .
*/
static void shmem_recalc_inode ( struct inode * inode )
{
struct shmem_inode_info * info = SHMEM_I ( inode ) ;
long freed ;
freed = info - > alloced - info - > swapped - inode - > i_mapping - > nrpages ;
if ( freed > 0 ) {
tmpfs: convert shmem_getpage_gfp to radix-swap
Convert shmem_getpage_gfp(), the engine-room of shmem, to expect page or
swap entry returned from radix tree by find_lock_page().
Whereas the repetitive old method proceeded mainly under info->lock,
dropping and repeating whenever one of the conditions needed was not
met, now we can proceed without it, leaving shmem_add_to_page_cache() to
check for a race.
This way there is no need to preallocate a page, no need for an early
radix_tree_preload(), no need for mem_cgroup_shmem_charge_fallback().
Move the error unwinding down to the bottom instead of repeating it
throughout. ENOSPC handling is a little different from before: there is
no longer any race between find_lock_page() and finding swap, but we can
arrive at ENOSPC before calling shmem_recalc_inode(), which might
occasionally discover freed space.
Be stricter to check i_size before returning. info->lock is used for
little but alloced, swapped, i_blocks updates. Move i_blocks updates
out from under the max_blocks check, so even an unlimited size=0 mount
can show accurate du.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:24 +04:00
struct shmem_sb_info * sbinfo = SHMEM_SB ( inode - > i_sb ) ;
if ( sbinfo - > max_blocks )
percpu_counter_add ( & sbinfo - > used_blocks , - freed ) ;
2005-04-17 02:20:36 +04:00
info - > alloced - = freed ;
tmpfs: convert shmem_getpage_gfp to radix-swap
Convert shmem_getpage_gfp(), the engine-room of shmem, to expect page or
swap entry returned from radix tree by find_lock_page().
Whereas the repetitive old method proceeded mainly under info->lock,
dropping and repeating whenever one of the conditions needed was not
met, now we can proceed without it, leaving shmem_add_to_page_cache() to
check for a race.
This way there is no need to preallocate a page, no need for an early
radix_tree_preload(), no need for mem_cgroup_shmem_charge_fallback().
Move the error unwinding down to the bottom instead of repeating it
throughout. ENOSPC handling is a little different from before: there is
no longer any race between find_lock_page() and finding swap, but we can
arrive at ENOSPC before calling shmem_recalc_inode(), which might
occasionally discover freed space.
Be stricter to check i_size before returning. info->lock is used for
little but alloced, swapped, i_blocks updates. Move i_blocks updates
out from under the max_blocks check, so even an unlimited size=0 mount
can show accurate du.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:24 +04:00
inode - > i_blocks - = freed * BLOCKS_PER_PAGE ;
2005-04-17 02:20:36 +04:00
shmem_unacct_blocks ( info - > flags , freed ) ;
}
}
2011-08-04 03:21:22 +04:00
/*
* Replace item expected in radix tree by a new item , while holding tree lock .
*/
static int shmem_radix_tree_replace ( struct address_space * mapping ,
pgoff_t index , void * expected , void * replacement )
{
void * * pslot ;
void * item = NULL ;
VM_BUG_ON ( ! expected ) ;
pslot = radix_tree_lookup_slot ( & mapping - > page_tree , index ) ;
if ( pslot )
item = radix_tree_deref_slot_protected ( pslot ,
& mapping - > tree_lock ) ;
if ( item ! = expected )
return - ENOENT ;
if ( replacement )
radix_tree_replace_slot ( pslot , replacement ) ;
else
radix_tree_delete ( & mapping - > page_tree , index ) ;
return 0 ;
}
2011-08-04 03:21:23 +04:00
/*
* Like add_to_page_cache_locked , but error if expected item has gone .
*/
static int shmem_add_to_page_cache ( struct page * page ,
struct address_space * mapping ,
pgoff_t index , gfp_t gfp , void * expected )
{
2011-08-04 03:21:24 +04:00
int error = 0 ;
2011-08-04 03:21:23 +04:00
VM_BUG_ON ( ! PageLocked ( page ) ) ;
VM_BUG_ON ( ! PageSwapBacked ( page ) ) ;
if ( ! expected )
error = radix_tree_preload ( gfp & GFP_RECLAIM_MASK ) ;
if ( ! error ) {
page_cache_get ( page ) ;
page - > mapping = mapping ;
page - > index = index ;
spin_lock_irq ( & mapping - > tree_lock ) ;
if ( ! expected )
error = radix_tree_insert ( & mapping - > page_tree ,
index , page ) ;
else
error = shmem_radix_tree_replace ( mapping , index ,
expected , page ) ;
if ( ! error ) {
mapping - > nrpages + + ;
__inc_zone_page_state ( page , NR_FILE_PAGES ) ;
__inc_zone_page_state ( page , NR_SHMEM ) ;
spin_unlock_irq ( & mapping - > tree_lock ) ;
} else {
page - > mapping = NULL ;
spin_unlock_irq ( & mapping - > tree_lock ) ;
page_cache_release ( page ) ;
}
if ( ! expected )
radix_tree_preload_end ( ) ;
}
if ( error )
mem_cgroup_uncharge_cache_page ( page ) ;
return error ;
}
2011-08-04 03:21:25 +04:00
/*
* Like delete_from_page_cache , but substitutes swap for page .
*/
static void shmem_delete_from_page_cache ( struct page * page , void * radswap )
{
struct address_space * mapping = page - > mapping ;
int error ;
spin_lock_irq ( & mapping - > tree_lock ) ;
error = shmem_radix_tree_replace ( mapping , page - > index , page , radswap ) ;
page - > mapping = NULL ;
mapping - > nrpages - - ;
__dec_zone_page_state ( page , NR_FILE_PAGES ) ;
__dec_zone_page_state ( page , NR_SHMEM ) ;
spin_unlock_irq ( & mapping - > tree_lock ) ;
page_cache_release ( page ) ;
BUG_ON ( error ) ;
}
2011-08-04 03:21:22 +04:00
/*
* Like find_get_pages , but collecting swap entries as well as pages .
*/
static unsigned shmem_find_get_pages_and_swap ( struct address_space * mapping ,
pgoff_t start , unsigned int nr_pages ,
struct page * * pages , pgoff_t * indices )
{
unsigned int i ;
unsigned int ret ;
unsigned int nr_found ;
rcu_read_lock ( ) ;
restart :
nr_found = radix_tree_gang_lookup_slot ( & mapping - > page_tree ,
( void * * * ) pages , indices , start , nr_pages ) ;
ret = 0 ;
for ( i = 0 ; i < nr_found ; i + + ) {
struct page * page ;
repeat :
page = radix_tree_deref_slot ( ( void * * ) pages [ i ] ) ;
if ( unlikely ( ! page ) )
continue ;
if ( radix_tree_exception ( page ) ) {
2011-08-04 03:21:28 +04:00
if ( radix_tree_deref_retry ( page ) )
goto restart ;
/*
* Otherwise , we must be storing a swap entry
* here as an exceptional entry : so return it
* without attempting to raise page count .
*/
goto export ;
2011-08-04 03:21:22 +04:00
}
if ( ! page_cache_get_speculative ( page ) )
goto repeat ;
/* Has the page moved? */
if ( unlikely ( page ! = * ( ( void * * ) pages [ i ] ) ) ) {
page_cache_release ( page ) ;
goto repeat ;
}
export :
indices [ ret ] = indices [ i ] ;
pages [ ret ] = page ;
ret + + ;
}
if ( unlikely ( ! ret & & nr_found ) )
goto restart ;
rcu_read_unlock ( ) ;
return ret ;
}
/*
 * Remove swap entry from radix tree, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	int error;

	spin_lock_irq(&mapping->tree_lock);
	error = shmem_radix_tree_replace(mapping, index, radswap, NULL);
	spin_unlock_irq(&mapping->tree_lock);
	if (!error)
		free_swap_and_cache(radix_to_swp_entry(radswap));
	return error;
}
/*
* Pagevec may contain swap entries , so shuffle up pages before releasing .
*/
static void shmem_pagevec_release ( struct pagevec * pvec )
{
int i , j ;
for ( i = 0 , j = 0 ; i < pagevec_count ( pvec ) ; i + + ) {
struct page * page = pvec - > pages [ i ] ;
if ( ! radix_tree_exceptional_entry ( page ) )
pvec - > pages [ j + + ] = page ;
}
pvec - > nr = j ;
pagevec_release ( pvec ) ;
}
/*
* Remove range of pages and swap entries from radix tree , and free them .
*/
tmpfs: demolish old swap vector support
The maximum size of a shmem/tmpfs file has been limited by the maximum
size of its triple-indirect swap vector. With 4kB page size, maximum
filesize was just over 2TB on a 32-bit kernel, but sadly one eighth of
that on a 64-bit kernel. (With 8kB page size, maximum filesize was just
over 4TB on a 64-bit kernel, but 16TB on a 32-bit kernel,
MAX_LFS_FILESIZE being then more restrictive than swap vector layout.)
It's a shame that tmpfs should be more restrictive than ramfs, and this
limitation has now been noticed. Add another level to the swap vector?
No, it became obscure and hard to maintain, once I complicated it to
make use of highmem pages nine years ago: better choose another way.
Surely, if 2.4 had had the radix tree pagecache introduced in 2.5, then
tmpfs would never have invented its own peculiar radix tree: we would
have fitted swap entries into the common radix tree instead, in much the
same way as we fit swap entries into page tables.
And why should each file have a separate radix tree for its pages and
for its swap entries? The swap entries are required precisely where and
when the pages are not. We want to put them together in a single radix
tree: which can then avoid much of the locking which was needed to
prevent them from being exchanged underneath us.
This also avoids the waste of memory devoted to swap vectors, first in
the shmem_inode itself, then at least two more pages once a file grew
beyond 16 data pages (pages accounted by df and du, but not by memcg).
Allocated upfront, to avoid allocation when under swapping pressure, but
pure waste when CONFIG_SWAP is not set - I have never spattered around
the ifdefs to prevent that, preferring this move to sharing the common
radix tree instead.
There are three downsides to sharing the radix tree. One, that it binds
tmpfs more tightly to the rest of mm, either requiring knowledge of swap
entries in radix tree there, or duplication of its code here in shmem.c.
I believe that the simplications and memory savings (and probable higher
performance, not yet measured) justify that.
Two, that on HIGHMEM systems with SWAP enabled, it's the lowmem radix
nodes that cannot be freed under memory pressure - whereas before it was
the less precious highmem swap vector pages that could not be freed.
I'm hoping that 64-bit has now been accessible for long enough, that the
highmem argument has grown much less persuasive.
Three, that swapoff is slower than it used to be on tmpfs files, since
it's using a simple generic mechanism not tailored to it: I find this
noticeable, and shall want to improve, but maybe nobody else will
notice.
So... now remove most of the old swap vector code from shmem.c. But,
for the moment, keep the simple i_direct vector of 16 pages, with simple
accessors shmem_put_swap() and shmem_get_swap(), as a toy implementation
to help mark where swap needs to be handled in subsequent patches.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:20 +04:00
void shmem_truncate_range ( struct inode * inode , loff_t lstart , loff_t lend )
2005-04-17 02:20:36 +04:00
{
tmpfs: demolish old swap vector support
The maximum size of a shmem/tmpfs file has been limited by the maximum
size of its triple-indirect swap vector. With 4kB page size, maximum
filesize was just over 2TB on a 32-bit kernel, but sadly one eighth of
that on a 64-bit kernel. (With 8kB page size, maximum filesize was just
over 4TB on a 64-bit kernel, but 16TB on a 32-bit kernel,
MAX_LFS_FILESIZE being then more restrictive than swap vector layout.)
It's a shame that tmpfs should be more restrictive than ramfs, and this
limitation has now been noticed. Add another level to the swap vector?
No, it became obscure and hard to maintain, once I complicated it to
make use of highmem pages nine years ago: better choose another way.
Surely, if 2.4 had had the radix tree pagecache introduced in 2.5, then
tmpfs would never have invented its own peculiar radix tree: we would
have fitted swap entries into the common radix tree instead, in much the
same way as we fit swap entries into page tables.
And why should each file have a separate radix tree for its pages and
for its swap entries? The swap entries are required precisely where and
when the pages are not. We want to put them together in a single radix
tree: which can then avoid much of the locking which was needed to
prevent them from being exchanged underneath us.
This also avoids the waste of memory devoted to swap vectors, first in
the shmem_inode itself, then at least two more pages once a file grew
beyond 16 data pages (pages accounted by df and du, but not by memcg).
Allocated upfront, to avoid allocation when under swapping pressure, but
pure waste when CONFIG_SWAP is not set - I have never spattered around
the ifdefs to prevent that, preferring this move to sharing the common
radix tree instead.
There are three downsides to sharing the radix tree. One, that it binds
tmpfs more tightly to the rest of mm, either requiring knowledge of swap
entries in radix tree there, or duplication of its code here in shmem.c.
I believe that the simplications and memory savings (and probable higher
performance, not yet measured) justify that.
Two, that on HIGHMEM systems with SWAP enabled, it's the lowmem radix
nodes that cannot be freed under memory pressure - whereas before it was
the less precious highmem swap vector pages that could not be freed.
I'm hoping that 64-bit has now been accessible for long enough, that the
highmem argument has grown much less persuasive.
Three, that swapoff is slower than it used to be on tmpfs files, since
it's using a simple generic mechanism not tailored to it: I find this
noticeable, and shall want to improve, but maybe nobody else will
notice.
So... now remove most of the old swap vector code from shmem.c. But,
for the moment, keep the simple i_direct vector of 16 pages, with simple
accessors shmem_put_swap() and shmem_get_swap(), as a toy implementation
to help mark where swap needs to be handled in subsequent patches.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:20 +04:00
struct address_space * mapping = inode - > i_mapping ;
2005-04-17 02:20:36 +04:00
struct shmem_inode_info * info = SHMEM_I ( inode ) ;
tmpfs: demolish old swap vector support
The maximum size of a shmem/tmpfs file has been limited by the maximum
size of its triple-indirect swap vector. With 4kB page size, maximum
filesize was just over 2TB on a 32-bit kernel, but sadly one eighth of
that on a 64-bit kernel. (With 8kB page size, maximum filesize was just
over 4TB on a 64-bit kernel, but 16TB on a 32-bit kernel,
MAX_LFS_FILESIZE being then more restrictive than swap vector layout.)
It's a shame that tmpfs should be more restrictive than ramfs, and this
limitation has now been noticed. Add another level to the swap vector?
No, it became obscure and hard to maintain, once I complicated it to
make use of highmem pages nine years ago: better choose another way.
Surely, if 2.4 had had the radix tree pagecache introduced in 2.5, then
tmpfs would never have invented its own peculiar radix tree: we would
have fitted swap entries into the common radix tree instead, in much the
same way as we fit swap entries into page tables.
And why should each file have a separate radix tree for its pages and
for its swap entries? The swap entries are required precisely where and
when the pages are not. We want to put them together in a single radix
tree: which can then avoid much of the locking which was needed to
prevent them from being exchanged underneath us.
This also avoids the waste of memory devoted to swap vectors, first in
the shmem_inode itself, then at least two more pages once a file grew
beyond 16 data pages (pages accounted by df and du, but not by memcg).
Allocated upfront, to avoid allocation when under swapping pressure, but
pure waste when CONFIG_SWAP is not set - I have never spattered around
the ifdefs to prevent that, preferring this move to sharing the common
radix tree instead.
There are three downsides to sharing the radix tree. One, that it binds
tmpfs more tightly to the rest of mm, either requiring knowledge of swap
entries in radix tree there, or duplication of its code here in shmem.c.
I believe that the simplifications and memory savings (and probable higher
performance, not yet measured) justify that.
Two, that on HIGHMEM systems with SWAP enabled, it's the lowmem radix
nodes that cannot be freed under memory pressure - whereas before it was
the less precious highmem swap vector pages that could not be freed.
I'm hoping that 64-bit has now been accessible for long enough, that the
highmem argument has grown much less persuasive.
Three, that swapoff is slower than it used to be on tmpfs files, since
it's using a simple generic mechanism not tailored to it: I find this
noticeable, and shall want to improve, but maybe nobody else will
notice.
So... now remove most of the old swap vector code from shmem.c. But,
for the moment, keep the simple i_direct vector of 16 pages, with simple
accessors shmem_put_swap() and shmem_get_swap(), as a toy implementation
to help mark where swap needs to be handled in subsequent patches.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:20 +04:00
pgoff_t start = ( lstart + PAGE_CACHE_SIZE - 1 ) > > PAGE_CACHE_SHIFT ;
2011-08-04 03:21:21 +04:00
unsigned partial = lstart & ( PAGE_CACHE_SIZE - 1 ) ;
tmpfs: demolish old swap vector support
The maximum size of a shmem/tmpfs file has been limited by the maximum
size of its triple-indirect swap vector. With 4kB page size, maximum
filesize was just over 2TB on a 32-bit kernel, but sadly one eighth of
that on a 64-bit kernel. (With 8kB page size, maximum filesize was just
over 4TB on a 64-bit kernel, but 16TB on a 32-bit kernel,
MAX_LFS_FILESIZE being then more restrictive than swap vector layout.)
It's a shame that tmpfs should be more restrictive than ramfs, and this
limitation has now been noticed. Add another level to the swap vector?
No, it became obscure and hard to maintain, once I complicated it to
make use of highmem pages nine years ago: better choose another way.
Surely, if 2.4 had had the radix tree pagecache introduced in 2.5, then
tmpfs would never have invented its own peculiar radix tree: we would
have fitted swap entries into the common radix tree instead, in much the
same way as we fit swap entries into page tables.
And why should each file have a separate radix tree for its pages and
for its swap entries? The swap entries are required precisely where and
when the pages are not. We want to put them together in a single radix
tree: which can then avoid much of the locking which was needed to
prevent them from being exchanged underneath us.
This also avoids the waste of memory devoted to swap vectors, first in
the shmem_inode itself, then at least two more pages once a file grew
beyond 16 data pages (pages accounted by df and du, but not by memcg).
Allocated upfront, to avoid allocation when under swapping pressure, but
pure waste when CONFIG_SWAP is not set - I have never spattered around
the ifdefs to prevent that, preferring this move to sharing the common
radix tree instead.
There are three downsides to sharing the radix tree. One, that it binds
tmpfs more tightly to the rest of mm, either requiring knowledge of swap
entries in radix tree there, or duplication of its code here in shmem.c.
I believe that the simplifications and memory savings (and probable higher
performance, not yet measured) justify that.
Two, that on HIGHMEM systems with SWAP enabled, it's the lowmem radix
nodes that cannot be freed under memory pressure - whereas before it was
the less precious highmem swap vector pages that could not be freed.
I'm hoping that 64-bit has now been accessible for long enough, that the
highmem argument has grown much less persuasive.
Three, that swapoff is slower than it used to be on tmpfs files, since
it's using a simple generic mechanism not tailored to it: I find this
noticeable, and shall want to improve, but maybe nobody else will
notice.
So... now remove most of the old swap vector code from shmem.c. But,
for the moment, keep the simple i_direct vector of 16 pages, with simple
accessors shmem_put_swap() and shmem_get_swap(), as a toy implementation
to help mark where swap needs to be handled in subsequent patches.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:20 +04:00
pgoff_t end = ( lend > > PAGE_CACHE_SHIFT ) ;
2011-08-04 03:21:21 +04:00
struct pagevec pvec ;
2011-08-04 03:21:22 +04:00
pgoff_t indices [ PAGEVEC_SIZE ] ;
long nr_swaps_freed = 0 ;
tmpfs: demolish old swap vector support
The maximum size of a shmem/tmpfs file has been limited by the maximum
size of its triple-indirect swap vector. With 4kB page size, maximum
filesize was just over 2TB on a 32-bit kernel, but sadly one eighth of
that on a 64-bit kernel. (With 8kB page size, maximum filesize was just
over 4TB on a 64-bit kernel, but 16TB on a 32-bit kernel,
MAX_LFS_FILESIZE being then more restrictive than swap vector layout.)
It's a shame that tmpfs should be more restrictive than ramfs, and this
limitation has now been noticed. Add another level to the swap vector?
No, it became obscure and hard to maintain, once I complicated it to
make use of highmem pages nine years ago: better choose another way.
Surely, if 2.4 had had the radix tree pagecache introduced in 2.5, then
tmpfs would never have invented its own peculiar radix tree: we would
have fitted swap entries into the common radix tree instead, in much the
same way as we fit swap entries into page tables.
And why should each file have a separate radix tree for its pages and
for its swap entries? The swap entries are required precisely where and
when the pages are not. We want to put them together in a single radix
tree: which can then avoid much of the locking which was needed to
prevent them from being exchanged underneath us.
This also avoids the waste of memory devoted to swap vectors, first in
the shmem_inode itself, then at least two more pages once a file grew
beyond 16 data pages (pages accounted by df and du, but not by memcg).
Allocated upfront, to avoid allocation when under swapping pressure, but
pure waste when CONFIG_SWAP is not set - I have never spattered around
the ifdefs to prevent that, preferring this move to sharing the common
radix tree instead.
There are three downsides to sharing the radix tree. One, that it binds
tmpfs more tightly to the rest of mm, either requiring knowledge of swap
entries in radix tree there, or duplication of its code here in shmem.c.
I believe that the simplifications and memory savings (and probable higher
performance, not yet measured) justify that.
Two, that on HIGHMEM systems with SWAP enabled, it's the lowmem radix
nodes that cannot be freed under memory pressure - whereas before it was
the less precious highmem swap vector pages that could not be freed.
I'm hoping that 64-bit has now been accessible for long enough, that the
highmem argument has grown much less persuasive.
Three, that swapoff is slower than it used to be on tmpfs files, since
it's using a simple generic mechanism not tailored to it: I find this
noticeable, and shall want to improve, but maybe nobody else will
notice.
So... now remove most of the old swap vector code from shmem.c. But,
for the moment, keep the simple i_direct vector of 16 pages, with simple
accessors shmem_put_swap() and shmem_get_swap(), as a toy implementation
to help mark where swap needs to be handled in subsequent patches.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:20 +04:00
pgoff_t index ;
2011-08-04 03:21:21 +04:00
int i ;
BUG_ON ( ( lend & ( PAGE_CACHE_SIZE - 1 ) ) ! = ( PAGE_CACHE_SIZE - 1 ) ) ;
pagevec_init ( & pvec , 0 ) ;
index = start ;
2011-08-04 03:21:22 +04:00
while ( index < = end ) {
pvec . nr = shmem_find_get_pages_and_swap ( mapping , index ,
min ( end - index , ( pgoff_t ) PAGEVEC_SIZE - 1 ) + 1 ,
pvec . pages , indices ) ;
if ( ! pvec . nr )
break ;
2011-08-04 03:21:21 +04:00
mem_cgroup_uncharge_start ( ) ;
for ( i = 0 ; i < pagevec_count ( & pvec ) ; i + + ) {
struct page * page = pvec . pages [ i ] ;
2011-08-04 03:21:22 +04:00
index = indices [ i ] ;
2011-08-04 03:21:21 +04:00
if ( index > end )
break ;
2011-08-04 03:21:22 +04:00
if ( radix_tree_exceptional_entry ( page ) ) {
nr_swaps_freed + = ! shmem_free_swap ( mapping ,
index , page ) ;
2011-08-04 03:21:21 +04:00
continue ;
2011-08-04 03:21:22 +04:00
}
if ( ! trylock_page ( page ) )
2011-08-04 03:21:21 +04:00
continue ;
2011-08-04 03:21:22 +04:00
if ( page - > mapping = = mapping ) {
VM_BUG_ON ( PageWriteback ( page ) ) ;
truncate_inode_page ( mapping , page ) ;
2011-08-04 03:21:21 +04:00
}
unlock_page ( page ) ;
}
2011-08-04 03:21:22 +04:00
shmem_pagevec_release ( & pvec ) ;
2011-08-04 03:21:21 +04:00
mem_cgroup_uncharge_end ( ) ;
cond_resched ( ) ;
index + + ;
}
2005-04-17 02:20:36 +04:00
2011-08-04 03:21:21 +04:00
if ( partial ) {
struct page * page = NULL ;
shmem_getpage ( inode , start - 1 , & page , SGP_READ , NULL ) ;
if ( page ) {
zero_user_segment ( page , partial , PAGE_CACHE_SIZE ) ;
set_page_dirty ( page ) ;
unlock_page ( page ) ;
page_cache_release ( page ) ;
}
}
index = start ;
for ( ; ; ) {
cond_resched ( ) ;
2011-08-04 03:21:22 +04:00
pvec . nr = shmem_find_get_pages_and_swap ( mapping , index ,
min ( end - index , ( pgoff_t ) PAGEVEC_SIZE - 1 ) + 1 ,
pvec . pages , indices ) ;
if ( ! pvec . nr ) {
2011-08-04 03:21:21 +04:00
if ( index = = start )
break ;
index = start ;
continue ;
}
2011-08-04 03:21:22 +04:00
if ( index = = start & & indices [ 0 ] > end ) {
shmem_pagevec_release ( & pvec ) ;
2011-08-04 03:21:21 +04:00
break ;
}
mem_cgroup_uncharge_start ( ) ;
for ( i = 0 ; i < pagevec_count ( & pvec ) ; i + + ) {
struct page * page = pvec . pages [ i ] ;
2011-08-04 03:21:22 +04:00
index = indices [ i ] ;
2011-08-04 03:21:21 +04:00
if ( index > end )
break ;
2011-08-04 03:21:22 +04:00
if ( radix_tree_exceptional_entry ( page ) ) {
nr_swaps_freed + = ! shmem_free_swap ( mapping ,
index , page ) ;
continue ;
}
2011-08-04 03:21:21 +04:00
lock_page ( page ) ;
2011-08-04 03:21:22 +04:00
if ( page - > mapping = = mapping ) {
VM_BUG_ON ( PageWriteback ( page ) ) ;
truncate_inode_page ( mapping , page ) ;
}
2011-08-04 03:21:21 +04:00
unlock_page ( page ) ;
}
2011-08-04 03:21:22 +04:00
shmem_pagevec_release ( & pvec ) ;
2011-08-04 03:21:21 +04:00
mem_cgroup_uncharge_end ( ) ;
index + + ;
}
tmpfs: take control of its truncate_range
2.6.35's new truncate convention gave tmpfs the opportunity to control
its file truncation, no longer enforced from outside by vmtruncate().
We shall want to build upon that, to handle pagecache and swap together.
Slightly redefine the ->truncate_range interface: let it now be called
between the unmap_mapping_range()s, with the filesystem responsible for
doing the truncate_inode_pages_range() from it - just as the filesystem
is nowadays responsible for doing that from its ->setattr.
Let's rename shmem_notify_change() to shmem_setattr(). Instead of
calling the generic truncate_setsize(), bring that code in so we can
call shmem_truncate_range() - which will later be updated to perform its
own variant of truncate_inode_pages_range().
Remove the punch_hole unmap_mapping_range() from shmem_truncate_range():
now that the COW's unmap_mapping_range() comes after ->truncate_range,
there is no need to call it a third time.
Export shmem_truncate_range() and add it to the list in shmem_fs.h, so
that i915_gem_object_truncate() can call it explicitly in future; get
this patch in first, then update drm/i915 once this is available (until
then, i915 will just be doing the truncate_inode_pages() twice).
Though introduced five years ago, no other filesystem is implementing
->truncate_range, and its only other user is madvise(,,MADV_REMOVE): we
expect to convert it to fallocate(,FALLOC_FL_PUNCH_HOLE,,) shortly,
whereupon ->truncate_range can be removed from inode_operations -
shmem_truncate_range() will help i915 across that transition too.
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-06-28 03:18:03 +04:00
2005-04-17 02:20:36 +04:00
spin_lock ( & info - > lock ) ;
2011-08-04 03:21:22 +04:00
info - > swapped - = nr_swaps_freed ;
2005-04-17 02:20:36 +04:00
shmem_recalc_inode ( inode ) ;
spin_unlock ( & info - > lock ) ;
tmpfs: demolish old swap vector support
The maximum size of a shmem/tmpfs file has been limited by the maximum
size of its triple-indirect swap vector. With 4kB page size, maximum
filesize was just over 2TB on a 32-bit kernel, but sadly one eighth of
that on a 64-bit kernel. (With 8kB page size, maximum filesize was just
over 4TB on a 64-bit kernel, but 16TB on a 32-bit kernel,
MAX_LFS_FILESIZE being then more restrictive than swap vector layout.)
It's a shame that tmpfs should be more restrictive than ramfs, and this
limitation has now been noticed. Add another level to the swap vector?
No, it became obscure and hard to maintain, once I complicated it to
make use of highmem pages nine years ago: better choose another way.
Surely, if 2.4 had had the radix tree pagecache introduced in 2.5, then
tmpfs would never have invented its own peculiar radix tree: we would
have fitted swap entries into the common radix tree instead, in much the
same way as we fit swap entries into page tables.
And why should each file have a separate radix tree for its pages and
for its swap entries? The swap entries are required precisely where and
when the pages are not. We want to put them together in a single radix
tree: which can then avoid much of the locking which was needed to
prevent them from being exchanged underneath us.
This also avoids the waste of memory devoted to swap vectors, first in
the shmem_inode itself, then at least two more pages once a file grew
beyond 16 data pages (pages accounted by df and du, but not by memcg).
Allocated upfront, to avoid allocation when under swapping pressure, but
pure waste when CONFIG_SWAP is not set - I have never spattered around
the ifdefs to prevent that, preferring this move to sharing the common
radix tree instead.
There are three downsides to sharing the radix tree. One, that it binds
tmpfs more tightly to the rest of mm, either requiring knowledge of swap
entries in radix tree there, or duplication of its code here in shmem.c.
I believe that the simplifications and memory savings (and probable higher
performance, not yet measured) justify that.
Two, that on HIGHMEM systems with SWAP enabled, it's the lowmem radix
nodes that cannot be freed under memory pressure - whereas before it was
the less precious highmem swap vector pages that could not be freed.
I'm hoping that 64-bit has now been accessible for long enough, that the
highmem argument has grown much less persuasive.
Three, that swapoff is slower than it used to be on tmpfs files, since
it's using a simple generic mechanism not tailored to it: I find this
noticeable, and shall want to improve, but maybe nobody else will
notice.
So... now remove most of the old swap vector code from shmem.c. But,
for the moment, keep the simple i_direct vector of 16 pages, with simple
accessors shmem_put_swap() and shmem_get_swap(), as a toy implementation
to help mark where swap needs to be handled in subsequent patches.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:20 +04:00
inode - > i_ctime = inode - > i_mtime = CURRENT_TIME ;
2005-04-17 02:20:36 +04:00
}
tmpfs: take control of its truncate_range
2.6.35's new truncate convention gave tmpfs the opportunity to control
its file truncation, no longer enforced from outside by vmtruncate().
We shall want to build upon that, to handle pagecache and swap together.
Slightly redefine the ->truncate_range interface: let it now be called
between the unmap_mapping_range()s, with the filesystem responsible for
doing the truncate_inode_pages_range() from it - just as the filesystem
is nowadays responsible for doing that from its ->setattr.
Let's rename shmem_notify_change() to shmem_setattr(). Instead of
calling the generic truncate_setsize(), bring that code in so we can
call shmem_truncate_range() - which will later be updated to perform its
own variant of truncate_inode_pages_range().
Remove the punch_hole unmap_mapping_range() from shmem_truncate_range():
now that the COW's unmap_mapping_range() comes after ->truncate_range,
there is no need to call it a third time.
Export shmem_truncate_range() and add it to the list in shmem_fs.h, so
that i915_gem_object_truncate() can call it explicitly in future; get
this patch in first, then update drm/i915 once this is available (until
then, i915 will just be doing the truncate_inode_pages() twice).
Though introduced five years ago, no other filesystem is implementing
->truncate_range, and its only other user is madvise(,,MADV_REMOVE): we
expect to convert it to fallocate(,FALLOC_FL_PUNCH_HOLE,,) shortly,
whereupon ->truncate_range can be removed from inode_operations -
shmem_truncate_range() will help i915 across that transition too.
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-06-28 03:18:03 +04:00
EXPORT_SYMBOL_GPL ( shmem_truncate_range ) ;
2005-04-17 02:20:36 +04:00
tmpfs: take control of its truncate_range
2.6.35's new truncate convention gave tmpfs the opportunity to control
its file truncation, no longer enforced from outside by vmtruncate().
We shall want to build upon that, to handle pagecache and swap together.
Slightly redefine the ->truncate_range interface: let it now be called
between the unmap_mapping_range()s, with the filesystem responsible for
doing the truncate_inode_pages_range() from it - just as the filesystem
is nowadays responsible for doing that from its ->setattr.
Let's rename shmem_notify_change() to shmem_setattr(). Instead of
calling the generic truncate_setsize(), bring that code in so we can
call shmem_truncate_range() - which will later be updated to perform its
own variant of truncate_inode_pages_range().
Remove the punch_hole unmap_mapping_range() from shmem_truncate_range():
now that the COW's unmap_mapping_range() comes after ->truncate_range,
there is no need to call it a third time.
Export shmem_truncate_range() and add it to the list in shmem_fs.h, so
that i915_gem_object_truncate() can call it explicitly in future; get
this patch in first, then update drm/i915 once this is available (until
then, i915 will just be doing the truncate_inode_pages() twice).
Though introduced five years ago, no other filesystem is implementing
->truncate_range, and its only other user is madvise(,,MADV_REMOVE): we
expect to convert it to fallocate(,FALLOC_FL_PUNCH_HOLE,,) shortly,
whereupon ->truncate_range can be removed from inode_operations -
shmem_truncate_range() will help i915 across that transition too.
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-06-28 03:18:03 +04:00
/*
 * shmem_setattr - the tmpfs ->setattr inode operation.
 *
 * Validates the requested attribute change, then (for a size change on a
 * regular file) performs the truncation itself rather than relying on the
 * generic truncate_setsize(): update i_size, unmap the affected range,
 * truncate pagecache and swap, then unmap again.  Finally copies the
 * remaining attributes into the inode and, when configured, reapplies the
 * POSIX ACL after a mode change.
 *
 * Returns 0 on success or a negative errno (from inode_change_ok() or
 * generic_acl_chmod()).
 */
static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	/* Check permissions and validity of the attribute change first. */
	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize != oldsize) {
			i_size_write(inode, newsize);
			inode->i_ctime = inode->i_mtime = CURRENT_TIME;
		}
		if (newsize < oldsize) {
			/*
			 * Shrinking: round up to a page boundary so a page
			 * straddling the new EOF is kept (its tail is zeroed
			 * by shmem_truncate_range), then drop mappings of the
			 * punched range before truncating pagecache and swap.
			 */
			loff_t holebegin = round_up(newsize, PAGE_SIZE);
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
			shmem_truncate_range(inode, newsize, (loff_t)-1);
			/* unmap again to remove racily COWed private pages */
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
		}
	}

	/* Copy the validated attributes (uid, gid, times, mode...) in. */
	setattr_copy(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
	/* A mode change may require the ACL to be rewritten to match. */
	if (attr->ia_valid & ATTR_MODE)
		error = generic_acl_chmod(inode);
#endif
	return error;
}
2010-06-06 03:10:41 +04:00
static void shmem_evict_inode ( struct inode * inode )
2005-04-17 02:20:36 +04:00
{
struct shmem_inode_info * info = SHMEM_I ( inode ) ;
tmpfs: implement generic xattr support
Implement generic xattrs for tmpfs filesystems. The Feodra project, while
trying to replace suid apps with file capabilities, realized that tmpfs,
which is used on the build systems, does not support file capabilities and
thus cannot be used to build packages which use file capabilities. Xattrs
are also needed for overlayfs.
The xattr interface is a bit odd. If a filesystem does not implement any
{get,set,list}xattr functions the VFS will call into some random LSM hooks
and the running LSM can then implement some method for handling xattrs.
SELinux for example provides a method to support security.selinux but no
other security.* xattrs.
As it stands today when one enables CONFIG_TMPFS_POSIX_ACL tmpfs will have
xattr handler routines specifically to handle acls. Because of this tmpfs
would loose the VFS/LSM helpers to support the running LSM. To make up
for that tmpfs had stub functions that did nothing but call into the LSM
hooks which implement the helpers.
This new patch does not use the LSM fallback functions and instead just
implements a native get/set/list xattr feature for the full security.* and
trusted.* namespace like a normal filesystem. This means that tmpfs can
now support both security.selinux and security.capability, which was not
previously possible.
The basic implementation is that I attach a:
struct shmem_xattr {
struct list_head list; /* anchored by shmem_inode_info->xattr_list */
char *name;
size_t size;
char value[0];
};
Into the struct shmem_inode_info for each xattr that is set. This
implementation could easily support the user.* namespace as well, except
some care needs to be taken to prevent large amounts of unswappable memory
being allocated for unprivileged users.
[mszeredi@suse.cz: new config option, suport trusted.*, support symlinks]
Signed-off-by: Eric Paris <eparis@redhat.com>
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Acked-by: Serge Hallyn <serge.hallyn@ubuntu.com>
Tested-by: Serge Hallyn <serge.hallyn@ubuntu.com>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Acked-by: Hugh Dickins <hughd@google.com>
Tested-by: Jordi Pujol <jordipujolp@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-05-25 04:12:39 +04:00
struct shmem_xattr * xattr , * nxattr ;
2005-04-17 02:20:36 +04:00
2010-05-26 19:05:36 +04:00
if ( inode - > i_mapping - > a_ops = = & shmem_aops ) {
2005-04-17 02:20:36 +04:00
shmem_unacct_size ( info - > flags , inode - > i_size ) ;
inode - > i_size = 0 ;
2010-05-26 19:05:36 +04:00
shmem_truncate_range ( inode , 0 , ( loff_t ) - 1 ) ;
2005-04-17 02:20:36 +04:00
if ( ! list_empty ( & info - > swaplist ) ) {
tmpfs: make shmem_unuse more preemptible
shmem_unuse is at present an unbroken search through every swap vector page of
every tmpfs file which might be swapped, all under shmem_swaplist_lock. This
dates from long ago, when the caller held mmlist_lock over it all too: long
gone, but there's never been much pressure for preemptible swapoff.
Make it a little more preemptible, replacing shmem_swaplist_lock by
shmem_swaplist_mutex, inserting a cond_resched in the main loop, and a
cond_resched_lock (on info->lock) at one convenient point in the
shmem_unuse_inode loop, where it has no outstanding kmap_atomic.
If we're serious about preemptible swapoff, there's much further to go e.g.
I'm stupid to let the kmap_atomics of the decreasingly significant HIGHMEM
case dictate preemptiblility for other configs. But as in the earlier patch
to make swapoff scan ptes preemptibly, my hidden agenda is really towards
making memcgroups work, hardly about preemptibility at all.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-02-05 09:28:52 +03:00
mutex_lock ( & shmem_swaplist_mutex ) ;
2005-04-17 02:20:36 +04:00
list_del_init ( & info - > swaplist ) ;
tmpfs: make shmem_unuse more preemptible
shmem_unuse is at present an unbroken search through every swap vector page of
every tmpfs file which might be swapped, all under shmem_swaplist_lock. This
dates from long ago, when the caller held mmlist_lock over it all too: long
gone, but there's never been much pressure for preemptible swapoff.
Make it a little more preemptible, replacing shmem_swaplist_lock by
shmem_swaplist_mutex, inserting a cond_resched in the main loop, and a
cond_resched_lock (on info->lock) at one convenient point in the
shmem_unuse_inode loop, where it has no outstanding kmap_atomic.
If we're serious about preemptible swapoff, there's much further to go e.g.
I'm stupid to let the kmap_atomics of the decreasingly significant HIGHMEM
case dictate preemptiblility for other configs. But as in the earlier patch
to make swapoff scan ptes preemptibly, my hidden agenda is really towards
making memcgroups work, hardly about preemptibility at all.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-02-05 09:28:52 +03:00
mutex_unlock ( & shmem_swaplist_mutex ) ;
2005-04-17 02:20:36 +04:00
}
2011-08-04 03:21:26 +04:00
} else
kfree ( info - > symlink ) ;
tmpfs: implement generic xattr support
Implement generic xattrs for tmpfs filesystems. The Feodra project, while
trying to replace suid apps with file capabilities, realized that tmpfs,
which is used on the build systems, does not support file capabilities and
thus cannot be used to build packages which use file capabilities. Xattrs
are also needed for overlayfs.
The xattr interface is a bit odd. If a filesystem does not implement any
{get,set,list}xattr functions the VFS will call into some random LSM hooks
and the running LSM can then implement some method for handling xattrs.
SELinux for example provides a method to support security.selinux but no
other security.* xattrs.
As it stands today when one enables CONFIG_TMPFS_POSIX_ACL tmpfs will have
xattr handler routines specifically to handle acls. Because of this tmpfs
would loose the VFS/LSM helpers to support the running LSM. To make up
for that tmpfs had stub functions that did nothing but call into the LSM
hooks which implement the helpers.
This new patch does not use the LSM fallback functions and instead just
implements a native get/set/list xattr feature for the full security.* and
trusted.* namespace like a normal filesystem. This means that tmpfs can
now support both security.selinux and security.capability, which was not
previously possible.
The basic implementation is that I attach a:
struct shmem_xattr {
struct list_head list; /* anchored by shmem_inode_info->xattr_list */
char *name;
size_t size;
char value[0];
};
Into the struct shmem_inode_info for each xattr that is set. This
implementation could easily support the user.* namespace as well, except
some care needs to be taken to prevent large amounts of unswappable memory
being allocated for unprivileged users.
[mszeredi@suse.cz: new config option, support trusted.*, support symlinks]
Signed-off-by: Eric Paris <eparis@redhat.com>
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Acked-by: Serge Hallyn <serge.hallyn@ubuntu.com>
Tested-by: Serge Hallyn <serge.hallyn@ubuntu.com>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Acked-by: Hugh Dickins <hughd@google.com>
Tested-by: Jordi Pujol <jordipujolp@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-05-25 04:12:39 +04:00
list_for_each_entry_safe ( xattr , nxattr , & info - > xattr_list , list ) {
kfree ( xattr - > name ) ;
kfree ( xattr ) ;
}
2005-06-22 04:15:04 +04:00
BUG_ON ( inode - > i_blocks ) ;
2008-02-05 09:28:47 +03:00
shmem_free_inode ( inode - > i_sb ) ;
2010-06-06 03:10:41 +04:00
end_writeback ( inode ) ;
2005-04-17 02:20:36 +04:00
}
2011-08-04 03:21:23 +04:00
/*
* If swap found in inode , free it and move page from swapcache to filecache .
*/
2011-08-04 03:21:21 +04:00
static int shmem_unuse_inode ( struct shmem_inode_info * info ,
swp_entry_t swap , struct page * page )
2005-04-17 02:20:36 +04:00
{
tmpfs: demolish old swap vector support
The maximum size of a shmem/tmpfs file has been limited by the maximum
size of its triple-indirect swap vector. With 4kB page size, maximum
filesize was just over 2TB on a 32-bit kernel, but sadly one eighth of
that on a 64-bit kernel. (With 8kB page size, maximum filesize was just
over 4TB on a 64-bit kernel, but 16TB on a 32-bit kernel,
MAX_LFS_FILESIZE being then more restrictive than swap vector layout.)
It's a shame that tmpfs should be more restrictive than ramfs, and this
limitation has now been noticed. Add another level to the swap vector?
No, it became obscure and hard to maintain, once I complicated it to
make use of highmem pages nine years ago: better choose another way.
Surely, if 2.4 had had the radix tree pagecache introduced in 2.5, then
tmpfs would never have invented its own peculiar radix tree: we would
have fitted swap entries into the common radix tree instead, in much the
same way as we fit swap entries into page tables.
And why should each file have a separate radix tree for its pages and
for its swap entries? The swap entries are required precisely where and
when the pages are not. We want to put them together in a single radix
tree: which can then avoid much of the locking which was needed to
prevent them from being exchanged underneath us.
This also avoids the waste of memory devoted to swap vectors, first in
the shmem_inode itself, then at least two more pages once a file grew
beyond 16 data pages (pages accounted by df and du, but not by memcg).
Allocated upfront, to avoid allocation when under swapping pressure, but
pure waste when CONFIG_SWAP is not set - I have never spattered around
the ifdefs to prevent that, preferring this move to sharing the common
radix tree instead.
There are three downsides to sharing the radix tree. One, that it binds
tmpfs more tightly to the rest of mm, either requiring knowledge of swap
entries in radix tree there, or duplication of its code here in shmem.c.
I believe that the simplications and memory savings (and probable higher
performance, not yet measured) justify that.
Two, that on HIGHMEM systems with SWAP enabled, it's the lowmem radix
nodes that cannot be freed under memory pressure - whereas before it was
the less precious highmem swap vector pages that could not be freed.
I'm hoping that 64-bit has now been accessible for long enough, that the
highmem argument has grown much less persuasive.
Three, that swapoff is slower than it used to be on tmpfs files, since
it's using a simple generic mechanism not tailored to it: I find this
noticeable, and shall want to improve, but maybe nobody else will
notice.
So... now remove most of the old swap vector code from shmem.c. But,
for the moment, keep the simple i_direct vector of 16 pages, with simple
accessors shmem_put_swap() and shmem_get_swap(), as a toy implementation
to help mark where swap needs to be handled in subsequent patches.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:20 +04:00
struct address_space * mapping = info - > vfs_inode . i_mapping ;
2011-08-04 03:21:23 +04:00
void * radswap ;
2011-08-04 03:21:21 +04:00
pgoff_t index ;
2008-02-05 09:28:51 +03:00
int error ;
2005-04-17 02:20:36 +04:00
2011-08-04 03:21:23 +04:00
radswap = swp_to_radix_entry ( swap ) ;
tmpfs radix_tree: locate_item to speed up swapoff
We have already acknowledged that swapoff of a tmpfs file is slower than
it was before conversion to the generic radix_tree: a little slower
there will be acceptable, if the hotter paths are faster.
But it was a shock to find swapoff of a 500MB file 20 times slower on my
laptop, taking 10 minutes; and at that rate it significantly slows down
my testing.
Now, most of that turned out to be overhead from PROVE_LOCKING and
PROVE_RCU: without those it was only 4 times slower than before; and
more realistic tests on other machines don't fare as badly.
I've tried a number of things to improve it, including tagging the swap
entries, then doing lookup by tag: I'd expected that to halve the time,
but in practice it's erratic, and often counter-productive.
The only change I've so far found to make a consistent improvement, is
to short-circuit the way we go back and forth, gang lookup packing
entries into the array supplied, then shmem scanning that array for the
target entry. Scanning in place doubles the speed, so it's now only
twice as slow as before (or three times slower when the PROVEs are on).
So, add radix_tree_locate_item() as an expedient, once-off,
single-caller hack to do the lookup directly in place. #ifdef it on
CONFIG_SHMEM and CONFIG_SWAP, as much to document its limited
applicability as save space in other configurations. And, sadly,
#include sched.h for cond_resched().
Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:27 +04:00
index = radix_tree_locate_item ( & mapping - > page_tree , radswap ) ;
2011-08-04 03:21:23 +04:00
if ( index = = - 1 )
tmpfs: demolish old swap vector support
The maximum size of a shmem/tmpfs file has been limited by the maximum
size of its triple-indirect swap vector. With 4kB page size, maximum
filesize was just over 2TB on a 32-bit kernel, but sadly one eighth of
that on a 64-bit kernel. (With 8kB page size, maximum filesize was just
over 4TB on a 64-bit kernel, but 16TB on a 32-bit kernel,
MAX_LFS_FILESIZE being then more restrictive than swap vector layout.)
It's a shame that tmpfs should be more restrictive than ramfs, and this
limitation has now been noticed. Add another level to the swap vector?
No, it became obscure and hard to maintain, once I complicated it to
make use of highmem pages nine years ago: better choose another way.
Surely, if 2.4 had had the radix tree pagecache introduced in 2.5, then
tmpfs would never have invented its own peculiar radix tree: we would
have fitted swap entries into the common radix tree instead, in much the
same way as we fit swap entries into page tables.
And why should each file have a separate radix tree for its pages and
for its swap entries? The swap entries are required precisely where and
when the pages are not. We want to put them together in a single radix
tree: which can then avoid much of the locking which was needed to
prevent them from being exchanged underneath us.
This also avoids the waste of memory devoted to swap vectors, first in
the shmem_inode itself, then at least two more pages once a file grew
beyond 16 data pages (pages accounted by df and du, but not by memcg).
Allocated upfront, to avoid allocation when under swapping pressure, but
pure waste when CONFIG_SWAP is not set - I have never spattered around
the ifdefs to prevent that, preferring this move to sharing the common
radix tree instead.
There are three downsides to sharing the radix tree. One, that it binds
tmpfs more tightly to the rest of mm, either requiring knowledge of swap
entries in radix tree there, or duplication of its code here in shmem.c.
I believe that the simplications and memory savings (and probable higher
performance, not yet measured) justify that.
Two, that on HIGHMEM systems with SWAP enabled, it's the lowmem radix
nodes that cannot be freed under memory pressure - whereas before it was
the less precious highmem swap vector pages that could not be freed.
I'm hoping that 64-bit has now been accessible for long enough, that the
highmem argument has grown much less persuasive.
Three, that swapoff is slower than it used to be on tmpfs files, since
it's using a simple generic mechanism not tailored to it: I find this
noticeable, and shall want to improve, but maybe nobody else will
notice.
So... now remove most of the old swap vector code from shmem.c. But,
for the moment, keep the simple i_direct vector of 16 pages, with simple
accessors shmem_put_swap() and shmem_get_swap(), as a toy implementation
to help mark where swap needs to be handled in subsequent patches.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:20 +04:00
return 0 ;
2008-02-05 09:28:53 +03:00
tmpfs: fix shmem_swaplist races
Intensive swapoff testing shows shmem_unuse spinning on an entry in
shmem_swaplist pointing to itself: how does that come about? Days pass...
First guess is this: shmem_delete_inode tests list_empty without taking the
global mutex (so the swapping case doesn't slow down the common case); but
there's an instant in shmem_unuse_inode's list_move_tail when the list entry
may appear empty (a rare case, because it's actually moving the head not the
the list member). So there's a danger of leaving the inode on the swaplist
when it's freed, then reinitialized to point to itself when reused. Fix that
by skipping the list_move_tail when it's a no-op, which happens to plug this.
But this same spinning then surfaces on another machine. Ah, I'd never
suspected it, but shmem_writepage's swaplist manipulation is unsafe: though we
still hold page lock, which would hold off inode deletion if the page were in
pagecache, it doesn't hold off once it's in swapcache (free_swap_and_cache
doesn't wait on locked pages). Hmm: we could put the the inode on swaplist
earlier, but then shmem_unuse_inode could never prune unswapped inodes.
Fix this with an igrab before dropping info->lock, as in shmem_unuse_inode;
though I am a little uneasy about the iput which has to follow - it works, and
I see nothing wrong with it, but it is surprising that shmem inode deletion
may now occur below shmem_writepage. Revisit this fix later?
And while we're looking at these races: the way shmem_unuse tests swapped
without holding info->lock looks unsafe, if we've more than one swap area: a
racing shmem_writepage on another page of the same inode could be putting it
in swapcache, just as we're deciding to remove the inode from swaplist -
there's a danger of going on swap without being listed, so a later swapoff
would hang, being unable to locate the entry. Move that test and removal down
into shmem_unuse_inode, once info->lock is held.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-02-05 09:28:55 +03:00
/*
* Move _head_ to start search for next from here .
2010-06-06 03:10:41 +04:00
* But be careful : shmem_evict_inode checks list_empty without taking
tmpfs: fix shmem_swaplist races
Intensive swapoff testing shows shmem_unuse spinning on an entry in
shmem_swaplist pointing to itself: how does that come about? Days pass...
First guess is this: shmem_delete_inode tests list_empty without taking the
global mutex (so the swapping case doesn't slow down the common case); but
there's an instant in shmem_unuse_inode's list_move_tail when the list entry
may appear empty (a rare case, because it's actually moving the head not the
the list member). So there's a danger of leaving the inode on the swaplist
when it's freed, then reinitialized to point to itself when reused. Fix that
by skipping the list_move_tail when it's a no-op, which happens to plug this.
But this same spinning then surfaces on another machine. Ah, I'd never
suspected it, but shmem_writepage's swaplist manipulation is unsafe: though we
still hold page lock, which would hold off inode deletion if the page were in
pagecache, it doesn't hold off once it's in swapcache (free_swap_and_cache
doesn't wait on locked pages). Hmm: we could put the the inode on swaplist
earlier, but then shmem_unuse_inode could never prune unswapped inodes.
Fix this with an igrab before dropping info->lock, as in shmem_unuse_inode;
though I am a little uneasy about the iput which has to follow - it works, and
I see nothing wrong with it, but it is surprising that shmem inode deletion
may now occur below shmem_writepage. Revisit this fix later?
And while we're looking at these races: the way shmem_unuse tests swapped
without holding info->lock looks unsafe, if we've more than one swap area: a
racing shmem_writepage on another page of the same inode could be putting it
in swapcache, just as we're deciding to remove the inode from swaplist -
there's a danger of going on swap without being listed, so a later swapoff
would hang, being unable to locate the entry. Move that test and removal down
into shmem_unuse_inode, once info->lock is held.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-02-05 09:28:55 +03:00
* mutex , and there ' s an instant in list_move_tail when info - > swaplist
tmpfs: demolish old swap vector support
The maximum size of a shmem/tmpfs file has been limited by the maximum
size of its triple-indirect swap vector. With 4kB page size, maximum
filesize was just over 2TB on a 32-bit kernel, but sadly one eighth of
that on a 64-bit kernel. (With 8kB page size, maximum filesize was just
over 4TB on a 64-bit kernel, but 16TB on a 32-bit kernel,
MAX_LFS_FILESIZE being then more restrictive than swap vector layout.)
It's a shame that tmpfs should be more restrictive than ramfs, and this
limitation has now been noticed. Add another level to the swap vector?
No, it became obscure and hard to maintain, once I complicated it to
make use of highmem pages nine years ago: better choose another way.
Surely, if 2.4 had had the radix tree pagecache introduced in 2.5, then
tmpfs would never have invented its own peculiar radix tree: we would
have fitted swap entries into the common radix tree instead, in much the
same way as we fit swap entries into page tables.
And why should each file have a separate radix tree for its pages and
for its swap entries? The swap entries are required precisely where and
when the pages are not. We want to put them together in a single radix
tree: which can then avoid much of the locking which was needed to
prevent them from being exchanged underneath us.
This also avoids the waste of memory devoted to swap vectors, first in
the shmem_inode itself, then at least two more pages once a file grew
beyond 16 data pages (pages accounted by df and du, but not by memcg).
Allocated upfront, to avoid allocation when under swapping pressure, but
pure waste when CONFIG_SWAP is not set - I have never spattered around
the ifdefs to prevent that, preferring this move to sharing the common
radix tree instead.
There are three downsides to sharing the radix tree. One, that it binds
tmpfs more tightly to the rest of mm, either requiring knowledge of swap
entries in radix tree there, or duplication of its code here in shmem.c.
I believe that the simplications and memory savings (and probable higher
performance, not yet measured) justify that.
Two, that on HIGHMEM systems with SWAP enabled, it's the lowmem radix
nodes that cannot be freed under memory pressure - whereas before it was
the less precious highmem swap vector pages that could not be freed.
I'm hoping that 64-bit has now been accessible for long enough, that the
highmem argument has grown much less persuasive.
Three, that swapoff is slower than it used to be on tmpfs files, since
it's using a simple generic mechanism not tailored to it: I find this
noticeable, and shall want to improve, but maybe nobody else will
notice.
So... now remove most of the old swap vector code from shmem.c. But,
for the moment, keep the simple i_direct vector of 16 pages, with simple
accessors shmem_put_swap() and shmem_get_swap(), as a toy implementation
to help mark where swap needs to be handled in subsequent patches.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:20 +04:00
* would appear empty , if it were the only one on shmem_swaplist .
tmpfs: fix shmem_swaplist races
Intensive swapoff testing shows shmem_unuse spinning on an entry in
shmem_swaplist pointing to itself: how does that come about? Days pass...
First guess is this: shmem_delete_inode tests list_empty without taking the
global mutex (so the swapping case doesn't slow down the common case); but
there's an instant in shmem_unuse_inode's list_move_tail when the list entry
may appear empty (a rare case, because it's actually moving the head not the
the list member). So there's a danger of leaving the inode on the swaplist
when it's freed, then reinitialized to point to itself when reused. Fix that
by skipping the list_move_tail when it's a no-op, which happens to plug this.
But this same spinning then surfaces on another machine. Ah, I'd never
suspected it, but shmem_writepage's swaplist manipulation is unsafe: though we
still hold page lock, which would hold off inode deletion if the page were in
pagecache, it doesn't hold off once it's in swapcache (free_swap_and_cache
doesn't wait on locked pages). Hmm: we could put the the inode on swaplist
earlier, but then shmem_unuse_inode could never prune unswapped inodes.
Fix this with an igrab before dropping info->lock, as in shmem_unuse_inode;
though I am a little uneasy about the iput which has to follow - it works, and
I see nothing wrong with it, but it is surprising that shmem inode deletion
may now occur below shmem_writepage. Revisit this fix later?
And while we're looking at these races: the way shmem_unuse tests swapped
without holding info->lock looks unsafe, if we've more than one swap area: a
racing shmem_writepage on another page of the same inode could be putting it
in swapcache, just as we're deciding to remove the inode from swaplist -
there's a danger of going on swap without being listed, so a later swapoff
would hang, being unable to locate the entry. Move that test and removal down
into shmem_unuse_inode, once info->lock is held.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-02-05 09:28:55 +03:00
*/
if ( shmem_swaplist . next ! = & info - > swaplist )
list_move_tail ( & shmem_swaplist , & info - > swaplist ) ;
2008-02-05 09:28:53 +03:00
2009-01-08 05:07:56 +03:00
/*
2011-05-12 02:13:37 +04:00
* We rely on shmem_swaplist_mutex , not only to protect the swaplist ,
* but also to hold up shmem_evict_inode ( ) : so inode cannot be freed
* beneath us ( pagelock doesn ' t help until the page is in pagecache ) .
2009-01-08 05:07:56 +03:00
*/
2011-08-04 03:21:23 +04:00
error = shmem_add_to_page_cache ( page , mapping , index ,
GFP_NOWAIT , radswap ) ;
2011-05-12 02:13:37 +04:00
/* which does mem_cgroup_uncharge_cache_page on error */
memcg: remove refcnt from page_cgroup
memcg: performance improvements
Patch Description
1/5 ... remove refcnt fron page_cgroup patch (shmem handling is fixed)
2/5 ... swapcache handling patch
3/5 ... add helper function for shmem's memory reclaim patch
4/5 ... optimize by likely/unlikely ppatch
5/5 ... remove redundunt check patch (shmem handling is fixed.)
Unix bench result.
== 2.6.26-rc2-mm1 + memory resource controller
Execl Throughput 2915.4 lps (29.6 secs, 3 samples)
C Compiler Throughput 1019.3 lpm (60.0 secs, 3 samples)
Shell Scripts (1 concurrent) 5796.0 lpm (60.0 secs, 3 samples)
Shell Scripts (8 concurrent) 1097.7 lpm (60.0 secs, 3 samples)
Shell Scripts (16 concurrent) 565.3 lpm (60.0 secs, 3 samples)
File Read 1024 bufsize 2000 maxblocks 1022128.0 KBps (30.0 secs, 3 samples)
File Write 1024 bufsize 2000 maxblocks 544057.0 KBps (30.0 secs, 3 samples)
File Copy 1024 bufsize 2000 maxblocks 346481.0 KBps (30.0 secs, 3 samples)
File Read 256 bufsize 500 maxblocks 319325.0 KBps (30.0 secs, 3 samples)
File Write 256 bufsize 500 maxblocks 148788.0 KBps (30.0 secs, 3 samples)
File Copy 256 bufsize 500 maxblocks 99051.0 KBps (30.0 secs, 3 samples)
File Read 4096 bufsize 8000 maxblocks 2058917.0 KBps (30.0 secs, 3 samples)
File Write 4096 bufsize 8000 maxblocks 1606109.0 KBps (30.0 secs, 3 samples)
File Copy 4096 bufsize 8000 maxblocks 854789.0 KBps (30.0 secs, 3 samples)
Dc: sqrt(2) to 99 decimal places 126145.2 lpm (30.0 secs, 3 samples)
INDEX VALUES
TEST BASELINE RESULT INDEX
Execl Throughput 43.0 2915.4 678.0
File Copy 1024 bufsize 2000 maxblocks 3960.0 346481.0 875.0
File Copy 256 bufsize 500 maxblocks 1655.0 99051.0 598.5
File Copy 4096 bufsize 8000 maxblocks 5800.0 854789.0 1473.8
Shell Scripts (8 concurrent) 6.0 1097.7 1829.5
=========
FINAL SCORE 991.3
== 2.6.26-rc2-mm1 + this set ==
Execl Throughput 3012.9 lps (29.9 secs, 3 samples)
C Compiler Throughput 981.0 lpm (60.0 secs, 3 samples)
Shell Scripts (1 concurrent) 5872.0 lpm (60.0 secs, 3 samples)
Shell Scripts (8 concurrent) 1120.3 lpm (60.0 secs, 3 samples)
Shell Scripts (16 concurrent) 578.0 lpm (60.0 secs, 3 samples)
File Read 1024 bufsize 2000 maxblocks 1003993.0 KBps (30.0 secs, 3 samples)
File Write 1024 bufsize 2000 maxblocks 550452.0 KBps (30.0 secs, 3 samples)
File Copy 1024 bufsize 2000 maxblocks 347159.0 KBps (30.0 secs, 3 samples)
File Read 256 bufsize 500 maxblocks 314644.0 KBps (30.0 secs, 3 samples)
File Write 256 bufsize 500 maxblocks 151852.0 KBps (30.0 secs, 3 samples)
File Copy 256 bufsize 500 maxblocks 101000.0 KBps (30.0 secs, 3 samples)
File Read 4096 bufsize 8000 maxblocks 2033256.0 KBps (30.0 secs, 3 samples)
File Write 4096 bufsize 8000 maxblocks 1611814.0 KBps (30.0 secs, 3 samples)
File Copy 4096 bufsize 8000 maxblocks 847979.0 KBps (30.0 secs, 3 samples)
Dc: sqrt(2) to 99 decimal places 128148.7 lpm (30.0 secs, 3 samples)
INDEX VALUES
TEST BASELINE RESULT INDEX
Execl Throughput 43.0 3012.9 700.7
File Copy 1024 bufsize 2000 maxblocks 3960.0 347159.0 876.7
File Copy 256 bufsize 500 maxblocks 1655.0 101000.0 610.3
File Copy 4096 bufsize 8000 maxblocks 5800.0 847979.0 1462.0
Shell Scripts (8 concurrent) 6.0 1120.3 1867.2
=========
FINAL SCORE 1004.6
This patch:
Remove refcnt from page_cgroup().
After this,
* A page is charged only when !page_mapped() && no page_cgroup is assigned.
* Anon page is newly mapped.
* File page is added to mapping->tree.
* A page is uncharged only when
* Anon page is fully unmapped.
* File page is removed from LRU.
There is no change in behavior from user's view.
This patch also removes unnecessary calls in rmap.c which was used only for
refcnt mangement.
[akpm@linux-foundation.org: fix warning]
[hugh@veritas.com: fix shmem_unuse_inode charging]
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Pavel Emelyanov <xemul@openvz.org>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: YAMAMOTO Takashi <yamamoto@valinux.co.jp>
Cc: Paul Menage <menage@google.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-07-25 12:47:14 +04:00
2011-07-26 04:12:37 +04:00
if ( error ! = - ENOMEM ) {
2011-08-04 03:21:23 +04:00
/*
* Truncation and eviction use free_swap_and_cache ( ) , which
* only does trylock page : if we raced , best clean up here .
*/
2008-02-05 09:28:50 +03:00
delete_from_swap_cache ( page ) ;
set_page_dirty ( page ) ;
2011-08-04 03:21:23 +04:00
if ( ! error ) {
spin_lock ( & info - > lock ) ;
info - > swapped - - ;
spin_unlock ( & info - > lock ) ;
swap_free ( swap ) ;
}
2008-02-05 09:28:53 +03:00
error = 1 ; /* not an error, but entry was found */
2005-04-17 02:20:36 +04:00
}
2008-02-05 09:28:53 +03:00
return error ;
2005-04-17 02:20:36 +04:00
}
/*
2011-08-04 03:21:23 +04:00
* Search through swapped inodes to find and replace swap by page .
2005-04-17 02:20:36 +04:00
*/
2011-08-04 03:21:21 +04:00
int shmem_unuse ( swp_entry_t swap , struct page * page )
2005-04-17 02:20:36 +04:00
{
2011-08-04 03:21:21 +04:00
struct list_head * this , * next ;
2005-04-17 02:20:36 +04:00
struct shmem_inode_info * info ;
int found = 0 ;
2011-05-12 02:13:37 +04:00
int error ;
/*
* Charge page using GFP_KERNEL while we can wait , before taking
* the shmem_swaplist_mutex which might hold up shmem_writepage ( ) .
* Charged back to the user ( not to caller ) when swap account is used .
*/
error = mem_cgroup_cache_charge ( page , current - > mm , GFP_KERNEL ) ;
if ( error )
goto out ;
2011-08-04 03:21:23 +04:00
/* No radix_tree_preload: swap entry keeps a place for page in tree */
2005-04-17 02:20:36 +04:00
tmpfs: make shmem_unuse more preemptible
shmem_unuse is at present an unbroken search through every swap vector page of
every tmpfs file which might be swapped, all under shmem_swaplist_lock. This
dates from long ago, when the caller held mmlist_lock over it all too: long
gone, but there's never been much pressure for preemptible swapoff.
Make it a little more preemptible, replacing shmem_swaplist_lock by
shmem_swaplist_mutex, inserting a cond_resched in the main loop, and a
cond_resched_lock (on info->lock) at one convenient point in the
shmem_unuse_inode loop, where it has no outstanding kmap_atomic.
If we're serious about preemptible swapoff, there's much further to go e.g.
I'm stupid to let the kmap_atomics of the decreasingly significant HIGHMEM
case dictate preemptiblility for other configs. But as in the earlier patch
to make swapoff scan ptes preemptibly, my hidden agenda is really towards
making memcgroups work, hardly about preemptibility at all.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-02-05 09:28:52 +03:00
mutex_lock ( & shmem_swaplist_mutex ) ;
2011-08-04 03:21:21 +04:00
list_for_each_safe ( this , next , & shmem_swaplist ) {
info = list_entry ( this , struct shmem_inode_info , swaplist ) ;
tmpfs: demolish old swap vector support
The maximum size of a shmem/tmpfs file has been limited by the maximum
size of its triple-indirect swap vector. With 4kB page size, maximum
filesize was just over 2TB on a 32-bit kernel, but sadly one eighth of
that on a 64-bit kernel. (With 8kB page size, maximum filesize was just
over 4TB on a 64-bit kernel, but 16TB on a 32-bit kernel,
MAX_LFS_FILESIZE being then more restrictive than swap vector layout.)
It's a shame that tmpfs should be more restrictive than ramfs, and this
limitation has now been noticed. Add another level to the swap vector?
No, it became obscure and hard to maintain, once I complicated it to
make use of highmem pages nine years ago: better choose another way.
Surely, if 2.4 had had the radix tree pagecache introduced in 2.5, then
tmpfs would never have invented its own peculiar radix tree: we would
have fitted swap entries into the common radix tree instead, in much the
same way as we fit swap entries into page tables.
And why should each file have a separate radix tree for its pages and
for its swap entries? The swap entries are required precisely where and
when the pages are not. We want to put them together in a single radix
tree: which can then avoid much of the locking which was needed to
prevent them from being exchanged underneath us.
This also avoids the waste of memory devoted to swap vectors, first in
the shmem_inode itself, then at least two more pages once a file grew
beyond 16 data pages (pages accounted by df and du, but not by memcg).
Allocated upfront, to avoid allocation when under swapping pressure, but
pure waste when CONFIG_SWAP is not set - I have never spattered around
the ifdefs to prevent that, preferring this move to sharing the common
radix tree instead.
There are three downsides to sharing the radix tree. One, that it binds
tmpfs more tightly to the rest of mm, either requiring knowledge of swap
entries in radix tree there, or duplication of its code here in shmem.c.
I believe that the simplications and memory savings (and probable higher
performance, not yet measured) justify that.
Two, that on HIGHMEM systems with SWAP enabled, it's the lowmem radix
nodes that cannot be freed under memory pressure - whereas before it was
the less precious highmem swap vector pages that could not be freed.
I'm hoping that 64-bit has now been accessible for long enough, that the
highmem argument has grown much less persuasive.
Three, that swapoff is slower than it used to be on tmpfs files, since
it's using a simple generic mechanism not tailored to it: I find this
noticeable, and shall want to improve, but maybe nobody else will
notice.
So... now remove most of the old swap vector code from shmem.c. But,
for the moment, keep the simple i_direct vector of 16 pages, with simple
accessors shmem_put_swap() and shmem_get_swap(), as a toy implementation
to help mark where swap needs to be handled in subsequent patches.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:20 +04:00
if ( info - > swapped )
2011-08-04 03:21:21 +04:00
found = shmem_unuse_inode ( info , swap , page ) ;
2011-08-04 03:21:25 +04:00
else
list_del_init ( & info - > swaplist ) ;
tmpfs: make shmem_unuse more preemptible
shmem_unuse is at present an unbroken search through every swap vector page of
every tmpfs file which might be swapped, all under shmem_swaplist_lock. This
dates from long ago, when the caller held mmlist_lock over it all too: long
gone, but there's never been much pressure for preemptible swapoff.
Make it a little more preemptible, replacing shmem_swaplist_lock by
shmem_swaplist_mutex, inserting a cond_resched in the main loop, and a
cond_resched_lock (on info->lock) at one convenient point in the
shmem_unuse_inode loop, where it has no outstanding kmap_atomic.
If we're serious about preemptible swapoff, there's much further to go e.g.
I'm stupid to let the kmap_atomics of the decreasingly significant HIGHMEM
case dictate preemptiblility for other configs. But as in the earlier patch
to make swapoff scan ptes preemptibly, my hidden agenda is really towards
making memcgroups work, hardly about preemptibility at all.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-02-05 09:28:52 +03:00
cond_resched ( ) ;
2008-02-05 09:28:53 +03:00
if ( found )
2011-05-12 02:13:37 +04:00
break ;
2005-04-17 02:20:36 +04:00
}
tmpfs: make shmem_unuse more preemptible
shmem_unuse is at present an unbroken search through every swap vector page of
every tmpfs file which might be swapped, all under shmem_swaplist_lock. This
dates from long ago, when the caller held mmlist_lock over it all too: long
gone, but there's never been much pressure for preemptible swapoff.
Make it a little more preemptible, replacing shmem_swaplist_lock by
shmem_swaplist_mutex, inserting a cond_resched in the main loop, and a
cond_resched_lock (on info->lock) at one convenient point in the
shmem_unuse_inode loop, where it has no outstanding kmap_atomic.
If we're serious about preemptible swapoff, there's much further to go e.g.
I'm stupid to let the kmap_atomics of the decreasingly significant HIGHMEM
case dictate preemptiblility for other configs. But as in the earlier patch
to make swapoff scan ptes preemptibly, my hidden agenda is really towards
making memcgroups work, hardly about preemptibility at all.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-02-05 09:28:52 +03:00
mutex_unlock ( & shmem_swaplist_mutex ) ;
2011-05-12 02:13:37 +04:00
if ( ! found )
mem_cgroup_uncharge_cache_page ( page ) ;
if ( found < 0 )
error = found ;
out :
2009-12-15 04:58:47 +03:00
unlock_page ( page ) ;
page_cache_release ( page ) ;
2011-05-12 02:13:37 +04:00
return error ;
2005-04-17 02:20:36 +04:00
}
/*
* Move the page from the page cache to the swap cache .
*/
static int shmem_writepage ( struct page * page , struct writeback_control * wbc )
{
struct shmem_inode_info * info ;
struct address_space * mapping ;
struct inode * inode ;
2011-08-04 03:21:25 +04:00
swp_entry_t swap ;
pgoff_t index ;
2005-04-17 02:20:36 +04:00
BUG_ON ( ! PageLocked ( page ) ) ;
mapping = page - > mapping ;
index = page - > index ;
inode = mapping - > host ;
info = SHMEM_I ( inode ) ;
if ( info - > flags & VM_LOCKED )
goto redirty ;
2008-02-05 09:28:51 +03:00
if ( ! total_swap_pages )
2005-04-17 02:20:36 +04:00
goto redirty ;
2008-02-05 09:28:51 +03:00
/*
* shmem_backing_dev_info ' s capabilities prevent regular writeback or
* sync from ever calling shmem_writepage ; but a stacking filesystem
2011-07-26 04:12:37 +04:00
* might use - > writepage of its underlying filesystem , in which case
2008-02-05 09:28:51 +03:00
* tmpfs should write out to swap only in response to memory pressure ,
2011-07-26 04:12:37 +04:00
* and not for the writeback threads or sync .
2008-02-05 09:28:51 +03:00
*/
2011-07-26 04:12:37 +04:00
if ( ! wbc - > for_reclaim ) {
WARN_ON_ONCE ( 1 ) ; /* Still happens? Tell us about it! */
goto redirty ;
}
swap = get_swap_page ( ) ;
if ( ! swap . val )
goto redirty ;
2008-02-05 09:28:51 +03:00
tmpfs: fix race between umount and writepage
Konstanin Khlebnikov reports that a dangerous race between umount and
shmem_writepage can be reproduced by this script:
for i in {1..300} ; do
mkdir $i
while true ; do
mount -t tmpfs none $i
dd if=/dev/zero of=$i/test bs=1M count=$(($RANDOM % 100))
umount $i
done &
done
on a 6xCPU node with 8Gb RAM: kernel very unstable after this accident. =)
Kernel log:
VFS: Busy inodes after unmount of tmpfs.
Self-destruct in 5 seconds. Have a nice day...
WARNING: at lib/list_debug.c:53 __list_del_entry+0x8d/0x98()
list_del corruption. prev->next should be ffff880222fdaac8, but was (null)
Pid: 11222, comm: mount.tmpfs Not tainted 2.6.39-rc2+ #4
Call Trace:
warn_slowpath_common+0x80/0x98
warn_slowpath_fmt+0x41/0x43
__list_del_entry+0x8d/0x98
evict+0x50/0x113
iput+0x138/0x141
...
BUG: unable to handle kernel paging request at ffffffffffffffff
IP: shmem_free_blocks+0x18/0x4c
Pid: 10422, comm: dd Tainted: G W 2.6.39-rc2+ #4
Call Trace:
shmem_recalc_inode+0x61/0x66
shmem_writepage+0xba/0x1dc
pageout+0x13c/0x24c
shrink_page_list+0x28e/0x4be
shrink_inactive_list+0x21f/0x382
...
shmem_writepage() calls igrab() on the inode for the page which came from
page reclaim, to add it later into shmem_swaplist for swapoff operation.
This igrab() can race with super-block deactivating process:
shrink_inactive_list() deactivate_super()
pageout() tmpfs_fs_type->kill_sb()
shmem_writepage() kill_litter_super()
generic_shutdown_super()
evict_inodes()
igrab()
atomic_read(&inode->i_count)
skip-inode
iput()
if (!list_empty(&sb->s_inodes))
printk("VFS: Busy inodes after...
This igrap-iput pair was added in commit 1b1b32f2c6f6 "tmpfs: fix
shmem_swaplist races" based on incorrect assumptions: igrab() protects the
inode from concurrent eviction by deletion, but it does nothing to protect
it from concurrent unmounting, which goes ahead despite the raised
i_count.
So this use of igrab() was wrong all along, but the race made much worse
in 2.6.37 when commit 63997e98a3be "split invalidate_inodes()" replaced
two attempts at invalidate_inodes() by a single evict_inodes().
Konstantin posted a plausible patch, raising sb->s_active too: I'm unsure
whether it was correct or not; but burnt once by igrab(), I am sure that
we don't want to rely more deeply upon externals here.
Fix it by adding the inode to shmem_swaplist earlier, while the page lock
on page in page cache still secures the inode against eviction, without
artifically raising i_count. It was originally added later because
shmem_unuse_inode() is liable to remove an inode from the list while it's
unswapped; but we can guard against that by taking spinlock before
dropping mutex.
Reported-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Signed-off-by: Hugh Dickins <hughd@google.com>
Tested-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-05-12 02:13:36 +04:00
/*
* Add inode to shmem_unuse ( ) ' s list of swapped - out inodes ,
2011-08-04 03:21:25 +04:00
* if it ' s not already there . Do it now before the page is
* moved to swap cache , when its pagelock no longer protects
tmpfs: fix race between umount and writepage
Konstanin Khlebnikov reports that a dangerous race between umount and
shmem_writepage can be reproduced by this script:
for i in {1..300} ; do
mkdir $i
while true ; do
mount -t tmpfs none $i
dd if=/dev/zero of=$i/test bs=1M count=$(($RANDOM % 100))
umount $i
done &
done
on a 6xCPU node with 8Gb RAM: kernel very unstable after this accident. =)
Kernel log:
VFS: Busy inodes after unmount of tmpfs.
Self-destruct in 5 seconds. Have a nice day...
WARNING: at lib/list_debug.c:53 __list_del_entry+0x8d/0x98()
list_del corruption. prev->next should be ffff880222fdaac8, but was (null)
Pid: 11222, comm: mount.tmpfs Not tainted 2.6.39-rc2+ #4
Call Trace:
warn_slowpath_common+0x80/0x98
warn_slowpath_fmt+0x41/0x43
__list_del_entry+0x8d/0x98
evict+0x50/0x113
iput+0x138/0x141
...
BUG: unable to handle kernel paging request at ffffffffffffffff
IP: shmem_free_blocks+0x18/0x4c
Pid: 10422, comm: dd Tainted: G W 2.6.39-rc2+ #4
Call Trace:
shmem_recalc_inode+0x61/0x66
shmem_writepage+0xba/0x1dc
pageout+0x13c/0x24c
shrink_page_list+0x28e/0x4be
shrink_inactive_list+0x21f/0x382
...
shmem_writepage() calls igrab() on the inode for the page which came from
page reclaim, to add it later into shmem_swaplist for swapoff operation.
This igrab() can race with super-block deactivating process:
shrink_inactive_list() deactivate_super()
pageout() tmpfs_fs_type->kill_sb()
shmem_writepage() kill_litter_super()
generic_shutdown_super()
evict_inodes()
igrab()
atomic_read(&inode->i_count)
skip-inode
iput()
if (!list_empty(&sb->s_inodes))
printk("VFS: Busy inodes after...
This igrap-iput pair was added in commit 1b1b32f2c6f6 "tmpfs: fix
shmem_swaplist races" based on incorrect assumptions: igrab() protects the
inode from concurrent eviction by deletion, but it does nothing to protect
it from concurrent unmounting, which goes ahead despite the raised
i_count.
So this use of igrab() was wrong all along, but the race made much worse
in 2.6.37 when commit 63997e98a3be "split invalidate_inodes()" replaced
two attempts at invalidate_inodes() by a single evict_inodes().
Konstantin posted a plausible patch, raising sb->s_active too: I'm unsure
whether it was correct or not; but burnt once by igrab(), I am sure that
we don't want to rely more deeply upon externals here.
Fix it by adding the inode to shmem_swaplist earlier, while the page lock
on page in page cache still secures the inode against eviction, without
artifically raising i_count. It was originally added later because
shmem_unuse_inode() is liable to remove an inode from the list while it's
unswapped; but we can guard against that by taking spinlock before
dropping mutex.
Reported-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Signed-off-by: Hugh Dickins <hughd@google.com>
Tested-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-05-12 02:13:36 +04:00
* the inode from eviction . But don ' t unlock the mutex until
2011-08-04 03:21:25 +04:00
* we ' ve incremented swapped , because shmem_unuse_inode ( ) will
* prune a ! swapped inode from the swaplist under this mutex .
tmpfs: fix race between umount and writepage
Konstanin Khlebnikov reports that a dangerous race between umount and
shmem_writepage can be reproduced by this script:
for i in {1..300} ; do
mkdir $i
while true ; do
mount -t tmpfs none $i
dd if=/dev/zero of=$i/test bs=1M count=$(($RANDOM % 100))
umount $i
done &
done
on a 6xCPU node with 8Gb RAM: kernel very unstable after this accident. =)
Kernel log:
VFS: Busy inodes after unmount of tmpfs.
Self-destruct in 5 seconds. Have a nice day...
WARNING: at lib/list_debug.c:53 __list_del_entry+0x8d/0x98()
list_del corruption. prev->next should be ffff880222fdaac8, but was (null)
Pid: 11222, comm: mount.tmpfs Not tainted 2.6.39-rc2+ #4
Call Trace:
warn_slowpath_common+0x80/0x98
warn_slowpath_fmt+0x41/0x43
__list_del_entry+0x8d/0x98
evict+0x50/0x113
iput+0x138/0x141
...
BUG: unable to handle kernel paging request at ffffffffffffffff
IP: shmem_free_blocks+0x18/0x4c
Pid: 10422, comm: dd Tainted: G W 2.6.39-rc2+ #4
Call Trace:
shmem_recalc_inode+0x61/0x66
shmem_writepage+0xba/0x1dc
pageout+0x13c/0x24c
shrink_page_list+0x28e/0x4be
shrink_inactive_list+0x21f/0x382
...
shmem_writepage() calls igrab() on the inode for the page which came from
page reclaim, to add it later into shmem_swaplist for swapoff operation.
This igrab() can race with super-block deactivating process:
shrink_inactive_list() deactivate_super()
pageout() tmpfs_fs_type->kill_sb()
shmem_writepage() kill_litter_super()
generic_shutdown_super()
evict_inodes()
igrab()
atomic_read(&inode->i_count)
skip-inode
iput()
if (!list_empty(&sb->s_inodes))
printk("VFS: Busy inodes after...
This igrap-iput pair was added in commit 1b1b32f2c6f6 "tmpfs: fix
shmem_swaplist races" based on incorrect assumptions: igrab() protects the
inode from concurrent eviction by deletion, but it does nothing to protect
it from concurrent unmounting, which goes ahead despite the raised
i_count.
So this use of igrab() was wrong all along, but the race made much worse
in 2.6.37 when commit 63997e98a3be "split invalidate_inodes()" replaced
two attempts at invalidate_inodes() by a single evict_inodes().
Konstantin posted a plausible patch, raising sb->s_active too: I'm unsure
whether it was correct or not; but burnt once by igrab(), I am sure that
we don't want to rely more deeply upon externals here.
Fix it by adding the inode to shmem_swaplist earlier, while the page lock
on page in page cache still secures the inode against eviction, without
artifically raising i_count. It was originally added later because
shmem_unuse_inode() is liable to remove an inode from the list while it's
unswapped; but we can guard against that by taking spinlock before
dropping mutex.
Reported-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Signed-off-by: Hugh Dickins <hughd@google.com>
Tested-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-05-12 02:13:36 +04:00
*/
2011-07-26 04:12:37 +04:00
mutex_lock ( & shmem_swaplist_mutex ) ;
if ( list_empty ( & info - > swaplist ) )
list_add_tail ( & info - > swaplist , & shmem_swaplist ) ;
tmpfs: fix race between umount and writepage
Konstanin Khlebnikov reports that a dangerous race between umount and
shmem_writepage can be reproduced by this script:
for i in {1..300} ; do
mkdir $i
while true ; do
mount -t tmpfs none $i
dd if=/dev/zero of=$i/test bs=1M count=$(($RANDOM % 100))
umount $i
done &
done
on a 6xCPU node with 8Gb RAM: kernel very unstable after this accident. =)
Kernel log:
VFS: Busy inodes after unmount of tmpfs.
Self-destruct in 5 seconds. Have a nice day...
WARNING: at lib/list_debug.c:53 __list_del_entry+0x8d/0x98()
list_del corruption. prev->next should be ffff880222fdaac8, but was (null)
Pid: 11222, comm: mount.tmpfs Not tainted 2.6.39-rc2+ #4
Call Trace:
warn_slowpath_common+0x80/0x98
warn_slowpath_fmt+0x41/0x43
__list_del_entry+0x8d/0x98
evict+0x50/0x113
iput+0x138/0x141
...
BUG: unable to handle kernel paging request at ffffffffffffffff
IP: shmem_free_blocks+0x18/0x4c
Pid: 10422, comm: dd Tainted: G W 2.6.39-rc2+ #4
Call Trace:
shmem_recalc_inode+0x61/0x66
shmem_writepage+0xba/0x1dc
pageout+0x13c/0x24c
shrink_page_list+0x28e/0x4be
shrink_inactive_list+0x21f/0x382
...
shmem_writepage() calls igrab() on the inode for the page which came from
page reclaim, to add it later into shmem_swaplist for swapoff operation.
This igrab() can race with super-block deactivating process:
shrink_inactive_list() deactivate_super()
pageout() tmpfs_fs_type->kill_sb()
shmem_writepage() kill_litter_super()
generic_shutdown_super()
evict_inodes()
igrab()
atomic_read(&inode->i_count)
skip-inode
iput()
if (!list_empty(&sb->s_inodes))
printk("VFS: Busy inodes after...
This igrap-iput pair was added in commit 1b1b32f2c6f6 "tmpfs: fix
shmem_swaplist races" based on incorrect assumptions: igrab() protects the
inode from concurrent eviction by deletion, but it does nothing to protect
it from concurrent unmounting, which goes ahead despite the raised
i_count.
So this use of igrab() was wrong all along, but the race made much worse
in 2.6.37 when commit 63997e98a3be "split invalidate_inodes()" replaced
two attempts at invalidate_inodes() by a single evict_inodes().
Konstantin posted a plausible patch, raising sb->s_active too: I'm unsure
whether it was correct or not; but burnt once by igrab(), I am sure that
we don't want to rely more deeply upon externals here.
Fix it by adding the inode to shmem_swaplist earlier, while the page lock
on page in page cache still secures the inode against eviction, without
artifically raising i_count. It was originally added later because
shmem_unuse_inode() is liable to remove an inode from the list while it's
unswapped; but we can guard against that by taking spinlock before
dropping mutex.
Reported-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Signed-off-by: Hugh Dickins <hughd@google.com>
Tested-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-05-12 02:13:36 +04:00
2011-07-26 04:12:37 +04:00
if ( add_to_swap_cache ( page , swap , GFP_ATOMIC ) = = 0 ) {
2009-12-15 04:58:47 +03:00
swap_shmem_alloc ( swap ) ;
2011-08-04 03:21:25 +04:00
shmem_delete_from_page_cache ( page , swp_to_radix_entry ( swap ) ) ;
spin_lock ( & info - > lock ) ;
info - > swapped + + ;
shmem_recalc_inode ( inode ) ;
tmpfs: fix race between truncate and writepage
While running fsx on tmpfs with a memhog then swapoff, swapoff was hanging
(interruptibly), repeatedly failing to locate the owner of a 0xff entry in
the swap_map.
Although shmem_writepage() does abandon when it sees incoming page index
is beyond eof, there was still a window in which shmem_truncate_range()
could come in between writepage's dropping lock and updating swap_map,
find the half-completed swap_map entry, and in trying to free it,
leave it in a state that swap_shmem_alloc() could not correct.
Arguably a bug in __swap_duplicate()'s and swap_entry_free()'s handling
of the different cases, but easiest to fix by moving swap_shmem_alloc()
under cover of the lock.
More interesting than the bug: it's been there since 2.6.33, why could
I not see it with earlier kernels? The mmotm of two weeks ago seems to
have some magic for generating races, this is just one of three I found.
With yesterday's git I first saw this in mainline, bisected in search of
that magic, but the easy reproducibility evaporated. Oh well, fix the bug.
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: stable@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-05-29 00:14:09 +04:00
spin_unlock ( & info - > lock ) ;
2011-08-04 03:21:25 +04:00
mutex_unlock ( & shmem_swaplist_mutex ) ;
2008-02-05 09:28:51 +03:00
BUG_ON ( page_mapped ( page ) ) ;
shmem: writepage directly to swap
Synopsis: if shmem_writepage calls swap_writepage directly, most shmem
swap loads benefit, and a catastrophic interaction between SLUB and some
flash storage is avoided.
shmem_writepage() has always been peculiar in making no attempt to write:
it has just transferred a shmem page from file cache to swap cache, then
let that page make its way around the LRU again before being written and
freed.
The idea was that people use tmpfs because they want those pages to stay
in RAM; so although we give it an overflow to swap, we should resist
writing too soon, giving those pages a second chance before they can be
reclaimed.
That was always questionable, and I've toyed with this patch for years;
but never had a clear justification to depart from the original design.
It became more questionable in 2.6.28, when the split LRU patches classed
shmem and tmpfs pages as SwapBacked rather than as file_cache: that in
itself gives them more resistance to reclaim than normal file pages. I
prepared this patch for 2.6.29, but the merge window arrived before I'd
completed gathering statistics to justify sending it in.
Then while comparing SLQB against SLUB, running SLUB on a laptop I'd
habitually used with SLAB, I found SLUB to run my tmpfs kbuild swapping
tests five times slower than SLAB or SLQB - other machines slower too, but
nowhere near so bad. Simpler "cp -a" swapping tests showed the same.
slub_max_order=0 brings sanity to all, but heavy swapping is too far from
normal to justify such a tuning. The crucial factor on that laptop turns
out to be that I'm using an SD card for swap. What happens is this:
By default, SLUB uses order-2 pages for shmem_inode_cache (and many other
fs inodes), so creating tmpfs files under memory pressure brings lumpy
reclaim into play. One subpage of the order is chosen from the bottom of
the LRU as usual, then the other three picked out from their random
positions on the LRUs.
In a tmpfs load, many of these pages will be ones which already passed
through shmem_writepage, so already have swap allocated. And though their
offsets on swap were probably allocated sequentially, now that the pages
are picked off at random, their swap offsets are scattered.
But the flash storage on the SD card is very sensitive to having its
writes merged: once swap is written at scattered offsets, performance
falls apart. Rotating disk seeks increase too, but less disastrously.
So: stop giving shmem/tmpfs pages a second pass around the LRU, write them
out to swap as soon as their swap has been allocated.
It's surely possible to devise an artificial load which runs faster the
old way, one whose sizing is such that the tmpfs pages on their second
pass are the ones that are wanted again, and other pages not.
But I've not yet found such a load: on all machines, under the loads I've
tried, immediate swap_writepage speeds up shmem swapping: especially when
using the SLUB allocator (and more effectively than slub_max_order=0), but
also with the others; and it also reduces the variance between runs. How
much faster varies widely: a factor of five is rare, 5% is common.
One load which might have suffered: imagine a swapping shmem load in a
limited mem_cgroup on a machine with plenty of memory. Before 2.6.29 the
swapcache was not charged, and such a load would have run quickest with
the shmem swapcache never written to swap. But now swapcache is charged,
so even this load benefits from shmem_writepage directly to swap.
Apologies for the #ifndef CONFIG_SWAP swap_writepage() stub in swap.h:
it's silly because that will never get called; but refactoring shmem.c
sensibly according to CONFIG_SWAP will be a separate task.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2009-04-01 02:23:33 +04:00
swap_writepage ( page , wbc ) ;
2005-04-17 02:20:36 +04:00
return 0 ;
}
2011-08-04 03:21:25 +04:00
mutex_unlock ( & shmem_swaplist_mutex ) ;
2009-06-17 02:32:52 +04:00
swapcache_free ( swap , NULL ) ;
2005-04-17 02:20:36 +04:00
redirty :
set_page_dirty ( page ) ;
2008-02-05 09:28:51 +03:00
if ( wbc - > for_reclaim )
return AOP_WRITEPAGE_ACTIVATE ; /* Return with page locked */
unlock_page ( page ) ;
return 0 ;
2005-04-17 02:20:36 +04:00
}
# ifdef CONFIG_NUMA
2008-02-08 15:21:48 +03:00
# ifdef CONFIG_TMPFS
mempolicy: use struct mempolicy pointer in shmem_sb_info
This patch replaces the mempolicy mode, mode_flags, and nodemask in the
shmem_sb_info struct with a struct mempolicy pointer, initialized to NULL.
This removes dependency on the details of mempolicy from shmem.c and hugetlbfs
inode.c and simplifies the interfaces.
mpol_parse_str() in mempolicy.c is changed to return, via a pointer to a
pointer arg, a struct mempolicy pointer on success. For MPOL_DEFAULT, the
returned pointer is NULL. Further, mpol_parse_str() now takes a 'no_context'
argument that causes the input nodemask to be stored in the w.user_nodemask of
the created mempolicy for use when the mempolicy is installed in a tmpfs inode
shared policy tree. At that time, any cpuset contextualization is applied to
the original input nodemask. This preserves the previous behavior where the
input nodemask was stored in the superblock. We can think of the returned
mempolicy as "context free".
Because mpol_parse_str() is now calling mpol_new(), we can remove from
mpol_to_str() the semantic checks that mpol_new() already performs.
Add 'no_context' parameter to mpol_to_str() to specify that it should format
the nodemask in w.user_nodemask for 'bind' and 'interleave' policies.
Change mpol_shared_policy_init() to take a pointer to a "context free" struct
mempolicy and to create a new, "contextualized" mempolicy using the mode,
mode_flags and user_nodemask from the input mempolicy.
Note: we know that the mempolicy passed to mpol_to_str() or
mpol_shared_policy_init() from a tmpfs superblock is "context free". This
is currently the only instance thereof. However, if we found more uses for
this concept, and introduced any ambiguity as to whether a mempolicy was
context free or not, we could add another internal mode flag to identify
context free mempolicies. Then, we could remove the 'no_context' argument
from mpol_to_str().
Added shmem_get_sbmpol() to return a reference counted superblock mempolicy,
if one exists, to pass to mpol_shared_policy_init(). We must add the
reference under the sb stat_lock to prevent races with replacement of the mpol
by remount. This reference is removed in mpol_shared_policy_init().
[akpm@linux-foundation.org: build fix]
[akpm@linux-foundation.org: another build fix]
[akpm@linux-foundation.org: yet another build fix]
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-04-28 13:13:26 +04:00
static void shmem_show_mpol ( struct seq_file * seq , struct mempolicy * mpol )
2008-02-08 15:21:48 +03:00
{
mempolicy: rework shmem mpol parsing and display
mm/shmem.c currently contains functions to parse and display memory policy
strings for the tmpfs 'mpol' mount option. Move this to mm/mempolicy.c with
the rest of the mempolicy support. With subsequent patches, we'll be able to
remove knowledge of the details [mode, flags, policy, ...] completely from
shmem.c
1) replace shmem_parse_mpol() in mm/shmem.c with mpol_parse_str() in
mm/mempolicy.c. Rework to use the policy_types[] array [used by
mpol_to_str()] to look up mode by name.
2) use mpol_to_str() to format policy for shmem_show_mpol(). mpol_to_str()
expects a pointer to a struct mempolicy, so temporarily construct one.
This will be replaced with a reference to a struct mempolicy in the tmpfs
superblock in a subsequent patch.
NOTE 1: I changed mpol_to_str() to use a colon ':' rather than an equal
sign '=' as the nodemask delimiter to match mpol_parse_str() and the
tmpfs/shmem mpol mount option formatting that now uses mpol_to_str(). This
is a user visible change to numa_maps, but then the addition of the mode
flags already changed the display. It makes sense to me to have the mounts
and numa_maps display the policy in the same format. However, if anyone
objects strongly, I can pass the desired nodemask delimiter as an arg to
mpol_to_str().
Note 2: Like show_numa_map(), I don't check the return code from
mpol_to_str(). I do use a longer buffer than the one provided by
show_numa_map(), which seems to have sufficed so far.
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-04-28 13:13:23 +04:00
char buffer [ 64 ] ;
2008-02-08 15:21:48 +03:00
mempolicy: use struct mempolicy pointer in shmem_sb_info
This patch replaces the mempolicy mode, mode_flags, and nodemask in the
shmem_sb_info struct with a struct mempolicy pointer, initialized to NULL.
This removes dependency on the details of mempolicy from shmem.c and hugetlbfs
inode.c and simplifies the interfaces.
mpol_parse_str() in mempolicy.c is changed to return, via a pointer to a
pointer arg, a struct mempolicy pointer on success. For MPOL_DEFAULT, the
returned pointer is NULL. Further, mpol_parse_str() now takes a 'no_context'
argument that causes the input nodemask to be stored in the w.user_nodemask of
the created mempolicy for use when the mempolicy is installed in a tmpfs inode
shared policy tree. At that time, any cpuset contextualization is applied to
the original input nodemask. This preserves the previous behavior where the
input nodemask was stored in the superblock. We can think of the returned
mempolicy as "context free".
Because mpol_parse_str() is now calling mpol_new(), we can remove from
mpol_to_str() the semantic checks that mpol_new() already performs.
Add 'no_context' parameter to mpol_to_str() to specify that it should format
the nodemask in w.user_nodemask for 'bind' and 'interleave' policies.
Change mpol_shared_policy_init() to take a pointer to a "context free" struct
mempolicy and to create a new, "contextualized" mempolicy using the mode,
mode_flags and user_nodemask from the input mempolicy.
Note: we know that the mempolicy passed to mpol_to_str() or
mpol_shared_policy_init() from a tmpfs superblock is "context free". This
is currently the only instance thereof. However, if we found more uses for
this concept, and introduced any ambiguity as to whether a mempolicy was
context free or not, we could add another internal mode flag to identify
context free mempolicies. Then, we could remove the 'no_context' argument
from mpol_to_str().
Added shmem_get_sbmpol() to return a reference counted superblock mempolicy,
if one exists, to pass to mpol_shared_policy_init(). We must add the
reference under the sb stat_lock to prevent races with replacement of the mpol
by remount. This reference is removed in mpol_shared_policy_init().
[akpm@linux-foundation.org: build fix]
[akpm@linux-foundation.org: another build fix]
[akpm@linux-foundation.org: yet another build fix]
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-04-28 13:13:26 +04:00
if ( ! mpol | | mpol - > mode = = MPOL_DEFAULT )
mempolicy: rework shmem mpol parsing and display
mm/shmem.c currently contains functions to parse and display memory policy
strings for the tmpfs 'mpol' mount option. Move this to mm/mempolicy.c with
the rest of the mempolicy support. With subsequent patches, we'll be able to
remove knowledge of the details [mode, flags, policy, ...] completely from
shmem.c
1) replace shmem_parse_mpol() in mm/shmem.c with mpol_parse_str() in
mm/mempolicy.c. Rework to use the policy_types[] array [used by
mpol_to_str()] to look up mode by name.
2) use mpol_to_str() to format policy for shmem_show_mpol(). mpol_to_str()
expects a pointer to a struct mempolicy, so temporarily construct one.
This will be replaced with a reference to a struct mempolicy in the tmpfs
superblock in a subsequent patch.
NOTE 1: I changed mpol_to_str() to use a colon ':' rather than an equal
sign '=' as the nodemask delimiter to match mpol_parse_str() and the
tmpfs/shmem mpol mount option formatting that now uses mpol_to_str(). This
is a user visible change to numa_maps, but then the addition of the mode
flags already changed the display. It makes sense to me to have the mounts
and numa_maps display the policy in the same format. However, if anyone
objects strongly, I can pass the desired nodemask delimiter as an arg to
mpol_to_str().
Note 2: Like show_numa_map(), I don't check the return code from
mpol_to_str(). I do use a longer buffer than the one provided by
show_numa_map(), which seems to have sufficed so far.
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-04-28 13:13:23 +04:00
return ; /* show nothing */
2008-02-08 15:21:48 +03:00
mempolicy: use struct mempolicy pointer in shmem_sb_info
This patch replaces the mempolicy mode, mode_flags, and nodemask in the
shmem_sb_info struct with a struct mempolicy pointer, initialized to NULL.
This removes dependency on the details of mempolicy from shmem.c and hugetlbfs
inode.c and simplifies the interfaces.
mpol_parse_str() in mempolicy.c is changed to return, via a pointer to a
pointer arg, a struct mempolicy pointer on success. For MPOL_DEFAULT, the
returned pointer is NULL. Further, mpol_parse_str() now takes a 'no_context'
argument that causes the input nodemask to be stored in the w.user_nodemask of
the created mempolicy for use when the mempolicy is installed in a tmpfs inode
shared policy tree. At that time, any cpuset contextualization is applied to
the original input nodemask. This preserves the previous behavior where the
input nodemask was stored in the superblock. We can think of the returned
mempolicy as "context free".
Because mpol_parse_str() is now calling mpol_new(), we can remove from
mpol_to_str() the semantic checks that mpol_new() already performs.
Add 'no_context' parameter to mpol_to_str() to specify that it should format
the nodemask in w.user_nodemask for 'bind' and 'interleave' policies.
Change mpol_shared_policy_init() to take a pointer to a "context free" struct
mempolicy and to create a new, "contextualized" mempolicy using the mode,
mode_flags and user_nodemask from the input mempolicy.
Note: we know that the mempolicy passed to mpol_to_str() or
mpol_shared_policy_init() from a tmpfs superblock is "context free". This
is currently the only instance thereof. However, if we found more uses for
this concept, and introduced any ambiguity as to whether a mempolicy was
context free or not, we could add another internal mode flag to identify
context free mempolicies. Then, we could remove the 'no_context' argument
from mpol_to_str().
Added shmem_get_sbmpol() to return a reference counted superblock mempolicy,
if one exists, to pass to mpol_shared_policy_init(). We must add the
reference under the sb stat_lock to prevent races with replacement of the mpol
by remount. This reference is removed in mpol_shared_policy_init().
[akpm@linux-foundation.org: build fix]
[akpm@linux-foundation.org: another build fix]
[akpm@linux-foundation.org: yet another build fix]
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-04-28 13:13:26 +04:00
mpol_to_str ( buffer , sizeof ( buffer ) , mpol , 1 ) ;
mempolicy: rework shmem mpol parsing and display
mm/shmem.c currently contains functions to parse and display memory policy
strings for the tmpfs 'mpol' mount option. Move this to mm/mempolicy.c with
the rest of the mempolicy support. With subsequent patches, we'll be able to
remove knowledge of the details [mode, flags, policy, ...] completely from
shmem.c
1) replace shmem_parse_mpol() in mm/shmem.c with mpol_parse_str() in
mm/mempolicy.c. Rework to use the policy_types[] array [used by
mpol_to_str()] to look up mode by name.
2) use mpol_to_str() to format policy for shmem_show_mpol(). mpol_to_str()
expects a pointer to a struct mempolicy, so temporarily construct one.
This will be replaced with a reference to a struct mempolicy in the tmpfs
superblock in a subsequent patch.
NOTE 1: I changed mpol_to_str() to use a colon ':' rather than an equal
sign '=' as the nodemask delimiter to match mpol_parse_str() and the
tmpfs/shmem mpol mount option formatting that now uses mpol_to_str(). This
is a user visible change to numa_maps, but then the addition of the mode
flags already changed the display. It makes sense to me to have the mounts
and numa_maps display the policy in the same format. However, if anyone
objects strongly, I can pass the desired nodemask delimiter as an arg to
mpol_to_str().
Note 2: Like show_numa_map(), I don't check the return code from
mpol_to_str(). I do use a longer buffer than the one provided by
show_numa_map(), which seems to have sufficed so far.
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-04-28 13:13:23 +04:00
seq_printf ( seq , " ,mpol=%s " , buffer ) ;
2008-02-08 15:21:48 +03:00
}
mempolicy: use struct mempolicy pointer in shmem_sb_info
This patch replaces the mempolicy mode, mode_flags, and nodemask in the
shmem_sb_info struct with a struct mempolicy pointer, initialized to NULL.
This removes dependency on the details of mempolicy from shmem.c and hugetlbfs
inode.c and simplifies the interfaces.
mpol_parse_str() in mempolicy.c is changed to return, via a pointer to a
pointer arg, a struct mempolicy pointer on success. For MPOL_DEFAULT, the
returned pointer is NULL. Further, mpol_parse_str() now takes a 'no_context'
argument that causes the input nodemask to be stored in the w.user_nodemask of
the created mempolicy for use when the mempolicy is installed in a tmpfs inode
shared policy tree. At that time, any cpuset contextualization is applied to
the original input nodemask. This preserves the previous behavior where the
input nodemask was stored in the superblock. We can think of the returned
mempolicy as "context free".
Because mpol_parse_str() is now calling mpol_new(), we can remove from
mpol_to_str() the semantic checks that mpol_new() already performs.
Add 'no_context' parameter to mpol_to_str() to specify that it should format
the nodemask in w.user_nodemask for 'bind' and 'interleave' policies.
Change mpol_shared_policy_init() to take a pointer to a "context free" struct
mempolicy and to create a new, "contextualized" mempolicy using the mode,
mode_flags and user_nodemask from the input mempolicy.
Note: we know that the mempolicy passed to mpol_to_str() or
mpol_shared_policy_init() from a tmpfs superblock is "context free". This
is currently the only instance thereof. However, if we found more uses for
this concept, and introduced any ambiguity as to whether a mempolicy was
context free or not, we could add another internal mode flag to identify
context free mempolicies. Then, we could remove the 'no_context' argument
from mpol_to_str().
Added shmem_get_sbmpol() to return a reference counted superblock mempolicy,
if one exists, to pass to mpol_shared_policy_init(). We must add the
reference under the sb stat_lock to prevent races with replacement of the mpol
by remount. This reference is removed in mpol_shared_policy_init().
[akpm@linux-foundation.org: build fix]
[akpm@linux-foundation.org: another build fix]
[akpm@linux-foundation.org: yet another build fix]
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-04-28 13:13:26 +04:00
static struct mempolicy * shmem_get_sbmpol ( struct shmem_sb_info * sbinfo )
{
struct mempolicy * mpol = NULL ;
if ( sbinfo - > mpol ) {
spin_lock ( & sbinfo - > stat_lock ) ; /* prevent replace/use races */
mpol = sbinfo - > mpol ;
mpol_get ( mpol ) ;
spin_unlock ( & sbinfo - > stat_lock ) ;
}
return mpol ;
}
2008-02-08 15:21:48 +03:00
# endif /* CONFIG_TMPFS */
2011-08-04 03:21:21 +04:00
/*
 * Read the page for @swap back in from swap, honouring the inode's
 * shared mempolicy for @index.
 *
 * A pseudo vma carrying just the policy is handed to swapin_readahead():
 * read_swap_cache_async() may call alloc_page_vma() several times, so the
 * shared policy is first copied onto the stack by mpol_cond_copy() (which
 * strips MPOL_F_SHARED) so that alloc_page_vma() does not drop the shared
 * reference on each allocation.
 */
static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct mempolicy mpol, *spol;
	struct vm_area_struct pvma;

	spol = mpol_cond_copy(&mpol,
			mpol_shared_policy_lookup(&info->policy, index));

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = index;
	pvma.vm_ops = NULL;
	pvma.vm_policy = spol;
	return swapin_readahead(swap, gfp, &pvma, 0);
}
swapin needs gfp_mask for loop on tmpfs
Building in a filesystem on a loop device on a tmpfs file can hang when
swapping, the loop thread caught in that infamous throttle_vm_writeout.
In theory this is a long standing problem, which I've either never seen in
practice, or long ago suppressed the recollection, after discounting my load
and my tmpfs size as unrealistically high. But now, with the new aops, it has
become easy to hang on one machine.
Loop used to grab_cache_page before the old prepare_write to tmpfs, which
seems to have been enough to free up some memory for any swapin needed; but
the new write_begin lets tmpfs find or allocate the page (much nicer, since
grab_cache_page missed tmpfs pages in swapcache).
When allocating a fresh page, tmpfs respects loop's mapping_gfp_mask, which
has __GFP_IO|__GFP_FS stripped off, and throttle_vm_writeout is designed to
break out when __GFP_IO or __GFP_FS is unset; but when tmpfs swaps in,
read_swap_cache_async allocates with GFP_HIGHUSER_MOVABLE regardless of the
mapping_gfp_mask - hence the hang.
So, pass gfp_mask down the line from shmem_getpage to shmem_swapin to
swapin_readahead to read_swap_cache_async to add_to_swap_cache.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-02-05 09:28:42 +03:00
/*
 * Allocate a fresh page for the shmem inode at @index, using the inode's
 * shared mempolicy via a pseudo vma.  Only the few pvma fields consulted
 * by alloc_page_vma() are initialized, so no memset is needed.
 */
static struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = index;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);

	/*
	 * alloc_page_vma() will drop the shared policy reference
	 */
	return alloc_page_vma(gfp, &pvma, 0);
}
2008-02-08 15:21:48 +03:00
# else /* !CONFIG_NUMA */
# ifdef CONFIG_TMPFS
2011-08-04 03:21:21 +04:00
/* Without CONFIG_NUMA there is no mempolicy to show in /proc mounts. */
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
}
# endif /* CONFIG_TMPFS */
2011-08-04 03:21:21 +04:00
/* !CONFIG_NUMA: swap in with no mempolicy vma — readahead decides alone. */
static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	return swapin_readahead(swap, gfp, NULL, 0);
}
swapin needs gfp_mask for loop on tmpfs
Building in a filesystem on a loop device on a tmpfs file can hang when
swapping, the loop thread caught in that infamous throttle_vm_writeout.
In theory this is a long standing problem, which I've either never seen in
practice, or long ago suppressed the recollection, after discounting my load
and my tmpfs size as unrealistically high. But now, with the new aops, it has
become easy to hang on one machine.
Loop used to grab_cache_page before the old prepare_write to tmpfs, which
seems to have been enough to free up some memory for any swapin needed; but
the new write_begin lets tmpfs find or allocate the page (much nicer, since
grab_cache_page missed tmpfs pages in swapcache).
When allocating a fresh page, tmpfs respects loop's mapping_gfp_mask, which
has __GFP_IO|__GFP_FS stripped off, and throttle_vm_writeout is designed to
break out when __GFP_IO or __GFP_FS is unset; but when tmpfs swaps in,
read_swap_cache_async allocates with GFP_HIGHUSER_MOVABLE regardless of the
mapping_gfp_mask - hence the hang.
So, pass gfp_mask down the line from shmem_getpage to shmem_swapin to
swapin_readahead to read_swap_cache_async to add_to_swap_cache.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-02-05 09:28:42 +03:00
/* !CONFIG_NUMA: plain page allocation, no policy to apply. */
static inline struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	return alloc_page(gfp);
}
2008-02-08 15:21:48 +03:00
# endif /* CONFIG_NUMA */
2005-04-17 02:20:36 +04:00
mempolicy: use struct mempolicy pointer in shmem_sb_info
This patch replaces the mempolicy mode, mode_flags, and nodemask in the
shmem_sb_info struct with a struct mempolicy pointer, initialized to NULL.
This removes dependency on the details of mempolicy from shmem.c and hugetlbfs
inode.c and simplifies the interfaces.
mpol_parse_str() in mempolicy.c is changed to return, via a pointer to a
pointer arg, a struct mempolicy pointer on success. For MPOL_DEFAULT, the
returned pointer is NULL. Further, mpol_parse_str() now takes a 'no_context'
argument that causes the input nodemask to be stored in the w.user_nodemask of
the created mempolicy for use when the mempolicy is installed in a tmpfs inode
shared policy tree. At that time, any cpuset contextualization is applied to
the original input nodemask. This preserves the previous behavior where the
input nodemask was stored in the superblock. We can think of the returned
mempolicy as "context free".
Because mpol_parse_str() is now calling mpol_new(), we can remove from
mpol_to_str() the semantic checks that mpol_new() already performs.
Add 'no_context' parameter to mpol_to_str() to specify that it should format
the nodemask in w.user_nodemask for 'bind' and 'interleave' policies.
Change mpol_shared_policy_init() to take a pointer to a "context free" struct
mempolicy and to create a new, "contextualized" mempolicy using the mode,
mode_flags and user_nodemask from the input mempolicy.
Note: we know that the mempolicy passed to mpol_to_str() or
mpol_shared_policy_init() from a tmpfs superblock is "context free". This
is currently the only instance thereof. However, if we found more uses for
this concept, and introduced any ambiguity as to whether a mempolicy was
context free or not, we could add another internal mode flag to identify
context free mempolicies. Then, we could remove the 'no_context' argument
from mpol_to_str().
Added shmem_get_sbmpol() to return a reference counted superblock mempolicy,
if one exists, to pass to mpol_shared_policy_init(). We must add the
reference under the sb stat_lock to prevent races with replacement of the mpol
by remount. This reference is removed in mpol_shared_policy_init().
[akpm@linux-foundation.org: build fix]
[akpm@linux-foundation.org: another build fix]
[akpm@linux-foundation.org: yet another build fix]
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-04-28 13:13:26 +04:00
#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
/*
 * Stub: without both NUMA and TMPFS there is no per-superblock
 * mempolicy to hand out, so callers get NULL (i.e. default policy)
 * and no reference is taken.
 */
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	return NULL;
}
#endif
2005-04-17 02:20:36 +04:00
/*
2011-07-26 04:12:34 +04:00
* shmem_getpage_gfp - find page in cache , or get from swap , or allocate
2005-04-17 02:20:36 +04:00
*
* If we allocate a new one we do not mark it dirty . That ' s up to the
* vm . If we swap it in we mark it dirty since we also free the swap
* entry since a page cannot live in both the swap and page cache
*/
2011-08-04 03:21:21 +04:00
static int shmem_getpage_gfp ( struct inode * inode , pgoff_t index ,
2011-07-26 04:12:34 +04:00
struct page * * pagep , enum sgp_type sgp , gfp_t gfp , int * fault_type )
2005-04-17 02:20:36 +04:00
{
struct address_space * mapping = inode - > i_mapping ;
tmpfs: convert shmem_getpage_gfp to radix-swap
Convert shmem_getpage_gfp(), the engine-room of shmem, to expect page or
swap entry returned from radix tree by find_lock_page().
Whereas the repetitive old method proceeded mainly under info->lock,
dropping and repeating whenever one of the conditions needed was not
met, now we can proceed without it, leaving shmem_add_to_page_cache() to
check for a race.
This way there is no need to preallocate a page, no need for an early
radix_tree_preload(), no need for mem_cgroup_shmem_charge_fallback().
Move the error unwinding down to the bottom instead of repeating it
throughout. ENOSPC handling is a little different from before: there is
no longer any race between find_lock_page() and finding swap, but we can
arrive at ENOSPC before calling shmem_recalc_inode(), which might
occasionally discover freed space.
Be stricter to check i_size before returning. info->lock is used for
little but alloced, swapped, i_blocks updates. Move i_blocks updates
out from under the max_blocks check, so even an unlimited size=0 mount
can show accurate du.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:24 +04:00
struct shmem_inode_info * info ;
2005-04-17 02:20:36 +04:00
struct shmem_sb_info * sbinfo ;
2011-07-26 04:12:36 +04:00
struct page * page ;
2005-04-17 02:20:36 +04:00
swp_entry_t swap ;
int error ;
tmpfs: convert shmem_getpage_gfp to radix-swap
Convert shmem_getpage_gfp(), the engine-room of shmem, to expect page or
swap entry returned from radix tree by find_lock_page().
Whereas the repetitive old method proceeded mainly under info->lock,
dropping and repeating whenever one of the conditions needed was not
met, now we can proceed without it, leaving shmem_add_to_page_cache() to
check for a race.
This way there is no need to preallocate a page, no need for an early
radix_tree_preload(), no need for mem_cgroup_shmem_charge_fallback().
Move the error unwinding down to the bottom instead of repeating it
throughout. ENOSPC handling is a little different from before: there is
no longer any race between find_lock_page() and finding swap, but we can
arrive at ENOSPC before calling shmem_recalc_inode(), which might
occasionally discover freed space.
Be stricter to check i_size before returning. info->lock is used for
little but alloced, swapped, i_blocks updates. Move i_blocks updates
out from under the max_blocks check, so even an unlimited size=0 mount
can show accurate du.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:24 +04:00
int once = 0 ;
2005-04-17 02:20:36 +04:00
2011-08-04 03:21:21 +04:00
if ( index > ( MAX_LFS_FILESIZE > > PAGE_CACHE_SHIFT ) )
2005-04-17 02:20:36 +04:00
return - EFBIG ;
repeat :
tmpfs: convert shmem_getpage_gfp to radix-swap
Convert shmem_getpage_gfp(), the engine-room of shmem, to expect page or
swap entry returned from radix tree by find_lock_page().
Whereas the repetitive old method proceeded mainly under info->lock,
dropping and repeating whenever one of the conditions needed was not
met, now we can proceed without it, leaving shmem_add_to_page_cache() to
check for a race.
This way there is no need to preallocate a page, no need for an early
radix_tree_preload(), no need for mem_cgroup_shmem_charge_fallback().
Move the error unwinding down to the bottom instead of repeating it
throughout. ENOSPC handling is a little different from before: there is
no longer any race between find_lock_page() and finding swap, but we can
arrive at ENOSPC before calling shmem_recalc_inode(), which might
occasionally discover freed space.
Be stricter to check i_size before returning. info->lock is used for
little but alloced, swapped, i_blocks updates. Move i_blocks updates
out from under the max_blocks check, so even an unlimited size=0 mount
can show accurate du.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:24 +04:00
swap . val = 0 ;
2011-08-04 03:21:21 +04:00
page = find_lock_page ( mapping , index ) ;
tmpfs: convert shmem_getpage_gfp to radix-swap
Convert shmem_getpage_gfp(), the engine-room of shmem, to expect page or
swap entry returned from radix tree by find_lock_page().
Whereas the repetitive old method proceeded mainly under info->lock,
dropping and repeating whenever one of the conditions needed was not
met, now we can proceed without it, leaving shmem_add_to_page_cache() to
check for a race.
This way there is no need to preallocate a page, no need for an early
radix_tree_preload(), no need for mem_cgroup_shmem_charge_fallback().
Move the error unwinding down to the bottom instead of repeating it
throughout. ENOSPC handling is a little different from before: there is
no longer any race between find_lock_page() and finding swap, but we can
arrive at ENOSPC before calling shmem_recalc_inode(), which might
occasionally discover freed space.
Be stricter to check i_size before returning. info->lock is used for
little but alloced, swapped, i_blocks updates. Move i_blocks updates
out from under the max_blocks check, so even an unlimited size=0 mount
can show accurate du.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:24 +04:00
if ( radix_tree_exceptional_entry ( page ) ) {
swap = radix_to_swp_entry ( page ) ;
page = NULL ;
}
if ( sgp ! = SGP_WRITE & &
( ( loff_t ) index < < PAGE_CACHE_SHIFT ) > = i_size_read ( inode ) ) {
error = - EINVAL ;
goto failed ;
}
if ( page | | ( sgp = = SGP_READ & & ! swap . val ) ) {
2008-02-05 09:28:54 +03:00
/*
2011-07-26 04:12:36 +04:00
* Once we can get the page lock , it must be uptodate :
* if there were an error in reading back from swap ,
* the page would not be inserted into the filecache .
2008-02-05 09:28:54 +03:00
*/
tmpfs: convert shmem_getpage_gfp to radix-swap
Convert shmem_getpage_gfp(), the engine-room of shmem, to expect page or
swap entry returned from radix tree by find_lock_page().
Whereas the repetitive old method proceeded mainly under info->lock,
dropping and repeating whenever one of the conditions needed was not
met, now we can proceed without it, leaving shmem_add_to_page_cache() to
check for a race.
This way there is no need to preallocate a page, no need for an early
radix_tree_preload(), no need for mem_cgroup_shmem_charge_fallback().
Move the error unwinding down to the bottom instead of repeating it
throughout. ENOSPC handling is a little different from before: there is
no longer any race between find_lock_page() and finding swap, but we can
arrive at ENOSPC before calling shmem_recalc_inode(), which might
occasionally discover freed space.
Be stricter to check i_size before returning. info->lock is used for
little but alloced, swapped, i_blocks updates. Move i_blocks updates
out from under the max_blocks check, so even an unlimited size=0 mount
can show accurate du.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:24 +04:00
BUG_ON ( page & & ! PageUptodate ( page ) ) ;
* pagep = page ;
return 0 ;
2011-07-26 04:12:36 +04:00
}
/*
tmpfs: convert shmem_getpage_gfp to radix-swap
Convert shmem_getpage_gfp(), the engine-room of shmem, to expect page or
swap entry returned from radix tree by find_lock_page().
Whereas the repetitive old method proceeded mainly under info->lock,
dropping and repeating whenever one of the conditions needed was not
met, now we can proceed without it, leaving shmem_add_to_page_cache() to
check for a race.
This way there is no need to preallocate a page, no need for an early
radix_tree_preload(), no need for mem_cgroup_shmem_charge_fallback().
Move the error unwinding down to the bottom instead of repeating it
throughout. ENOSPC handling is a little different from before: there is
no longer any race between find_lock_page() and finding swap, but we can
arrive at ENOSPC before calling shmem_recalc_inode(), which might
occasionally discover freed space.
Be stricter to check i_size before returning. info->lock is used for
little but alloced, swapped, i_blocks updates. Move i_blocks updates
out from under the max_blocks check, so even an unlimited size=0 mount
can show accurate du.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:24 +04:00
* Fast cache lookup did not find it :
* bring it back from swap or allocate .
2011-07-26 04:12:36 +04:00
*/
tmpfs: convert shmem_getpage_gfp to radix-swap
Convert shmem_getpage_gfp(), the engine-room of shmem, to expect page or
swap entry returned from radix tree by find_lock_page().
Whereas the repetitive old method proceeded mainly under info->lock,
dropping and repeating whenever one of the conditions needed was not
met, now we can proceed without it, leaving shmem_add_to_page_cache() to
check for a race.
This way there is no need to preallocate a page, no need for an early
radix_tree_preload(), no need for mem_cgroup_shmem_charge_fallback().
Move the error unwinding down to the bottom instead of repeating it
throughout. ENOSPC handling is a little different from before: there is
no longer any race between find_lock_page() and finding swap, but we can
arrive at ENOSPC before calling shmem_recalc_inode(), which might
occasionally discover freed space.
Be stricter to check i_size before returning. info->lock is used for
little but alloced, swapped, i_blocks updates. Move i_blocks updates
out from under the max_blocks check, so even an unlimited size=0 mount
can show accurate du.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:24 +04:00
info = SHMEM_I ( inode ) ;
sbinfo = SHMEM_SB ( inode - > i_sb ) ;
2005-04-17 02:20:36 +04:00
if ( swap . val ) {
/* Look it up and read it in.. */
2011-07-26 04:12:36 +04:00
page = lookup_swap_cache ( swap ) ;
if ( ! page ) {
2005-04-17 02:20:36 +04:00
/* here we actually do the io */
2011-07-26 04:12:34 +04:00
if ( fault_type )
* fault_type | = VM_FAULT_MAJOR ;
2011-08-04 03:21:21 +04:00
page = shmem_swapin ( swap , gfp , info , index ) ;
2011-07-26 04:12:36 +04:00
if ( ! page ) {
tmpfs: convert shmem_getpage_gfp to radix-swap
Convert shmem_getpage_gfp(), the engine-room of shmem, to expect page or
swap entry returned from radix tree by find_lock_page().
Whereas the repetitive old method proceeded mainly under info->lock,
dropping and repeating whenever one of the conditions needed was not
met, now we can proceed without it, leaving shmem_add_to_page_cache() to
check for a race.
This way there is no need to preallocate a page, no need for an early
radix_tree_preload(), no need for mem_cgroup_shmem_charge_fallback().
Move the error unwinding down to the bottom instead of repeating it
throughout. ENOSPC handling is a little different from before: there is
no longer any race between find_lock_page() and finding swap, but we can
arrive at ENOSPC before calling shmem_recalc_inode(), which might
occasionally discover freed space.
Be stricter to check i_size before returning. info->lock is used for
little but alloced, swapped, i_blocks updates. Move i_blocks updates
out from under the max_blocks check, so even an unlimited size=0 mount
can show accurate du.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:24 +04:00
error = - ENOMEM ;
goto failed ;
2005-04-17 02:20:36 +04:00
}
}
/* We have to do this with page locked to prevent races */
tmpfs: convert shmem_getpage_gfp to radix-swap
Convert shmem_getpage_gfp(), the engine-room of shmem, to expect page or
swap entry returned from radix tree by find_lock_page().
Whereas the repetitive old method proceeded mainly under info->lock,
dropping and repeating whenever one of the conditions needed was not
met, now we can proceed without it, leaving shmem_add_to_page_cache() to
check for a race.
This way there is no need to preallocate a page, no need for an early
radix_tree_preload(), no need for mem_cgroup_shmem_charge_fallback().
Move the error unwinding down to the bottom instead of repeating it
throughout. ENOSPC handling is a little different from before: there is
no longer any race between find_lock_page() and finding swap, but we can
arrive at ENOSPC before calling shmem_recalc_inode(), which might
occasionally discover freed space.
Be stricter to check i_size before returning. info->lock is used for
little but alloced, swapped, i_blocks updates. Move i_blocks updates
out from under the max_blocks check, so even an unlimited size=0 mount
can show accurate du.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:24 +04:00
lock_page ( page ) ;
2011-07-26 04:12:36 +04:00
if ( ! PageUptodate ( page ) ) {
2005-04-17 02:20:36 +04:00
error = - EIO ;
tmpfs: convert shmem_getpage_gfp to radix-swap
Convert shmem_getpage_gfp(), the engine-room of shmem, to expect page or
swap entry returned from radix tree by find_lock_page().
Whereas the repetitive old method proceeded mainly under info->lock,
dropping and repeating whenever one of the conditions needed was not
met, now we can proceed without it, leaving shmem_add_to_page_cache() to
check for a race.
This way there is no need to preallocate a page, no need for an early
radix_tree_preload(), no need for mem_cgroup_shmem_charge_fallback().
Move the error unwinding down to the bottom instead of repeating it
throughout. ENOSPC handling is a little different from before: there is
no longer any race between find_lock_page() and finding swap, but we can
arrive at ENOSPC before calling shmem_recalc_inode(), which might
occasionally discover freed space.
Be stricter to check i_size before returning. info->lock is used for
little but alloced, swapped, i_blocks updates. Move i_blocks updates
out from under the max_blocks check, so even an unlimited size=0 mount
can show accurate du.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:24 +04:00
goto failed ;
2005-04-17 02:20:36 +04:00
}
tmpfs: convert shmem_getpage_gfp to radix-swap
Convert shmem_getpage_gfp(), the engine-room of shmem, to expect page or
swap entry returned from radix tree by find_lock_page().
Whereas the repetitive old method proceeded mainly under info->lock,
dropping and repeating whenever one of the conditions needed was not
met, now we can proceed without it, leaving shmem_add_to_page_cache() to
check for a race.
This way there is no need to preallocate a page, no need for an early
radix_tree_preload(), no need for mem_cgroup_shmem_charge_fallback().
Move the error unwinding down to the bottom instead of repeating it
throughout. ENOSPC handling is a little different from before: there is
no longer any race between find_lock_page() and finding swap, but we can
arrive at ENOSPC before calling shmem_recalc_inode(), which might
occasionally discover freed space.
Be stricter to check i_size before returning. info->lock is used for
little but alloced, swapped, i_blocks updates. Move i_blocks updates
out from under the max_blocks check, so even an unlimited size=0 mount
can show accurate du.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:24 +04:00
wait_on_page_writeback ( page ) ;
/* Someone may have already done it for us */
if ( page - > mapping ) {
if ( page - > mapping = = mapping & &
page - > index = = index )
goto done ;
error = - EEXIST ;
goto failed ;
2005-04-17 02:20:36 +04:00
}
2011-07-26 04:12:36 +04:00
2011-08-04 03:21:24 +04:00
error = mem_cgroup_cache_charge ( page , current - > mm ,
gfp & GFP_RECLAIM_MASK ) ;
if ( ! error )
error = shmem_add_to_page_cache ( page , mapping , index ,
gfp , swp_to_radix_entry ( swap ) ) ;
tmpfs: convert shmem_getpage_gfp to radix-swap
Convert shmem_getpage_gfp(), the engine-room of shmem, to expect page or
swap entry returned from radix tree by find_lock_page().
Whereas the repetitive old method proceeded mainly under info->lock,
dropping and repeating whenever one of the conditions needed was not
met, now we can proceed without it, leaving shmem_add_to_page_cache() to
check for a race.
This way there is no need to preallocate a page, no need for an early
radix_tree_preload(), no need for mem_cgroup_shmem_charge_fallback().
Move the error unwinding down to the bottom instead of repeating it
throughout. ENOSPC handling is a little different from before: there is
no longer any race between find_lock_page() and finding swap, but we can
arrive at ENOSPC before calling shmem_recalc_inode(), which might
occasionally discover freed space.
Be stricter to check i_size before returning. info->lock is used for
little but alloced, swapped, i_blocks updates. Move i_blocks updates
out from under the max_blocks check, so even an unlimited size=0 mount
can show accurate du.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:24 +04:00
if ( error )
goto failed ;
spin_lock ( & info - > lock ) ;
tmpfs: demolish old swap vector support
The maximum size of a shmem/tmpfs file has been limited by the maximum
size of its triple-indirect swap vector. With 4kB page size, maximum
filesize was just over 2TB on a 32-bit kernel, but sadly one eighth of
that on a 64-bit kernel. (With 8kB page size, maximum filesize was just
over 4TB on a 64-bit kernel, but 16TB on a 32-bit kernel,
MAX_LFS_FILESIZE being then more restrictive than swap vector layout.)
It's a shame that tmpfs should be more restrictive than ramfs, and this
limitation has now been noticed. Add another level to the swap vector?
No, it became obscure and hard to maintain, once I complicated it to
make use of highmem pages nine years ago: better choose another way.
Surely, if 2.4 had had the radix tree pagecache introduced in 2.5, then
tmpfs would never have invented its own peculiar radix tree: we would
have fitted swap entries into the common radix tree instead, in much the
same way as we fit swap entries into page tables.
And why should each file have a separate radix tree for its pages and
for its swap entries? The swap entries are required precisely where and
when the pages are not. We want to put them together in a single radix
tree: which can then avoid much of the locking which was needed to
prevent them from being exchanged underneath us.
This also avoids the waste of memory devoted to swap vectors, first in
the shmem_inode itself, then at least two more pages once a file grew
beyond 16 data pages (pages accounted by df and du, but not by memcg).
Allocated upfront, to avoid allocation when under swapping pressure, but
pure waste when CONFIG_SWAP is not set - I have never spattered around
the ifdefs to prevent that, preferring this move to sharing the common
radix tree instead.
There are three downsides to sharing the radix tree. One, that it binds
tmpfs more tightly to the rest of mm, either requiring knowledge of swap
entries in radix tree there, or duplication of its code here in shmem.c.
I believe that the simplifications and memory savings (and probable higher
performance, not yet measured) justify that.
Two, that on HIGHMEM systems with SWAP enabled, it's the lowmem radix
nodes that cannot be freed under memory pressure - whereas before it was
the less precious highmem swap vector pages that could not be freed.
I'm hoping that 64-bit has now been accessible for long enough, that the
highmem argument has grown much less persuasive.
Three, that swapoff is slower than it used to be on tmpfs files, since
it's using a simple generic mechanism not tailored to it: I find this
noticeable, and shall want to improve, but maybe nobody else will
notice.
So... now remove most of the old swap vector code from shmem.c. But,
for the moment, keep the simple i_direct vector of 16 pages, with simple
accessors shmem_put_swap() and shmem_get_swap(), as a toy implementation
to help mark where swap needs to be handled in subsequent patches.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:20 +04:00
info - > swapped - - ;
tmpfs: convert shmem_getpage_gfp to radix-swap
Convert shmem_getpage_gfp(), the engine-room of shmem, to expect page or
swap entry returned from radix tree by find_lock_page().
Whereas the repetitive old method proceeded mainly under info->lock,
dropping and repeating whenever one of the conditions needed was not
met, now we can proceed without it, leaving shmem_add_to_page_cache() to
check for a race.
This way there is no need to preallocate a page, no need for an early
radix_tree_preload(), no need for mem_cgroup_shmem_charge_fallback().
Move the error unwinding down to the bottom instead of repeating it
throughout. ENOSPC handling is a little different from before: there is
no longer any race between find_lock_page() and finding swap, but we can
arrive at ENOSPC before calling shmem_recalc_inode(), which might
occasionally discover freed space.
Be stricter to check i_size before returning. info->lock is used for
little but alloced, swapped, i_blocks updates. Move i_blocks updates
out from under the max_blocks check, so even an unlimited size=0 mount
can show accurate du.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:24 +04:00
shmem_recalc_inode ( inode ) ;
2011-07-26 04:12:36 +04:00
spin_unlock ( & info - > lock ) ;
tmpfs: convert shmem_getpage_gfp to radix-swap
Convert shmem_getpage_gfp(), the engine-room of shmem, to expect page or
swap entry returned from radix tree by find_lock_page().
Whereas the repetitive old method proceeded mainly under info->lock,
dropping and repeating whenever one of the conditions needed was not
met, now we can proceed without it, leaving shmem_add_to_page_cache() to
check for a race.
This way there is no need to preallocate a page, no need for an early
radix_tree_preload(), no need for mem_cgroup_shmem_charge_fallback().
Move the error unwinding down to the bottom instead of repeating it
throughout. ENOSPC handling is a little different from before: there is
no longer any race between find_lock_page() and finding swap, but we can
arrive at ENOSPC before calling shmem_recalc_inode(), which might
occasionally discover freed space.
Be stricter to check i_size before returning. info->lock is used for
little but alloced, swapped, i_blocks updates. Move i_blocks updates
out from under the max_blocks check, so even an unlimited size=0 mount
can show accurate du.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:24 +04:00
delete_from_swap_cache ( page ) ;
2011-07-26 04:12:36 +04:00
set_page_dirty ( page ) ;
swap_free ( swap ) ;
tmpfs: convert shmem_getpage_gfp to radix-swap
Convert shmem_getpage_gfp(), the engine-room of shmem, to expect page or
swap entry returned from radix tree by find_lock_page().
Whereas the repetitive old method proceeded mainly under info->lock,
dropping and repeating whenever one of the conditions needed was not
met, now we can proceed without it, leaving shmem_add_to_page_cache() to
check for a race.
This way there is no need to preallocate a page, no need for an early
radix_tree_preload(), no need for mem_cgroup_shmem_charge_fallback().
Move the error unwinding down to the bottom instead of repeating it
throughout. ENOSPC handling is a little different from before: there is
no longer any race between find_lock_page() and finding swap, but we can
arrive at ENOSPC before calling shmem_recalc_inode(), which might
occasionally discover freed space.
Be stricter to check i_size before returning. info->lock is used for
little but alloced, swapped, i_blocks updates. Move i_blocks updates
out from under the max_blocks check, so even an unlimited size=0 mount
can show accurate du.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:24 +04:00
} else {
if ( shmem_acct_block ( info - > flags ) ) {
error = - ENOSPC ;
goto failed ;
2005-04-17 02:20:36 +04:00
}
2005-06-22 04:15:04 +04:00
if ( sbinfo - > max_blocks ) {
2011-04-15 02:22:07 +04:00
if ( percpu_counter_compare ( & sbinfo - > used_blocks ,
tmpfs: convert shmem_getpage_gfp to radix-swap
Convert shmem_getpage_gfp(), the engine-room of shmem, to expect page or
swap entry returned from radix tree by find_lock_page().
Whereas the repetitive old method proceeded mainly under info->lock,
dropping and repeating whenever one of the conditions needed was not
met, now we can proceed without it, leaving shmem_add_to_page_cache() to
check for a race.
This way there is no need to preallocate a page, no need for an early
radix_tree_preload(), no need for mem_cgroup_shmem_charge_fallback().
Move the error unwinding down to the bottom instead of repeating it
throughout. ENOSPC handling is a little different from before: there is
no longer any race between find_lock_page() and finding swap, but we can
arrive at ENOSPC before calling shmem_recalc_inode(), which might
occasionally discover freed space.
Be stricter to check i_size before returning. info->lock is used for
little but alloced, swapped, i_blocks updates. Move i_blocks updates
out from under the max_blocks check, so even an unlimited size=0 mount
can show accurate du.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:24 +04:00
sbinfo - > max_blocks ) > = 0 ) {
error = - ENOSPC ;
goto unacct ;
}
2010-08-10 04:19:05 +04:00
percpu_counter_inc ( & sbinfo - > used_blocks ) ;
tmpfs: convert shmem_getpage_gfp to radix-swap
Convert shmem_getpage_gfp(), the engine-room of shmem, to expect page or
swap entry returned from radix tree by find_lock_page().
Whereas the repetitive old method proceeded mainly under info->lock,
dropping and repeating whenever one of the conditions needed was not
met, now we can proceed without it, leaving shmem_add_to_page_cache() to
check for a race.
This way there is no need to preallocate a page, no need for an early
radix_tree_preload(), no need for mem_cgroup_shmem_charge_fallback().
Move the error unwinding down to the bottom instead of repeating it
throughout. ENOSPC handling is a little different from before: there is
no longer any race between find_lock_page() and finding swap, but we can
arrive at ENOSPC before calling shmem_recalc_inode(), which might
occasionally discover freed space.
Be stricter to check i_size before returning. info->lock is used for
little but alloced, swapped, i_blocks updates. Move i_blocks updates
out from under the max_blocks check, so even an unlimited size=0 mount
can show accurate du.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:24 +04:00
}
2005-04-17 02:20:36 +04:00
tmpfs: convert shmem_getpage_gfp to radix-swap
Convert shmem_getpage_gfp(), the engine-room of shmem, to expect page or
swap entry returned from radix tree by find_lock_page().
Whereas the repetitive old method proceeded mainly under info->lock,
dropping and repeating whenever one of the conditions needed was not
met, now we can proceed without it, leaving shmem_add_to_page_cache() to
check for a race.
This way there is no need to preallocate a page, no need for an early
radix_tree_preload(), no need for mem_cgroup_shmem_charge_fallback().
Move the error unwinding down to the bottom instead of repeating it
throughout. ENOSPC handling is a little different from before: there is
no longer any race between find_lock_page() and finding swap, but we can
arrive at ENOSPC before calling shmem_recalc_inode(), which might
occasionally discover freed space.
Be stricter to check i_size before returning. info->lock is used for
little but alloced, swapped, i_blocks updates. Move i_blocks updates
out from under the max_blocks check, so even an unlimited size=0 mount
can show accurate du.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:24 +04:00
page = shmem_alloc_page ( gfp , info , index ) ;
if ( ! page ) {
error = - ENOMEM ;
goto decused ;
2005-04-17 02:20:36 +04:00
}
tmpfs: convert shmem_getpage_gfp to radix-swap
Convert shmem_getpage_gfp(), the engine-room of shmem, to expect page or
swap entry returned from radix tree by find_lock_page().
Whereas the repetitive old method proceeded mainly under info->lock,
dropping and repeating whenever one of the conditions needed was not
met, now we can proceed without it, leaving shmem_add_to_page_cache() to
check for a race.
This way there is no need to preallocate a page, no need for an early
radix_tree_preload(), no need for mem_cgroup_shmem_charge_fallback().
Move the error unwinding down to the bottom instead of repeating it
throughout. ENOSPC handling is a little different from before: there is
no longer any race between find_lock_page() and finding swap, but we can
arrive at ENOSPC before calling shmem_recalc_inode(), which might
occasionally discover freed space.
Be stricter to check i_size before returning. info->lock is used for
little but alloced, swapped, i_blocks updates. Move i_blocks updates
out from under the max_blocks check, so even an unlimited size=0 mount
can show accurate du.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:24 +04:00
SetPageSwapBacked ( page ) ;
__set_page_locked ( page ) ;
2011-08-04 03:21:24 +04:00
error = mem_cgroup_cache_charge ( page , current - > mm ,
gfp & GFP_RECLAIM_MASK ) ;
if ( ! error )
error = shmem_add_to_page_cache ( page , mapping , index ,
gfp , NULL ) ;
tmpfs: convert shmem_getpage_gfp to radix-swap
Convert shmem_getpage_gfp(), the engine-room of shmem, to expect page or
swap entry returned from radix tree by find_lock_page().
Whereas the repetitive old method proceeded mainly under info->lock,
dropping and repeating whenever one of the conditions needed was not
met, now we can proceed without it, leaving shmem_add_to_page_cache() to
check for a race.
This way there is no need to preallocate a page, no need for an early
radix_tree_preload(), no need for mem_cgroup_shmem_charge_fallback().
Move the error unwinding down to the bottom instead of repeating it
throughout. ENOSPC handling is a little different from before: there is
no longer any race between find_lock_page() and finding swap, but we can
arrive at ENOSPC before calling shmem_recalc_inode(), which might
occasionally discover freed space.
Be stricter to check i_size before returning. info->lock is used for
little but alloced, swapped, i_blocks updates. Move i_blocks updates
out from under the max_blocks check, so even an unlimited size=0 mount
can show accurate du.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:24 +04:00
if ( error )
goto decused ;
lru_cache_add_anon ( page ) ;
spin_lock ( & info - > lock ) ;
2005-04-17 02:20:36 +04:00
info - > alloced + + ;
tmpfs: convert shmem_getpage_gfp to radix-swap
Convert shmem_getpage_gfp(), the engine-room of shmem, to expect page or
swap entry returned from radix tree by find_lock_page().
Whereas the repetitive old method proceeded mainly under info->lock,
dropping and repeating whenever one of the conditions needed was not
met, now we can proceed without it, leaving shmem_add_to_page_cache() to
check for a race.
This way there is no need to preallocate a page, no need for an early
radix_tree_preload(), no need for mem_cgroup_shmem_charge_fallback().
Move the error unwinding down to the bottom instead of repeating it
throughout. ENOSPC handling is a little different from before: there is
no longer any race between find_lock_page() and finding swap, but we can
arrive at ENOSPC before calling shmem_recalc_inode(), which might
occasionally discover freed space.
Be stricter to check i_size before returning. info->lock is used for
little but alloced, swapped, i_blocks updates. Move i_blocks updates
out from under the max_blocks check, so even an unlimited size=0 mount
can show accurate du.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:24 +04:00
inode - > i_blocks + = BLOCKS_PER_PAGE ;
shmem_recalc_inode ( inode ) ;
2005-04-17 02:20:36 +04:00
spin_unlock ( & info - > lock ) ;
tmpfs: convert shmem_getpage_gfp to radix-swap
Convert shmem_getpage_gfp(), the engine-room of shmem, to expect page or
swap entry returned from radix tree by find_lock_page().
Whereas the repetitive old method proceeded mainly under info->lock,
dropping and repeating whenever one of the conditions needed was not
met, now we can proceed without it, leaving shmem_add_to_page_cache() to
check for a race.
This way there is no need to preallocate a page, no need for an early
radix_tree_preload(), no need for mem_cgroup_shmem_charge_fallback().
Move the error unwinding down to the bottom instead of repeating it
throughout. ENOSPC handling is a little different from before: there is
no longer any race between find_lock_page() and finding swap, but we can
arrive at ENOSPC before calling shmem_recalc_inode(), which might
occasionally discover freed space.
Be stricter to check i_size before returning. info->lock is used for
little but alloced, swapped, i_blocks updates. Move i_blocks updates
out from under the max_blocks check, so even an unlimited size=0 mount
can show accurate du.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:24 +04:00
2011-07-26 04:12:36 +04:00
clear_highpage ( page ) ;
flush_dcache_page ( page ) ;
SetPageUptodate ( page ) ;
2008-02-05 09:28:51 +03:00
if ( sgp = = SGP_DIRTY )
2011-07-26 04:12:36 +04:00
set_page_dirty ( page ) ;
2005-04-17 02:20:36 +04:00
}
done :
tmpfs: convert shmem_getpage_gfp to radix-swap
Convert shmem_getpage_gfp(), the engine-room of shmem, to expect page or
swap entry returned from radix tree by find_lock_page().
Whereas the repetitive old method proceeded mainly under info->lock,
dropping and repeating whenever one of the conditions needed was not
met, now we can proceed without it, leaving shmem_add_to_page_cache() to
check for a race.
This way there is no need to preallocate a page, no need for an early
radix_tree_preload(), no need for mem_cgroup_shmem_charge_fallback().
Move the error unwinding down to the bottom instead of repeating it
throughout. ENOSPC handling is a little different from before: there is
no longer any race between find_lock_page() and finding swap, but we can
arrive at ENOSPC before calling shmem_recalc_inode(), which might
occasionally discover freed space.
Be stricter to check i_size before returning. info->lock is used for
little but alloced, swapped, i_blocks updates. Move i_blocks updates
out from under the max_blocks check, so even an unlimited size=0 mount
can show accurate du.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:24 +04:00
/* Perhaps the file has been truncated since we checked */
if ( sgp ! = SGP_WRITE & &
( ( loff_t ) index < < PAGE_CACHE_SHIFT ) > = i_size_read ( inode ) ) {
error = - EINVAL ;
goto trunc ;
2011-07-26 04:12:35 +04:00
}
tmpfs: convert shmem_getpage_gfp to radix-swap
Convert shmem_getpage_gfp(), the engine-room of shmem, to expect page or
swap entry returned from radix tree by find_lock_page().
Whereas the repetitive old method proceeded mainly under info->lock,
dropping and repeating whenever one of the conditions needed was not
met, now we can proceed without it, leaving shmem_add_to_page_cache() to
check for a race.
This way there is no need to preallocate a page, no need for an early
radix_tree_preload(), no need for mem_cgroup_shmem_charge_fallback().
Move the error unwinding down to the bottom instead of repeating it
throughout. ENOSPC handling is a little different from before: there is
no longer any race between find_lock_page() and finding swap, but we can
arrive at ENOSPC before calling shmem_recalc_inode(), which might
occasionally discover freed space.
Be stricter to check i_size before returning. info->lock is used for
little but alloced, swapped, i_blocks updates. Move i_blocks updates
out from under the max_blocks check, so even an unlimited size=0 mount
can show accurate du.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:24 +04:00
* pagep = page ;
return 0 ;
2005-04-17 02:20:36 +04:00
2011-05-12 02:13:38 +04:00
/*
tmpfs: convert shmem_getpage_gfp to radix-swap
Convert shmem_getpage_gfp(), the engine-room of shmem, to expect page or
swap entry returned from radix tree by find_lock_page().
Whereas the repetitive old method proceeded mainly under info->lock,
dropping and repeating whenever one of the conditions needed was not
met, now we can proceed without it, leaving shmem_add_to_page_cache() to
check for a race.
This way there is no need to preallocate a page, no need for an early
radix_tree_preload(), no need for mem_cgroup_shmem_charge_fallback().
Move the error unwinding down to the bottom instead of repeating it
throughout. ENOSPC handling is a little different from before: there is
no longer any race between find_lock_page() and finding swap, but we can
arrive at ENOSPC before calling shmem_recalc_inode(), which might
occasionally discover freed space.
Be stricter to check i_size before returning. info->lock is used for
little but alloced, swapped, i_blocks updates. Move i_blocks updates
out from under the max_blocks check, so even an unlimited size=0 mount
can show accurate du.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:24 +04:00
* Error recovery .
2011-05-12 02:13:38 +04:00
*/
tmpfs: convert shmem_getpage_gfp to radix-swap
Convert shmem_getpage_gfp(), the engine-room of shmem, to expect page or
swap entry returned from radix tree by find_lock_page().
Whereas the repetitive old method proceeded mainly under info->lock,
dropping and repeating whenever one of the conditions needed was not
met, now we can proceed without it, leaving shmem_add_to_page_cache() to
check for a race.
This way there is no need to preallocate a page, no need for an early
radix_tree_preload(), no need for mem_cgroup_shmem_charge_fallback().
Move the error unwinding down to the bottom instead of repeating it
throughout. ENOSPC handling is a little different from before: there is
no longer any race between find_lock_page() and finding swap, but we can
arrive at ENOSPC before calling shmem_recalc_inode(), which might
occasionally discover freed space.
Be stricter to check i_size before returning. info->lock is used for
little but alloced, swapped, i_blocks updates. Move i_blocks updates
out from under the max_blocks check, so even an unlimited size=0 mount
can show accurate du.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:24 +04:00
trunc :
ClearPageDirty ( page ) ;
delete_from_page_cache ( page ) ;
spin_lock ( & info - > lock ) ;
info - > alloced - - ;
inode - > i_blocks - = BLOCKS_PER_PAGE ;
2011-05-12 02:13:38 +04:00
spin_unlock ( & info - > lock ) ;
tmpfs: convert shmem_getpage_gfp to radix-swap
Convert shmem_getpage_gfp(), the engine-room of shmem, to expect page or
swap entry returned from radix tree by find_lock_page().
Whereas the repetitive old method proceeded mainly under info->lock,
dropping and repeating whenever one of the conditions needed was not
met, now we can proceed without it, leaving shmem_add_to_page_cache() to
check for a race.
This way there is no need to preallocate a page, no need for an early
radix_tree_preload(), no need for mem_cgroup_shmem_charge_fallback().
Move the error unwinding down to the bottom instead of repeating it
throughout. ENOSPC handling is a little different from before: there is
no longer any race between find_lock_page() and finding swap, but we can
arrive at ENOSPC before calling shmem_recalc_inode(), which might
occasionally discover freed space.
Be stricter to check i_size before returning. info->lock is used for
little but alloced, swapped, i_blocks updates. Move i_blocks updates
out from under the max_blocks check, so even an unlimited size=0 mount
can show accurate du.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:24 +04:00
decused :
if ( sbinfo - > max_blocks )
percpu_counter_add ( & sbinfo - > used_blocks , - 1 ) ;
unacct :
shmem_unacct_blocks ( info - > flags , 1 ) ;
failed :
if ( swap . val & & error ! = - EINVAL ) {
struct page * test = find_get_page ( mapping , index ) ;
if ( test & & ! radix_tree_exceptional_entry ( test ) )
page_cache_release ( test ) ;
/* Have another try if the entry has changed */
if ( test ! = swp_to_radix_entry ( swap ) )
error = - EEXIST ;
}
2011-07-26 04:12:36 +04:00
if ( page ) {
tmpfs: convert shmem_getpage_gfp to radix-swap
Convert shmem_getpage_gfp(), the engine-room of shmem, to expect page or
swap entry returned from radix tree by find_lock_page().
Whereas the repetitive old method proceeded mainly under info->lock,
dropping and repeating whenever one of the conditions needed was not
met, now we can proceed without it, leaving shmem_add_to_page_cache() to
check for a race.
This way there is no need to preallocate a page, no need for an early
radix_tree_preload(), no need for mem_cgroup_shmem_charge_fallback().
Move the error unwinding down to the bottom instead of repeating it
throughout. ENOSPC handling is a little different from before: there is
no longer any race between find_lock_page() and finding swap, but we can
arrive at ENOSPC before calling shmem_recalc_inode(), which might
occasionally discover freed space.
Be stricter to check i_size before returning. info->lock is used for
little but alloced, swapped, i_blocks updates. Move i_blocks updates
out from under the max_blocks check, so even an unlimited size=0 mount
can show accurate du.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:24 +04:00
unlock_page ( page ) ;
2011-07-26 04:12:36 +04:00
page_cache_release ( page ) ;
tmpfs: convert shmem_getpage_gfp to radix-swap
Convert shmem_getpage_gfp(), the engine-room of shmem, to expect page or
swap entry returned from radix tree by find_lock_page().
Whereas the repetitive old method proceeded mainly under info->lock,
dropping and repeating whenever one of the conditions needed was not
met, now we can proceed without it, leaving shmem_add_to_page_cache() to
check for a race.
This way there is no need to preallocate a page, no need for an early
radix_tree_preload(), no need for mem_cgroup_shmem_charge_fallback().
Move the error unwinding down to the bottom instead of repeating it
throughout. ENOSPC handling is a little different from before: there is
no longer any race between find_lock_page() and finding swap, but we can
arrive at ENOSPC before calling shmem_recalc_inode(), which might
occasionally discover freed space.
Be stricter to check i_size before returning. info->lock is used for
little but alloced, swapped, i_blocks updates. Move i_blocks updates
out from under the max_blocks check, so even an unlimited size=0 mount
can show accurate du.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:24 +04:00
}
if ( error = = - ENOSPC & & ! once + + ) {
info = SHMEM_I ( inode ) ;
spin_lock ( & info - > lock ) ;
shmem_recalc_inode ( inode ) ;
spin_unlock ( & info - > lock ) ;
2011-07-26 04:12:36 +04:00
goto repeat ;
2010-08-10 04:19:06 +04:00
}
tmpfs: convert shmem_getpage_gfp to radix-swap
Convert shmem_getpage_gfp(), the engine-room of shmem, to expect page or
swap entry returned from radix tree by find_lock_page().
Whereas the repetitive old method proceeded mainly under info->lock,
dropping and repeating whenever one of the conditions needed was not
met, now we can proceed without it, leaving shmem_add_to_page_cache() to
check for a race.
This way there is no need to preallocate a page, no need for an early
radix_tree_preload(), no need for mem_cgroup_shmem_charge_fallback().
Move the error unwinding down to the bottom instead of repeating it
throughout. ENOSPC handling is a little different from before: there is
no longer any race between find_lock_page() and finding swap, but we can
arrive at ENOSPC before calling shmem_recalc_inode(), which might
occasionally discover freed space.
Be stricter to check i_size before returning. info->lock is used for
little but alloced, swapped, i_blocks updates. Move i_blocks updates
out from under the max_blocks check, so even an unlimited size=0 mount
can show accurate du.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:24 +04:00
if ( error = = - EEXIST )
goto repeat ;
return error ;
2005-04-17 02:20:36 +04:00
}
2007-07-19 12:47:03 +04:00
/*
 * Page-fault handler for shmem/tmpfs mappings.
 *
 * Returns a VM_FAULT_* code; on success the looked-up page is returned
 * locked in vmf->page (hence the VM_FAULT_LOCKED default in @ret).
 */
static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	int error;
	int ret = VM_FAULT_LOCKED;

	/*
	 * Find or bring in the page at the faulting offset.  shmem_getpage()
	 * reports extra fault status (e.g. VM_FAULT_MAJOR) through *ret,
	 * which is checked below.
	 */
	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
	if (error)
		/* -ENOMEM maps to OOM; any other failure is a SIGBUS */
		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);

	if (ret & VM_FAULT_MAJOR) {
		/* Account the major fault to both global vm stats and memcg */
		count_vm_event(PGMAJFAULT);
		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
	}
	return ret;
}
# ifdef CONFIG_NUMA
2011-08-04 03:21:21 +04:00
static int shmem_set_policy ( struct vm_area_struct * vma , struct mempolicy * mpol )
2005-04-17 02:20:36 +04:00
{
2011-08-04 03:21:21 +04:00
struct inode * inode = vma - > vm_file - > f_path . dentry - > d_inode ;
return mpol_set_shared_policy ( & SHMEM_I ( inode ) - > policy , vma , mpol ) ;
2005-04-17 02:20:36 +04:00
}
2007-10-16 12:26:26 +04:00
static struct mempolicy * shmem_get_policy ( struct vm_area_struct * vma ,
unsigned long addr )
2005-04-17 02:20:36 +04:00
{
2011-08-04 03:21:21 +04:00
struct inode * inode = vma - > vm_file - > f_path . dentry - > d_inode ;
pgoff_t index ;
2005-04-17 02:20:36 +04:00
2011-08-04 03:21:21 +04:00
index = ( ( addr - vma - > vm_start ) > > PAGE_SHIFT ) + vma - > vm_pgoff ;
return mpol_shared_policy_lookup ( & SHMEM_I ( inode ) - > policy , index ) ;
2005-04-17 02:20:36 +04:00
}
# endif
/*
 * Lock or unlock a shmem file's pages in memory (presumably backs the
 * SysV SHM_LOCK/SHM_UNLOCK path — confirm against callers).
 *
 * @lock: nonzero to lock, zero to unlock.
 * @user: the user to charge/uncharge the locked size against.
 *
 * Returns 0 on success, -ENOMEM if the user's locked-memory limit
 * would be exceeded.  info->lock serializes updates to info->flags.
 */
int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	spin_lock(&info->lock);
	if (lock && !(info->flags & VM_LOCKED)) {
		/* Charge i_size against the user's locked-memory allowance */
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
		info->flags |= VM_LOCKED;
		/* Locked pages must not be reclaimed: take them off the LRU */
		mapping_set_unevictable(file->f_mapping);
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
		mapping_clear_unevictable(file->f_mapping);
		/* Return the now-evictable pages to the normal LRU lists */
		scan_mapping_unevictable_pages(file->f_mapping);
	}
	retval = 0;

out_nomem:
	spin_unlock(&info->lock);
	return retval;
}
2007-03-01 07:11:03 +03:00
static int shmem_mmap ( struct file * file , struct vm_area_struct * vma )
2005-04-17 02:20:36 +04:00
{
file_accessed ( file ) ;
vma - > vm_ops = & shmem_vm_ops ;
2007-07-19 12:47:03 +04:00
vma - > vm_flags | = VM_CAN_NONLINEAR ;
2005-04-17 02:20:36 +04:00
return 0 ;
}
2010-03-04 17:32:18 +03:00
/*
 * Allocate and initialize a new shmem inode.
 *
 * @sb:    superblock of the tmpfs mount
 * @dir:   parent directory (used by inode_init_owner for ownership), or NULL
 * @mode:  file type and permission bits
 * @dev:   device number for special inodes (S_IFCHR/S_IFBLK etc.)
 * @flags: VM flags; only VM_NORESERVE is kept in info->flags
 *
 * Returns the new inode, or NULL if the mount's inode limit is reached
 * or new_inode() fails.
 */
static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
				     int mode, dev_t dev, unsigned long flags)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	/* Respect the mount's max-inodes limit before allocating */
	if (shmem_reserve_inode(sb))
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		inode->i_blocks = 0;
		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_generation = get_seconds();
		info = SHMEM_I(inode);
		/*
		 * Zero the shmem-private part of the combined allocation:
		 * everything from info up to (but excluding) the inode itself.
		 */
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		info->flags = flags & VM_NORESERVE;
		INIT_LIST_HEAD(&info->swaplist);
		INIT_LIST_HEAD(&info->xattr_list);
		cache_no_acl(inode);

		switch (mode & S_IFMT) {
		default:
			/* Character/block devices, FIFOs, sockets */
			inode->i_op = &shmem_special_inode_operations;
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_mapping->a_ops = &shmem_aops;
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			/* Seed the per-inode NUMA policy from the mount's policy */
			mpol_shared_policy_init(&info->policy,
						shmem_get_sbmpol(sbinfo));
			break;
		case S_IFDIR:
			/* Extra link for the directory's "." entry */
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			/*
			 * Must not load anything in the rbtree,
			 * mpol_free_shared_policy will not be called.
			 */
			mpol_shared_policy_init(&info->policy, NULL);
			break;
		}
	} else
		/* new_inode() failed: release the reserved inode count */
		shmem_free_inode(sb);
	return inode;
}
# ifdef CONFIG_TMPFS
2007-02-12 11:55:39 +03:00
static const struct inode_operations shmem_symlink_inode_operations ;
2011-08-04 03:21:26 +04:00
static const struct inode_operations shmem_short_symlink_operations ;
2005-04-17 02:20:36 +04:00
static int
2007-10-16 12:25:03 +04:00
shmem_write_begin ( struct file * file , struct address_space * mapping ,
loff_t pos , unsigned len , unsigned flags ,
struct page * * pagep , void * * fsdata )
2005-04-17 02:20:36 +04:00
{
2007-10-16 12:25:03 +04:00
struct inode * inode = mapping - > host ;
pgoff_t index = pos > > PAGE_CACHE_SHIFT ;
return shmem_getpage ( inode , index , pagep , SGP_WRITE , NULL ) ;
}
/*
 * Complete a write started by shmem_write_begin: extend i_size if the
 * write went past it, mark the page dirty, then drop the lock and the
 * reference taken by shmem_getpage().  Returns the number of bytes
 * copied, unchanged.
 */
static int
shmem_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;

	/* Grow the file before exposing the page as dirty */
	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);

	set_page_dirty(page);
	unlock_page(page);
	page_cache_release(page);

	return copied;
}
/*
 * Core of tmpfs read: walk the file page by page from *ppos, feeding each
 * page (or ZERO_PAGE for holes) to @actor, which copies data out and
 * updates desc->count/desc->written.  *ppos is advanced by the amount
 * consumed; errors are reported through desc->error.
 */
static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index;
	unsigned long offset;
	enum sgp_type sgp = SGP_READ;

	/*
	 * Might this read be for a stacking filesystem?  Then when reading
	 * holes of a sparse file, we actually need to allocate those pages,
	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
	 */
	if (segment_eq(get_fs(), KERNEL_DS))
		sgp = SGP_DIRTY;

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page = NULL;
		pgoff_t end_index;
		unsigned long nr, ret;
		loff_t i_size = i_size_read(inode);

		/* Stop at EOF; on the last page, stop once past the tail */
		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset)
				break;
		}

		desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
		if (desc->error) {
			/* -EINVAL here is treated as a benign EOF condition */
			if (desc->error == -EINVAL)
				desc->error = 0;
			break;
		}
		/* page may be NULL for a hole when sgp == SGP_READ */
		if (page)
			unlock_page(page);

		/*
		 * We must evaluate after, since reads (unlike writes)
		 * are called without i_mutex protection against truncate
		 */
		nr = PAGE_CACHE_SIZE;
		i_size = i_size_read(inode);
		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset) {
				/* raced with truncate: drop page and stop */
				if (page)
					page_cache_release(page);
				break;
			}
		}
		nr -= offset;

		if (page) {
			/*
			 * If users can be writing to this page using arbitrary
			 * virtual addresses, take care about potential aliasing
			 * before reading the page on the kernel side.
			 */
			if (mapping_writably_mapped(mapping))
				flush_dcache_page(page);
			/*
			 * Mark the page accessed if we read the beginning.
			 */
			if (!offset)
				mark_page_accessed(page);
		} else {
			/* Hole: hand the shared zero page to the actor */
			page = ZERO_PAGE(0);
			page_cache_get(page);
		}

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		/* Short copy or descriptor exhausted: we are done */
		if (ret != nr || !desc->count)
			break;

		cond_resched();
	}

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
}
2008-07-24 08:27:35 +04:00
/*
 * ->aio_read for tmpfs: validate the iovec, then run do_shmem_file_read()
 * over each segment in turn, accumulating bytes written.  Returns bytes
 * read, or the first error if nothing was read before it occurred.
 */
static ssize_t shmem_file_aio_read(struct kiocb *iocb,
		const struct iovec *iov, unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	ssize_t retval;
	unsigned long seg;
	size_t count;
	loff_t *ppos = &iocb->ki_pos;

	/* Checks access and may shrink nr_segs; count is total length */
	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
	if (retval)
		return retval;

	for (seg = 0; seg < nr_segs; seg++) {
		read_descriptor_t desc;

		desc.written = 0;
		desc.arg.buf = iov[seg].iov_base;
		desc.count = iov[seg].iov_len;
		if (desc.count == 0)
			continue;
		desc.error = 0;
		do_shmem_file_read(filp, ppos, &desc, file_read_actor);
		retval += desc.written;
		if (desc.error) {
			/* report the error only if no data was returned */
			retval = retval ?: desc.error;
			break;
		}
		/* Segment not fully consumed means EOF/short read: stop */
		if (desc.count > 0)
			break;
	}
	return retval;
}
2011-07-26 04:12:32 +04:00
/*
 * Splice data from a tmpfs file into a pipe without copying: gather up to
 * a pipe-full of page references starting at *ppos, allocating any missing
 * pages via shmem_getpage(), then hand them to splice_to_pipe().
 * Returns the number of bytes spliced, 0 at EOF, or a negative errno.
 */
static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
				struct pipe_inode_info *pipe, size_t len,
				unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	struct inode *inode = mapping->host;
	unsigned int loff, nr_pages, req_pages;
	struct page *pages[PIPE_DEF_BUFFERS];
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct page *page;
	pgoff_t index, end_index;
	loff_t isize, left;
	int error, page_nr;
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.flags = flags,
		.ops = &page_cache_pipe_buf_ops,
		.spd_release = spd_release_page,
	};

	isize = i_size_read(inode);
	if (unlikely(*ppos >= isize))
		return 0;

	/* Clamp the request to what is actually in the file */
	left = isize - *ppos;
	if (unlikely(left < len))
		len = left;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	index = *ppos >> PAGE_CACHE_SHIFT;
	loff = *ppos & ~PAGE_CACHE_MASK;
	req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	nr_pages = min(req_pages, pipe->buffers);

	/* Grab whatever is already resident in the page cache... */
	spd.nr_pages = find_get_pages_contig(mapping, index,
						nr_pages, spd.pages);
	index += spd.nr_pages;
	error = 0;

	/* ...and allocate/instantiate the rest through shmem_getpage() */
	while (spd.nr_pages < nr_pages) {
		error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL);
		if (error)
			break;
		unlock_page(page);
		spd.pages[spd.nr_pages++] = page;
		index++;
	}

	/* Second pass: fill in the partial[] lengths/offsets per page */
	index = *ppos >> PAGE_CACHE_SHIFT;
	nr_pages = spd.nr_pages;
	spd.nr_pages = 0;

	for (page_nr = 0; page_nr < nr_pages; page_nr++) {
		unsigned int this_len;

		if (!len)
			break;

		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
		page = spd.pages[page_nr];

		/*
		 * A page found by find_get_pages_contig() may since have
		 * been truncated or swapped out: re-lookup and substitute
		 * a valid one if so.
		 */
		if (!PageUptodate(page) || page->mapping != mapping) {
			error = shmem_getpage(inode, index, &page,
							SGP_CACHE, NULL);
			if (error)
				break;
			unlock_page(page);
			page_cache_release(spd.pages[page_nr]);
			spd.pages[page_nr] = page;
		}

		/* Re-check i_size: reads run without i_mutex vs truncate */
		isize = i_size_read(inode);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index))
			break;

		if (end_index == index) {
			unsigned int plen;

			/* bytes valid on the final page */
			plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (plen <= loff)
				break;

			this_len = min(this_len, plen - loff);
			len = this_len;
		}

		spd.partial[page_nr].offset = loff;
		spd.partial[page_nr].len = this_len;
		len -= this_len;
		loff = 0;	/* only the first page has a nonzero offset */
		spd.nr_pages++;
		index++;
	}

	/* Release any gathered pages we ended up not splicing */
	while (page_nr < nr_pages)
		page_cache_release(spd.pages[page_nr++]);

	if (spd.nr_pages)
		error = splice_to_pipe(pipe, &spd);

	splice_shrink_spd(pipe, &spd);

	if (error > 0) {
		*ppos += error;
		file_accessed(in);
	}
	return error;
}
2006-06-23 13:02:58 +04:00
static int shmem_statfs ( struct dentry * dentry , struct kstatfs * buf )
2005-04-17 02:20:36 +04:00
{
2006-06-23 13:02:58 +04:00
struct shmem_sb_info * sbinfo = SHMEM_SB ( dentry - > d_sb ) ;
2005-04-17 02:20:36 +04:00
buf - > f_type = TMPFS_MAGIC ;
buf - > f_bsize = PAGE_CACHE_SIZE ;
buf - > f_namelen = NAME_MAX ;
2005-06-22 04:15:04 +04:00
if ( sbinfo - > max_blocks ) {
2005-04-17 02:20:36 +04:00
buf - > f_blocks = sbinfo - > max_blocks ;
2011-08-04 03:21:21 +04:00
buf - > f_bavail =
buf - > f_bfree = sbinfo - > max_blocks -
percpu_counter_sum ( & sbinfo - > used_blocks ) ;
2005-06-22 04:15:04 +04:00
}
if ( sbinfo - > max_inodes ) {
2005-04-17 02:20:36 +04:00
buf - > f_files = sbinfo - > max_inodes ;
buf - > f_ffree = sbinfo - > free_inodes ;
}
/* else leave those fields 0 like simple_statfs */
return 0 ;
}
/*
 * File creation. Allocate an inode, and we're done..
 *
 * Backing routine for create/mkdir/symlink: allocates the inode, applies
 * LSM security and (optionally) POSIX ACL initialization, then wires the
 * dentry to it.  Returns 0 or a negative errno.
 */
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
	if (inode) {
		error = security_inode_init_security(inode, dir,
						     &dentry->d_name, NULL,
						     NULL, NULL);
		if (error) {
			/* -EOPNOTSUPP just means the LSM has no label here */
			if (error != -EOPNOTSUPP) {
				iput(inode);
				return error;
			}
		}
#ifdef CONFIG_TMPFS_POSIX_ACL
		error = generic_acl_init(inode, dir);
		if (error) {
			iput(inode);
			return error;
		}
#else
		error = 0;	/* clear a possible -EOPNOTSUPP from above */
#endif
		dir->i_size += BOGO_DIRENT_SIZE;
		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
		d_instantiate(dentry, inode);
		dget(dentry); /* Extra count - pin the dentry in core */
	}
	return error;
}
static int shmem_mkdir ( struct inode * dir , struct dentry * dentry , int mode )
{
int error ;
if ( ( error = shmem_mknod ( dir , dentry , mode | S_IFDIR , 0 ) ) )
return error ;
2006-10-01 10:29:04 +04:00
inc_nlink ( dir ) ;
2005-04-17 02:20:36 +04:00
return 0 ;
}
/* Create a regular file: just mknod with S_IFREG and no device. */
static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
		struct nameidata *nd)
{
	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
}
/*
 * Link a file..
 *
 * Reserves an inode slot for the extra dentry, then attaches @dentry to
 * the existing inode with the usual link-count and reference bumps.
 */
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int ret;

	/*
	 * No ordinary (disk based) filesystem counts links as inodes;
	 * but each new link needs a new dentry, pinning lowmem, and
	 * tmpfs dentries cannot be pruned until they are unlinked.
	 */
	ret = shmem_reserve_inode(inode->i_sb);
	if (ret)
		goto out;

	dir->i_size += BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	inc_nlink(inode);
	ihold(inode);	/* New dentry reference */
	dget(dentry);	/* Extra pinning count for the created dentry */
	d_instantiate(dentry, inode);
out:
	return ret;
}
static int shmem_unlink ( struct inode * dir , struct dentry * dentry )
{
struct inode * inode = dentry - > d_inode ;
2008-02-05 09:28:47 +03:00
if ( inode - > i_nlink > 1 & & ! S_ISDIR ( inode - > i_mode ) )
shmem_free_inode ( inode - > i_sb ) ;
2005-04-17 02:20:36 +04:00
dir - > i_size - = BOGO_DIRENT_SIZE ;
inode - > i_ctime = dir - > i_ctime = dir - > i_mtime = CURRENT_TIME ;
2006-10-01 10:29:03 +04:00
drop_nlink ( inode ) ;
2005-04-17 02:20:36 +04:00
dput ( dentry ) ; /* Undo the count from "create" - this does all the work */
return 0 ;
}
/*
 * Remove an empty directory: drop the links held for "." (on the child)
 * and ".." (on the parent), then finish via shmem_unlink().
 */
static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (!simple_empty(dentry))
		return -ENOTEMPTY;

	drop_nlink(dentry->d_inode);
	drop_nlink(dir);
	return shmem_unlink(dir, dentry);
}
/*
* The VFS layer already does all the dentry stuff for rename ,
* we just have to decrement the usage count for the target if
* it exists so that the VFS layer correctly free ' s it when it
* gets overwritten .
*/
static int shmem_rename ( struct inode * old_dir , struct dentry * old_dentry , struct inode * new_dir , struct dentry * new_dentry )
{
struct inode * inode = old_dentry - > d_inode ;
int they_are_dirs = S_ISDIR ( inode - > i_mode ) ;
if ( ! simple_empty ( new_dentry ) )
return - ENOTEMPTY ;
if ( new_dentry - > d_inode ) {
( void ) shmem_unlink ( new_dir , new_dentry ) ;
if ( they_are_dirs )
2006-10-01 10:29:03 +04:00
drop_nlink ( old_dir ) ;
2005-04-17 02:20:36 +04:00
} else if ( they_are_dirs ) {
2006-10-01 10:29:03 +04:00
drop_nlink ( old_dir ) ;
2006-10-01 10:29:04 +04:00
inc_nlink ( new_dir ) ;
2005-04-17 02:20:36 +04:00
}
old_dir - > i_size - = BOGO_DIRENT_SIZE ;
new_dir - > i_size + = BOGO_DIRENT_SIZE ;
old_dir - > i_ctime = old_dir - > i_mtime =
new_dir - > i_ctime = new_dir - > i_mtime =
inode - > i_ctime = CURRENT_TIME ;
return 0 ;
}
/*
 * Create a symlink.  Short targets (<= SHORT_SYMLINK_LEN, including the
 * trailing NUL) are kmemdup'd into the inode's info->symlink; longer ones
 * are stored in page 0 of the inode's mapping, so they remain swappable.
 * The inode_operations chosen reflect which storage was used.
 */
static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
	int error;
	int len;
	struct inode *inode;
	struct page *page;
	char *kaddr;
	struct shmem_inode_info *info;

	len = strlen(symname) + 1;	/* store the NUL too */
	if (len > PAGE_CACHE_SIZE)
		return -ENAMETOOLONG;

	inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
	if (!inode)
		return -ENOSPC;

	error = security_inode_init_security(inode, dir, &dentry->d_name, NULL,
					     NULL, NULL);
	if (error) {
		/* -EOPNOTSUPP just means the LSM has no label to apply */
		if (error != -EOPNOTSUPP) {
			iput(inode);
			return error;
		}
		error = 0;
	}

	info = SHMEM_I(inode);
	inode->i_size = len - 1;	/* i_size excludes the NUL */
	if (len <= SHORT_SYMLINK_LEN) {
		info->symlink = kmemdup(symname, len, GFP_KERNEL);
		if (!info->symlink) {
			iput(inode);
			return -ENOMEM;
		}
		inode->i_op = &shmem_short_symlink_operations;
	} else {
		/* Long target: write it into page 0 of the mapping */
		error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
		if (error) {
			iput(inode);
			return error;
		}
		inode->i_mapping->a_ops = &shmem_aops;
		inode->i_op = &shmem_symlink_inode_operations;
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(kaddr, symname, len);
		kunmap_atomic(kaddr, KM_USER0);
		set_page_dirty(page);
		unlock_page(page);
		page_cache_release(page);
	}
	dir->i_size += BOGO_DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	d_instantiate(dentry, inode);
	dget(dentry);	/* pin the dentry, as in shmem_mknod() */
	return 0;
}
2011-08-04 03:21:26 +04:00
/*
 * ->follow_link for short symlinks: the target lives directly in
 * info->symlink, so just hand it to the nameidata.  Returns NULL as the
 * cookie since there is nothing for ->put_link to release.
 */
static void *shmem_follow_short_symlink(struct dentry *dentry, struct nameidata *nd)
{
	nd_set_link(nd, SHMEM_I(dentry->d_inode)->symlink);
	return NULL;
}
2005-08-20 05:02:56 +04:00
/*
 * ->follow_link for page-backed symlinks: read page 0, kmap it and give
 * the mapped target to the nameidata.  The page (still mapped) is
 * returned as the cookie for shmem_put_link() to unmap and release.
 * On error, the ERR_PTR is stored in the nameidata instead.
 */
static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct page *page = NULL;
	int error = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
	nd_set_link(nd, error ? ERR_PTR(error) : kmap(page));
	if (page)
		unlock_page(page);
	return page;
}
2005-08-20 05:02:56 +04:00
/*
 * ->put_link counterpart of shmem_follow_link: unmap the symlink page
 * and drop the reference held since follow_link.  Nothing to do when
 * follow_link failed (the nameidata then holds an ERR_PTR).
 */
static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
	struct page *page = cookie;

	if (IS_ERR(nd_get_link(nd)))
		return;

	kunmap(page);
	mark_page_accessed(page);
	page_cache_release(page);
}
tmpfs: implement generic xattr support
Implement generic xattrs for tmpfs filesystems. The Feodra project, while
trying to replace suid apps with file capabilities, realized that tmpfs,
which is used on the build systems, does not support file capabilities and
thus cannot be used to build packages which use file capabilities. Xattrs
are also needed for overlayfs.
The xattr interface is a bit odd. If a filesystem does not implement any
{get,set,list}xattr functions the VFS will call into some random LSM hooks
and the running LSM can then implement some method for handling xattrs.
SELinux for example provides a method to support security.selinux but no
other security.* xattrs.
As it stands today when one enables CONFIG_TMPFS_POSIX_ACL tmpfs will have
xattr handler routines specifically to handle acls. Because of this tmpfs
would loose the VFS/LSM helpers to support the running LSM. To make up
for that tmpfs had stub functions that did nothing but call into the LSM
hooks which implement the helpers.
This new patch does not use the LSM fallback functions and instead just
implements a native get/set/list xattr feature for the full security.* and
trusted.* namespace like a normal filesystem. This means that tmpfs can
now support both security.selinux and security.capability, which was not
previously possible.
The basic implementation is that I attach a:
struct shmem_xattr {
struct list_head list; /* anchored by shmem_inode_info->xattr_list */
char *name;
size_t size;
char value[0];
};
Into the struct shmem_inode_info for each xattr that is set. This
implementation could easily support the user.* namespace as well, except
some care needs to be taken to prevent large amounts of unswappable memory
being allocated for unprivileged users.
[mszeredi@suse.cz: new config option, suport trusted.*, support symlinks]
Signed-off-by: Eric Paris <eparis@redhat.com>
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Acked-by: Serge Hallyn <serge.hallyn@ubuntu.com>
Tested-by: Serge Hallyn <serge.hallyn@ubuntu.com>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Acked-by: Hugh Dickins <hughd@google.com>
Tested-by: Jordi Pujol <jordipujolp@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-05-25 04:12:39 +04:00
# ifdef CONFIG_TMPFS_XATTR
2008-03-20 03:00:41 +03:00
/*
 * Superblocks without xattr inode operations may get some security.* xattr
 * support from the LSM "for free". As soon as we have any other xattrs
 * like ACLs, we also need to implement the security.* handlers at
 * filesystem level, though.
 */
tmpfs: implement generic xattr support
Implement generic xattrs for tmpfs filesystems. The Feodra project, while
trying to replace suid apps with file capabilities, realized that tmpfs,
which is used on the build systems, does not support file capabilities and
thus cannot be used to build packages which use file capabilities. Xattrs
are also needed for overlayfs.
The xattr interface is a bit odd. If a filesystem does not implement any
{get,set,list}xattr functions the VFS will call into some random LSM hooks
and the running LSM can then implement some method for handling xattrs.
SELinux for example provides a method to support security.selinux but no
other security.* xattrs.
As it stands today when one enables CONFIG_TMPFS_POSIX_ACL tmpfs will have
xattr handler routines specifically to handle acls. Because of this tmpfs
would loose the VFS/LSM helpers to support the running LSM. To make up
for that tmpfs had stub functions that did nothing but call into the LSM
hooks which implement the helpers.
This new patch does not use the LSM fallback functions and instead just
implements a native get/set/list xattr feature for the full security.* and
trusted.* namespace like a normal filesystem. This means that tmpfs can
now support both security.selinux and security.capability, which was not
previously possible.
The basic implementation is that I attach a:
struct shmem_xattr {
struct list_head list; /* anchored by shmem_inode_info->xattr_list */
char *name;
size_t size;
char value[0];
};
Into the struct shmem_inode_info for each xattr that is set. This
implementation could easily support the user.* namespace as well, except
some care needs to be taken to prevent large amounts of unswappable memory
being allocated for unprivileged users.
[mszeredi@suse.cz: new config option, suport trusted.*, support symlinks]
Signed-off-by: Eric Paris <eparis@redhat.com>
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Acked-by: Serge Hallyn <serge.hallyn@ubuntu.com>
Tested-by: Serge Hallyn <serge.hallyn@ubuntu.com>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Acked-by: Hugh Dickins <hughd@google.com>
Tested-by: Jordi Pujol <jordipujolp@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-05-25 04:12:39 +04:00
/*
 * Look up xattr @name on the dentry's inode.  Returns the attribute's
 * size on success (copying the value into @buffer when one is supplied
 * and large enough), -ERANGE if @buffer is too small, or -ENODATA when
 * the attribute does not exist.  The list walk runs under info->lock.
 */
static int shmem_xattr_get(struct dentry *dentry, const char *name,
			   void *buffer, size_t size)
{
	struct shmem_inode_info *info;
	struct shmem_xattr *xattr;
	int ret = -ENODATA;

	info = SHMEM_I(dentry->d_inode);

	spin_lock(&info->lock);
	list_for_each_entry(xattr, &info->xattr_list, list) {
		if (strcmp(name, xattr->name))
			continue;

		/* Found: report size; copy only if caller gave a buffer */
		ret = xattr->size;
		if (buffer) {
			if (size < xattr->size)
				ret = -ERANGE;
			else
				memcpy(buffer, xattr->value, xattr->size);
		}
		break;
	}
	spin_unlock(&info->lock);
	return ret;
}
tmpfs: implement generic xattr support
Implement generic xattrs for tmpfs filesystems. The Feodra project, while
trying to replace suid apps with file capabilities, realized that tmpfs,
which is used on the build systems, does not support file capabilities and
thus cannot be used to build packages which use file capabilities. Xattrs
are also needed for overlayfs.
The xattr interface is a bit odd. If a filesystem does not implement any
{get,set,list}xattr functions the VFS will call into some random LSM hooks
and the running LSM can then implement some method for handling xattrs.
SELinux for example provides a method to support security.selinux but no
other security.* xattrs.
As it stands today when one enables CONFIG_TMPFS_POSIX_ACL tmpfs will have
xattr handler routines specifically to handle acls. Because of this tmpfs
would loose the VFS/LSM helpers to support the running LSM. To make up
for that tmpfs had stub functions that did nothing but call into the LSM
hooks which implement the helpers.
This new patch does not use the LSM fallback functions and instead just
implements a native get/set/list xattr feature for the full security.* and
trusted.* namespace like a normal filesystem. This means that tmpfs can
now support both security.selinux and security.capability, which was not
previously possible.
The basic implementation is that I attach a:
struct shmem_xattr {
struct list_head list; /* anchored by shmem_inode_info->xattr_list */
char *name;
size_t size;
char value[0];
};
Into the struct shmem_inode_info for each xattr that is set. This
implementation could easily support the user.* namespace as well, except
some care needs to be taken to prevent large amounts of unswappable memory
being allocated for unprivileged users.
[mszeredi@suse.cz: new config option, suport trusted.*, support symlinks]
Signed-off-by: Eric Paris <eparis@redhat.com>
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Acked-by: Serge Hallyn <serge.hallyn@ubuntu.com>
Tested-by: Serge Hallyn <serge.hallyn@ubuntu.com>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Acked-by: Hugh Dickins <hughd@google.com>
Tested-by: Jordi Pujol <jordipujolp@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-05-25 04:12:39 +04:00
static int shmem_xattr_set ( struct dentry * dentry , const char * name ,
const void * value , size_t size , int flags )
2006-09-29 13:01:35 +04:00
{
tmpfs: implement generic xattr support
Implement generic xattrs for tmpfs filesystems. The Feodra project, while
trying to replace suid apps with file capabilities, realized that tmpfs,
which is used on the build systems, does not support file capabilities and
thus cannot be used to build packages which use file capabilities. Xattrs
are also needed for overlayfs.
The xattr interface is a bit odd. If a filesystem does not implement any
{get,set,list}xattr functions the VFS will call into some random LSM hooks
and the running LSM can then implement some method for handling xattrs.
SELinux for example provides a method to support security.selinux but no
other security.* xattrs.
As it stands today when one enables CONFIG_TMPFS_POSIX_ACL tmpfs will have
xattr handler routines specifically to handle acls. Because of this tmpfs
would loose the VFS/LSM helpers to support the running LSM. To make up
for that tmpfs had stub functions that did nothing but call into the LSM
hooks which implement the helpers.
This new patch does not use the LSM fallback functions and instead just
implements a native get/set/list xattr feature for the full security.* and
trusted.* namespace like a normal filesystem. This means that tmpfs can
now support both security.selinux and security.capability, which was not
previously possible.
The basic implementation is that I attach a:
struct shmem_xattr {
struct list_head list; /* anchored by shmem_inode_info->xattr_list */
char *name;
size_t size;
char value[0];
};
Into the struct shmem_inode_info for each xattr that is set. This
implementation could easily support the user.* namespace as well, except
some care needs to be taken to prevent large amounts of unswappable memory
being allocated for unprivileged users.
[mszeredi@suse.cz: new config option, suport trusted.*, support symlinks]
Signed-off-by: Eric Paris <eparis@redhat.com>
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Acked-by: Serge Hallyn <serge.hallyn@ubuntu.com>
Tested-by: Serge Hallyn <serge.hallyn@ubuntu.com>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Acked-by: Hugh Dickins <hughd@google.com>
Tested-by: Jordi Pujol <jordipujolp@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-05-25 04:12:39 +04:00
struct inode * inode = dentry - > d_inode ;
struct shmem_inode_info * info = SHMEM_I ( inode ) ;
struct shmem_xattr * xattr ;
struct shmem_xattr * new_xattr = NULL ;
size_t len ;
int err = 0 ;
/* value == NULL means remove */
if ( value ) {
/* wrap around? */
len = sizeof ( * new_xattr ) + size ;
if ( len < = sizeof ( * new_xattr ) )
return - ENOMEM ;
new_xattr = kmalloc ( len , GFP_KERNEL ) ;
if ( ! new_xattr )
return - ENOMEM ;
new_xattr - > name = kstrdup ( name , GFP_KERNEL ) ;
if ( ! new_xattr - > name ) {
kfree ( new_xattr ) ;
return - ENOMEM ;
}
new_xattr - > size = size ;
memcpy ( new_xattr - > value , value , size ) ;
}
spin_lock ( & info - > lock ) ;
list_for_each_entry ( xattr , & info - > xattr_list , list ) {
if ( ! strcmp ( name , xattr - > name ) ) {
if ( flags & XATTR_CREATE ) {
xattr = new_xattr ;
err = - EEXIST ;
} else if ( new_xattr ) {
list_replace ( & xattr - > list , & new_xattr - > list ) ;
} else {
list_del ( & xattr - > list ) ;
}
goto out ;
}
}
if ( flags & XATTR_REPLACE ) {
xattr = new_xattr ;
err = - ENODATA ;
} else {
list_add ( & new_xattr - > list , & info - > xattr_list ) ;
xattr = NULL ;
}
out :
spin_unlock ( & info - > lock ) ;
if ( xattr )
kfree ( xattr - > name ) ;
kfree ( xattr ) ;
return err ;
2006-09-29 13:01:35 +04:00
}
2010-05-14 04:53:14 +04:00
static const struct xattr_handler * shmem_xattr_handlers [ ] = {
tmpfs: implement generic xattr support
Implement generic xattrs for tmpfs filesystems. The Feodra project, while
trying to replace suid apps with file capabilities, realized that tmpfs,
which is used on the build systems, does not support file capabilities and
thus cannot be used to build packages which use file capabilities. Xattrs
are also needed for overlayfs.
The xattr interface is a bit odd. If a filesystem does not implement any
{get,set,list}xattr functions the VFS will call into some random LSM hooks
and the running LSM can then implement some method for handling xattrs.
SELinux for example provides a method to support security.selinux but no
other security.* xattrs.
As it stands today when one enables CONFIG_TMPFS_POSIX_ACL tmpfs will have
xattr handler routines specifically to handle acls. Because of this tmpfs
would loose the VFS/LSM helpers to support the running LSM. To make up
for that tmpfs had stub functions that did nothing but call into the LSM
hooks which implement the helpers.
This new patch does not use the LSM fallback functions and instead just
implements a native get/set/list xattr feature for the full security.* and
trusted.* namespace like a normal filesystem. This means that tmpfs can
now support both security.selinux and security.capability, which was not
previously possible.
The basic implementation is that I attach a:
struct shmem_xattr {
struct list_head list; /* anchored by shmem_inode_info->xattr_list */
char *name;
size_t size;
char value[0];
};
Into the struct shmem_inode_info for each xattr that is set. This
implementation could easily support the user.* namespace as well, except
some care needs to be taken to prevent large amounts of unswappable memory
being allocated for unprivileged users.
[mszeredi@suse.cz: new config option, suport trusted.*, support symlinks]
Signed-off-by: Eric Paris <eparis@redhat.com>
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Acked-by: Serge Hallyn <serge.hallyn@ubuntu.com>
Tested-by: Serge Hallyn <serge.hallyn@ubuntu.com>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Acked-by: Hugh Dickins <hughd@google.com>
Tested-by: Jordi Pujol <jordipujolp@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-05-25 04:12:39 +04:00
# ifdef CONFIG_TMPFS_POSIX_ACL
2009-11-03 18:44:44 +03:00
& generic_acl_access_handler ,
& generic_acl_default_handler ,
tmpfs: implement generic xattr support
Implement generic xattrs for tmpfs filesystems. The Feodra project, while
trying to replace suid apps with file capabilities, realized that tmpfs,
which is used on the build systems, does not support file capabilities and
thus cannot be used to build packages which use file capabilities. Xattrs
are also needed for overlayfs.
The xattr interface is a bit odd. If a filesystem does not implement any
{get,set,list}xattr functions the VFS will call into some random LSM hooks
and the running LSM can then implement some method for handling xattrs.
SELinux for example provides a method to support security.selinux but no
other security.* xattrs.
As it stands today when one enables CONFIG_TMPFS_POSIX_ACL tmpfs will have
xattr handler routines specifically to handle acls. Because of this tmpfs
would loose the VFS/LSM helpers to support the running LSM. To make up
for that tmpfs had stub functions that did nothing but call into the LSM
hooks which implement the helpers.
This new patch does not use the LSM fallback functions and instead just
implements a native get/set/list xattr feature for the full security.* and
trusted.* namespace like a normal filesystem. This means that tmpfs can
now support both security.selinux and security.capability, which was not
previously possible.
The basic implementation is that I attach a:
struct shmem_xattr {
struct list_head list; /* anchored by shmem_inode_info->xattr_list */
char *name;
size_t size;
char value[0];
};
Into the struct shmem_inode_info for each xattr that is set. This
implementation could easily support the user.* namespace as well, except
some care needs to be taken to prevent large amounts of unswappable memory
being allocated for unprivileged users.
[mszeredi@suse.cz: new config option, suport trusted.*, support symlinks]
Signed-off-by: Eric Paris <eparis@redhat.com>
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Acked-by: Serge Hallyn <serge.hallyn@ubuntu.com>
Tested-by: Serge Hallyn <serge.hallyn@ubuntu.com>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Acked-by: Hugh Dickins <hughd@google.com>
Tested-by: Jordi Pujol <jordipujolp@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-05-25 04:12:39 +04:00
# endif
2006-09-29 13:01:35 +04:00
NULL
} ;
tmpfs: implement generic xattr support
Implement generic xattrs for tmpfs filesystems. The Feodra project, while
trying to replace suid apps with file capabilities, realized that tmpfs,
which is used on the build systems, does not support file capabilities and
thus cannot be used to build packages which use file capabilities. Xattrs
are also needed for overlayfs.
The xattr interface is a bit odd. If a filesystem does not implement any
{get,set,list}xattr functions the VFS will call into some random LSM hooks
and the running LSM can then implement some method for handling xattrs.
SELinux for example provides a method to support security.selinux but no
other security.* xattrs.
As it stands today when one enables CONFIG_TMPFS_POSIX_ACL tmpfs will have
xattr handler routines specifically to handle acls. Because of this tmpfs
would loose the VFS/LSM helpers to support the running LSM. To make up
for that tmpfs had stub functions that did nothing but call into the LSM
hooks which implement the helpers.
This new patch does not use the LSM fallback functions and instead just
implements a native get/set/list xattr feature for the full security.* and
trusted.* namespace like a normal filesystem. This means that tmpfs can
now support both security.selinux and security.capability, which was not
previously possible.
The basic implementation is that I attach a:
struct shmem_xattr {
struct list_head list; /* anchored by shmem_inode_info->xattr_list */
char *name;
size_t size;
char value[0];
};
Into the struct shmem_inode_info for each xattr that is set. This
implementation could easily support the user.* namespace as well, except
some care needs to be taken to prevent large amounts of unswappable memory
being allocated for unprivileged users.
[mszeredi@suse.cz: new config option, suport trusted.*, support symlinks]
Signed-off-by: Eric Paris <eparis@redhat.com>
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Acked-by: Serge Hallyn <serge.hallyn@ubuntu.com>
Tested-by: Serge Hallyn <serge.hallyn@ubuntu.com>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Acked-by: Hugh Dickins <hughd@google.com>
Tested-by: Jordi Pujol <jordipujolp@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-05-25 04:12:39 +04:00
static int shmem_xattr_validate ( const char * name )
{
struct { const char * prefix ; size_t len ; } arr [ ] = {
{ XATTR_SECURITY_PREFIX , XATTR_SECURITY_PREFIX_LEN } ,
{ XATTR_TRUSTED_PREFIX , XATTR_TRUSTED_PREFIX_LEN }
} ;
int i ;
for ( i = 0 ; i < ARRAY_SIZE ( arr ) ; i + + ) {
size_t preflen = arr [ i ] . len ;
if ( strncmp ( name , arr [ i ] . prefix , preflen ) = = 0 ) {
if ( ! name [ preflen ] )
return - EINVAL ;
return 0 ;
}
}
return - EOPNOTSUPP ;
}
static ssize_t shmem_getxattr ( struct dentry * dentry , const char * name ,
void * buffer , size_t size )
{
int err ;
/*
* If this is a request for a synthetic attribute in the system . *
* namespace use the generic infrastructure to resolve a handler
* for it via sb - > s_xattr .
*/
if ( ! strncmp ( name , XATTR_SYSTEM_PREFIX , XATTR_SYSTEM_PREFIX_LEN ) )
return generic_getxattr ( dentry , name , buffer , size ) ;
err = shmem_xattr_validate ( name ) ;
if ( err )
return err ;
return shmem_xattr_get ( dentry , name , buffer , size ) ;
}
static int shmem_setxattr ( struct dentry * dentry , const char * name ,
const void * value , size_t size , int flags )
{
int err ;
/*
* If this is a request for a synthetic attribute in the system . *
* namespace use the generic infrastructure to resolve a handler
* for it via sb - > s_xattr .
*/
if ( ! strncmp ( name , XATTR_SYSTEM_PREFIX , XATTR_SYSTEM_PREFIX_LEN ) )
return generic_setxattr ( dentry , name , value , size , flags ) ;
err = shmem_xattr_validate ( name ) ;
if ( err )
return err ;
if ( size = = 0 )
value = " " ; /* empty EA, do not remove */
return shmem_xattr_set ( dentry , name , value , size , flags ) ;
}
static int shmem_removexattr ( struct dentry * dentry , const char * name )
{
int err ;
/*
* If this is a request for a synthetic attribute in the system . *
* namespace use the generic infrastructure to resolve a handler
* for it via sb - > s_xattr .
*/
if ( ! strncmp ( name , XATTR_SYSTEM_PREFIX , XATTR_SYSTEM_PREFIX_LEN ) )
return generic_removexattr ( dentry , name ) ;
err = shmem_xattr_validate ( name ) ;
if ( err )
return err ;
return shmem_xattr_set ( dentry , name , NULL , 0 , XATTR_REPLACE ) ;
}
static bool xattr_is_trusted ( const char * name )
{
return ! strncmp ( name , XATTR_TRUSTED_PREFIX , XATTR_TRUSTED_PREFIX_LEN ) ;
}
static ssize_t shmem_listxattr ( struct dentry * dentry , char * buffer , size_t size )
{
bool trusted = capable ( CAP_SYS_ADMIN ) ;
struct shmem_xattr * xattr ;
struct shmem_inode_info * info ;
size_t used = 0 ;
info = SHMEM_I ( dentry - > d_inode ) ;
spin_lock ( & info - > lock ) ;
list_for_each_entry ( xattr , & info - > xattr_list , list ) {
size_t len ;
/* skip "trusted." attributes for unprivileged callers */
if ( ! trusted & & xattr_is_trusted ( xattr - > name ) )
continue ;
len = strlen ( xattr - > name ) + 1 ;
used + = len ;
if ( buffer ) {
if ( size < used ) {
used = - ERANGE ;
break ;
}
memcpy ( buffer , xattr - > name , len ) ;
buffer + = len ;
}
}
spin_unlock ( & info - > lock ) ;
return used ;
}
# endif /* CONFIG_TMPFS_XATTR */
2011-08-04 03:21:26 +04:00
static const struct inode_operations shmem_short_symlink_operations = {
tmpfs: implement generic xattr support
Implement generic xattrs for tmpfs filesystems. The Feodra project, while
trying to replace suid apps with file capabilities, realized that tmpfs,
which is used on the build systems, does not support file capabilities and
thus cannot be used to build packages which use file capabilities. Xattrs
are also needed for overlayfs.
The xattr interface is a bit odd. If a filesystem does not implement any
{get,set,list}xattr functions the VFS will call into some random LSM hooks
and the running LSM can then implement some method for handling xattrs.
SELinux for example provides a method to support security.selinux but no
other security.* xattrs.
As it stands today when one enables CONFIG_TMPFS_POSIX_ACL tmpfs will have
xattr handler routines specifically to handle acls. Because of this tmpfs
would loose the VFS/LSM helpers to support the running LSM. To make up
for that tmpfs had stub functions that did nothing but call into the LSM
hooks which implement the helpers.
This new patch does not use the LSM fallback functions and instead just
implements a native get/set/list xattr feature for the full security.* and
trusted.* namespace like a normal filesystem. This means that tmpfs can
now support both security.selinux and security.capability, which was not
previously possible.
The basic implementation is that I attach a:
struct shmem_xattr {
struct list_head list; /* anchored by shmem_inode_info->xattr_list */
char *name;
size_t size;
char value[0];
};
Into the struct shmem_inode_info for each xattr that is set. This
implementation could easily support the user.* namespace as well, except
some care needs to be taken to prevent large amounts of unswappable memory
being allocated for unprivileged users.
[mszeredi@suse.cz: new config option, suport trusted.*, support symlinks]
Signed-off-by: Eric Paris <eparis@redhat.com>
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Acked-by: Serge Hallyn <serge.hallyn@ubuntu.com>
Tested-by: Serge Hallyn <serge.hallyn@ubuntu.com>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Acked-by: Hugh Dickins <hughd@google.com>
Tested-by: Jordi Pujol <jordipujolp@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-05-25 04:12:39 +04:00
. readlink = generic_readlink ,
2011-08-04 03:21:26 +04:00
. follow_link = shmem_follow_short_symlink ,
tmpfs: implement generic xattr support
Implement generic xattrs for tmpfs filesystems. The Feodra project, while
trying to replace suid apps with file capabilities, realized that tmpfs,
which is used on the build systems, does not support file capabilities and
thus cannot be used to build packages which use file capabilities. Xattrs
are also needed for overlayfs.
The xattr interface is a bit odd. If a filesystem does not implement any
{get,set,list}xattr functions the VFS will call into some random LSM hooks
and the running LSM can then implement some method for handling xattrs.
SELinux for example provides a method to support security.selinux but no
other security.* xattrs.
As it stands today when one enables CONFIG_TMPFS_POSIX_ACL tmpfs will have
xattr handler routines specifically to handle acls. Because of this tmpfs
would loose the VFS/LSM helpers to support the running LSM. To make up
for that tmpfs had stub functions that did nothing but call into the LSM
hooks which implement the helpers.
This new patch does not use the LSM fallback functions and instead just
implements a native get/set/list xattr feature for the full security.* and
trusted.* namespace like a normal filesystem. This means that tmpfs can
now support both security.selinux and security.capability, which was not
previously possible.
The basic implementation is that I attach a:
struct shmem_xattr {
struct list_head list; /* anchored by shmem_inode_info->xattr_list */
char *name;
size_t size;
char value[0];
};
Into the struct shmem_inode_info for each xattr that is set. This
implementation could easily support the user.* namespace as well, except
some care needs to be taken to prevent large amounts of unswappable memory
being allocated for unprivileged users.
[mszeredi@suse.cz: new config option, suport trusted.*, support symlinks]
Signed-off-by: Eric Paris <eparis@redhat.com>
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Acked-by: Serge Hallyn <serge.hallyn@ubuntu.com>
Tested-by: Serge Hallyn <serge.hallyn@ubuntu.com>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Acked-by: Hugh Dickins <hughd@google.com>
Tested-by: Jordi Pujol <jordipujolp@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-05-25 04:12:39 +04:00
# ifdef CONFIG_TMPFS_XATTR
. setxattr = shmem_setxattr ,
. getxattr = shmem_getxattr ,
. listxattr = shmem_listxattr ,
. removexattr = shmem_removexattr ,
# endif
} ;
static const struct inode_operations shmem_symlink_inode_operations = {
. readlink = generic_readlink ,
. follow_link = shmem_follow_link ,
. put_link = shmem_put_link ,
# ifdef CONFIG_TMPFS_XATTR
. setxattr = shmem_setxattr ,
. getxattr = shmem_getxattr ,
. listxattr = shmem_listxattr ,
. removexattr = shmem_removexattr ,
2006-09-29 13:01:35 +04:00
# endif
tmpfs: implement generic xattr support
Implement generic xattrs for tmpfs filesystems. The Feodra project, while
trying to replace suid apps with file capabilities, realized that tmpfs,
which is used on the build systems, does not support file capabilities and
thus cannot be used to build packages which use file capabilities. Xattrs
are also needed for overlayfs.
The xattr interface is a bit odd. If a filesystem does not implement any
{get,set,list}xattr functions the VFS will call into some random LSM hooks
and the running LSM can then implement some method for handling xattrs.
SELinux for example provides a method to support security.selinux but no
other security.* xattrs.
As it stands today when one enables CONFIG_TMPFS_POSIX_ACL tmpfs will have
xattr handler routines specifically to handle acls. Because of this tmpfs
would loose the VFS/LSM helpers to support the running LSM. To make up
for that tmpfs had stub functions that did nothing but call into the LSM
hooks which implement the helpers.
This new patch does not use the LSM fallback functions and instead just
implements a native get/set/list xattr feature for the full security.* and
trusted.* namespace like a normal filesystem. This means that tmpfs can
now support both security.selinux and security.capability, which was not
previously possible.
The basic implementation is that I attach a:
struct shmem_xattr {
struct list_head list; /* anchored by shmem_inode_info->xattr_list */
char *name;
size_t size;
char value[0];
};
Into the struct shmem_inode_info for each xattr that is set. This
implementation could easily support the user.* namespace as well, except
some care needs to be taken to prevent large amounts of unswappable memory
being allocated for unprivileged users.
[mszeredi@suse.cz: new config option, suport trusted.*, support symlinks]
Signed-off-by: Eric Paris <eparis@redhat.com>
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Acked-by: Serge Hallyn <serge.hallyn@ubuntu.com>
Tested-by: Serge Hallyn <serge.hallyn@ubuntu.com>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Acked-by: Hugh Dickins <hughd@google.com>
Tested-by: Jordi Pujol <jordipujolp@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-05-25 04:12:39 +04:00
} ;
2006-09-29 13:01:35 +04:00
2006-10-17 11:09:45 +04:00
static struct dentry * shmem_get_parent ( struct dentry * child )
{
return ERR_PTR ( - ESTALE ) ;
}
static int shmem_match ( struct inode * ino , void * vfh )
{
__u32 * fh = vfh ;
__u64 inum = fh [ 2 ] ;
inum = ( inum < < 32 ) | fh [ 1 ] ;
return ino - > i_ino = = inum & & fh [ 0 ] = = ino - > i_generation ;
}
2007-10-22 03:42:13 +04:00
static struct dentry * shmem_fh_to_dentry ( struct super_block * sb ,
struct fid * fid , int fh_len , int fh_type )
2006-10-17 11:09:45 +04:00
{
struct inode * inode ;
2007-10-22 03:42:13 +04:00
struct dentry * dentry = NULL ;
u64 inum = fid - > raw [ 2 ] ;
inum = ( inum < < 32 ) | fid - > raw [ 1 ] ;
if ( fh_len < 3 )
return NULL ;
2006-10-17 11:09:45 +04:00
2007-10-22 03:42:13 +04:00
inode = ilookup5 ( sb , ( unsigned long ) ( inum + fid - > raw [ 0 ] ) ,
shmem_match , fid - > raw ) ;
2006-10-17 11:09:45 +04:00
if ( inode ) {
2007-10-22 03:42:13 +04:00
dentry = d_find_alias ( inode ) ;
2006-10-17 11:09:45 +04:00
iput ( inode ) ;
}
2007-10-22 03:42:13 +04:00
return dentry ;
2006-10-17 11:09:45 +04:00
}
static int shmem_encode_fh ( struct dentry * dentry , __u32 * fh , int * len ,
int connectable )
{
struct inode * inode = dentry - > d_inode ;
2011-01-29 16:13:25 +03:00
if ( * len < 3 ) {
* len = 3 ;
2006-10-17 11:09:45 +04:00
return 255 ;
2011-01-29 16:13:25 +03:00
}
2006-10-17 11:09:45 +04:00
2010-10-23 23:19:20 +04:00
if ( inode_unhashed ( inode ) ) {
2006-10-17 11:09:45 +04:00
/* Unfortunately insert_inode_hash is not idempotent,
* so as we hash inodes here rather than at creation
* time , we need a lock to ensure we only try
* to do it once
*/
static DEFINE_SPINLOCK ( lock ) ;
spin_lock ( & lock ) ;
2010-10-23 23:19:20 +04:00
if ( inode_unhashed ( inode ) )
2006-10-17 11:09:45 +04:00
__insert_inode_hash ( inode ,
inode - > i_ino + inode - > i_generation ) ;
spin_unlock ( & lock ) ;
}
fh [ 0 ] = inode - > i_generation ;
fh [ 1 ] = inode - > i_ino ;
fh [ 2 ] = ( ( __u64 ) inode - > i_ino ) > > 32 ;
* len = 3 ;
return 1 ;
}
2007-10-22 03:42:17 +04:00
static const struct export_operations shmem_export_ops = {
2006-10-17 11:09:45 +04:00
. get_parent = shmem_get_parent ,
. encode_fh = shmem_encode_fh ,
2007-10-22 03:42:13 +04:00
. fh_to_dentry = shmem_fh_to_dentry ,
2006-10-17 11:09:45 +04:00
} ;
2008-02-08 15:21:48 +03:00
static int shmem_parse_options ( char * options , struct shmem_sb_info * sbinfo ,
bool remount )
2005-04-17 02:20:36 +04:00
{
char * this_char , * value , * rest ;
2006-02-22 02:49:47 +03:00
while ( options ! = NULL ) {
this_char = options ;
for ( ; ; ) {
/*
* NUL - terminate this option : unfortunately ,
* mount options form a comma - separated list ,
* but mpol ' s nodelist may also contain commas .
*/
options = strchr ( options , ' , ' ) ;
if ( options = = NULL )
break ;
options + + ;
if ( ! isdigit ( * options ) ) {
options [ - 1 ] = ' \0 ' ;
break ;
}
}
2005-04-17 02:20:36 +04:00
if ( ! * this_char )
continue ;
if ( ( value = strchr ( this_char , ' = ' ) ) ! = NULL ) {
* value + + = 0 ;
} else {
printk ( KERN_ERR
" tmpfs: No value for mount option '%s' \n " ,
this_char ) ;
return 1 ;
}
if ( ! strcmp ( this_char , " size " ) ) {
unsigned long long size ;
size = memparse ( value , & rest ) ;
if ( * rest = = ' % ' ) {
size < < = PAGE_SHIFT ;
size * = totalram_pages ;
do_div ( size , 100 ) ;
rest + + ;
}
if ( * rest )
goto bad_val ;
2008-02-08 15:21:48 +03:00
sbinfo - > max_blocks =
DIV_ROUND_UP ( size , PAGE_CACHE_SIZE ) ;
2005-04-17 02:20:36 +04:00
} else if ( ! strcmp ( this_char , " nr_blocks " ) ) {
2008-02-08 15:21:48 +03:00
sbinfo - > max_blocks = memparse ( value , & rest ) ;
2005-04-17 02:20:36 +04:00
if ( * rest )
goto bad_val ;
} else if ( ! strcmp ( this_char , " nr_inodes " ) ) {
2008-02-08 15:21:48 +03:00
sbinfo - > max_inodes = memparse ( value , & rest ) ;
2005-04-17 02:20:36 +04:00
if ( * rest )
goto bad_val ;
} else if ( ! strcmp ( this_char , " mode " ) ) {
2008-02-08 15:21:48 +03:00
if ( remount )
2005-04-17 02:20:36 +04:00
continue ;
2008-02-08 15:21:48 +03:00
sbinfo - > mode = simple_strtoul ( value , & rest , 8 ) & 07777 ;
2005-04-17 02:20:36 +04:00
if ( * rest )
goto bad_val ;
} else if ( ! strcmp ( this_char , " uid " ) ) {
2008-02-08 15:21:48 +03:00
if ( remount )
2005-04-17 02:20:36 +04:00
continue ;
2008-02-08 15:21:48 +03:00
sbinfo - > uid = simple_strtoul ( value , & rest , 0 ) ;
2005-04-17 02:20:36 +04:00
if ( * rest )
goto bad_val ;
} else if ( ! strcmp ( this_char , " gid " ) ) {
2008-02-08 15:21:48 +03:00
if ( remount )
2005-04-17 02:20:36 +04:00
continue ;
2008-02-08 15:21:48 +03:00
sbinfo - > gid = simple_strtoul ( value , & rest , 0 ) ;
2005-04-17 02:20:36 +04:00
if ( * rest )
goto bad_val ;
2006-01-15 00:20:48 +03:00
} else if ( ! strcmp ( this_char , " mpol " ) ) {
mempolicy: use struct mempolicy pointer in shmem_sb_info
This patch replaces the mempolicy mode, mode_flags, and nodemask in the
shmem_sb_info struct with a struct mempolicy pointer, initialized to NULL.
This removes dependency on the details of mempolicy from shmem.c and hugetlbfs
inode.c and simplifies the interfaces.
mpol_parse_str() in mempolicy.c is changed to return, via a pointer to a
pointer arg, a struct mempolicy pointer on success. For MPOL_DEFAULT, the
returned pointer is NULL. Further, mpol_parse_str() now takes a 'no_context'
argument that causes the input nodemask to be stored in the w.user_nodemask of
the created mempolicy for use when the mempolicy is installed in a tmpfs inode
shared policy tree. At that time, any cpuset contextualization is applied to
the original input nodemask. This preserves the previous behavior where the
input nodemask was stored in the superblock. We can think of the returned
mempolicy as "context free".
Because mpol_parse_str() is now calling mpol_new(), we can remove from
mpol_to_str() the semantic checks that mpol_new() already performs.
Add 'no_context' parameter to mpol_to_str() to specify that it should format
the nodemask in w.user_nodemask for 'bind' and 'interleave' policies.
Change mpol_shared_policy_init() to take a pointer to a "context free" struct
mempolicy and to create a new, "contextualized" mempolicy using the mode,
mode_flags and user_nodemask from the input mempolicy.
Note: we know that the mempolicy passed to mpol_to_str() or
mpol_shared_policy_init() from a tmpfs superblock is "context free". This
is currently the only instance thereof. However, if we found more uses for
this concept, and introduced any ambiguity as to whether a mempolicy was
context free or not, we could add another internal mode flag to identify
context free mempolicies. Then, we could remove the 'no_context' argument
from mpol_to_str().
Added shmem_get_sbmpol() to return a reference counted superblock mempolicy,
if one exists, to pass to mpol_shared_policy_init(). We must add the
reference under the sb stat_lock to prevent races with replacement of the mpol
by remount. This reference is removed in mpol_shared_policy_init().
[akpm@linux-foundation.org: build fix]
[akpm@linux-foundation.org: another build fix]
[akpm@linux-foundation.org: yet another build fix]
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-04-28 13:13:26 +04:00
if ( mpol_parse_str ( value , & sbinfo - > mpol , 1 ) )
2006-01-15 00:20:48 +03:00
goto bad_val ;
2005-04-17 02:20:36 +04:00
} else {
printk ( KERN_ERR " tmpfs: Bad mount option %s \n " ,
this_char ) ;
return 1 ;
}
}
return 0 ;
bad_val :
printk ( KERN_ERR " tmpfs: Bad value '%s' for mount option '%s' \n " ,
value , this_char ) ;
return 1 ;
}
static int shmem_remount_fs ( struct super_block * sb , int * flags , char * data )
{
struct shmem_sb_info * sbinfo = SHMEM_SB ( sb ) ;
2008-02-08 15:21:48 +03:00
struct shmem_sb_info config = * sbinfo ;
2005-06-22 04:15:04 +04:00
unsigned long inodes ;
int error = - EINVAL ;
2008-02-08 15:21:48 +03:00
if ( shmem_parse_options ( data , & config , true ) )
2005-06-22 04:15:04 +04:00
return error ;
2005-04-17 02:20:36 +04:00
2005-06-22 04:15:04 +04:00
spin_lock ( & sbinfo - > stat_lock ) ;
inodes = sbinfo - > max_inodes - sbinfo - > free_inodes ;
2010-08-10 04:19:05 +04:00
if ( percpu_counter_compare ( & sbinfo - > used_blocks , config . max_blocks ) > 0 )
2005-06-22 04:15:04 +04:00
goto out ;
2008-02-08 15:21:48 +03:00
if ( config . max_inodes < inodes )
2005-06-22 04:15:04 +04:00
goto out ;
/*
tmpfs: convert shmem_getpage_gfp to radix-swap
Convert shmem_getpage_gfp(), the engine-room of shmem, to expect page or
swap entry returned from radix tree by find_lock_page().
Whereas the repetitive old method proceeded mainly under info->lock,
dropping and repeating whenever one of the conditions needed was not
met, now we can proceed without it, leaving shmem_add_to_page_cache() to
check for a race.
This way there is no need to preallocate a page, no need for an early
radix_tree_preload(), no need for mem_cgroup_shmem_charge_fallback().
Move the error unwinding down to the bottom instead of repeating it
throughout. ENOSPC handling is a little different from before: there is
no longer any race between find_lock_page() and finding swap, but we can
arrive at ENOSPC before calling shmem_recalc_inode(), which might
occasionally discover freed space.
Be stricter to check i_size before returning. info->lock is used for
little but alloced, swapped, i_blocks updates. Move i_blocks updates
out from under the max_blocks check, so even an unlimited size=0 mount
can show accurate du.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:24 +04:00
* Those tests disallow limited - > unlimited while any are in use ;
2005-06-22 04:15:04 +04:00
* but we must separately disallow unlimited - > limited , because
* in that case we have no record of how much is already in use .
*/
2008-02-08 15:21:48 +03:00
if ( config . max_blocks & & ! sbinfo - > max_blocks )
2005-06-22 04:15:04 +04:00
goto out ;
2008-02-08 15:21:48 +03:00
if ( config . max_inodes & & ! sbinfo - > max_inodes )
2005-06-22 04:15:04 +04:00
goto out ;
error = 0 ;
2008-02-08 15:21:48 +03:00
sbinfo - > max_blocks = config . max_blocks ;
sbinfo - > max_inodes = config . max_inodes ;
sbinfo - > free_inodes = config . max_inodes - inodes ;
mempolicy: use struct mempolicy pointer in shmem_sb_info
This patch replaces the mempolicy mode, mode_flags, and nodemask in the
shmem_sb_info struct with a struct mempolicy pointer, initialized to NULL.
This removes dependency on the details of mempolicy from shmem.c and hugetlbfs
inode.c and simplifies the interfaces.
mpol_parse_str() in mempolicy.c is changed to return, via a pointer to a
pointer arg, a struct mempolicy pointer on success. For MPOL_DEFAULT, the
returned pointer is NULL. Further, mpol_parse_str() now takes a 'no_context'
argument that causes the input nodemask to be stored in the w.user_nodemask of
the created mempolicy for use when the mempolicy is installed in a tmpfs inode
shared policy tree. At that time, any cpuset contextualization is applied to
the original input nodemask. This preserves the previous behavior where the
input nodemask was stored in the superblock. We can think of the returned
mempolicy as "context free".
Because mpol_parse_str() is now calling mpol_new(), we can remove from
mpol_to_str() the semantic checks that mpol_new() already performs.
Add 'no_context' parameter to mpol_to_str() to specify that it should format
the nodemask in w.user_nodemask for 'bind' and 'interleave' policies.
Change mpol_shared_policy_init() to take a pointer to a "context free" struct
mempolicy and to create a new, "contextualized" mempolicy using the mode,
mode_flags and user_nodemask from the input mempolicy.
Note: we know that the mempolicy passed to mpol_to_str() or
mpol_shared_policy_init() from a tmpfs superblock is "context free". This
is currently the only instance thereof. However, if we found more uses for
this concept, and introduced any ambiguity as to whether a mempolicy was
context free or not, we could add another internal mode flag to identify
context free mempolicies. Then, we could remove the 'no_context' argument
from mpol_to_str().
Added shmem_get_sbmpol() to return a reference counted superblock mempolicy,
if one exists, to pass to mpol_shared_policy_init(). We must add the
reference under the sb stat_lock to prevent races with replacement of the mpol
by remount. This reference is removed in mpol_shared_policy_init().
[akpm@linux-foundation.org: build fix]
[akpm@linux-foundation.org: another build fix]
[akpm@linux-foundation.org: yet another build fix]
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-04-28 13:13:26 +04:00
mpol_put ( sbinfo - > mpol ) ;
sbinfo - > mpol = config . mpol ; /* transfers initial ref */
2005-06-22 04:15:04 +04:00
out :
spin_unlock ( & sbinfo - > stat_lock ) ;
return error ;
2005-04-17 02:20:36 +04:00
}
2008-02-08 15:21:48 +03:00
static int shmem_show_options ( struct seq_file * seq , struct vfsmount * vfs )
{
struct shmem_sb_info * sbinfo = SHMEM_SB ( vfs - > mnt_sb ) ;
if ( sbinfo - > max_blocks ! = shmem_default_max_blocks ( ) )
seq_printf ( seq , " ,size=%luk " ,
sbinfo - > max_blocks < < ( PAGE_CACHE_SHIFT - 10 ) ) ;
if ( sbinfo - > max_inodes ! = shmem_default_max_inodes ( ) )
seq_printf ( seq , " ,nr_inodes=%lu " , sbinfo - > max_inodes ) ;
if ( sbinfo - > mode ! = ( S_IRWXUGO | S_ISVTX ) )
seq_printf ( seq , " ,mode=%03o " , sbinfo - > mode ) ;
if ( sbinfo - > uid ! = 0 )
seq_printf ( seq , " ,uid=%u " , sbinfo - > uid ) ;
if ( sbinfo - > gid ! = 0 )
seq_printf ( seq , " ,gid=%u " , sbinfo - > gid ) ;
mempolicy: use struct mempolicy pointer in shmem_sb_info
This patch replaces the mempolicy mode, mode_flags, and nodemask in the
shmem_sb_info struct with a struct mempolicy pointer, initialized to NULL.
This removes dependency on the details of mempolicy from shmem.c and hugetlbfs
inode.c and simplifies the interfaces.
mpol_parse_str() in mempolicy.c is changed to return, via a pointer to a
pointer arg, a struct mempolicy pointer on success. For MPOL_DEFAULT, the
returned pointer is NULL. Further, mpol_parse_str() now takes a 'no_context'
argument that causes the input nodemask to be stored in the w.user_nodemask of
the created mempolicy for use when the mempolicy is installed in a tmpfs inode
shared policy tree. At that time, any cpuset contextualization is applied to
the original input nodemask. This preserves the previous behavior where the
input nodemask was stored in the superblock. We can think of the returned
mempolicy as "context free".
Because mpol_parse_str() is now calling mpol_new(), we can remove from
mpol_to_str() the semantic checks that mpol_new() already performs.
Add 'no_context' parameter to mpol_to_str() to specify that it should format
the nodemask in w.user_nodemask for 'bind' and 'interleave' policies.
Change mpol_shared_policy_init() to take a pointer to a "context free" struct
mempolicy and to create a new, "contextualized" mempolicy using the mode,
mode_flags and user_nodemask from the input mempolicy.
Note: we know that the mempolicy passed to mpol_to_str() or
mpol_shared_policy_init() from a tmpfs superblock is "context free". This
is currently the only instance thereof. However, if we found more uses for
this concept, and introduced any ambiguity as to whether a mempolicy was
context free or not, we could add another internal mode flag to identify
context free mempolicies. Then, we could remove the 'no_context' argument
from mpol_to_str().
Added shmem_get_sbmpol() to return a reference counted superblock mempolicy,
if one exists, to pass to mpol_shared_policy_init(). We must add the
reference under the sb stat_lock to prevent races with replacement of the mpol
by remount. This reference is removed in mpol_shared_policy_init().
[akpm@linux-foundation.org: build fix]
[akpm@linux-foundation.org: another build fix]
[akpm@linux-foundation.org: yet another build fix]
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-04-28 13:13:26 +04:00
shmem_show_mpol ( seq , sbinfo - > mpol ) ;
2008-02-08 15:21:48 +03:00
return 0 ;
}
# endif /* CONFIG_TMPFS */
2005-04-17 02:20:36 +04:00
static void shmem_put_super ( struct super_block * sb )
{
2010-08-18 02:23:56 +04:00
struct shmem_sb_info * sbinfo = SHMEM_SB ( sb ) ;
percpu_counter_destroy ( & sbinfo - > used_blocks ) ;
kfree ( sbinfo ) ;
2005-04-17 02:20:36 +04:00
sb - > s_fs_info = NULL ;
}
Driver Core: devtmpfs - kernel-maintained tmpfs-based /dev
Devtmpfs lets the kernel create a tmpfs instance called devtmpfs
very early at kernel initialization, before any driver-core device
is registered. Every device with a major/minor will provide a
device node in devtmpfs.
Devtmpfs can be changed and altered by userspace at any time,
and in any way needed - just like today's udev-mounted tmpfs.
Unmodified udev versions will run just fine on top of it, and will
recognize an already existing kernel-created device node and use it.
The default node permissions are root:root 0600. Proper permissions
and user/group ownership, meaningful symlinks, all other policy still
needs to be applied by userspace.
If a node is created by devtmps, devtmpfs will remove the device node
when the device goes away. If the device node was created by
userspace, or the devtmpfs created node was replaced by userspace, it
will no longer be removed by devtmpfs.
If it is requested to auto-mount it, it makes init=/bin/sh work
without any further userspace support. /dev will be fully populated
and dynamic, and always reflect the current device state of the kernel.
With the commonly used dynamic device numbers, it solves the problem
where static devices nodes may point to the wrong devices.
It is intended to make the initial bootup logic simpler and more robust,
by de-coupling the creation of the inital environment, to reliably run
userspace processes, from a complex userspace bootstrap logic to provide
a working /dev.
Signed-off-by: Kay Sievers <kay.sievers@vrfy.org>
Signed-off-by: Jan Blunck <jblunck@suse.de>
Tested-By: Harald Hoyer <harald@redhat.com>
Tested-By: Scott James Remnant <scott@ubuntu.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
2009-04-30 17:23:42 +04:00
int shmem_fill_super ( struct super_block * sb , void * data , int silent )
2005-04-17 02:20:36 +04:00
{
struct inode * inode ;
struct dentry * root ;
2005-06-22 04:15:04 +04:00
struct shmem_sb_info * sbinfo ;
2008-02-08 15:21:48 +03:00
int err = - ENOMEM ;
/* Round up to L1_CACHE_BYTES to resist false sharing */
2009-09-22 04:03:50 +04:00
sbinfo = kzalloc ( max ( ( int ) sizeof ( struct shmem_sb_info ) ,
2008-02-08 15:21:48 +03:00
L1_CACHE_BYTES ) , GFP_KERNEL ) ;
if ( ! sbinfo )
return - ENOMEM ;
sbinfo - > mode = S_IRWXUGO | S_ISVTX ;
2008-11-14 02:39:12 +03:00
sbinfo - > uid = current_fsuid ( ) ;
sbinfo - > gid = current_fsgid ( ) ;
2008-02-08 15:21:48 +03:00
sb - > s_fs_info = sbinfo ;
2005-04-17 02:20:36 +04:00
2005-06-22 04:15:04 +04:00
# ifdef CONFIG_TMPFS
2005-04-17 02:20:36 +04:00
/*
* Per default we only allow half of the physical ram per
* tmpfs instance , limiting inodes to one per page of lowmem ;
* but the internal instance is left unlimited .
*/
if ( ! ( sb - > s_flags & MS_NOUSER ) ) {
2008-02-08 15:21:48 +03:00
sbinfo - > max_blocks = shmem_default_max_blocks ( ) ;
sbinfo - > max_inodes = shmem_default_max_inodes ( ) ;
if ( shmem_parse_options ( data , sbinfo , false ) ) {
err = - EINVAL ;
goto failed ;
}
2005-04-17 02:20:36 +04:00
}
2006-10-17 11:09:45 +04:00
sb - > s_export_op = & shmem_export_ops ;
2005-04-17 02:20:36 +04:00
# else
sb - > s_flags | = MS_NOUSER ;
# endif
2005-06-22 04:15:04 +04:00
spin_lock_init ( & sbinfo - > stat_lock ) ;
2010-08-18 02:23:56 +04:00
if ( percpu_counter_init ( & sbinfo - > used_blocks , 0 ) )
goto failed ;
2008-02-08 15:21:48 +03:00
sbinfo - > free_inodes = sbinfo - > max_inodes ;
2005-06-22 04:15:04 +04:00
tmpfs: demolish old swap vector support
The maximum size of a shmem/tmpfs file has been limited by the maximum
size of its triple-indirect swap vector. With 4kB page size, maximum
filesize was just over 2TB on a 32-bit kernel, but sadly one eighth of
that on a 64-bit kernel. (With 8kB page size, maximum filesize was just
over 4TB on a 64-bit kernel, but 16TB on a 32-bit kernel,
MAX_LFS_FILESIZE being then more restrictive than swap vector layout.)
It's a shame that tmpfs should be more restrictive than ramfs, and this
limitation has now been noticed. Add another level to the swap vector?
No, it became obscure and hard to maintain, once I complicated it to
make use of highmem pages nine years ago: better choose another way.
Surely, if 2.4 had had the radix tree pagecache introduced in 2.5, then
tmpfs would never have invented its own peculiar radix tree: we would
have fitted swap entries into the common radix tree instead, in much the
same way as we fit swap entries into page tables.
And why should each file have a separate radix tree for its pages and
for its swap entries? The swap entries are required precisely where and
when the pages are not. We want to put them together in a single radix
tree: which can then avoid much of the locking which was needed to
prevent them from being exchanged underneath us.
This also avoids the waste of memory devoted to swap vectors, first in
the shmem_inode itself, then at least two more pages once a file grew
beyond 16 data pages (pages accounted by df and du, but not by memcg).
Allocated upfront, to avoid allocation when under swapping pressure, but
pure waste when CONFIG_SWAP is not set - I have never spattered around
the ifdefs to prevent that, preferring this move to sharing the common
radix tree instead.
There are three downsides to sharing the radix tree. One, that it binds
tmpfs more tightly to the rest of mm, either requiring knowledge of swap
entries in radix tree there, or duplication of its code here in shmem.c.
I believe that the simplications and memory savings (and probable higher
performance, not yet measured) justify that.
Two, that on HIGHMEM systems with SWAP enabled, it's the lowmem radix
nodes that cannot be freed under memory pressure - whereas before it was
the less precious highmem swap vector pages that could not be freed.
I'm hoping that 64-bit has now been accessible for long enough, that the
highmem argument has grown much less persuasive.
Three, that swapoff is slower than it used to be on tmpfs files, since
it's using a simple generic mechanism not tailored to it: I find this
noticeable, and shall want to improve, but maybe nobody else will
notice.
So... now remove most of the old swap vector code from shmem.c. But,
for the moment, keep the simple i_direct vector of 16 pages, with simple
accessors shmem_put_swap() and shmem_get_swap(), as a toy implementation
to help mark where swap needs to be handled in subsequent patches.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:20 +04:00
sb - > s_maxbytes = MAX_LFS_FILESIZE ;
2005-04-17 02:20:36 +04:00
sb - > s_blocksize = PAGE_CACHE_SIZE ;
sb - > s_blocksize_bits = PAGE_CACHE_SHIFT ;
sb - > s_magic = TMPFS_MAGIC ;
sb - > s_op = & shmem_ops ;
[PATCH] tmpfs: time granularity fix for [acm]time going backwards
I noticed a strange behavior in a tmpfs file system the other day, while
building packages - occasionally, and seemingly at random, make decided to
rebuild a target. However, only on tmpfs.
A file would be created, and if checked, it had a sub-second timestamp.
However, after an utimes related call where sub-seconds should be set, they
were zeroed instead. In the case that a file was created, and utimes(...,NULL)
was used on it in the same second, the timestamp on the file moved backwards.
After some digging, I found that this was being caused by tmpfs not having a
time granularity set, thus inheriting the default 1 second granularity.
Hugh adds: yes, we missed tmpfs when the s_time_gran mods went into 2.6.11.
Unfortunately, the granularity of CURRENT_TIME, often used in filesystems,
does not match the default granularity set by alloc_super. A few more such
discrepancies have been found, but this is the most important to fix now.
Signed-off-by: Robin H. Johnson <robbat2@gentoo.org>
Acked-by: Andi Kleen <ak@suse.de>
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-06-13 00:50:25 +04:00
sb - > s_time_gran = 1 ;
tmpfs: implement generic xattr support
Implement generic xattrs for tmpfs filesystems. The Feodra project, while
trying to replace suid apps with file capabilities, realized that tmpfs,
which is used on the build systems, does not support file capabilities and
thus cannot be used to build packages which use file capabilities. Xattrs
are also needed for overlayfs.
The xattr interface is a bit odd. If a filesystem does not implement any
{get,set,list}xattr functions the VFS will call into some random LSM hooks
and the running LSM can then implement some method for handling xattrs.
SELinux for example provides a method to support security.selinux but no
other security.* xattrs.
As it stands today when one enables CONFIG_TMPFS_POSIX_ACL tmpfs will have
xattr handler routines specifically to handle acls. Because of this tmpfs
would loose the VFS/LSM helpers to support the running LSM. To make up
for that tmpfs had stub functions that did nothing but call into the LSM
hooks which implement the helpers.
This new patch does not use the LSM fallback functions and instead just
implements a native get/set/list xattr feature for the full security.* and
trusted.* namespace like a normal filesystem. This means that tmpfs can
now support both security.selinux and security.capability, which was not
previously possible.
The basic implementation is that I attach a:
struct shmem_xattr {
struct list_head list; /* anchored by shmem_inode_info->xattr_list */
char *name;
size_t size;
char value[0];
};
Into the struct shmem_inode_info for each xattr that is set. This
implementation could easily support the user.* namespace as well, except
some care needs to be taken to prevent large amounts of unswappable memory
being allocated for unprivileged users.
[mszeredi@suse.cz: new config option, suport trusted.*, support symlinks]
Signed-off-by: Eric Paris <eparis@redhat.com>
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Acked-by: Serge Hallyn <serge.hallyn@ubuntu.com>
Tested-by: Serge Hallyn <serge.hallyn@ubuntu.com>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Acked-by: Hugh Dickins <hughd@google.com>
Tested-by: Jordi Pujol <jordipujolp@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-05-25 04:12:39 +04:00
# ifdef CONFIG_TMPFS_XATTR
2006-09-29 13:01:35 +04:00
sb - > s_xattr = shmem_xattr_handlers ;
tmpfs: implement generic xattr support
Implement generic xattrs for tmpfs filesystems. The Feodra project, while
trying to replace suid apps with file capabilities, realized that tmpfs,
which is used on the build systems, does not support file capabilities and
thus cannot be used to build packages which use file capabilities. Xattrs
are also needed for overlayfs.
The xattr interface is a bit odd. If a filesystem does not implement any
{get,set,list}xattr functions the VFS will call into some random LSM hooks
and the running LSM can then implement some method for handling xattrs.
SELinux for example provides a method to support security.selinux but no
other security.* xattrs.
As it stands today when one enables CONFIG_TMPFS_POSIX_ACL tmpfs will have
xattr handler routines specifically to handle acls. Because of this tmpfs
would loose the VFS/LSM helpers to support the running LSM. To make up
for that tmpfs had stub functions that did nothing but call into the LSM
hooks which implement the helpers.
This new patch does not use the LSM fallback functions and instead just
implements a native get/set/list xattr feature for the full security.* and
trusted.* namespace like a normal filesystem. This means that tmpfs can
now support both security.selinux and security.capability, which was not
previously possible.
The basic implementation is that I attach a:
struct shmem_xattr {
struct list_head list; /* anchored by shmem_inode_info->xattr_list */
char *name;
size_t size;
char value[0];
};
Into the struct shmem_inode_info for each xattr that is set. This
implementation could easily support the user.* namespace as well, except
some care needs to be taken to prevent large amounts of unswappable memory
being allocated for unprivileged users.
[mszeredi@suse.cz: new config option, suport trusted.*, support symlinks]
Signed-off-by: Eric Paris <eparis@redhat.com>
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Acked-by: Serge Hallyn <serge.hallyn@ubuntu.com>
Tested-by: Serge Hallyn <serge.hallyn@ubuntu.com>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Acked-by: Hugh Dickins <hughd@google.com>
Tested-by: Jordi Pujol <jordipujolp@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-05-25 04:12:39 +04:00
# endif
# ifdef CONFIG_TMPFS_POSIX_ACL
2006-09-29 13:01:35 +04:00
sb - > s_flags | = MS_POSIXACL ;
# endif
2005-06-22 04:15:04 +04:00
2010-03-04 17:32:18 +03:00
inode = shmem_get_inode ( sb , NULL , S_IFDIR | sbinfo - > mode , 0 , VM_NORESERVE ) ;
2005-04-17 02:20:36 +04:00
if ( ! inode )
goto failed ;
2008-02-08 15:21:48 +03:00
inode - > i_uid = sbinfo - > uid ;
inode - > i_gid = sbinfo - > gid ;
2005-04-17 02:20:36 +04:00
root = d_alloc_root ( inode ) ;
if ( ! root )
goto failed_iput ;
sb - > s_root = root ;
return 0 ;
failed_iput :
iput ( inode ) ;
failed :
shmem_put_super ( sb ) ;
return err ;
}
2006-03-22 11:08:13 +03:00
/* Slab cache for struct shmem_inode_info (tmpfs in-memory inodes). */
static struct kmem_cache * shmem_inode_cachep ;
2005-04-17 02:20:36 +04:00
static struct inode * shmem_alloc_inode ( struct super_block * sb )
{
2011-08-04 03:21:21 +04:00
struct shmem_inode_info * info ;
info = kmem_cache_alloc ( shmem_inode_cachep , GFP_KERNEL ) ;
if ( ! info )
2005-04-17 02:20:36 +04:00
return NULL ;
2011-08-04 03:21:21 +04:00
return & info - > vfs_inode ;
2005-04-17 02:20:36 +04:00
}
2011-08-04 03:21:21 +04:00
static void shmem_destroy_callback ( struct rcu_head * head )
2011-01-07 09:49:49 +03:00
{
struct inode * inode = container_of ( head , struct inode , i_rcu ) ;
INIT_LIST_HEAD ( & inode - > i_dentry ) ;
kmem_cache_free ( shmem_inode_cachep , SHMEM_I ( inode ) ) ;
}
2005-04-17 02:20:36 +04:00
static void shmem_destroy_inode ( struct inode * inode )
{
2011-08-04 03:21:26 +04:00
if ( ( inode - > i_mode & S_IFMT ) = = S_IFREG )
2005-04-17 02:20:36 +04:00
mpol_free_shared_policy ( & SHMEM_I ( inode ) - > policy ) ;
2011-08-04 03:21:21 +04:00
call_rcu ( & inode - > i_rcu , shmem_destroy_callback ) ;
2005-04-17 02:20:36 +04:00
}
2011-08-04 03:21:21 +04:00
static void shmem_init_inode ( void * foo )
2005-04-17 02:20:36 +04:00
{
2011-08-04 03:21:21 +04:00
struct shmem_inode_info * info = foo ;
inode_init_once ( & info - > vfs_inode ) ;
2005-04-17 02:20:36 +04:00
}
2011-08-04 03:21:21 +04:00
static int shmem_init_inodecache ( void )
2005-04-17 02:20:36 +04:00
{
shmem_inode_cachep = kmem_cache_create ( " shmem_inode_cache " ,
sizeof ( struct shmem_inode_info ) ,
2011-08-04 03:21:21 +04:00
0 , SLAB_PANIC , shmem_init_inode ) ;
2005-04-17 02:20:36 +04:00
return 0 ;
}
2011-08-04 03:21:21 +04:00
static void shmem_destroy_inodecache ( void )
2005-04-17 02:20:36 +04:00
{
2006-09-27 12:49:40 +04:00
kmem_cache_destroy ( shmem_inode_cachep ) ;
2005-04-17 02:20:36 +04:00
}
2006-06-28 15:26:44 +04:00
static const struct address_space_operations shmem_aops = {
2005-04-17 02:20:36 +04:00
. writepage = shmem_writepage ,
2007-02-10 12:43:15 +03:00
. set_page_dirty = __set_page_dirty_no_writeback ,
2005-04-17 02:20:36 +04:00
# ifdef CONFIG_TMPFS
2007-10-16 12:25:03 +04:00
. write_begin = shmem_write_begin ,
. write_end = shmem_write_end ,
2005-04-17 02:20:36 +04:00
# endif
[PATCH] add migratepage address space op to shmem
Basic problem: pages of a shared memory segment can only be migrated once.
In 2.6.16 through 2.6.17-rc1, shared memory mappings do not have a
migratepage address space op. Therefore, migrate_pages() falls back to
default processing. In this path, it will try to pageout() dirty pages.
Once a shared memory page has been migrated it becomes dirty, so
migrate_pages() will try to page it out. However, because the page count
is 3 [cache + current + pte], pageout() will return PAGE_KEEP because
is_page_cache_freeable() returns false. This will abort all subsequent
migrations.
This patch adds a migratepage address space op to shared memory segments to
avoid taking the default path. We use the "migrate_page()" function
because it knows how to migrate dirty pages. This allows shared memory
segment pages to migrate, subject to other conditions such as # pte's
referencing the page [page_mapcount(page)], when requested.
I think this is safe. If we're migrating a shared memory page, then we
found the page via a page table, so it must be in memory.
Can be verified with memtoy and the shmem-mbind-test script, both
available at: http://free.linux.hp.com/~lts/Tools/
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Acked-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-04-22 13:35:48 +04:00
. migratepage = migrate_page ,
2009-09-16 13:50:16 +04:00
. error_remove_page = generic_error_remove_page ,
2005-04-17 02:20:36 +04:00
} ;
2006-12-07 07:40:36 +03:00
static const struct file_operations shmem_file_operations = {
2005-04-17 02:20:36 +04:00
. mmap = shmem_mmap ,
# ifdef CONFIG_TMPFS
. llseek = generic_file_llseek ,
2008-07-24 08:27:35 +04:00
. read = do_sync_read ,
2008-02-05 09:28:44 +03:00
. write = do_sync_write ,
2008-07-24 08:27:35 +04:00
. aio_read = shmem_file_aio_read ,
2008-02-05 09:28:44 +03:00
. aio_write = generic_file_aio_write ,
2010-05-26 19:53:41 +04:00
. fsync = noop_fsync ,
2011-07-26 04:12:32 +04:00
. splice_read = shmem_file_splice_read ,
2007-06-04 12:00:39 +04:00
. splice_write = generic_file_splice_write ,
2005-04-17 02:20:36 +04:00
# endif
} ;
2007-02-12 11:55:39 +03:00
static const struct inode_operations shmem_inode_operations = {
tmpfs: take control of its truncate_range
2.6.35's new truncate convention gave tmpfs the opportunity to control
its file truncation, no longer enforced from outside by vmtruncate().
We shall want to build upon that, to handle pagecache and swap together.
Slightly redefine the ->truncate_range interface: let it now be called
between the unmap_mapping_range()s, with the filesystem responsible for
doing the truncate_inode_pages_range() from it - just as the filesystem
is nowadays responsible for doing that from its ->setattr.
Let's rename shmem_notify_change() to shmem_setattr(). Instead of
calling the generic truncate_setsize(), bring that code in so we can
call shmem_truncate_range() - which will later be updated to perform its
own variant of truncate_inode_pages_range().
Remove the punch_hole unmap_mapping_range() from shmem_truncate_range():
now that the COW's unmap_mapping_range() comes after ->truncate_range,
there is no need to call it a third time.
Export shmem_truncate_range() and add it to the list in shmem_fs.h, so
that i915_gem_object_truncate() can call it explicitly in future; get
this patch in first, then update drm/i915 once this is available (until
then, i915 will just be doing the truncate_inode_pages() twice).
Though introduced five years ago, no other filesystem is implementing
->truncate_range, and its only other user is madvise(,,MADV_REMOVE): we
expect to convert it to fallocate(,FALLOC_FL_PUNCH_HOLE,,) shortly,
whereupon ->truncate_range can be removed from inode_operations -
shmem_truncate_range() will help i915 across that transition too.
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-06-28 03:18:03 +04:00
. setattr = shmem_setattr ,
[PATCH] madvise(MADV_REMOVE): remove pages from tmpfs shm backing store
Here is the patch to implement madvise(MADV_REMOVE) - which frees up a
given range of pages & its associated backing store. Current
implementation supports only shmfs/tmpfs and other filesystems return
-ENOSYS.
"Some app allocates large tmpfs files, then when some task quits and some
client disconnect, some memory can be released. However the only way to
release tmpfs-swap is to MADV_REMOVE". - Andrea Arcangeli
Databases want to use this feature to drop a section of their bufferpool
(shared memory segments) - without writing back to disk/swap space.
This feature is also useful for supporting hot-plug memory on UML.
Concerns raised by Andrew Morton:
- "We have no plan for holepunching! If we _do_ have such a plan (or
might in the future) then what would the API look like? I think
sys_holepunch(fd, start, len), so we should start out with that."
- Using madvise is very weird, because people will ask "why do I need to
mmap my file before I can stick a hole in it?"
- None of the other madvise operations call into the filesystem in this
manner. A broad question is: is this capability an MM operation or a
filesytem operation? truncate, for example, is a filesystem operation
which sometimes has MM side-effects. madvise is an mm operation and with
this patch, it gains FS side-effects, only they're really, really
significant ones."
Comments:
- Andrea suggested the fs operation too but then it's more efficient to
have it as a mm operation with fs side effects, because they don't
immediatly know fd and physical offset of the range. It's possible to
fixup in userland and to use the fs operation but it's more expensive,
the vmas are already in the kernel and we can use them.
Short term plan & Future Direction:
- We seem to need this interface only for shmfs/tmpfs files in the short
term. We have to add hooks into the filesystem for correctness and
completeness. This is what this patch does.
- In the future, plan is to support both fs and mmap apis also. This
also involves (other) filesystem specific functions to be implemented.
- Current patch doesn't support VM_NONLINEAR - which can be addressed in
the future.
Signed-off-by: Badari Pulavarty <pbadari@us.ibm.com>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Andrea Arcangeli <andrea@suse.de>
Cc: Michael Kerrisk <mtk-manpages@gmx.net>
Cc: Ulrich Drepper <drepper@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-01-06 11:10:38 +03:00
. truncate_range = shmem_truncate_range ,
tmpfs: implement generic xattr support
Implement generic xattrs for tmpfs filesystems. The Feodra project, while
trying to replace suid apps with file capabilities, realized that tmpfs,
which is used on the build systems, does not support file capabilities and
thus cannot be used to build packages which use file capabilities. Xattrs
are also needed for overlayfs.
The xattr interface is a bit odd. If a filesystem does not implement any
{get,set,list}xattr functions the VFS will call into some random LSM hooks
and the running LSM can then implement some method for handling xattrs.
SELinux for example provides a method to support security.selinux but no
other security.* xattrs.
As it stands today when one enables CONFIG_TMPFS_POSIX_ACL tmpfs will have
xattr handler routines specifically to handle acls. Because of this tmpfs
would loose the VFS/LSM helpers to support the running LSM. To make up
for that tmpfs had stub functions that did nothing but call into the LSM
hooks which implement the helpers.
This new patch does not use the LSM fallback functions and instead just
implements a native get/set/list xattr feature for the full security.* and
trusted.* namespace like a normal filesystem. This means that tmpfs can
now support both security.selinux and security.capability, which was not
previously possible.
The basic implementation is that I attach a:
struct shmem_xattr {
struct list_head list; /* anchored by shmem_inode_info->xattr_list */
char *name;
size_t size;
char value[0];
};
Into the struct shmem_inode_info for each xattr that is set. This
implementation could easily support the user.* namespace as well, except
some care needs to be taken to prevent large amounts of unswappable memory
being allocated for unprivileged users.
[mszeredi@suse.cz: new config option, suport trusted.*, support symlinks]
Signed-off-by: Eric Paris <eparis@redhat.com>
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Acked-by: Serge Hallyn <serge.hallyn@ubuntu.com>
Tested-by: Serge Hallyn <serge.hallyn@ubuntu.com>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Acked-by: Hugh Dickins <hughd@google.com>
Tested-by: Jordi Pujol <jordipujolp@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-05-25 04:12:39 +04:00
# ifdef CONFIG_TMPFS_XATTR
. setxattr = shmem_setxattr ,
. getxattr = shmem_getxattr ,
. listxattr = shmem_listxattr ,
. removexattr = shmem_removexattr ,
# endif
2005-04-17 02:20:36 +04:00
} ;
2007-02-12 11:55:39 +03:00
/* Directory inode methods: full create/link/rename set only when tmpfs is built */
static const struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
	.create		= shmem_create,
	.lookup		= simple_lookup,
	.link		= shmem_link,
	.unlink		= shmem_unlink,
	.symlink	= shmem_symlink,
	.mkdir		= shmem_mkdir,
	.rmdir		= shmem_rmdir,
	.mknod		= shmem_mknod,
	.rename		= shmem_rename,
#endif
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	/* shmem_setattr keeps the cached ACLs in sync with mode changes */
	.setattr	= shmem_setattr,
#endif
};
2007-02-12 11:55:39 +03:00
/* Special (device/fifo/socket) inode methods: xattrs and ACL-aware setattr only */
static const struct inode_operations shmem_special_inode_operations = {
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	/* shmem_setattr keeps the cached ACLs in sync with mode changes */
	.setattr	= shmem_setattr,
#endif
};
2007-03-05 11:30:28 +03:00
static const struct super_operations shmem_ops = {
2005-04-17 02:20:36 +04:00
. alloc_inode = shmem_alloc_inode ,
. destroy_inode = shmem_destroy_inode ,
# ifdef CONFIG_TMPFS
. statfs = shmem_statfs ,
. remount_fs = shmem_remount_fs ,
2008-02-08 15:21:48 +03:00
. show_options = shmem_show_options ,
2005-04-17 02:20:36 +04:00
# endif
2010-06-06 03:10:41 +04:00
. evict_inode = shmem_evict_inode ,
2005-04-17 02:20:36 +04:00
. drop_inode = generic_delete_inode ,
. put_super = shmem_put_super ,
} ;
2009-09-27 22:29:37 +04:00
static const struct vm_operations_struct shmem_vm_ops = {
2007-07-19 12:46:59 +04:00
. fault = shmem_fault ,
2005-04-17 02:20:36 +04:00
# ifdef CONFIG_NUMA
. set_policy = shmem_set_policy ,
. get_policy = shmem_get_policy ,
# endif
} ;
2010-07-25 11:46:36 +04:00
static struct dentry * shmem_mount ( struct file_system_type * fs_type ,
int flags , const char * dev_name , void * data )
2005-04-17 02:20:36 +04:00
{
2010-07-25 11:46:36 +04:00
return mount_nodev ( fs_type , flags , data , shmem_fill_super ) ;
2005-04-17 02:20:36 +04:00
}
2011-08-04 03:21:21 +04:00
static struct file_system_type shmem_fs_type = {
2005-04-17 02:20:36 +04:00
. owner = THIS_MODULE ,
. name = " tmpfs " ,
2010-07-25 11:46:36 +04:00
. mount = shmem_mount ,
2005-04-17 02:20:36 +04:00
. kill_sb = kill_litter_super ,
} ;
2011-08-04 03:21:21 +04:00
/*
 * Boot-time initialization: set up the backing_dev_info, the inode
 * slab cache, register the tmpfs filesystem type, and create the
 * internal shm_mnt used by shmem_file_setup()/SysV shm.  On any
 * failure, unwind in reverse order and poison shm_mnt with the error.
 */
int __init shmem_init(void)
{
	int error;

	error = bdi_init(&shmem_backing_dev_info);
	if (error)
		goto out4;

	error = shmem_init_inodecache();
	if (error)
		goto out3;

	error = register_filesystem(&shmem_fs_type);
	if (error) {
		printk(KERN_ERR "Could not register tmpfs\n");
		goto out2;
	}

	/* MS_NOUSER: this internal mount is never visible to userspace */
	shm_mnt = vfs_kern_mount(&shmem_fs_type, MS_NOUSER,
				 shmem_fs_type.name, NULL);
	if (IS_ERR(shm_mnt)) {
		error = PTR_ERR(shm_mnt);
		printk(KERN_ERR "Could not kern_mount tmpfs\n");
		goto out1;
	}
	return 0;

out1:
	unregister_filesystem(&shmem_fs_type);
out2:
	shmem_destroy_inodecache();
out3:
	bdi_destroy(&shmem_backing_dev_info);
out4:
	/* later callers see the error through IS_ERR(shm_mnt) */
	shm_mnt = ERR_PTR(error);
	return error;
}
2009-01-07 01:40:20 +03:00
# else /* !CONFIG_SHMEM */
/*
* tiny - shmem : simple shmemfs and tmpfs using ramfs code
*
 * This is intended for small systems where the benefits of the full
* shmem code ( swap - backed and resource - limited ) are outweighed by
* their complexity . On systems without swap this code should be
* effectively equivalent , but much lighter weight .
*/
# include <linux/ramfs.h>
2011-08-04 03:21:21 +04:00
static struct file_system_type shmem_fs_type = {
2009-01-07 01:40:20 +03:00
. name = " tmpfs " ,
2010-07-25 11:46:36 +04:00
. mount = ramfs_mount ,
2009-01-07 01:40:20 +03:00
. kill_sb = kill_litter_super ,
} ;
2011-08-04 03:21:21 +04:00
/*
 * !CONFIG_SHMEM boot-time init: nothing here can fail in a recoverable
 * way, so any failure is a fatal kernel bug.
 */
int __init shmem_init(void)
{
	BUG_ON(register_filesystem(&shmem_fs_type) != 0);

	shm_mnt = kern_mount(&shmem_fs_type);
	BUG_ON(IS_ERR(shm_mnt));
	return 0;
}
2011-08-04 03:21:21 +04:00
/* !CONFIG_SHMEM stub: ramfs-backed tmpfs never swaps, nothing to unuse */
int shmem_unuse(swp_entry_t swap, struct page *page)
{
	return 0;
}
2009-09-22 04:03:37 +04:00
/* !CONFIG_SHMEM stub: without swap there are no pages to lock in memory */
int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	return 0;
}
2011-08-04 03:21:21 +04:00
/*
 * !CONFIG_SHMEM variant: with no swap entries to worry about, truncation
 * is just dropping the pagecache pages in [lstart, lend].  Exported so
 * drivers (e.g. drm/i915) can call it explicitly.
 */
void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);
2009-02-24 23:51:52 +03:00
/* Map the shmem names onto their generic/ramfs equivalents for !SHMEM */
#define shmem_vm_ops				generic_file_vm_ops
#define shmem_file_operations			ramfs_file_operations
#define shmem_get_inode(sb, dir, mode, dev, flags)	ramfs_get_inode(sb, dir, mode, dev)
#define shmem_acct_size(flags, size)		0
#define shmem_unacct_size(flags, size)		do {} while (0)

#endif /* CONFIG_SHMEM */

/* common code */
2005-04-17 02:20:36 +04:00
2008-03-20 03:00:41 +03:00
/**
2005-04-17 02:20:36 +04:00
* shmem_file_setup - get an unlinked file living in tmpfs
* @ name : name for dentry ( to be seen in / proc / < pid > / maps
* @ size : size to be set for the file
2009-02-24 23:51:52 +03:00
* @ flags : VM_NORESERVE suppresses pre - accounting of the entire object size
2005-04-17 02:20:36 +04:00
*/
2009-06-17 02:33:02 +04:00
struct file * shmem_file_setup ( const char * name , loff_t size , unsigned long flags )
2005-04-17 02:20:36 +04:00
{
int error ;
struct file * file ;
struct inode * inode ;
2009-08-09 00:52:35 +04:00
struct path path ;
struct dentry * root ;
2005-04-17 02:20:36 +04:00
struct qstr this ;
if ( IS_ERR ( shm_mnt ) )
return ( void * ) shm_mnt ;
tmpfs: demolish old swap vector support
The maximum size of a shmem/tmpfs file has been limited by the maximum
size of its triple-indirect swap vector. With 4kB page size, maximum
filesize was just over 2TB on a 32-bit kernel, but sadly one eighth of
that on a 64-bit kernel. (With 8kB page size, maximum filesize was just
over 4TB on a 64-bit kernel, but 16TB on a 32-bit kernel,
MAX_LFS_FILESIZE being then more restrictive than swap vector layout.)
It's a shame that tmpfs should be more restrictive than ramfs, and this
limitation has now been noticed. Add another level to the swap vector?
No, it became obscure and hard to maintain, once I complicated it to
make use of highmem pages nine years ago: better choose another way.
Surely, if 2.4 had had the radix tree pagecache introduced in 2.5, then
tmpfs would never have invented its own peculiar radix tree: we would
have fitted swap entries into the common radix tree instead, in much the
same way as we fit swap entries into page tables.
And why should each file have a separate radix tree for its pages and
for its swap entries? The swap entries are required precisely where and
when the pages are not. We want to put them together in a single radix
tree: which can then avoid much of the locking which was needed to
prevent them from being exchanged underneath us.
This also avoids the waste of memory devoted to swap vectors, first in
the shmem_inode itself, then at least two more pages once a file grew
beyond 16 data pages (pages accounted by df and du, but not by memcg).
Allocated upfront, to avoid allocation when under swapping pressure, but
pure waste when CONFIG_SWAP is not set - I have never spattered around
the ifdefs to prevent that, preferring this move to sharing the common
radix tree instead.
There are three downsides to sharing the radix tree. One, that it binds
tmpfs more tightly to the rest of mm, either requiring knowledge of swap
entries in radix tree there, or duplication of its code here in shmem.c.
I believe that the simplications and memory savings (and probable higher
performance, not yet measured) justify that.
Two, that on HIGHMEM systems with SWAP enabled, it's the lowmem radix
nodes that cannot be freed under memory pressure - whereas before it was
the less precious highmem swap vector pages that could not be freed.
I'm hoping that 64-bit has now been accessible for long enough, that the
highmem argument has grown much less persuasive.
Three, that swapoff is slower than it used to be on tmpfs files, since
it's using a simple generic mechanism not tailored to it: I find this
noticeable, and shall want to improve, but maybe nobody else will
notice.
So... now remove most of the old swap vector code from shmem.c. But,
for the moment, keep the simple i_direct vector of 16 pages, with simple
accessors shmem_put_swap() and shmem_get_swap(), as a toy implementation
to help mark where swap needs to be handled in subsequent patches.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-08-04 03:21:20 +04:00
if ( size < 0 | | size > MAX_LFS_FILESIZE )
2005-04-17 02:20:36 +04:00
return ERR_PTR ( - EINVAL ) ;
if ( shmem_acct_size ( flags , size ) )
return ERR_PTR ( - ENOMEM ) ;
error = - ENOMEM ;
this . name = name ;
this . len = strlen ( name ) ;
this . hash = 0 ; /* will go */
root = shm_mnt - > mnt_root ;
2009-08-09 00:52:35 +04:00
path . dentry = d_alloc ( root , & this ) ;
if ( ! path . dentry )
2005-04-17 02:20:36 +04:00
goto put_memory ;
2009-08-09 00:52:35 +04:00
path . mnt = mntget ( shm_mnt ) ;
2005-04-17 02:20:36 +04:00
error = - ENOSPC ;
2010-03-04 17:32:18 +03:00
inode = shmem_get_inode ( root - > d_sb , NULL , S_IFREG | S_IRWXUGO , 0 , flags ) ;
2005-04-17 02:20:36 +04:00
if ( ! inode )
2009-08-05 18:25:56 +04:00
goto put_dentry ;
2005-04-17 02:20:36 +04:00
2009-08-09 00:52:35 +04:00
d_instantiate ( path . dentry , inode ) ;
2005-04-17 02:20:36 +04:00
inode - > i_size = size ;
inode - > i_nlink = 0 ; /* It is unlinked */
2009-01-07 01:40:20 +03:00
# ifndef CONFIG_MMU
error = ramfs_nommu_expand_for_mapping ( inode , size ) ;
if ( error )
2009-08-05 18:25:56 +04:00
goto put_dentry ;
2009-01-07 01:40:20 +03:00
# endif
2009-08-05 18:25:56 +04:00
error = - ENFILE ;
2009-08-09 00:52:35 +04:00
file = alloc_file ( & path , FMODE_WRITE | FMODE_READ ,
2009-08-05 18:25:56 +04:00
& shmem_file_operations ) ;
if ( ! file )
goto put_dentry ;
2005-04-17 02:20:36 +04:00
return file ;
put_dentry :
2009-08-09 00:52:35 +04:00
path_put ( & path ) ;
2005-04-17 02:20:36 +04:00
put_memory :
shmem_unacct_size ( flags , size ) ;
return ERR_PTR ( error ) ;
}
2008-06-20 11:08:06 +04:00
EXPORT_SYMBOL_GPL ( shmem_file_setup ) ;
2005-04-17 02:20:36 +04:00
2008-03-20 03:00:41 +03:00
/**
2005-04-17 02:20:36 +04:00
* shmem_zero_setup - setup a shared anonymous mapping
* @ vma : the vma to be mmapped is prepared by do_mmap_pgoff
*/
int shmem_zero_setup ( struct vm_area_struct * vma )
{
struct file * file ;
loff_t size = vma - > vm_end - vma - > vm_start ;
file = shmem_file_setup ( " dev/zero " , size , vma - > vm_flags ) ;
if ( IS_ERR ( file ) )
return PTR_ERR ( file ) ;
if ( vma - > vm_file )
fput ( vma - > vm_file ) ;
vma - > vm_file = file ;
vma - > vm_ops = & shmem_vm_ops ;
2011-03-23 02:33:43 +03:00
vma - > vm_flags | = VM_CAN_NONLINEAR ;
2005-04-17 02:20:36 +04:00
return 0 ;
}
2011-06-28 03:18:04 +04:00
/**
 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
 * @mapping:	the page's address_space
 * @index:	the page index
 * @gfp:	the page allocator flags to use if allocating
 *
 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
 * with any new page allocations done using the specified allocation flags.
 * But read_cache_page_gfp() uses the ->readpage() method: which does not
 * suit tmpfs, since it may have pages in swapcache, and needs to find those
 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
 *
 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
 */
struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
					 pgoff_t index, gfp_t gfp)
{
#ifdef CONFIG_SHMEM
	struct inode *inode = mapping->host;
	struct page *page;
	int error;

	BUG_ON(mapping->a_ops != &shmem_aops);
	error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL);
	if (error)
		page = ERR_PTR(error);
	else
		unlock_page(page);	/* shmem_getpage_gfp returns it locked */
	return page;
#else
	/*
	 * The tiny !SHMEM case uses ramfs without swap
	 */
	return read_cache_page_gfp(mapping, index, gfp);
#endif
}
EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);