// SPDX-License-Identifier: GPL-2.0+
/*
 * NILFS directory entry operations
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Modified for NILFS by Amagai Yoshiji.
 */
/*
 * linux/fs/ext2/dir.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/dir.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * ext2 directory handling functions
 *
 * Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 *
 * All code that works with directory layout had been switched to pagecache
 * and moved here. AV
 */
#include <linux/pagemap.h>
#include "nilfs.h"
#include "page.h"
2016-08-02 14:05:30 -07:00
static inline unsigned int nilfs_rec_len_from_disk ( __le16 dlen )
{
unsigned int len = le16_to_cpu ( dlen ) ;
# if (PAGE_SIZE >= 65536)
if ( len = = NILFS_MAX_REC_LEN )
return 1 < < 16 ;
# endif
return len ;
}
/*
 * Encode a record length for the on-disk format; the inverse of
 * nilfs_rec_len_from_disk().  A 64KiB record is stored as the reserved
 * marker NILFS_MAX_REC_LEN; anything larger is a caller bug.
 */
static inline __le16 nilfs_rec_len_to_disk(unsigned int len)
{
#if (PAGE_SIZE >= 65536)
	if (len == (1 << 16))
		return cpu_to_le16(NILFS_MAX_REC_LEN);

	BUG_ON(len > (1 << 16));
#endif
	return cpu_to_le16(len);
}
2009-04-06 19:01:34 -07:00
/*
* nilfs uses block - sized chunks . Arguably , sector - sized ones would be
* more robust , but we have what we have
*/
2016-05-23 16:23:39 -07:00
static inline unsigned int nilfs_chunk_size ( struct inode * inode )
2009-04-06 19:01:34 -07:00
{
return inode - > i_sb - > s_blocksize ;
}
/*
* Return the offset into page ` page_nr ' of the last valid
* byte in that page , plus one .
*/
2016-05-23 16:23:39 -07:00
static unsigned int nilfs_last_byte ( struct inode * inode , unsigned long page_nr )
2009-04-06 19:01:34 -07:00
{
2016-05-23 16:23:39 -07:00
unsigned int last_byte = inode - > i_size ;
2009-04-06 19:01:34 -07:00
mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros
PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced *long* time
ago with promise that one day it will be possible to implement page
cache with bigger chunks than PAGE_SIZE.
This promise never materialized. And unlikely will.
We have many places where PAGE_CACHE_SIZE assumed to be equal to
PAGE_SIZE. And it's constant source of confusion on whether
PAGE_CACHE_* or PAGE_* constant should be used in a particular case,
especially on the border between fs and mm.
Global switching to PAGE_CACHE_SIZE != PAGE_SIZE would cause to much
breakage to be doable.
Let's stop pretending that pages in page cache are special. They are
not.
The changes are pretty straight-forward:
- <foo> << (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- <foo> >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} -> PAGE_{SIZE,SHIFT,MASK,ALIGN};
- page_cache_get() -> get_page();
- page_cache_release() -> put_page();
This patch contains automated changes generated with coccinelle using
script below. For some reason, coccinelle doesn't patch header files.
I've called spatch for them manually.
The only adjustment after coccinelle is revert of changes to
PAGE_CAHCE_ALIGN definition: we are going to drop it later.
There are few places in the code where coccinelle didn't reach. I'll
fix them manually in a separate patch. Comments and documentation also
will be addressed with the separate patch.
virtual patch
@@
expression E;
@@
- E << (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
expression E;
@@
- E >> (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
@@
- PAGE_CACHE_SHIFT
+ PAGE_SHIFT
@@
@@
- PAGE_CACHE_SIZE
+ PAGE_SIZE
@@
@@
- PAGE_CACHE_MASK
+ PAGE_MASK
@@
expression E;
@@
- PAGE_CACHE_ALIGN(E)
+ PAGE_ALIGN(E)
@@
expression E;
@@
- page_cache_get(E)
+ get_page(E)
@@
expression E;
@@
- page_cache_release(E)
+ put_page(E)
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2016-04-01 15:29:47 +03:00
last_byte - = page_nr < < PAGE_SHIFT ;
if ( last_byte > PAGE_SIZE )
last_byte = PAGE_SIZE ;
2009-04-06 19:01:34 -07:00
return last_byte ;
}
2016-05-23 16:23:39 -07:00
static int nilfs_prepare_chunk ( struct page * page , unsigned int from ,
unsigned int to )
2009-04-06 19:01:34 -07:00
{
loff_t pos = page_offset ( page ) + from ;
2016-05-23 16:23:25 -07:00
2010-06-04 11:29:57 +02:00
return __block_write_begin ( page , pos , to - from , nilfs_get_block ) ;
2009-04-06 19:01:34 -07:00
}
2009-11-27 19:41:11 +09:00
static void nilfs_commit_chunk ( struct page * page ,
struct address_space * mapping ,
2016-05-23 16:23:39 -07:00
unsigned int from , unsigned int to )
2009-04-06 19:01:34 -07:00
{
struct inode * dir = mapping - > host ;
loff_t pos = page_offset ( page ) + from ;
2016-05-23 16:23:39 -07:00
unsigned int len = to - from ;
unsigned int nr_dirty , copied ;
2009-04-06 19:01:34 -07:00
int err ;
nr_dirty = nilfs_page_count_clean_buffers ( page , from , to ) ;
copied = block_write_end ( NULL , mapping , pos , len , len , page , NULL ) ;
2009-11-27 19:41:12 +09:00
if ( pos + copied > dir - > i_size )
2009-04-06 19:01:34 -07:00
i_size_write ( dir , pos + copied ) ;
if ( IS_DIRSYNC ( dir ) )
nilfs_set_transaction_flag ( NILFS_TI_SYNC ) ;
2010-12-27 00:05:49 +09:00
err = nilfs_set_file_dirty ( dir , nr_dirty ) ;
2009-11-27 19:41:11 +09:00
WARN_ON ( err ) ; /* do not happen */
2009-04-06 19:01:34 -07:00
unlock_page ( page ) ;
}
2023-11-27 23:30:28 +09:00
static bool nilfs_check_folio ( struct folio * folio , char * kaddr )
2009-04-06 19:01:34 -07:00
{
2023-11-27 23:30:28 +09:00
struct inode * dir = folio - > mapping - > host ;
2009-04-06 19:01:34 -07:00
struct super_block * sb = dir - > i_sb ;
2016-05-23 16:23:39 -07:00
unsigned int chunk_size = nilfs_chunk_size ( dir ) ;
2023-11-27 23:30:28 +09:00
size_t offs , rec_len ;
size_t limit = folio_size ( folio ) ;
2009-04-06 19:01:34 -07:00
struct nilfs_dir_entry * p ;
char * error ;
2023-11-27 23:30:28 +09:00
if ( dir - > i_size < folio_pos ( folio ) + limit ) {
limit = dir - > i_size - folio_pos ( folio ) ;
2009-04-06 19:01:34 -07:00
if ( limit & ( chunk_size - 1 ) )
goto Ebadsize ;
if ( ! limit )
goto out ;
}
for ( offs = 0 ; offs < = limit - NILFS_DIR_REC_LEN ( 1 ) ; offs + = rec_len ) {
p = ( struct nilfs_dir_entry * ) ( kaddr + offs ) ;
2010-07-25 20:39:03 +09:00
rec_len = nilfs_rec_len_from_disk ( p - > rec_len ) ;
2009-04-06 19:01:34 -07:00
if ( rec_len < NILFS_DIR_REC_LEN ( 1 ) )
goto Eshort ;
if ( rec_len & 3 )
goto Ealign ;
if ( rec_len < NILFS_DIR_REC_LEN ( p - > name_len ) )
goto Enamelen ;
if ( ( ( offs + rec_len - 1 ) ^ offs ) & ~ ( chunk_size - 1 ) )
goto Espan ;
}
if ( offs ! = limit )
goto Eend ;
out :
2023-11-27 23:30:28 +09:00
folio_set_checked ( folio ) ;
2016-04-22 15:06:44 -04:00
return true ;
2009-04-06 19:01:34 -07:00
/* Too bad, we had an error */
Ebadsize :
2016-08-02 14:05:00 -07:00
nilfs_error ( sb ,
2009-04-06 19:01:34 -07:00
" size of directory #%lu is not a multiple of chunk size " ,
2016-08-02 14:05:00 -07:00
dir - > i_ino ) ;
2009-04-06 19:01:34 -07:00
goto fail ;
Eshort :
error = " rec_len is smaller than minimal " ;
goto bad_entry ;
Ealign :
error = " unaligned directory entry " ;
goto bad_entry ;
Enamelen :
error = " rec_len is too small for name_len " ;
goto bad_entry ;
Espan :
error = " directory entry across blocks " ;
bad_entry :
2016-08-02 14:05:00 -07:00
nilfs_error ( sb ,
2023-11-27 23:30:28 +09:00
" bad entry in directory #%lu: %s - offset=%lu, inode=%lu, rec_len=%zd, name_len=%d " ,
dir - > i_ino , error , ( folio - > index < < PAGE_SHIFT ) + offs ,
2016-08-02 14:05:00 -07:00
( unsigned long ) le64_to_cpu ( p - > inode ) ,
2009-04-06 19:01:34 -07:00
rec_len , p - > name_len ) ;
goto fail ;
Eend :
p = ( struct nilfs_dir_entry * ) ( kaddr + offs ) ;
2016-08-02 14:05:00 -07:00
nilfs_error ( sb ,
" entry in directory #%lu spans the page boundary offset=%lu, inode=%lu " ,
2023-11-27 23:30:28 +09:00
dir - > i_ino , ( folio - > index < < PAGE_SHIFT ) + offs ,
2016-08-02 14:05:00 -07:00
( unsigned long ) le64_to_cpu ( p - > inode ) ) ;
2009-04-06 19:01:34 -07:00
fail :
2023-11-27 23:30:28 +09:00
folio_set_error ( folio ) ;
2016-04-22 15:06:44 -04:00
return false ;
2009-04-06 19:01:34 -07:00
}
2023-11-27 23:30:28 +09:00
static void * nilfs_get_folio ( struct inode * dir , unsigned long n ,
struct folio * * foliop )
2009-04-06 19:01:34 -07:00
{
struct address_space * mapping = dir - > i_mapping ;
2023-11-27 23:30:28 +09:00
struct folio * folio = read_mapping_folio ( mapping , n , NULL ) ;
2023-11-27 23:30:25 +09:00
void * kaddr ;
2010-07-24 17:09:10 +09:00
2023-11-27 23:30:28 +09:00
if ( IS_ERR ( folio ) )
return folio ;
2023-11-27 23:30:25 +09:00
2023-11-27 23:30:28 +09:00
kaddr = kmap_local_folio ( folio , 0 ) ;
if ( unlikely ( ! folio_test_checked ( folio ) ) ) {
if ( ! nilfs_check_folio ( folio , kaddr ) )
2023-11-27 23:30:25 +09:00
goto fail ;
2009-04-06 19:01:34 -07:00
}
2023-11-27 23:30:25 +09:00
2023-11-27 23:30:28 +09:00
* foliop = folio ;
2023-11-27 23:30:25 +09:00
return kaddr ;
2009-04-06 19:01:34 -07:00
fail :
2023-11-27 23:30:28 +09:00
folio_release_kmap ( folio , kaddr ) ;
2009-04-06 19:01:34 -07:00
return ERR_PTR ( - EIO ) ;
}
2023-11-27 23:30:28 +09:00
static void * nilfs_get_page ( struct inode * dir , unsigned long n ,
struct page * * pagep )
{
struct folio * folio ;
void * kaddr = nilfs_get_folio ( dir , n , & folio ) ;
if ( ! IS_ERR ( kaddr ) )
* pagep = & folio - > page ;
return kaddr ;
}
2009-04-06 19:01:34 -07:00
/*
* NOTE ! unlike strncmp , nilfs_match returns 1 for success , 0 for failure .
*
* len < = NILFS_NAME_LEN and de ! = NULL are guaranteed by caller .
*/
static int
2010-01-31 21:03:58 -05:00
nilfs_match ( int len , const unsigned char * name , struct nilfs_dir_entry * de )
2009-04-06 19:01:34 -07:00
{
if ( len ! = de - > name_len )
return 0 ;
if ( ! de - > inode )
return 0 ;
return ! memcmp ( name , de - > name , len ) ;
}
/*
* p is at least 6 bytes before the end of page
*/
static struct nilfs_dir_entry * nilfs_next_entry ( struct nilfs_dir_entry * p )
{
2010-07-25 20:39:03 +09:00
return ( struct nilfs_dir_entry * ) ( ( char * ) p +
nilfs_rec_len_from_disk ( p - > rec_len ) ) ;
2009-04-06 19:01:34 -07:00
}
/* Map on-disk NILFS_FT_* file types to the DT_* values used by readdir. */
static unsigned char
nilfs_filetype_table[NILFS_FT_MAX] = {
	[NILFS_FT_UNKNOWN]	= DT_UNKNOWN,
	[NILFS_FT_REG_FILE]	= DT_REG,
	[NILFS_FT_DIR]		= DT_DIR,
	[NILFS_FT_CHRDEV]	= DT_CHR,
	[NILFS_FT_BLKDEV]	= DT_BLK,
	[NILFS_FT_FIFO]		= DT_FIFO,
	[NILFS_FT_SOCK]		= DT_SOCK,
	[NILFS_FT_SYMLINK]	= DT_LNK,
};
# define S_SHIFT 12
static unsigned char
nilfs_type_by_mode [ S_IFMT > > S_SHIFT ] = {
[ S_IFREG > > S_SHIFT ] = NILFS_FT_REG_FILE ,
[ S_IFDIR > > S_SHIFT ] = NILFS_FT_DIR ,
[ S_IFCHR > > S_SHIFT ] = NILFS_FT_CHRDEV ,
[ S_IFBLK > > S_SHIFT ] = NILFS_FT_BLKDEV ,
[ S_IFIFO > > S_SHIFT ] = NILFS_FT_FIFO ,
[ S_IFSOCK > > S_SHIFT ] = NILFS_FT_SOCK ,
[ S_IFLNK > > S_SHIFT ] = NILFS_FT_SYMLINK ,
} ;
static void nilfs_set_de_type ( struct nilfs_dir_entry * de , struct inode * inode )
{
2011-07-26 03:07:14 -04:00
umode_t mode = inode - > i_mode ;
2009-04-06 19:01:34 -07:00
de - > file_type = nilfs_type_by_mode [ ( mode & S_IFMT ) > > S_SHIFT ] ;
}
2013-05-16 14:36:14 -04:00
static int nilfs_readdir ( struct file * file , struct dir_context * ctx )
2009-04-06 19:01:34 -07:00
{
2013-05-16 14:36:14 -04:00
loff_t pos = ctx - > pos ;
struct inode * inode = file_inode ( file ) ;
2009-04-06 19:01:34 -07:00
struct super_block * sb = inode - > i_sb ;
mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros
PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced *long* time
ago with promise that one day it will be possible to implement page
cache with bigger chunks than PAGE_SIZE.
This promise never materialized. And unlikely will.
We have many places where PAGE_CACHE_SIZE assumed to be equal to
PAGE_SIZE. And it's constant source of confusion on whether
PAGE_CACHE_* or PAGE_* constant should be used in a particular case,
especially on the border between fs and mm.
Global switching to PAGE_CACHE_SIZE != PAGE_SIZE would cause to much
breakage to be doable.
Let's stop pretending that pages in page cache are special. They are
not.
The changes are pretty straight-forward:
- <foo> << (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- <foo> >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} -> PAGE_{SIZE,SHIFT,MASK,ALIGN};
- page_cache_get() -> get_page();
- page_cache_release() -> put_page();
This patch contains automated changes generated with coccinelle using
script below. For some reason, coccinelle doesn't patch header files.
I've called spatch for them manually.
The only adjustment after coccinelle is revert of changes to
PAGE_CAHCE_ALIGN definition: we are going to drop it later.
There are few places in the code where coccinelle didn't reach. I'll
fix them manually in a separate patch. Comments and documentation also
will be addressed with the separate patch.
virtual patch
@@
expression E;
@@
- E << (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
expression E;
@@
- E >> (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
@@
- PAGE_CACHE_SHIFT
+ PAGE_SHIFT
@@
@@
- PAGE_CACHE_SIZE
+ PAGE_SIZE
@@
@@
- PAGE_CACHE_MASK
+ PAGE_MASK
@@
expression E;
@@
- PAGE_CACHE_ALIGN(E)
+ PAGE_ALIGN(E)
@@
expression E;
@@
- page_cache_get(E)
+ get_page(E)
@@
expression E;
@@
- page_cache_release(E)
+ put_page(E)
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2016-04-01 15:29:47 +03:00
unsigned int offset = pos & ~ PAGE_MASK ;
unsigned long n = pos > > PAGE_SHIFT ;
2009-04-06 19:01:34 -07:00
unsigned long npages = dir_pages ( inode ) ;
if ( pos > inode - > i_size - NILFS_DIR_REC_LEN ( 1 ) )
2013-05-16 14:36:14 -04:00
return 0 ;
2009-04-06 19:01:34 -07:00
for ( ; n < npages ; n + + , offset = 0 ) {
char * kaddr , * limit ;
struct nilfs_dir_entry * de ;
2023-11-27 23:30:29 +09:00
struct folio * folio ;
2009-04-06 19:01:34 -07:00
2023-11-27 23:30:29 +09:00
kaddr = nilfs_get_folio ( inode , n , & folio ) ;
2023-11-27 23:30:25 +09:00
if ( IS_ERR ( kaddr ) ) {
2016-08-02 14:05:00 -07:00
nilfs_error ( sb , " bad page in #%lu " , inode - > i_ino ) ;
mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros
PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced *long* time
ago with promise that one day it will be possible to implement page
cache with bigger chunks than PAGE_SIZE.
This promise never materialized. And unlikely will.
We have many places where PAGE_CACHE_SIZE assumed to be equal to
PAGE_SIZE. And it's constant source of confusion on whether
PAGE_CACHE_* or PAGE_* constant should be used in a particular case,
especially on the border between fs and mm.
Global switching to PAGE_CACHE_SIZE != PAGE_SIZE would cause to much
breakage to be doable.
Let's stop pretending that pages in page cache are special. They are
not.
The changes are pretty straight-forward:
- <foo> << (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- <foo> >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} -> PAGE_{SIZE,SHIFT,MASK,ALIGN};
- page_cache_get() -> get_page();
- page_cache_release() -> put_page();
This patch contains automated changes generated with coccinelle using
script below. For some reason, coccinelle doesn't patch header files.
I've called spatch for them manually.
The only adjustment after coccinelle is revert of changes to
PAGE_CAHCE_ALIGN definition: we are going to drop it later.
There are few places in the code where coccinelle didn't reach. I'll
fix them manually in a separate patch. Comments and documentation also
will be addressed with the separate patch.
virtual patch
@@
expression E;
@@
- E << (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
expression E;
@@
- E >> (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
@@
- PAGE_CACHE_SHIFT
+ PAGE_SHIFT
@@
@@
- PAGE_CACHE_SIZE
+ PAGE_SIZE
@@
@@
- PAGE_CACHE_MASK
+ PAGE_MASK
@@
expression E;
@@
- PAGE_CACHE_ALIGN(E)
+ PAGE_ALIGN(E)
@@
expression E;
@@
- page_cache_get(E)
+ get_page(E)
@@
expression E;
@@
- page_cache_release(E)
+ put_page(E)
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2016-04-01 15:29:47 +03:00
ctx - > pos + = PAGE_SIZE - offset ;
2013-05-16 14:36:14 -04:00
return - EIO ;
2009-04-06 19:01:34 -07:00
}
de = ( struct nilfs_dir_entry * ) ( kaddr + offset ) ;
limit = kaddr + nilfs_last_byte ( inode , n ) -
NILFS_DIR_REC_LEN ( 1 ) ;
for ( ; ( char * ) de < = limit ; de = nilfs_next_entry ( de ) ) {
if ( de - > rec_len = = 0 ) {
2016-08-02 14:05:00 -07:00
nilfs_error ( sb , " zero-length directory entry " ) ;
2023-11-27 23:30:29 +09:00
folio_release_kmap ( folio , kaddr ) ;
2013-05-16 14:36:14 -04:00
return - EIO ;
2009-04-06 19:01:34 -07:00
}
if ( de - > inode ) {
2013-05-16 14:36:14 -04:00
unsigned char t ;
2009-04-06 19:01:34 -07:00
2013-05-16 14:36:14 -04:00
if ( de - > file_type < NILFS_FT_MAX )
t = nilfs_filetype_table [ de - > file_type ] ;
else
t = DT_UNKNOWN ;
2009-04-06 19:01:34 -07:00
2013-05-16 14:36:14 -04:00
if ( ! dir_emit ( ctx , de - > name , de - > name_len ,
le64_to_cpu ( de - > inode ) , t ) ) {
2023-11-27 23:30:29 +09:00
folio_release_kmap ( folio , kaddr ) ;
2013-05-16 14:36:14 -04:00
return 0 ;
2009-04-06 19:01:34 -07:00
}
}
2013-05-16 14:36:14 -04:00
ctx - > pos + = nilfs_rec_len_from_disk ( de - > rec_len ) ;
2009-04-06 19:01:34 -07:00
}
2023-11-27 23:30:29 +09:00
folio_release_kmap ( folio , kaddr ) ;
2009-04-06 19:01:34 -07:00
}
2013-05-16 14:36:14 -04:00
return 0 ;
2009-04-06 19:01:34 -07:00
}
/*
* nilfs_find_entry ( )
*
* finds an entry in the specified directory with the wanted name . It
* returns the page in which the entry was found , and the entry itself
* ( as a parameter - res_dir ) . Page is returned mapped and unlocked .
* Entry is guaranteed to be valid .
*/
struct nilfs_dir_entry *
2010-01-31 21:02:09 -05:00
nilfs_find_entry ( struct inode * dir , const struct qstr * qstr ,
2009-04-06 19:01:34 -07:00
struct page * * res_page )
{
2010-01-31 21:02:09 -05:00
const unsigned char * name = qstr - > name ;
int namelen = qstr - > len ;
2016-05-23 16:23:39 -07:00
unsigned int reclen = NILFS_DIR_REC_LEN ( namelen ) ;
2009-04-06 19:01:34 -07:00
unsigned long start , n ;
unsigned long npages = dir_pages ( dir ) ;
2023-11-27 23:30:30 +09:00
struct folio * folio = NULL ;
2009-04-06 19:01:34 -07:00
struct nilfs_inode_info * ei = NILFS_I ( dir ) ;
struct nilfs_dir_entry * de ;
if ( npages = = 0 )
goto out ;
/* OFFSET_CACHE */
* res_page = NULL ;
start = ei - > i_dir_start_lookup ;
if ( start > = npages )
start = 0 ;
n = start ;
do {
2023-11-27 23:30:30 +09:00
char * kaddr = nilfs_get_folio ( dir , n , & folio ) ;
2016-05-23 16:23:25 -07:00
2023-11-27 23:30:25 +09:00
if ( ! IS_ERR ( kaddr ) ) {
2009-04-06 19:01:34 -07:00
de = ( struct nilfs_dir_entry * ) kaddr ;
kaddr + = nilfs_last_byte ( dir , n ) - reclen ;
while ( ( char * ) de < = kaddr ) {
if ( de - > rec_len = = 0 ) {
2016-08-02 14:05:00 -07:00
nilfs_error ( dir - > i_sb ,
2009-04-06 19:01:34 -07:00
" zero-length directory entry " ) ;
2023-11-27 23:30:30 +09:00
folio_release_kmap ( folio , kaddr ) ;
2009-04-06 19:01:34 -07:00
goto out ;
}
if ( nilfs_match ( namelen , name , de ) )
goto found ;
de = nilfs_next_entry ( de ) ;
}
2023-11-27 23:30:30 +09:00
folio_release_kmap ( folio , kaddr ) ;
2009-04-06 19:01:34 -07:00
}
if ( + + n > = npages )
n = 0 ;
2023-11-27 23:30:30 +09:00
/* next folio is past the blocks we've got */
mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros
PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced *long* time
ago with promise that one day it will be possible to implement page
cache with bigger chunks than PAGE_SIZE.
This promise never materialized. And unlikely will.
We have many places where PAGE_CACHE_SIZE assumed to be equal to
PAGE_SIZE. And it's constant source of confusion on whether
PAGE_CACHE_* or PAGE_* constant should be used in a particular case,
especially on the border between fs and mm.
Global switching to PAGE_CACHE_SIZE != PAGE_SIZE would cause to much
breakage to be doable.
Let's stop pretending that pages in page cache are special. They are
not.
The changes are pretty straight-forward:
- <foo> << (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- <foo> >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} -> PAGE_{SIZE,SHIFT,MASK,ALIGN};
- page_cache_get() -> get_page();
- page_cache_release() -> put_page();
This patch contains automated changes generated with coccinelle using
script below. For some reason, coccinelle doesn't patch header files.
I've called spatch for them manually.
The only adjustment after coccinelle is revert of changes to
PAGE_CAHCE_ALIGN definition: we are going to drop it later.
There are few places in the code where coccinelle didn't reach. I'll
fix them manually in a separate patch. Comments and documentation also
will be addressed with the separate patch.
virtual patch
@@
expression E;
@@
- E << (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
expression E;
@@
- E >> (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
@@
- PAGE_CACHE_SHIFT
+ PAGE_SHIFT
@@
@@
- PAGE_CACHE_SIZE
+ PAGE_SIZE
@@
@@
- PAGE_CACHE_MASK
+ PAGE_MASK
@@
expression E;
@@
- PAGE_CACHE_ALIGN(E)
+ PAGE_ALIGN(E)
@@
expression E;
@@
- page_cache_get(E)
+ get_page(E)
@@
expression E;
@@
- page_cache_release(E)
+ put_page(E)
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2016-04-01 15:29:47 +03:00
if ( unlikely ( n > ( dir - > i_blocks > > ( PAGE_SHIFT - 9 ) ) ) ) {
2016-08-02 14:05:00 -07:00
nilfs_error ( dir - > i_sb ,
2010-03-14 03:17:45 +09:00
" dir %lu size %lld exceeds block count %llu " ,
2009-04-06 19:01:34 -07:00
dir - > i_ino , dir - > i_size ,
( unsigned long long ) dir - > i_blocks ) ;
goto out ;
}
} while ( n ! = start ) ;
out :
return NULL ;
found :
2023-11-27 23:30:30 +09:00
* res_page = & folio - > page ;
2009-04-06 19:01:34 -07:00
ei - > i_dir_start_lookup = n ;
return de ;
}
struct nilfs_dir_entry * nilfs_dotdot ( struct inode * dir , struct page * * p )
{
2023-11-27 23:30:25 +09:00
struct nilfs_dir_entry * de = nilfs_get_page ( dir , 0 , p ) ;
2009-04-06 19:01:34 -07:00
2023-11-27 23:30:25 +09:00
if ( IS_ERR ( de ) )
return NULL ;
return nilfs_next_entry ( de ) ;
2009-04-06 19:01:34 -07:00
}
2010-01-31 21:02:09 -05:00
ino_t nilfs_inode_by_name ( struct inode * dir , const struct qstr * qstr )
2009-04-06 19:01:34 -07:00
{
ino_t res = 0 ;
struct nilfs_dir_entry * de ;
struct page * page ;
2010-01-31 21:02:09 -05:00
de = nilfs_find_entry ( dir , qstr , & page ) ;
2009-04-06 19:01:34 -07:00
if ( de ) {
res = le64_to_cpu ( de - > inode ) ;
2023-11-27 23:30:27 +09:00
unmap_and_put_page ( page , de ) ;
2009-04-06 19:01:34 -07:00
}
return res ;
}
void nilfs_set_link ( struct inode * dir , struct nilfs_dir_entry * de ,
struct page * page , struct inode * inode )
{
2023-11-27 23:30:22 +09:00
unsigned int from = offset_in_page ( de ) ;
2016-05-23 16:23:39 -07:00
unsigned int to = from + nilfs_rec_len_from_disk ( de - > rec_len ) ;
2009-04-06 19:01:34 -07:00
struct address_space * mapping = page - > mapping ;
int err ;
lock_page ( page ) ;
2010-06-04 11:29:56 +02:00
err = nilfs_prepare_chunk ( page , from , to ) ;
2009-04-06 19:01:34 -07:00
BUG_ON ( err ) ;
de - > inode = cpu_to_le64 ( inode - > i_ino ) ;
nilfs_set_de_type ( de , inode ) ;
2009-11-27 19:41:11 +09:00
nilfs_commit_chunk ( page , mapping , from , to ) ;
2023-10-04 14:52:38 -04:00
inode_set_mtime_to_ts ( dir , inode_set_ctime_current ( dir ) ) ;
2009-04-06 19:01:34 -07:00
}
/*
* Parent is locked .
*/
int nilfs_add_link ( struct dentry * dentry , struct inode * inode )
{
2015-03-17 22:25:59 +00:00
struct inode * dir = d_inode ( dentry - > d_parent ) ;
2010-01-31 21:03:58 -05:00
const unsigned char * name = dentry - > d_name . name ;
2009-04-06 19:01:34 -07:00
int namelen = dentry - > d_name . len ;
2016-05-23 16:23:39 -07:00
unsigned int chunk_size = nilfs_chunk_size ( dir ) ;
unsigned int reclen = NILFS_DIR_REC_LEN ( namelen ) ;
2009-04-06 19:01:34 -07:00
unsigned short rec_len , name_len ;
struct page * page = NULL ;
struct nilfs_dir_entry * de ;
unsigned long npages = dir_pages ( dir ) ;
unsigned long n ;
char * kaddr ;
2016-05-23 16:23:39 -07:00
unsigned int from , to ;
2009-04-06 19:01:34 -07:00
int err ;
/*
* We take care of directory expansion in the same loop .
* This code plays outside i_size , so it locks the page
* to protect that region .
*/
for ( n = 0 ; n < = npages ; n + + ) {
char * dir_end ;
2023-11-27 23:30:25 +09:00
kaddr = nilfs_get_page ( dir , n , & page ) ;
err = PTR_ERR ( kaddr ) ;
if ( IS_ERR ( kaddr ) )
2009-04-06 19:01:34 -07:00
goto out ;
lock_page ( page ) ;
dir_end = kaddr + nilfs_last_byte ( dir , n ) ;
de = ( struct nilfs_dir_entry * ) kaddr ;
mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros
PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced *long* time
ago with promise that one day it will be possible to implement page
cache with bigger chunks than PAGE_SIZE.
This promise never materialized. And unlikely will.
We have many places where PAGE_CACHE_SIZE assumed to be equal to
PAGE_SIZE. And it's constant source of confusion on whether
PAGE_CACHE_* or PAGE_* constant should be used in a particular case,
especially on the border between fs and mm.
Global switching to PAGE_CACHE_SIZE != PAGE_SIZE would cause to much
breakage to be doable.
Let's stop pretending that pages in page cache are special. They are
not.
The changes are pretty straight-forward:
- <foo> << (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- <foo> >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} -> PAGE_{SIZE,SHIFT,MASK,ALIGN};
- page_cache_get() -> get_page();
- page_cache_release() -> put_page();
This patch contains automated changes generated with coccinelle using
script below. For some reason, coccinelle doesn't patch header files.
I've called spatch for them manually.
The only adjustment after coccinelle is revert of changes to
PAGE_CAHCE_ALIGN definition: we are going to drop it later.
There are few places in the code where coccinelle didn't reach. I'll
fix them manually in a separate patch. Comments and documentation also
will be addressed with the separate patch.
virtual patch
@@
expression E;
@@
- E << (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
expression E;
@@
- E >> (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
@@
- PAGE_CACHE_SHIFT
+ PAGE_SHIFT
@@
@@
- PAGE_CACHE_SIZE
+ PAGE_SIZE
@@
@@
- PAGE_CACHE_MASK
+ PAGE_MASK
@@
expression E;
@@
- PAGE_CACHE_ALIGN(E)
+ PAGE_ALIGN(E)
@@
expression E;
@@
- page_cache_get(E)
+ get_page(E)
@@
expression E;
@@
- page_cache_release(E)
+ put_page(E)
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2016-04-01 15:29:47 +03:00
kaddr + = PAGE_SIZE - reclen ;
2009-04-06 19:01:34 -07:00
while ( ( char * ) de < = kaddr ) {
if ( ( char * ) de = = dir_end ) {
/* We hit i_size */
name_len = 0 ;
rec_len = chunk_size ;
2010-07-25 20:39:03 +09:00
de - > rec_len = nilfs_rec_len_to_disk ( chunk_size ) ;
2009-04-06 19:01:34 -07:00
de - > inode = 0 ;
goto got_it ;
}
if ( de - > rec_len = = 0 ) {
2016-08-02 14:05:00 -07:00
nilfs_error ( dir - > i_sb ,
2009-04-06 19:01:34 -07:00
" zero-length directory entry " ) ;
err = - EIO ;
goto out_unlock ;
}
err = - EEXIST ;
if ( nilfs_match ( namelen , name , de ) )
goto out_unlock ;
name_len = NILFS_DIR_REC_LEN ( de - > name_len ) ;
2010-07-25 20:39:03 +09:00
rec_len = nilfs_rec_len_from_disk ( de - > rec_len ) ;
2009-04-06 19:01:34 -07:00
if ( ! de - > inode & & rec_len > = reclen )
goto got_it ;
if ( rec_len > = name_len + reclen )
goto got_it ;
de = ( struct nilfs_dir_entry * ) ( ( char * ) de + rec_len ) ;
}
unlock_page ( page ) ;
2023-11-27 23:30:27 +09:00
unmap_and_put_page ( page , kaddr ) ;
2009-04-06 19:01:34 -07:00
}
BUG ( ) ;
return - EINVAL ;
got_it :
2023-11-27 23:30:23 +09:00
from = offset_in_page ( de ) ;
2009-04-06 19:01:34 -07:00
to = from + rec_len ;
2010-06-04 11:29:56 +02:00
err = nilfs_prepare_chunk ( page , from , to ) ;
2009-04-06 19:01:34 -07:00
if ( err )
goto out_unlock ;
if ( de - > inode ) {
struct nilfs_dir_entry * de1 ;
de1 = ( struct nilfs_dir_entry * ) ( ( char * ) de + name_len ) ;
2010-07-25 20:39:03 +09:00
de1 - > rec_len = nilfs_rec_len_to_disk ( rec_len - name_len ) ;
de - > rec_len = nilfs_rec_len_to_disk ( name_len ) ;
2009-04-06 19:01:34 -07:00
de = de1 ;
}
de - > name_len = namelen ;
memcpy ( de - > name , name , namelen ) ;
de - > inode = cpu_to_le64 ( inode - > i_ino ) ;
nilfs_set_de_type ( de , inode ) ;
2009-11-27 19:41:11 +09:00
nilfs_commit_chunk ( page , page - > mapping , from , to ) ;
2023-10-04 14:52:38 -04:00
inode_set_mtime_to_ts ( dir , inode_set_ctime_current ( dir ) ) ;
2009-11-27 19:41:14 +09:00
nilfs_mark_inode_dirty ( dir ) ;
2009-04-06 19:01:34 -07:00
/* OFFSET_CACHE */
out_put :
2023-11-27 23:30:27 +09:00
unmap_and_put_page ( page , de ) ;
2009-04-06 19:01:34 -07:00
out :
return err ;
out_unlock :
unlock_page ( page ) ;
goto out_put ;
}
/*
* nilfs_delete_entry deletes a directory entry by merging it with the
nilfs2: move page release outside of nilfs_delete_entry and nilfs_set_link
Patch series "nilfs2: Folio conversions for directory paths".
This series applies page->folio conversions to nilfs2 directory
operations. This reduces hidden compound_head() calls and also converts
deprecated kmap calls to kmap_local in the directory code.
Although nilfs2 does not yet support large folios, Matthew has done his
best here to include support for large folios, which will be needed for
devices with large block sizes.
This series corresponds to the second half of the original post [1], but
with two complementary patches inserted at the beginning and some
adjustments, to prevent a kmap_local constraint violation found during
testing with highmem mapping.
[1] https://lkml.kernel.org/r/20231106173903.1734114-1-willy@infradead.org
I have reviewed all changes and tested this for regular and small block
sizes, both on machines with and without highmem mapping. No issues
found.
This patch (of 17):
In a few directory operations, the call to nilfs_put_page() for a page
obtained using nilfs_find_entry() or nilfs_dotdot() is hidden in
nilfs_set_link() and nilfs_delete_entry(), making it difficult to track
page release and preventing change of its call position.
By moving nilfs_put_page() out of these functions, this makes the page
get/put correspondence clearer and makes it easier to swap
nilfs_put_page() calls (and kunmap calls within them) when modifying
multiple directory entries simultaneously in nilfs_rename().
Also, update comments for nilfs_set_link() and nilfs_delete_entry() to
reflect changes in their behavior.
To make nilfs_put_page() visible from namei.c, this moves its definition
to nilfs.h and replaces existing equivalents to use it, but the exposure
of that definition is temporary and will be removed on a later kmap ->
kmap_local conversion.
Link: https://lkml.kernel.org/r/20231127143036.2425-1-konishi.ryusuke@gmail.com
Link: https://lkml.kernel.org/r/20231127143036.2425-2-konishi.ryusuke@gmail.com
Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2023-11-27 23:30:20 +09:00
* previous entry . Page is up - to - date .
2009-04-06 19:01:34 -07:00
*/
int nilfs_delete_entry ( struct nilfs_dir_entry * dir , struct page * page )
{
struct address_space * mapping = page - > mapping ;
struct inode * inode = mapping - > host ;
2023-11-27 23:30:24 +09:00
char * kaddr = ( char * ) ( ( unsigned long ) dir & PAGE_MASK ) ;
2016-05-23 16:23:39 -07:00
unsigned int from , to ;
struct nilfs_dir_entry * de , * pde = NULL ;
2009-04-06 19:01:34 -07:00
int err ;
2016-05-23 16:23:39 -07:00
from = ( ( char * ) dir - kaddr ) & ~ ( nilfs_chunk_size ( inode ) - 1 ) ;
to = ( ( char * ) dir - kaddr ) + nilfs_rec_len_from_disk ( dir - > rec_len ) ;
de = ( struct nilfs_dir_entry * ) ( kaddr + from ) ;
2009-04-06 19:01:34 -07:00
while ( ( char * ) de < ( char * ) dir ) {
if ( de - > rec_len = = 0 ) {
2016-08-02 14:05:00 -07:00
nilfs_error ( inode - > i_sb ,
2009-04-06 19:01:34 -07:00
" zero-length directory entry " ) ;
err = - EIO ;
goto out ;
}
pde = de ;
de = nilfs_next_entry ( de ) ;
}
if ( pde )
2023-11-27 23:30:24 +09:00
from = ( char * ) pde - kaddr ;
2009-04-06 19:01:34 -07:00
lock_page ( page ) ;
2010-06-04 11:29:56 +02:00
err = nilfs_prepare_chunk ( page , from , to ) ;
2009-04-06 19:01:34 -07:00
BUG_ON ( err ) ;
if ( pde )
2010-07-25 20:39:03 +09:00
pde - > rec_len = nilfs_rec_len_to_disk ( to - from ) ;
2009-04-06 19:01:34 -07:00
dir - > inode = 0 ;
2009-11-27 19:41:11 +09:00
nilfs_commit_chunk ( page , mapping , from , to ) ;
2023-10-04 14:52:38 -04:00
inode_set_mtime_to_ts ( inode , inode_set_ctime_current ( inode ) ) ;
2009-04-06 19:01:34 -07:00
out :
return err ;
}
/*
* Set the first fragment of directory .
*/
int nilfs_make_empty ( struct inode * inode , struct inode * parent )
{
struct address_space * mapping = inode - > i_mapping ;
struct page * page = grab_cache_page ( mapping , 0 ) ;
2016-05-23 16:23:39 -07:00
unsigned int chunk_size = nilfs_chunk_size ( inode ) ;
2009-04-06 19:01:34 -07:00
struct nilfs_dir_entry * de ;
int err ;
void * kaddr ;
if ( ! page )
return - ENOMEM ;
2010-06-04 11:29:56 +02:00
err = nilfs_prepare_chunk ( page , 0 , chunk_size ) ;
2009-04-06 19:01:34 -07:00
if ( unlikely ( err ) ) {
unlock_page ( page ) ;
goto fail ;
}
2011-11-25 23:14:33 +08:00
kaddr = kmap_atomic ( page ) ;
2009-04-06 19:01:34 -07:00
memset ( kaddr , 0 , chunk_size ) ;
de = ( struct nilfs_dir_entry * ) kaddr ;
de - > name_len = 1 ;
2010-07-25 20:39:03 +09:00
de - > rec_len = nilfs_rec_len_to_disk ( NILFS_DIR_REC_LEN ( 1 ) ) ;
2009-04-06 19:01:34 -07:00
memcpy ( de - > name , " . \0 \0 " , 4 ) ;
de - > inode = cpu_to_le64 ( inode - > i_ino ) ;
nilfs_set_de_type ( de , inode ) ;
de = ( struct nilfs_dir_entry * ) ( kaddr + NILFS_DIR_REC_LEN ( 1 ) ) ;
de - > name_len = 2 ;
2010-07-25 20:39:03 +09:00
de - > rec_len = nilfs_rec_len_to_disk ( chunk_size - NILFS_DIR_REC_LEN ( 1 ) ) ;
2009-04-06 19:01:34 -07:00
de - > inode = cpu_to_le64 ( parent - > i_ino ) ;
memcpy ( de - > name , " .. \0 " , 4 ) ;
nilfs_set_de_type ( de , inode ) ;
2011-11-25 23:14:33 +08:00
kunmap_atomic ( kaddr ) ;
2009-11-27 19:41:11 +09:00
nilfs_commit_chunk ( page , mapping , 0 , chunk_size ) ;
2009-04-06 19:01:34 -07:00
fail :
mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros
PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced *long* time
ago with promise that one day it will be possible to implement page
cache with bigger chunks than PAGE_SIZE.
This promise never materialized. And unlikely will.
We have many places where PAGE_CACHE_SIZE assumed to be equal to
PAGE_SIZE. And it's constant source of confusion on whether
PAGE_CACHE_* or PAGE_* constant should be used in a particular case,
especially on the border between fs and mm.
Global switching to PAGE_CACHE_SIZE != PAGE_SIZE would cause to much
breakage to be doable.
Let's stop pretending that pages in page cache are special. They are
not.
The changes are pretty straight-forward:
- <foo> << (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- <foo> >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} -> PAGE_{SIZE,SHIFT,MASK,ALIGN};
- page_cache_get() -> get_page();
- page_cache_release() -> put_page();
This patch contains automated changes generated with coccinelle using
script below. For some reason, coccinelle doesn't patch header files.
I've called spatch for them manually.
The only adjustment after coccinelle is revert of changes to
PAGE_CAHCE_ALIGN definition: we are going to drop it later.
There are few places in the code where coccinelle didn't reach. I'll
fix them manually in a separate patch. Comments and documentation also
will be addressed with the separate patch.
virtual patch
@@
expression E;
@@
- E << (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
expression E;
@@
- E >> (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
@@
- PAGE_CACHE_SHIFT
+ PAGE_SHIFT
@@
@@
- PAGE_CACHE_SIZE
+ PAGE_SIZE
@@
@@
- PAGE_CACHE_MASK
+ PAGE_MASK
@@
expression E;
@@
- PAGE_CACHE_ALIGN(E)
+ PAGE_ALIGN(E)
@@
expression E;
@@
- page_cache_get(E)
+ get_page(E)
@@
expression E;
@@
- page_cache_release(E)
+ put_page(E)
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2016-04-01 15:29:47 +03:00
put_page ( page ) ;
2009-04-06 19:01:34 -07:00
return err ;
}
/*
* routine to check that the specified directory is empty ( for rmdir )
*/
int nilfs_empty_dir ( struct inode * inode )
{
struct page * page = NULL ;
2023-11-27 23:30:27 +09:00
char * kaddr ;
2009-04-06 19:01:34 -07:00
unsigned long i , npages = dir_pages ( inode ) ;
for ( i = 0 ; i < npages ; i + + ) {
struct nilfs_dir_entry * de ;
2023-11-27 23:30:25 +09:00
kaddr = nilfs_get_page ( inode , i , & page ) ;
if ( IS_ERR ( kaddr ) )
2009-04-06 19:01:34 -07:00
continue ;
de = ( struct nilfs_dir_entry * ) kaddr ;
kaddr + = nilfs_last_byte ( inode , i ) - NILFS_DIR_REC_LEN ( 1 ) ;
while ( ( char * ) de < = kaddr ) {
if ( de - > rec_len = = 0 ) {
2016-08-02 14:05:00 -07:00
nilfs_error ( inode - > i_sb ,
2016-05-23 16:23:31 -07:00
" zero-length directory entry (kaddr=%p, de=%p) " ,
kaddr , de ) ;
2009-04-06 19:01:34 -07:00
goto not_empty ;
}
if ( de - > inode ! = 0 ) {
/* check for . and .. */
if ( de - > name [ 0 ] ! = ' . ' )
goto not_empty ;
if ( de - > name_len > 2 )
goto not_empty ;
if ( de - > name_len < 2 ) {
if ( de - > inode ! =
cpu_to_le64 ( inode - > i_ino ) )
goto not_empty ;
} else if ( de - > name [ 1 ] ! = ' . ' )
goto not_empty ;
}
de = nilfs_next_entry ( de ) ;
}
2023-11-27 23:30:27 +09:00
unmap_and_put_page ( page , kaddr ) ;
2009-04-06 19:01:34 -07:00
}
return 1 ;
not_empty :
2023-11-27 23:30:27 +09:00
unmap_and_put_page ( page , kaddr ) ;
2009-04-06 19:01:34 -07:00
return 0 ;
}
2009-10-01 15:43:56 -07:00
const struct file_operations nilfs_dir_operations = {
2009-04-06 19:01:34 -07:00
. llseek = generic_file_llseek ,
. read = generic_read_dir ,
2016-04-30 22:37:34 -04:00
. iterate_shared = nilfs_readdir ,
2009-04-06 19:01:53 -07:00
. unlocked_ioctl = nilfs_ioctl ,
2009-04-06 19:01:34 -07:00
# ifdef CONFIG_COMPAT
2011-02-03 21:26:17 +09:00
. compat_ioctl = nilfs_compat_ioctl ,
2009-04-06 19:01:34 -07:00
# endif /* CONFIG_COMPAT */
. fsync = nilfs_sync_file ,
} ;