2010-10-20 08:17:58 +04:00
/*
* pNFS functions to call and manage layout drivers .
*
* Copyright ( c ) 2002 [ year of first publication ]
* The Regents of the University of Michigan
* All Rights Reserved
*
* Dean Hildebrand < dhildebz @ umich . edu >
*
* Permission is granted to use , copy , create derivative works , and
* redistribute this software and such derivative works for any purpose ,
* so long as the name of the University of Michigan is not used in
* any advertising or publicity pertaining to the use or distribution
* of this software without specific , written prior authorization . If
* the above copyright notice or any other identification of the
* University of Michigan is included in any copy of any portion of
* this software , then the disclaimer below must also be included .
*
* This software is provided as is , without representation or warranty
* of any kind either express or implied , including without limitation
* the implied warranties of merchantability , fitness for a particular
* purpose , or noninfringement . The Regents of the University of
* Michigan shall not be liable for any damages , including special ,
* indirect , incidental , or consequential damages , with respect to any
* claim arising out of or in connection with the use of the software ,
* even if it has been or is hereafter advised of the possibility of
* such damages .
*/
# include <linux/nfs_fs.h>
2011-07-13 23:58:28 +04:00
# include <linux/nfs_page.h>
2011-07-01 22:23:34 +04:00
# include <linux/module.h>
2010-10-20 08:18:02 +04:00
# include "internal.h"
2010-10-20 08:17:58 +04:00
# include "pnfs.h"
2011-03-01 04:34:16 +03:00
# include "iostat.h"
2013-08-14 23:31:28 +04:00
# include "nfs4trace.h"
2015-01-24 21:54:37 +03:00
# include "delegation.h"
2010-10-20 08:17:58 +04:00
# define NFSDBG_FACILITY NFSDBG_PNFS
2012-09-19 01:01:12 +04:00
# define PNFS_LAYOUTGET_RETRY_TIMEOUT (120*HZ)
2010-10-20 08:17:58 +04:00
2010-10-20 08:17:59 +04:00
/* Locking:
*
* pnfs_spinlock :
* protects pnfs_modules_tbl .
*/
static DEFINE_SPINLOCK ( pnfs_spinlock ) ;
/*
* pnfs_modules_tbl holds all pnfs modules
*/
static LIST_HEAD ( pnfs_modules_tbl ) ;
2014-09-05 20:53:25 +04:00
static int
pnfs_send_layoutreturn ( struct pnfs_layout_hdr * lo , nfs4_stateid stateid ,
2014-11-17 04:30:40 +03:00
enum pnfs_iomode iomode , bool sync ) ;
2014-09-05 20:53:25 +04:00
2010-10-20 08:17:59 +04:00
/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	/* Caller must hold pnfs_spinlock to walk pnfs_modules_tbl safely */
	list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
		if (local->id == id)
			goto out;
	local = NULL;
out:
	dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
	return local;
}
2010-10-20 08:17:58 +04:00
/*
 * Look up a registered layout driver by @id and take a reference on its
 * module.  Returns NULL if no driver with that id is registered, or if
 * its module is being unloaded and the reference could not be taken.
 * The caller is responsible for the matching module_put().
 */
static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	spin_lock(&pnfs_spinlock);
	local = find_pnfs_driver_locked(id);
	if (local != NULL && !try_module_get(local->owner)) {
		dprintk("%s: Could not grab reference on module\n", __func__);
		local = NULL;
	}
	spin_unlock(&pnfs_spinlock);
	return local;
}
/*
 * Detach the current layout driver from @nfss: invoke the driver's
 * clear_layoutdriver callback (if it has one), drop the per-client MDS
 * count (purging the deviceid cache when it reaches zero), and release
 * the module reference taken in set_pnfs_layoutdriver().
 */
void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
	if (nfss->pnfs_curr_ld) {
		if (nfss->pnfs_curr_ld->clear_layoutdriver)
			nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
		/* Decrement the MDS count. Purge the deviceid cache if zero */
		if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
			nfs4_deviceid_purge_client(nfss->nfs_client);
		module_put(nfss->pnfs_curr_ld->owner);
	}
	nfss->pnfs_curr_ld = NULL;
}
/*
 * Try to set the server's pnfs module to the pnfs layout type specified by id.
 * Currently only one pNFS layout driver per filesystem is supported.
 *
 * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
 *
 * On any failure the server falls back to plain NFSv4 I/O through the
 * MDS (pnfs_curr_ld is left NULL).
 */
void
set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
		      u32 id)
{
	struct pnfs_layoutdriver_type *ld_type = NULL;

	if (id == 0)
		goto out_no_driver;
	/* The server must have advertised pNFS support at EXCHANGE_ID time */
	if (!(server->nfs_client->cl_exchange_flags &
		 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
		printk(KERN_ERR "NFS: %s: id %u cl_exchange_flags 0x%x\n",
			__func__, id, server->nfs_client->cl_exchange_flags);
		goto out_no_driver;
	}
	ld_type = find_pnfs_driver(id);
	if (!ld_type) {
		/* Driver not registered yet: load its module and retry */
		request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
		ld_type = find_pnfs_driver(id);
		if (!ld_type) {
			dprintk("%s: No pNFS module found for %u.\n",
				__func__, id);
			goto out_no_driver;
		}
	}
	server->pnfs_curr_ld = ld_type;
	if (ld_type->set_layoutdriver
	    && ld_type->set_layoutdriver(server, mntfh)) {
		printk(KERN_ERR "NFS: %s: Error initializing pNFS layout "
			"driver %u.\n", __func__, id);
		/* Undo the reference taken by find_pnfs_driver() */
		module_put(ld_type->owner);
		goto out_no_driver;
	}
	/* Bump the MDS count */
	atomic_inc(&server->nfs_client->cl_mds_count);

	dprintk("%s: pNFS module for %u set\n", __func__, id);
	return;

out_no_driver:
	dprintk("%s: Using NFSv4 I/O\n", __func__);
	server->pnfs_curr_ld = NULL;
}
2010-10-20 08:17:59 +04:00
/*
 * Register a pNFS layout driver so mounts can bind to it by id.
 *
 * Returns 0 on success, or -EINVAL when the id is 0 (reserved), the
 * driver fails to provide the mandatory alloc_lseg/free_lseg ops, or a
 * driver with the same id is already registered.
 */
int
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	int status = -EINVAL;
	struct pnfs_layoutdriver_type *tmp;

	if (ld_type->id == 0) {
		printk(KERN_ERR "NFS: %s id 0 is reserved\n", __func__);
		return status;
	}
	if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
		printk(KERN_ERR "NFS: %s Layout driver must provide "
		       "alloc_lseg and free_lseg.\n", __func__);
		return status;
	}

	spin_lock(&pnfs_spinlock);
	tmp = find_pnfs_driver_locked(ld_type->id);
	if (!tmp) {
		list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
		status = 0;
		dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
			ld_type->name);
	} else {
		printk(KERN_ERR "NFS: %s Module with id %d already loaded!\n",
			__func__, ld_type->id);
	}
	spin_unlock(&pnfs_spinlock);

	return status;
}
EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);
/*
 * Unregister a layout driver.  The caller (the driver's module exit
 * path) guarantees no mount is still using it.
 */
void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
	spin_lock(&pnfs_spinlock);
	list_del(&ld_type->pnfs_tblid);
	spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);
2010-10-20 08:18:01 +04:00
2010-10-20 08:18:03 +04:00
/*
* pNFS client layout cache
*/
2011-01-06 14:36:28 +03:00
/*
 * Take a reference on a layout header.
 * Need to hold i_lock if caller does not already hold reference.
 */
void
pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo)
{
	atomic_inc(&lo->plh_refcount);
}
2011-05-22 20:51:33 +04:00
/* Allocate a layout header via the layout driver's allocator. */
static struct pnfs_layout_hdr *
pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;

	return ld->alloc_layout_hdr(ino, gfp_flags);
}
/*
 * Free a layout header: unhook it from the per-client list of layouts
 * (if still linked), drop the credential reference held by the header,
 * and hand the memory back to the layout driver.
 */
static void
pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs_server *server = NFS_SERVER(lo->plh_inode);
	struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

	if (!list_empty(&lo->plh_layouts)) {
		struct nfs_client *clp = server->nfs_client;

		/* plh_layouts linkage is protected by the client's cl_lock */
		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	put_rpccred(lo->plh_lc_cred);
	return ld->free_layout_hdr(lo);
}
2010-10-20 08:18:01 +04:00
/*
 * Detach the layout header from its inode.  Called with i_lock held
 * while the caller still owns a reference; the actual free happens
 * afterwards in pnfs_free_layout_hdr().
 */
static void
pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs_inode *nfsi = NFS_I(lo->plh_inode);

	dprintk("%s: freeing layout cache %p\n", __func__, lo);
	nfsi->layout = NULL;
	/* Reset MDS Threshold I/O counters */
	nfsi->write_io = 0;
	nfsi->read_io = 0;
}
2010-10-20 08:18:03 +04:00
/*
 * Drop a reference on the layout header.  When the last reference is
 * released, i_lock is taken atomically with the final decrement, the
 * header is detached from the inode, and then freed outside the lock.
 */
void
pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct inode *inode = lo->plh_inode;

	if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
		/* All segments should be gone before the final put */
		if (!list_empty(&lo->plh_segs))
			WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n");
		pnfs_detach_layout_hdr(lo);
		spin_unlock(&inode->i_lock);
		pnfs_free_layout_hdr(lo);
	}
}
2012-09-19 00:41:18 +04:00
/* Map an I/O mode to the corresponding layout "failed" flag bit. */
static int
pnfs_iomode_to_fail_bit(u32 iomode)
{
	if (iomode == IOMODE_RW)
		return NFS_LAYOUT_RW_FAILED;
	return NFS_LAYOUT_RO_FAILED;
}
/*
 * Mark layoutgets for the iomode behind @fail_bit as failed, stamping
 * the time so pnfs_layout_io_test_failed() can expire the state after
 * PNFS_LAYOUTGET_RETRY_TIMEOUT.  Takes a header reference on the
 * 0 -> 1 transition of the bit; pnfs_layout_clear_fail_bit() drops it.
 */
static void
pnfs_layout_set_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
	lo->plh_retry_timestamp = jiffies;
	if (!test_and_set_bit(fail_bit, &lo->plh_flags))
		atomic_inc(&lo->plh_refcount);
}
/*
 * Clear a fail bit and drop the header reference that
 * pnfs_layout_set_fail_bit() took when it set the bit.
 */
static void
pnfs_layout_clear_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
	if (test_and_clear_bit(fail_bit, &lo->plh_flags))
		atomic_dec(&lo->plh_refcount);
}
/*
 * Record a layoutget failure for @iomode and invalidate every cached
 * segment matching that iomode over the whole file, so subsequent I/O
 * falls back to the MDS until the retry timeout expires.
 */
static void
pnfs_layout_io_set_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
	struct inode *inode = lo->plh_inode;
	struct pnfs_layout_range range = {
		.iomode = iomode,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	LIST_HEAD(head);

	spin_lock(&inode->i_lock);
	pnfs_layout_set_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
	pnfs_mark_matching_lsegs_invalid(lo, &head, &range);
	spin_unlock(&inode->i_lock);
	/* Free the invalidated segments outside i_lock */
	pnfs_free_lseg_list(&head);
	dprintk("%s Setting layout IOMODE_%s fail bit\n", __func__,
			iomode == IOMODE_RW ?  "RW" : "READ");
}
/*
 * Return true if layoutgets for @iomode are currently marked failed.
 * Once the retry window has passed, the fail bit is cleared and false
 * is returned so layoutgets get retried.
 */
static bool
pnfs_layout_io_test_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
	unsigned long start, end;
	int fail_bit = pnfs_iomode_to_fail_bit(iomode);

	if (test_bit(fail_bit, &lo->plh_flags) == 0)
		return false;
	end = jiffies;
	start = end - PNFS_LAYOUTGET_RETRY_TIMEOUT;
	if (!time_in_range(lo->plh_retry_timestamp, start, end)) {
		/* It is time to retry the failed layoutgets */
		pnfs_layout_clear_fail_bit(lo, fail_bit);
		return false;
	}
	return true;
}
2010-10-20 08:18:02 +04:00
/*
 * Initialise a freshly allocated layout segment: empty list links, one
 * reference for the caller, VALID flag set, and a back-pointer to the
 * owning layout header.
 */
static void
init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
{
	INIT_LIST_HEAD(&lseg->pls_list);
	INIT_LIST_HEAD(&lseg->pls_lc_list);
	atomic_set(&lseg->pls_refcount, 1);
	/* Make the refcount store visible before the VALID bit can be seen */
	smp_mb();
	set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
	lseg->pls_layout = lo;
}
2012-09-21 04:46:49 +04:00
/* Hand a layout segment back to the layout driver for freeing. */
static void pnfs_free_lseg(struct pnfs_layout_segment *lseg)
{
	struct inode *ino = lseg->pls_layout->plh_inode;

	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
}
2011-03-01 04:34:13 +03:00
/*
 * Unlink @lseg from its layout header.  Caller holds i_lock and must
 * already have cleared NFS_LSEG_VALID.  Drops the header reference
 * taken when the segment was inserted, clears the bulk-recall flag
 * once the segment list empties, and wakes waiters on the server's
 * return-on-close waitqueue.
 */
static void
pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_segment *lseg)
{
	struct inode *inode = lo->plh_inode;

	WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	list_del_init(&lseg->pls_list);
	/* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
	atomic_dec(&lo->plh_refcount);
	if (list_empty(&lo->plh_segs))
		clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
	rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
}
2014-09-05 20:53:25 +04:00
/* Return true if layoutreturn is needed */
static bool
pnfs_layout_need_return(struct pnfs_layout_hdr *lo,
			struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_segment *s;

	if (!test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
		return false;

	/* Only return once no OTHER segment is still awaiting layoutreturn */
	list_for_each_entry(s, &lo->plh_segs, pls_list)
		if (s != lseg && test_bit(NFS_LSEG_LAYOUTRETURN, &s->pls_flags))
			return false;

	return true;
}
2015-02-06 01:27:39 +03:00
/*
 * If @lseg is the last segment still flagged NFS_LSEG_LAYOUTRETURN,
 * snapshot the layout stateid/iomode under i_lock and fire off an
 * asynchronous LAYOUTRETURN.
 *
 * NOTE(review): the @lo and @inode parameters are immediately
 * overwritten from @lseg below, so the caller-supplied values are never
 * used — presumably a refactoring leftover; confirm before relying on
 * them (the signature is kept for the existing caller).
 */
static void pnfs_layoutreturn_before_put_lseg(struct pnfs_layout_segment *lseg,
		struct pnfs_layout_hdr *lo, struct inode *inode)
{
	lo = lseg->pls_layout;
	inode = lo->plh_inode;

	spin_lock(&inode->i_lock);
	if (pnfs_layout_need_return(lo, lseg)) {
		nfs4_stateid stateid;
		enum pnfs_iomode iomode;

		stateid = lo->plh_stateid;
		iomode = lo->plh_return_iomode;
		/* decreased in pnfs_send_layoutreturn() */
		lo->plh_block_lgets++;
		lo->plh_return_iomode = 0;
		spin_unlock(&inode->i_lock);
		pnfs_get_layout_hdr(lo);

		/* Send an async layoutreturn so we dont deadlock */
		pnfs_send_layoutreturn(lo, stateid, iomode, false);
	} else
		spin_unlock(&inode->i_lock);
}
2011-03-01 04:34:15 +03:00
/*
 * Release a reference on a layout segment.  On the final put, the
 * segment is unlinked from its layout header under i_lock and freed; a
 * temporary header reference keeps @lo alive across the free.  May
 * trigger a layoutreturn first if this segment requires one.
 */
void
pnfs_put_lseg(struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_hdr *lo;
	struct inode *inode;

	if (!lseg)
		return;

	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		atomic_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));

	/* Handle the case where refcount != 1 */
	if (atomic_add_unless(&lseg->pls_refcount, -1, 1))
		return;

	lo = lseg->pls_layout;
	inode = lo->plh_inode;

	/* Do we need a layoutreturn? */
	if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
		pnfs_layoutreturn_before_put_lseg(lseg, lo, inode);

	if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
		/* Pin the header so it survives until after pnfs_free_lseg */
		pnfs_get_layout_hdr(lo);
		pnfs_layout_remove_lseg(lo, lseg);
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg(lseg);
		pnfs_put_layout_hdr(lo);
	}
}
EXPORT_SYMBOL_GPL(pnfs_put_lseg);
2010-10-20 08:18:02 +04:00
2014-10-09 00:39:12 +04:00
/*
 * Workqueue callback: free a segment whose last reference was dropped
 * while i_lock was held (see pnfs_put_lseg_locked()), then release the
 * header reference taken there.
 */
static void pnfs_free_lseg_async_work(struct work_struct *work)
{
	struct pnfs_layout_segment *lseg;
	struct pnfs_layout_hdr *lo;

	lseg = container_of(work, struct pnfs_layout_segment, pls_work);
	lo = lseg->pls_layout;

	pnfs_free_lseg(lseg);
	pnfs_put_layout_hdr(lo);
}
2014-10-09 00:39:12 +04:00
/* Defer the free of @lseg to the system workqueue. */
static void pnfs_free_lseg_async(struct pnfs_layout_segment *lseg)
{
	INIT_WORK(&lseg->pls_work, pnfs_free_lseg_async_work);
	schedule_work(&lseg->pls_work);
}
2014-10-09 00:39:12 +04:00
/*
 * Like pnfs_put_lseg(), but for callers already holding the inode's
 * i_lock.  Since the segment cannot be freed under the spinlock, the
 * final free is handed off to a workqueue via pnfs_free_lseg_async().
 */
void
pnfs_put_lseg_locked(struct pnfs_layout_segment *lseg)
{
	if (!lseg)
		return;

	assert_spin_locked(&lseg->pls_layout->plh_inode->i_lock);

	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		atomic_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	if (atomic_dec_and_test(&lseg->pls_refcount)) {
		struct pnfs_layout_hdr *lo = lseg->pls_layout;

		/* Keep the header alive until the async free completes */
		pnfs_get_layout_hdr(lo);
		pnfs_layout_remove_lseg(lo, lseg);
		pnfs_free_lseg_async(lseg);
	}
}
EXPORT_SYMBOL_GPL(pnfs_put_lseg_locked);
2014-07-18 04:42:18 +04:00
2013-06-03 19:24:36 +04:00
static u64
2011-05-22 20:47:26 +04:00
end_offset ( u64 start , u64 len )
{
u64 end ;
end = start + len ;
return end > = start ? end : NFS4_MAX_UINT64 ;
}
/*
* is l2 fully contained in l1 ?
* start1 end1
* [ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - )
* start2 end2
* [ - - - - - - - - - - - - - - - - )
*/
2013-06-03 19:24:36 +04:00
static bool
2013-06-03 19:30:24 +04:00
pnfs_lseg_range_contained ( const struct pnfs_layout_range * l1 ,
2013-06-03 19:24:36 +04:00
const struct pnfs_layout_range * l2 )
2011-05-22 20:47:26 +04:00
{
u64 start1 = l1 - > offset ;
u64 end1 = end_offset ( start1 , l1 - > length ) ;
u64 start2 = l2 - > offset ;
u64 end2 = end_offset ( start2 , l2 - > length ) ;
return ( start1 < = start2 ) & & ( end1 > = end2 ) ;
}
/*
* is l1 and l2 intersecting ?
* start1 end1
* [ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - )
* start2 end2
* [ - - - - - - - - - - - - - - - - )
*/
2013-06-03 19:24:36 +04:00
static bool
2013-06-03 19:30:24 +04:00
pnfs_lseg_range_intersecting ( const struct pnfs_layout_range * l1 ,
2013-06-03 19:24:36 +04:00
const struct pnfs_layout_range * l2 )
2011-05-22 20:47:26 +04:00
{
u64 start1 = l1 - > offset ;
u64 end1 = end_offset ( start1 , l1 - > length ) ;
u64 start2 = l2 - > offset ;
u64 end2 = end_offset ( start2 , l2 - > length ) ;
return ( end1 = = NFS4_MAX_UINT64 | | end1 > start2 ) & &
( end2 = = NFS4_MAX_UINT64 | | end2 > start1 ) ;
}
2011-01-06 14:36:23 +03:00
static bool
2013-06-03 19:24:36 +04:00
should_free_lseg ( const struct pnfs_layout_range * lseg_range ,
const struct pnfs_layout_range * recall_range )
2011-01-06 14:36:23 +03:00
{
2011-05-22 20:48:02 +04:00
return ( recall_range - > iomode = = IOMODE_ANY | |
lseg_range - > iomode = = recall_range - > iomode ) & &
2013-06-03 19:30:24 +04:00
pnfs_lseg_range_intersecting ( lseg_range , recall_range ) ;
2010-10-20 08:18:02 +04:00
}
2013-03-20 21:03:00 +04:00
/*
 * Drop one segment reference; if it was the last, unlink the segment
 * from its header and queue it on @tmp_list for freeing outside the
 * lock.  Returns true when the segment was moved to @tmp_list.
 * Caller holds i_lock.
 */
static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
		struct list_head *tmp_list)
{
	if (!atomic_dec_and_test(&lseg->pls_refcount))
		return false;
	pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
	list_add(&lseg->pls_list, tmp_list);
	return true;
}
2011-01-06 14:36:23 +03:00
/* Returns 1 if lseg is removed from list, 0 otherwise */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
		struct list_head *tmp_list)
{
	int rv = 0;

	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
		/* Remove the reference keeping the lseg in the
		 * list.  It will now be removed when all
		 * outstanding io is finished.
		 */
		dprintk("%s: lseg %p ref %d\n", __func__, lseg,
			atomic_read(&lseg->pls_refcount));
		if (pnfs_lseg_dec_and_remove_zero(lseg, tmp_list))
			rv = 1;
	}
	return rv;
}
/* Returns count of number of matching invalid lsegs remaining in list
 * after call.  A NULL @recall_range invalidates every segment.
 * Caller holds i_lock; invalidated segments with no other users are
 * collected on @tmp_list for freeing outside the lock.
 */
int
pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
			    struct list_head *tmp_list,
			    struct pnfs_layout_range *recall_range)
{
	struct pnfs_layout_segment *lseg, *next;
	int invalid = 0, removed = 0;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	if (list_empty(&lo->plh_segs))
		return 0;
	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (!recall_range ||
		    should_free_lseg(&lseg->pls_range, recall_range)) {
			dprintk("%s: freeing lseg %p iomode %d "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
				lseg->pls_range.length);
			invalid++;
			removed += mark_lseg_invalid(lseg, tmp_list);
		}
	dprintk("%s:Return %i\n", __func__, invalid - removed);
	return invalid - removed;
}
2011-02-03 21:28:52 +03:00
/* note free_me must contain lsegs from a single layout_hdr */
void
pnfs_free_lseg_list(struct list_head *free_me)
{
	struct pnfs_layout_segment *lseg, *tmp;

	if (list_empty(free_me))
		return;

	/* Segments were already unlinked from the header; just free them */
	list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
		list_del(&lseg->pls_list);
		pnfs_free_lseg(lseg);
	}
}
2010-10-20 08:18:01 +04:00
/*
 * Tear down the inode's entire layout: invalidate every segment,
 * permanently block new layoutgets, clear any fail/retry state, and
 * drop the cache's reference on the header.
 */
void
pnfs_destroy_layout(struct nfs_inode *nfsi)
{
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	spin_lock(&nfsi->vfs_inode.i_lock);
	lo = nfsi->layout;
	if (lo) {
		lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
		pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
		/* Pin the header across the unlocked free below */
		pnfs_get_layout_hdr(lo);
		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED);
		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED);
		pnfs_clear_retry_layoutget(lo);
		spin_unlock(&nfsi->vfs_inode.i_lock);
		pnfs_free_lseg_list(&tmp_list);
		pnfs_put_layout_hdr(lo);
	} else
		spin_unlock(&nfsi->vfs_inode.i_lock);
}
EXPORT_SYMBOL_GPL(pnfs_destroy_layout);
2010-10-20 08:18:02 +04:00
2013-02-12 18:48:42 +04:00
/*
 * Queue @inode's layout header on @layout_list for bulk destruction,
 * taking a reference on it.  Returns false when the inode has no
 * layout or the header is already on a bulk-destroy list.
 */
static bool
pnfs_layout_add_bulk_destroy_list(struct inode *inode,
		struct list_head *layout_list)
{
	struct pnfs_layout_hdr *lo;
	bool ret = false;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (lo != NULL && list_empty(&lo->plh_bulk_destroy)) {
		pnfs_get_layout_hdr(lo);
		list_add(&lo->plh_bulk_destroy, layout_list);
		ret = true;
	}
	spin_unlock(&inode->i_lock);
	return ret;
}
/* Caller must hold rcu_read_lock and clp->cl_lock */
static int
pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp,
		struct nfs_server *server,
		struct list_head *layout_list)
{
	struct pnfs_layout_hdr *lo, *next;
	struct inode *inode;

	list_for_each_entry_safe(lo, next, &server->layouts, plh_layouts) {
		/* Skip inodes that are already being torn down */
		inode = igrab(lo->plh_inode);
		if (inode == NULL)
			continue;
		list_del_init(&lo->plh_layouts);
		if (pnfs_layout_add_bulk_destroy_list(inode, layout_list))
			continue;
		/*
		 * Could not queue this layout (gone or already queued):
		 * iput() must run without the locks, so drop them, release
		 * the inode, and tell the caller to restart the scan.
		 */
		rcu_read_unlock();
		spin_unlock(&clp->cl_lock);
		iput(inode);
		spin_lock(&clp->cl_lock);
		rcu_read_lock();
		return -EAGAIN;
	}
	return 0;
}
/*
 * Destroy every layout header queued on @layout_list: flush pending
 * layoutcommits, invalidate all segments, and drop the references taken
 * in pnfs_layout_add_bulk_destroy_list().  Returns -EAGAIN if any
 * layout still had segments pinned by outstanding I/O, 0 otherwise.
 */
static int
pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
		bool is_bulk_recall)
{
	struct pnfs_layout_hdr *lo;
	struct inode *inode;
	struct pnfs_layout_range range = {
		.iomode = IOMODE_ANY,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	LIST_HEAD(lseg_list);
	int ret = 0;

	while (!list_empty(layout_list)) {
		lo = list_entry(layout_list->next, struct pnfs_layout_hdr,
				plh_bulk_destroy);
		dprintk("%s freeing layout for inode %lu\n", __func__,
			lo->plh_inode->i_ino);
		inode = lo->plh_inode;

		pnfs_layoutcommit_inode(inode, false);

		spin_lock(&inode->i_lock);
		list_del_init(&lo->plh_bulk_destroy);
		lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
		if (is_bulk_recall)
			set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
		if (pnfs_mark_matching_lsegs_invalid(lo, &lseg_list, &range))
			ret = -EAGAIN;
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg_list(&lseg_list);
		pnfs_put_layout_hdr(lo);
		iput(inode);
	}
	return ret;
}
/*
 * Destroy all layouts belonging to the filesystem identified by @fsid
 * on client @clp.  @is_recall marks a server-initiated bulk recall.
 * Returns 0, or -EAGAIN when segments were still in use.
 */
int
pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
		struct nfs_fsid *fsid,
		bool is_recall)
{
	struct nfs_server *server;
	LIST_HEAD(layout_list);

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
restart:
	/* Collect matching layouts; restart if the helper dropped locks */
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (memcmp(&server->fsid, fsid, sizeof(*fsid)) != 0)
			continue;
		if (pnfs_layout_bulk_destroy_byserver_locked(clp,
				server,
				&layout_list) != 0)
			goto restart;
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	if (list_empty(&layout_list))
		return 0;
	return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}
/*
 * Destroy all layouts held under client @clp, regardless of fsid.
 * @is_recall marks a server-initiated bulk recall.  Returns 0, or
 * -EAGAIN when segments were still in use.
 */
int
pnfs_destroy_layouts_byclid(struct nfs_client *clp,
		bool is_recall)
{
	struct nfs_server *server;
	LIST_HEAD(layout_list);

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
restart:
	/* Collect every layout; restart if the helper dropped locks */
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (pnfs_layout_bulk_destroy_byserver_locked(clp,
				server,
				&layout_list) != 0)
			goto restart;
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	if (list_empty(&layout_list))
		return 0;
	return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}
/*
 * Called by the state manager to remove all layouts established under an
 * expired lease.
 */
void
pnfs_destroy_all_layouts(struct nfs_client *clp)
{
	nfs4_deviceid_mark_client_invalid(clp);
	nfs4_deviceid_purge_client(clp);

	pnfs_destroy_layouts_byclid(clp, false);
}
2012-10-03 03:47:14 +04:00
/*
 * Compare 2 layout stateid sequence ids, to see which is newer,
 * taking into account wraparound issues.
 */
static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
{
	/* Serial-number arithmetic: the signed difference is positive
	 * exactly when s1 is ahead of s2 modulo 2^32.
	 */
	s32 delta = (s32)(s1 - s2);

	return delta > 0;
}
2011-01-06 14:36:22 +03:00
/* update lo->plh_stateid with new if is more recent
 *
 * Also maintains plh_barrier, the oldest seqid a layoutget stateid may
 * carry; @update_barrier pushes it to the new seqid, otherwise it is
 * estimated from the number of outstanding layoutgets.
 * NOTE(review): looks like callers hold i_lock here — confirm against
 * the call sites before depending on it.
 */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
			bool update_barrier)
{
	u32 oldseq, newseq, new_barrier;
	int empty = list_empty(&lo->plh_segs);

	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
	newseq = be32_to_cpu(new->seqid);
	if (empty || pnfs_seqid_is_newer(newseq, oldseq)) {
		nfs4_stateid_copy(&lo->plh_stateid, new);
		if (update_barrier) {
			new_barrier = be32_to_cpu(new->seqid);
		} else {
			/* Because of wraparound, we want to keep the barrier
			 * "close" to the current seqids.
			 */
			new_barrier = newseq - atomic_read(&lo->plh_outstanding);
		}
		if (empty || pnfs_seqid_is_newer(new_barrier, lo->plh_barrier))
			lo->plh_barrier = new_barrier;
	}
}
2011-01-06 14:36:25 +03:00
/*
 * Return true if @stateid's seqid has not advanced past the layout's
 * barrier, i.e. it is too old to use for a new layoutget.
 */
static bool
pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo,
		const nfs4_stateid *stateid)
{
	u32 seqid = be32_to_cpu(stateid->seqid);

	return !pnfs_seqid_is_newer(seqid, lo->plh_barrier);
}
2014-09-05 20:53:24 +04:00
/*
 * Return true if a layoutreturn covering @range's iomode is currently
 * in progress for this layout (NFS_LAYOUT_RETURN set and the iomodes
 * overlap, IOMODE_ANY matching everything).
 */
static bool
pnfs_layout_returning(const struct pnfs_layout_hdr *lo,
		      struct pnfs_layout_range *range)
{
	return test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags) &&
		(lo->plh_return_iomode == IOMODE_ANY ||
		 lo->plh_return_iomode == range->iomode);
}
2012-10-06 03:56:58 +04:00
/* lget is set to 1 if called from inside send_layoutget call chain */
/*
 * Layoutgets are blocked while: lgets are explicitly blocked, a bulk
 * recall is in progress, another initial layoutget is already
 * outstanding (no cached segments yet), or a layoutreturn covering
 * this iomode is in flight.
 */
static bool
pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo,
			struct pnfs_layout_range *range, int lget)
{
	return lo->plh_block_lgets ||
		test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
		(list_empty(&lo->plh_segs) &&
		 (atomic_read(&lo->plh_outstanding) > lget)) ||
		pnfs_layout_returning(lo, range);
}
2011-01-06 14:36:22 +03:00
/*
 * Pick the stateid to send in a LAYOUTGET: the open stateid for the
 * first layoutget on a file (or when the cached stateid is invalid),
 * otherwise the layout's own stateid.  Returns 0 and fills @dst on
 * success, -EAGAIN when layoutgets are blocked, or -EBADF when the
 * open stateid is no longer valid.
 */
int
pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
			      struct pnfs_layout_range *range,
			      struct nfs4_state *open_state)
{
	int status = 0;

	dprintk("--> %s\n", __func__);
	spin_lock(&lo->plh_inode->i_lock);
	if (pnfs_layoutgets_blocked(lo, range, 1)) {
		status = -EAGAIN;
	} else if (!nfs4_valid_open_stateid(open_state)) {
		status = -EBADF;
	} else if (list_empty(&lo->plh_segs) ||
		   test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) {
		int seq;

		/* The seqlock retries if an OPEN races us updating the
		 * open stateid, so we always copy a consistent value.
		 */
		do {
			seq = read_seqbegin(&open_state->seqlock);
			nfs4_stateid_copy(dst, &open_state->stateid);
		} while (read_seqretry(&open_state->seqlock, seq));
	} else
		nfs4_stateid_copy(dst, &lo->plh_stateid);
	spin_unlock(&lo->plh_inode->i_lock);
	dprintk("<-- %s\n", __func__);
	return status;
}
/*
* Get layout from server .
* for now , assume that whole file layouts are requested .
* arg - > offset : 0
* arg - > length : all ones
*/
2010-10-20 08:18:01 +04:00
static struct pnfs_layout_segment *
send_layoutget ( struct pnfs_layout_hdr * lo ,
struct nfs_open_context * ctx ,
2011-05-22 20:47:26 +04:00
struct pnfs_layout_range * range ,
2011-05-12 02:00:51 +04:00
gfp_t gfp_flags )
2010-10-20 08:18:01 +04:00
{
2011-01-06 14:36:21 +03:00
struct inode * ino = lo - > plh_inode ;
2010-10-20 08:18:03 +04:00
struct nfs_server * server = NFS_SERVER ( ino ) ;
struct nfs4_layoutget * lgp ;
2012-09-18 01:12:15 +04:00
struct pnfs_layout_segment * lseg ;
2010-10-20 08:18:03 +04:00
dprintk ( " --> %s \n " , __func__ ) ;
2010-10-20 08:18:01 +04:00
2011-05-12 02:00:51 +04:00
lgp = kzalloc ( sizeof ( * lgp ) , gfp_flags ) ;
2011-01-06 14:36:25 +03:00
if ( lgp = = NULL )
2010-10-20 08:18:03 +04:00
return NULL ;
2011-03-24 23:48:21 +03:00
2011-05-22 20:47:26 +04:00
lgp - > args . minlength = PAGE_CACHE_SIZE ;
if ( lgp - > args . minlength > range - > length )
lgp - > args . minlength = range - > length ;
2010-10-20 08:18:03 +04:00
lgp - > args . maxcount = PNFS_LAYOUT_MAXSIZE ;
2011-05-22 20:47:26 +04:00
lgp - > args . range = * range ;
2010-10-20 08:18:03 +04:00
lgp - > args . type = server - > pnfs_curr_ld - > id ;
lgp - > args . inode = ino ;
lgp - > args . ctx = get_nfs_open_context ( ctx ) ;
2011-05-12 02:00:51 +04:00
lgp - > gfp_flags = gfp_flags ;
2013-05-20 18:49:34 +04:00
lgp - > cred = lo - > plh_lc_cred ;
2010-10-20 08:18:03 +04:00
/* Synchronously retrieve layout information from server and
* store in lseg .
*/
2012-09-18 01:12:15 +04:00
lseg = nfs4_proc_layoutget ( lgp , gfp_flags ) ;
if ( IS_ERR ( lseg ) ) {
switch ( PTR_ERR ( lseg ) ) {
case - ENOMEM :
case - ERESTARTSYS :
break ;
default :
/* remember that LAYOUTGET failed and suspend trying */
2012-09-19 00:41:18 +04:00
pnfs_layout_io_set_failed ( lo , range - > iomode ) ;
2012-09-18 01:12:15 +04:00
}
return NULL ;
2014-12-12 01:02:04 +03:00
} else
pnfs_layout_clear_fail_bit ( lo ,
pnfs_iomode_to_fail_bit ( range - > iomode ) ) ;
2011-03-24 23:48:21 +03:00
2010-10-20 08:18:02 +04:00
return lseg ;
}
2013-03-20 21:03:00 +04:00
static void pnfs_clear_layoutcommit ( struct inode * inode ,
struct list_head * head )
{
struct nfs_inode * nfsi = NFS_I ( inode ) ;
struct pnfs_layout_segment * lseg , * tmp ;
if ( ! test_and_clear_bit ( NFS_INO_LAYOUTCOMMIT , & nfsi - > flags ) )
return ;
list_for_each_entry_safe ( lseg , tmp , & nfsi - > layout - > plh_segs , pls_list ) {
if ( ! test_and_clear_bit ( NFS_LSEG_LAYOUTCOMMIT , & lseg - > pls_flags ) )
continue ;
pnfs_lseg_dec_and_remove_zero ( lseg , head ) ;
}
}
2014-12-12 01:02:04 +03:00
void pnfs_clear_layoutreturn_waitbit ( struct pnfs_layout_hdr * lo )
{
clear_bit_unlock ( NFS_LAYOUT_RETURN , & lo - > plh_flags ) ;
smp_mb__after_atomic ( ) ;
wake_up_bit ( & lo - > plh_flags , NFS_LAYOUT_RETURN ) ;
}
2014-09-05 20:53:22 +04:00
/*
 * Send a LAYOUTRETURN for the whole file range in @iomode, using @stateid.
 * @sync selects a synchronous RPC.  On allocation failure the blocked-lgets
 * count and RETURN wait bit taken by the caller are rolled back, waiters are
 * woken, and the layout header reference is dropped here.
 */
static int
pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, nfs4_stateid stateid,
		       enum pnfs_iomode iomode, bool sync)
{
	struct inode *ino = lo->plh_inode;
	struct nfs4_layoutreturn *lrp;
	int status = 0;

	lrp = kzalloc(sizeof(*lrp), GFP_NOFS);
	if (unlikely(lrp == NULL)) {
		status = -ENOMEM;
		/* Undo the caller's preparation for a layoutreturn */
		spin_lock(&ino->i_lock);
		lo->plh_block_lgets--;
		pnfs_clear_layoutreturn_waitbit(lo);
		rpc_wake_up(&NFS_SERVER(ino)->roc_rpcwaitq);
		spin_unlock(&ino->i_lock);
		pnfs_put_layout_hdr(lo);
		goto out;
	}

	lrp->args.stateid = stateid;
	lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id;
	lrp->args.inode = ino;
	lrp->args.range.iomode = iomode;
	lrp->args.range.offset = 0;
	lrp->args.range.length = NFS4_MAX_UINT64;
	lrp->args.layout = lo;
	lrp->clp = NFS_SERVER(ino)->nfs_client;
	lrp->cred = lo->plh_lc_cred;

	status = nfs4_proc_layoutreturn(lrp, sync);
out:
	dprintk("<-- %s status: %d\n", __func__, status);
	return status;
}
2012-06-20 23:03:34 +04:00
/*
* Initiates a LAYOUTRETURN ( FILE ) , and removes the pnfs_layout_hdr
* when the layout segment list is empty .
*
* Note that a pnfs_layout_hdr can exist with an empty layout segment
* list when LAYOUTGET has failed , or when LAYOUTGET succeeded , but the
* deviceid is marked invalid .
*/
2011-05-22 20:52:37 +04:00
int
_pnfs_return_layout ( struct inode * ino )
{
struct pnfs_layout_hdr * lo = NULL ;
struct nfs_inode * nfsi = NFS_I ( ino ) ;
LIST_HEAD ( tmp_list ) ;
nfs4_stateid stateid ;
2012-06-20 23:03:34 +04:00
int status = 0 , empty ;
2011-05-22 20:52:37 +04:00
2012-06-20 23:03:33 +04:00
dprintk ( " NFS: %s for inode %lu \n " , __func__ , ino - > i_ino ) ;
2011-05-22 20:52:37 +04:00
spin_lock ( & ino - > i_lock ) ;
lo = nfsi - > layout ;
2012-09-22 00:37:02 +04:00
if ( ! lo ) {
2011-05-22 20:52:37 +04:00
spin_unlock ( & ino - > i_lock ) ;
2012-06-20 23:03:34 +04:00
dprintk ( " NFS: %s no layout to return \n " , __func__ ) ;
goto out ;
2011-05-22 20:52:37 +04:00
}
stateid = nfsi - > layout - > plh_stateid ;
/* Reference matched in nfs4_layoutreturn_release */
2012-09-19 04:51:13 +04:00
pnfs_get_layout_hdr ( lo ) ;
2012-06-20 23:03:34 +04:00
empty = list_empty ( & lo - > plh_segs ) ;
2013-03-20 21:03:00 +04:00
pnfs_clear_layoutcommit ( ino , & tmp_list ) ;
2012-09-19 04:43:31 +04:00
pnfs_mark_matching_lsegs_invalid ( lo , & tmp_list , NULL ) ;
2014-09-10 19:23:31 +04:00
if ( NFS_SERVER ( ino ) - > pnfs_curr_ld - > return_range ) {
struct pnfs_layout_range range = {
. iomode = IOMODE_ANY ,
. offset = 0 ,
. length = NFS4_MAX_UINT64 ,
} ;
NFS_SERVER ( ino ) - > pnfs_curr_ld - > return_range ( lo , & range ) ;
}
2012-06-20 23:03:34 +04:00
/* Don't send a LAYOUTRETURN if list was initially empty */
if ( empty ) {
spin_unlock ( & ino - > i_lock ) ;
2012-09-19 04:51:13 +04:00
pnfs_put_layout_hdr ( lo ) ;
2012-06-20 23:03:34 +04:00
dprintk ( " NFS: %s no layout segments to return \n " , __func__ ) ;
goto out ;
}
2014-08-21 20:09:22 +04:00
set_bit ( NFS_LAYOUT_INVALID_STID , & lo - > plh_flags ) ;
2011-06-15 20:31:02 +04:00
lo - > plh_block_lgets + + ;
2011-05-22 20:52:37 +04:00
spin_unlock ( & ino - > i_lock ) ;
pnfs_free_lseg_list ( & tmp_list ) ;
2014-11-17 04:30:40 +03:00
status = pnfs_send_layoutreturn ( lo , stateid , IOMODE_ANY , true ) ;
2011-05-22 20:52:37 +04:00
out :
dprintk ( " <-- %s status: %d \n " , __func__ , status ) ;
return status ;
}
2012-04-28 01:53:50 +04:00
EXPORT_SYMBOL_GPL ( _pnfs_return_layout ) ;
2011-05-22 20:52:37 +04:00
2013-03-20 21:23:33 +04:00
int
pnfs_commit_and_return_layout ( struct inode * inode )
{
struct pnfs_layout_hdr * lo ;
int ret ;
spin_lock ( & inode - > i_lock ) ;
lo = NFS_I ( inode ) - > layout ;
if ( lo = = NULL ) {
spin_unlock ( & inode - > i_lock ) ;
return 0 ;
}
pnfs_get_layout_hdr ( lo ) ;
/* Block new layoutgets and read/write to ds */
lo - > plh_block_lgets + + ;
spin_unlock ( & inode - > i_lock ) ;
filemap_fdatawait ( inode - > i_mapping ) ;
ret = pnfs_layoutcommit_inode ( inode , true ) ;
if ( ret = = 0 )
ret = _pnfs_return_layout ( inode ) ;
spin_lock ( & inode - > i_lock ) ;
lo - > plh_block_lgets - - ;
spin_unlock ( & inode - > i_lock ) ;
pnfs_put_layout_hdr ( lo ) ;
return ret ;
}
2011-01-06 14:36:32 +03:00
bool pnfs_roc ( struct inode * ino )
{
2015-01-24 21:54:37 +03:00
struct nfs_inode * nfsi = NFS_I ( ino ) ;
struct nfs_open_context * ctx ;
struct nfs4_state * state ;
2011-01-06 14:36:32 +03:00
struct pnfs_layout_hdr * lo ;
struct pnfs_layout_segment * lseg , * tmp ;
2014-11-17 04:30:41 +03:00
nfs4_stateid stateid ;
2011-01-06 14:36:32 +03:00
LIST_HEAD ( tmp_list ) ;
2014-11-17 04:30:41 +03:00
bool found = false , layoutreturn = false ;
2011-01-06 14:36:32 +03:00
spin_lock ( & ino - > i_lock ) ;
2015-01-24 21:54:37 +03:00
lo = nfsi - > layout ;
2011-01-06 14:36:32 +03:00
if ( ! lo | | ! test_and_clear_bit ( NFS_LAYOUT_ROC , & lo - > plh_flags ) | |
test_bit ( NFS_LAYOUT_BULK_RECALL , & lo - > plh_flags ) )
2015-01-24 21:54:37 +03:00
goto out_noroc ;
/* Don't return layout if we hold a delegation */
if ( nfs4_check_delegation ( ino , FMODE_READ ) )
goto out_noroc ;
list_for_each_entry ( ctx , & nfsi - > open_files , list ) {
state = ctx - > state ;
/* Don't return layout if there is open file state */
if ( state ! = NULL & & state - > state ! = 0 )
goto out_noroc ;
}
2014-12-01 03:22:18 +03:00
pnfs_clear_retry_layoutget ( lo ) ;
2011-01-06 14:36:32 +03:00
list_for_each_entry_safe ( lseg , tmp , & lo - > plh_segs , pls_list )
if ( test_bit ( NFS_LSEG_ROC , & lseg - > pls_flags ) ) {
mark_lseg_invalid ( lseg , & tmp_list ) ;
found = true ;
}
if ( ! found )
2015-01-24 21:54:37 +03:00
goto out_noroc ;
2011-01-06 14:36:32 +03:00
lo - > plh_block_lgets + + ;
2012-09-19 04:51:13 +04:00
pnfs_get_layout_hdr ( lo ) ; /* matched in pnfs_roc_release */
2011-01-06 14:36:32 +03:00
spin_unlock ( & ino - > i_lock ) ;
pnfs_free_lseg_list ( & tmp_list ) ;
return true ;
2015-01-24 21:54:37 +03:00
out_noroc :
2014-11-17 04:30:41 +03:00
if ( lo ) {
stateid = lo - > plh_stateid ;
layoutreturn =
test_and_clear_bit ( NFS_LAYOUT_RETURN_BEFORE_CLOSE ,
& lo - > plh_flags ) ;
if ( layoutreturn ) {
lo - > plh_block_lgets + + ;
pnfs_get_layout_hdr ( lo ) ;
}
}
2011-01-06 14:36:32 +03:00
spin_unlock ( & ino - > i_lock ) ;
2014-11-17 04:30:41 +03:00
if ( layoutreturn )
2014-10-20 10:44:38 +04:00
pnfs_send_layoutreturn ( lo , stateid , IOMODE_ANY , true ) ;
2011-01-06 14:36:32 +03:00
return false ;
}
void pnfs_roc_release ( struct inode * ino )
{
struct pnfs_layout_hdr * lo ;
spin_lock ( & ino - > i_lock ) ;
lo = NFS_I ( ino ) - > layout ;
lo - > plh_block_lgets - - ;
2012-09-21 01:23:11 +04:00
if ( atomic_dec_and_test ( & lo - > plh_refcount ) ) {
pnfs_detach_layout_hdr ( lo ) ;
spin_unlock ( & ino - > i_lock ) ;
pnfs_free_layout_hdr ( lo ) ;
} else
spin_unlock ( & ino - > i_lock ) ;
2011-01-06 14:36:32 +03:00
}
/*
 * Advance the layout stateid barrier to @barrier if it is newer than the
 * current one (seqid comparison handles wraparound).
 */
void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (pnfs_seqid_is_newer(barrier, lo->plh_barrier))
		lo->plh_barrier = barrier;
	spin_unlock(&ino->i_lock);
}
2012-09-21 04:15:57 +04:00
/*
 * Wait for outstanding return-on-close segments to drain.  If any ROC
 * segment remains, put @task to sleep on the roc waitqueue and return true.
 * Otherwise compute a worst-case barrier seqid for the caller, and if a
 * RETURN_BEFORE_CLOSE was pending, sleep the task and fire the (async)
 * layoutreturn.  Returns whether an ROC segment was still present.
 */
bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg;
	nfs4_stateid stateid;
	u32 current_seqid;
	bool found = false, layoutreturn = false;

	spin_lock(&ino->i_lock);
	list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
			found = true;
			goto out;
		}
	lo = nfsi->layout;
	current_seqid = be32_to_cpu(lo->plh_stateid.seqid);

	/* Since close does not return a layout stateid for use as
	 * a barrier, we choose the worst-case barrier.
	 */
	*barrier = current_seqid + atomic_read(&lo->plh_outstanding);
out:
	if (!found) {
		stateid = lo->plh_stateid;
		layoutreturn =
			test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
					   &lo->plh_flags);
		if (layoutreturn) {
			lo->plh_block_lgets++;
			pnfs_get_layout_hdr(lo);
		}
	}
	spin_unlock(&ino->i_lock);
	if (layoutreturn) {
		rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
		pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, false);
	}
	return found;
}
2010-10-20 08:18:03 +04:00
/*
* Compare two layout segments for sorting into layout cache .
* We want to preferentially return RW over RO layouts , so ensure those
* are seen first .
*/
static s64
2013-06-03 19:30:24 +04:00
pnfs_lseg_range_cmp ( const struct pnfs_layout_range * l1 ,
2013-06-03 19:24:36 +04:00
const struct pnfs_layout_range * l2 )
2010-10-20 08:18:03 +04:00
{
2011-05-22 20:47:26 +04:00
s64 d ;
/* high offset > low offset */
d = l1 - > offset - l2 - > offset ;
if ( d )
return d ;
/* short length > long length */
d = l2 - > length - l1 - > length ;
if ( d )
return d ;
2010-10-20 08:18:03 +04:00
/* read > read/write */
2011-05-22 20:47:26 +04:00
return ( int ) ( l1 - > iomode = = IOMODE_READ ) - ( int ) ( l2 - > iomode = = IOMODE_READ ) ;
2010-10-20 08:18:03 +04:00
}
2010-10-20 08:18:02 +04:00
static void
2012-09-21 00:33:30 +04:00
pnfs_layout_insert_lseg ( struct pnfs_layout_hdr * lo ,
2010-10-20 08:18:02 +04:00
struct pnfs_layout_segment * lseg )
{
2010-10-20 08:18:03 +04:00
struct pnfs_layout_segment * lp ;
2010-10-20 08:18:02 +04:00
dprintk ( " %s:Begin \n " , __func__ ) ;
2011-01-06 14:36:21 +03:00
list_for_each_entry ( lp , & lo - > plh_segs , pls_list ) {
2013-06-03 19:30:24 +04:00
if ( pnfs_lseg_range_cmp ( & lseg - > pls_range , & lp - > pls_range ) > 0 )
2010-10-20 08:18:03 +04:00
continue ;
2011-01-06 14:36:20 +03:00
list_add_tail ( & lseg - > pls_list , & lp - > pls_list ) ;
2010-10-20 08:18:03 +04:00
dprintk ( " %s: inserted lseg %p "
" iomode %d offset %llu length %llu before "
" lp %p iomode %d offset %llu length %llu \n " ,
2011-01-06 14:36:20 +03:00
__func__ , lseg , lseg - > pls_range . iomode ,
lseg - > pls_range . offset , lseg - > pls_range . length ,
lp , lp - > pls_range . iomode , lp - > pls_range . offset ,
lp - > pls_range . length ) ;
2011-05-22 20:47:26 +04:00
goto out ;
2010-10-20 08:18:02 +04:00
}
2011-05-22 20:47:26 +04:00
list_add_tail ( & lseg - > pls_list , & lo - > plh_segs ) ;
dprintk ( " %s: inserted lseg %p "
" iomode %d offset %llu length %llu at tail \n " ,
__func__ , lseg , lseg - > pls_range . iomode ,
lseg - > pls_range . offset , lseg - > pls_range . length ) ;
out :
2012-09-19 04:51:13 +04:00
pnfs_get_layout_hdr ( lo ) ;
2010-10-20 08:18:02 +04:00
dprintk ( " %s:Return \n " , __func__ ) ;
2010-10-20 08:18:01 +04:00
}
static struct pnfs_layout_hdr *
2011-07-31 04:52:32 +04:00
alloc_init_layout_hdr ( struct inode * ino ,
struct nfs_open_context * ctx ,
gfp_t gfp_flags )
2010-10-20 08:18:01 +04:00
{
struct pnfs_layout_hdr * lo ;
2011-05-22 20:51:33 +04:00
lo = pnfs_alloc_layout_hdr ( ino , gfp_flags ) ;
2010-10-20 08:18:01 +04:00
if ( ! lo )
return NULL ;
2011-01-06 14:36:28 +03:00
atomic_set ( & lo - > plh_refcount , 1 ) ;
2011-01-06 14:36:21 +03:00
INIT_LIST_HEAD ( & lo - > plh_layouts ) ;
INIT_LIST_HEAD ( & lo - > plh_segs ) ;
2013-02-12 18:48:42 +04:00
INIT_LIST_HEAD ( & lo - > plh_bulk_destroy ) ;
2011-01-06 14:36:21 +03:00
lo - > plh_inode = ino ;
2013-05-21 17:26:49 +04:00
lo - > plh_lc_cred = get_rpccred ( ctx - > cred ) ;
2010-10-20 08:18:01 +04:00
return lo ;
}
static struct pnfs_layout_hdr *
2011-07-31 04:52:32 +04:00
pnfs_find_alloc_layout ( struct inode * ino ,
struct nfs_open_context * ctx ,
gfp_t gfp_flags )
2010-10-20 08:18:01 +04:00
{
struct nfs_inode * nfsi = NFS_I ( ino ) ;
struct pnfs_layout_hdr * new = NULL ;
dprintk ( " %s Begin ino=%p layout=%p \n " , __func__ , ino , nfsi - > layout ) ;
2012-10-03 02:41:05 +04:00
if ( nfsi - > layout ! = NULL )
goto out_existing ;
2010-10-20 08:18:01 +04:00
spin_unlock ( & ino - > i_lock ) ;
2011-07-31 04:52:32 +04:00
new = alloc_init_layout_hdr ( ino , ctx , gfp_flags ) ;
2010-10-20 08:18:01 +04:00
spin_lock ( & ino - > i_lock ) ;
2012-10-03 02:41:05 +04:00
if ( likely ( nfsi - > layout = = NULL ) ) { /* Won the race? */
2010-10-20 08:18:01 +04:00
nfsi - > layout = new ;
2012-10-03 02:41:05 +04:00
return new ;
2012-10-31 12:05:48 +04:00
} else if ( new ! = NULL )
pnfs_free_layout_hdr ( new ) ;
2012-10-03 02:41:05 +04:00
out_existing :
pnfs_get_layout_hdr ( nfsi - > layout ) ;
2010-10-20 08:18:01 +04:00
return nfsi - > layout ;
}
2010-10-20 08:18:03 +04:00
/*
* iomode matching rules :
* iomode lseg match
* - - - - - - - - - - - - - - -
* ANY READ true
* ANY RW true
* RW READ false
* RW RW true
* READ READ true
* READ RW true
*/
2013-06-03 19:24:36 +04:00
static bool
2013-06-03 19:30:24 +04:00
pnfs_lseg_range_match ( const struct pnfs_layout_range * ls_range ,
2013-06-03 19:24:36 +04:00
const struct pnfs_layout_range * range )
2010-10-20 08:18:03 +04:00
{
2011-05-22 20:47:26 +04:00
struct pnfs_layout_range range1 ;
if ( ( range - > iomode = = IOMODE_RW & &
ls_range - > iomode ! = IOMODE_RW ) | |
2013-06-03 19:30:24 +04:00
! pnfs_lseg_range_intersecting ( ls_range , range ) )
2011-05-22 20:47:26 +04:00
return 0 ;
/* range1 covers only the first byte in the range */
range1 = * range ;
range1 . length = 1 ;
2013-06-03 19:30:24 +04:00
return pnfs_lseg_range_contained ( ls_range , & range1 ) ;
2010-10-20 08:18:03 +04:00
}
/*
* lookup range in layout
*/
2010-10-20 08:18:01 +04:00
static struct pnfs_layout_segment *
2011-05-22 20:47:26 +04:00
pnfs_find_lseg ( struct pnfs_layout_hdr * lo ,
struct pnfs_layout_range * range )
2010-10-20 08:18:01 +04:00
{
2010-10-20 08:18:03 +04:00
struct pnfs_layout_segment * lseg , * ret = NULL ;
dprintk ( " %s:Begin \n " , __func__ ) ;
2011-01-06 14:36:21 +03:00
list_for_each_entry ( lseg , & lo - > plh_segs , pls_list ) {
2011-01-06 14:36:23 +03:00
if ( test_bit ( NFS_LSEG_VALID , & lseg - > pls_flags ) & &
2014-09-05 20:53:24 +04:00
! test_bit ( NFS_LSEG_LAYOUTRETURN , & lseg - > pls_flags ) & &
2013-06-03 19:30:24 +04:00
pnfs_lseg_range_match ( & lseg - > pls_range , range ) ) {
2012-09-19 04:57:08 +04:00
ret = pnfs_get_lseg ( lseg ) ;
2010-10-20 08:18:03 +04:00
break ;
}
2011-06-15 00:30:16 +04:00
if ( lseg - > pls_range . offset > range - > offset )
2010-10-20 08:18:03 +04:00
break ;
}
dprintk ( " %s:Return lseg %p ref %d \n " ,
2011-01-06 14:36:23 +03:00
__func__ , ret , ret ? atomic_read ( & ret - > pls_refcount ) : 0 ) ;
2010-10-20 08:18:03 +04:00
return ret ;
2010-10-20 08:18:01 +04:00
}
2012-05-23 13:02:37 +04:00
/*
* Use mdsthreshold hints set at each OPEN to determine if I / O should go
* to the MDS or over pNFS
*
* The nfs_inode read_io and write_io fields are cumulative counters reset
* when there are no layout segments . Note that in pnfs_update_layout iomode
* is set to IOMODE_READ for a READ request , and set to IOMODE_RW for a
* WRITE request .
*
* A return of true means use MDS I / O .
*
* From rfc 5661 :
* If a file ' s size is smaller than the file size threshold , data accesses
* SHOULD be sent to the metadata server . If an I / O request has a length that
* is below the I / O size threshold , the I / O SHOULD be sent to the metadata
* server . If both file size and I / O size are provided , the client SHOULD
* reach or exceed both thresholds before sending its read or write
* requests to the data server .
*/
static bool pnfs_within_mdsthreshold ( struct nfs_open_context * ctx ,
struct inode * ino , int iomode )
{
struct nfs4_threshold * t = ctx - > mdsthreshold ;
struct nfs_inode * nfsi = NFS_I ( ino ) ;
loff_t fsize = i_size_read ( ino ) ;
bool size = false , size_set = false , io = false , io_set = false , ret = false ;
if ( t = = NULL )
return ret ;
dprintk ( " %s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu \n " ,
__func__ , t - > bm , t - > rd_sz , t - > wr_sz , t - > rd_io_sz , t - > wr_io_sz ) ;
switch ( iomode ) {
case IOMODE_READ :
if ( t - > bm & THRESHOLD_RD ) {
dprintk ( " %s fsize %llu \n " , __func__ , fsize ) ;
size_set = true ;
if ( fsize < t - > rd_sz )
size = true ;
}
if ( t - > bm & THRESHOLD_RD_IO ) {
dprintk ( " %s nfsi->read_io %llu \n " , __func__ ,
nfsi - > read_io ) ;
io_set = true ;
if ( nfsi - > read_io < t - > rd_io_sz )
io = true ;
}
break ;
case IOMODE_RW :
if ( t - > bm & THRESHOLD_WR ) {
dprintk ( " %s fsize %llu \n " , __func__ , fsize ) ;
size_set = true ;
if ( fsize < t - > wr_sz )
size = true ;
}
if ( t - > bm & THRESHOLD_WR_IO ) {
dprintk ( " %s nfsi->write_io %llu \n " , __func__ ,
nfsi - > write_io ) ;
io_set = true ;
if ( nfsi - > write_io < t - > wr_io_sz )
io = true ;
}
break ;
}
if ( size_set & & io_set ) {
if ( size & & io )
ret = true ;
} else if ( size | | io )
ret = true ;
dprintk ( " <-- %s size %d io %d ret %d \n " , __func__ , size , io , ret ) ;
return ret ;
}
2014-12-01 03:22:23 +03:00
/* stop waiting if someone clears NFS_LAYOUT_RETRY_LAYOUTGET bit. */
static int pnfs_layoutget_retry_bit_wait ( struct wait_bit_key * key )
{
if ( ! test_bit ( NFS_LAYOUT_RETRY_LAYOUTGET , key - > flags ) )
return 1 ;
return nfs_wait_bit_killable ( key ) ;
}
static bool pnfs_prepare_to_retry_layoutget ( struct pnfs_layout_hdr * lo )
{
/*
* send layoutcommit as it can hold up layoutreturn due to lseg
* reference
*/
pnfs_layoutcommit_inode ( lo - > plh_inode , false ) ;
return ! wait_on_bit_action ( & lo - > plh_flags , NFS_LAYOUT_RETURN ,
pnfs_layoutget_retry_bit_wait ,
TASK_UNINTERRUPTIBLE ) ;
}
2014-12-12 01:02:04 +03:00
static void pnfs_clear_first_layoutget ( struct pnfs_layout_hdr * lo )
{
unsigned long * bitlock = & lo - > plh_flags ;
clear_bit_unlock ( NFS_LAYOUT_FIRST_LAYOUTGET , bitlock ) ;
smp_mb__after_atomic ( ) ;
wake_up_bit ( bitlock , NFS_LAYOUT_FIRST_LAYOUTGET ) ;
}
2010-10-20 08:18:01 +04:00
/*
* Layout segment is retreived from the server if not cached .
* The appropriate layout segment is referenced and returned to the caller .
*/
2011-06-14 02:22:38 +04:00
struct pnfs_layout_segment *
2010-10-20 08:18:01 +04:00
pnfs_update_layout ( struct inode * ino ,
struct nfs_open_context * ctx ,
2011-05-22 20:47:26 +04:00
loff_t pos ,
u64 count ,
2011-05-12 02:00:51 +04:00
enum pnfs_iomode iomode ,
gfp_t gfp_flags )
2010-10-20 08:18:01 +04:00
{
2011-05-22 20:47:26 +04:00
struct pnfs_layout_range arg = {
. iomode = iomode ,
. offset = pos ,
. length = count ,
} ;
2011-05-22 20:47:46 +04:00
unsigned pg_offset ;
2011-06-02 00:44:44 +04:00
struct nfs_server * server = NFS_SERVER ( ino ) ;
struct nfs_client * clp = server - > nfs_client ;
2010-10-20 08:18:01 +04:00
struct pnfs_layout_hdr * lo ;
struct pnfs_layout_segment * lseg = NULL ;
2013-03-01 05:30:10 +04:00
bool first ;
2010-10-20 08:18:01 +04:00
if ( ! pnfs_enabled_sb ( NFS_SERVER ( ino ) ) )
2012-09-26 19:21:40 +04:00
goto out ;
2012-05-23 13:02:37 +04:00
if ( pnfs_within_mdsthreshold ( ctx , ino , iomode ) )
2012-09-26 19:21:40 +04:00
goto out ;
2012-05-23 13:02:37 +04:00
2014-08-22 13:37:41 +04:00
lookup_again :
first = false ;
2010-10-20 08:18:01 +04:00
spin_lock ( & ino - > i_lock ) ;
2011-07-31 04:52:32 +04:00
lo = pnfs_find_alloc_layout ( ino , ctx , gfp_flags ) ;
2012-09-21 05:25:19 +04:00
if ( lo = = NULL ) {
spin_unlock ( & ino - > i_lock ) ;
goto out ;
}
2010-10-20 08:18:01 +04:00
2011-01-06 14:36:30 +03:00
/* Do we even need to bother with this? */
2012-03-01 20:17:47 +04:00
if ( test_bit ( NFS_LAYOUT_BULK_RECALL , & lo - > plh_flags ) ) {
2011-01-06 14:36:30 +03:00
dprintk ( " %s matches recall, use MDS \n " , __func__ ) ;
2010-10-20 08:18:01 +04:00
goto out_unlock ;
}
/* if LAYOUTGET already failed once we don't try again */
2014-12-01 03:22:23 +03:00
if ( pnfs_layout_io_test_failed ( lo , iomode ) & &
! pnfs_should_retry_layoutget ( lo ) )
2010-10-20 08:18:01 +04:00
goto out_unlock ;
2014-08-22 13:37:41 +04:00
first = list_empty ( & lo - > plh_segs ) ;
if ( first ) {
/* The first layoutget for the file. Need to serialize per
* RFC 5661 Errata 3208.
*/
if ( test_and_set_bit ( NFS_LAYOUT_FIRST_LAYOUTGET ,
& lo - > plh_flags ) ) {
spin_unlock ( & ino - > i_lock ) ;
wait_on_bit ( & lo - > plh_flags , NFS_LAYOUT_FIRST_LAYOUTGET ,
TASK_UNINTERRUPTIBLE ) ;
pnfs_put_layout_hdr ( lo ) ;
goto lookup_again ;
}
} else {
/* Check to see if the layout for the given range
* already exists
*/
lseg = pnfs_find_lseg ( lo , & arg ) ;
if ( lseg )
goto out_unlock ;
}
2011-03-01 04:34:22 +03:00
2014-12-01 03:22:23 +03:00
/*
* Because we free lsegs before sending LAYOUTRETURN , we need to wait
* for LAYOUTRETURN even if first is true .
*/
if ( ! lseg & & pnfs_should_retry_layoutget ( lo ) & &
test_bit ( NFS_LAYOUT_RETURN , & lo - > plh_flags ) ) {
spin_unlock ( & ino - > i_lock ) ;
dprintk ( " %s wait for layoutreturn \n " , __func__ ) ;
if ( pnfs_prepare_to_retry_layoutget ( lo ) ) {
2014-12-12 01:02:04 +03:00
if ( first )
pnfs_clear_first_layoutget ( lo ) ;
2014-12-01 03:22:23 +03:00
pnfs_put_layout_hdr ( lo ) ;
dprintk ( " %s retrying \n " , __func__ ) ;
goto lookup_again ;
}
goto out_put_layout_hdr ;
}
2014-09-05 20:53:24 +04:00
if ( pnfs_layoutgets_blocked ( lo , & arg , 0 ) )
2011-01-06 14:36:25 +03:00
goto out_unlock ;
atomic_inc ( & lo - > plh_outstanding ) ;
2011-02-03 21:28:52 +03:00
spin_unlock ( & ino - > i_lock ) ;
2013-03-01 05:30:10 +04:00
2014-08-22 13:37:40 +04:00
if ( list_empty ( & lo - > plh_layouts ) ) {
2011-01-06 14:36:26 +03:00
/* The lo must be on the clp list if there is any
* chance of a CB_LAYOUTRECALL ( FILE ) coming in .
*/
spin_lock ( & clp - > cl_lock ) ;
2014-08-22 13:37:40 +04:00
if ( list_empty ( & lo - > plh_layouts ) )
list_add_tail ( & lo - > plh_layouts , & server - > layouts ) ;
2011-01-06 14:36:26 +03:00
spin_unlock ( & clp - > cl_lock ) ;
}
2010-10-20 08:18:01 +04:00
2011-05-22 20:47:46 +04:00
pg_offset = arg . offset & ~ PAGE_CACHE_MASK ;
if ( pg_offset ) {
arg . offset - = pg_offset ;
arg . length + = pg_offset ;
}
2011-06-14 02:22:38 +04:00
if ( arg . length ! = NFS4_MAX_UINT64 )
arg . length = PAGE_CACHE_ALIGN ( arg . length ) ;
2011-05-22 20:47:46 +04:00
2011-05-22 20:47:26 +04:00
lseg = send_layoutget ( lo , ctx , & arg , gfp_flags ) ;
2014-12-01 03:22:18 +03:00
pnfs_clear_retry_layoutget ( lo ) ;
2011-01-06 14:36:25 +03:00
atomic_dec ( & lo - > plh_outstanding ) ;
2012-09-21 05:25:19 +04:00
out_put_layout_hdr :
2014-12-12 01:02:04 +03:00
if ( first )
pnfs_clear_first_layoutget ( lo ) ;
2012-09-19 04:51:13 +04:00
pnfs_put_layout_hdr ( lo ) ;
2010-10-20 08:18:01 +04:00
out :
2012-09-26 19:21:40 +04:00
dprintk ( " %s: inode %s/%llu pNFS layout segment %s for "
" (%s, offset: %llu, length: %llu) \n " ,
__func__ , ino - > i_sb - > s_id ,
( unsigned long long ) NFS_FILEID ( ino ) ,
lseg = = NULL ? " not found " : " found " ,
iomode = = IOMODE_RW ? " read/write " : " read-only " ,
( unsigned long long ) pos ,
( unsigned long long ) count ) ;
2010-10-20 08:18:01 +04:00
return lseg ;
out_unlock :
spin_unlock ( & ino - > i_lock ) ;
2012-09-21 05:25:19 +04:00
goto out_put_layout_hdr ;
2010-10-20 08:18:01 +04:00
}
2011-06-14 02:22:38 +04:00
EXPORT_SYMBOL_GPL ( pnfs_update_layout ) ;
2010-10-20 08:18:03 +04:00
2012-09-18 01:12:15 +04:00
struct pnfs_layout_segment *
2010-10-20 08:18:03 +04:00
pnfs_layout_process ( struct nfs4_layoutget * lgp )
{
struct pnfs_layout_hdr * lo = NFS_I ( lgp - > args . inode ) - > layout ;
struct nfs4_layoutget_res * res = & lgp - > res ;
struct pnfs_layout_segment * lseg ;
2011-01-06 14:36:21 +03:00
struct inode * ino = lo - > plh_inode ;
2014-02-12 19:02:27 +04:00
LIST_HEAD ( free_me ) ;
2010-10-20 08:18:03 +04:00
int status = 0 ;
/* Inject layout blob into I/O device driver */
2011-05-12 02:00:51 +04:00
lseg = NFS_SERVER ( ino ) - > pnfs_curr_ld - > alloc_lseg ( lo , res , lgp - > gfp_flags ) ;
2010-10-20 08:18:03 +04:00
if ( ! lseg | | IS_ERR ( lseg ) ) {
if ( ! lseg )
status = - ENOMEM ;
else
status = PTR_ERR ( lseg ) ;
dprintk ( " %s: Could not allocate layout: error %d \n " ,
__func__ , status ) ;
goto out ;
}
2014-08-21 20:09:18 +04:00
init_lseg ( lo , lseg ) ;
lseg - > pls_range = res - > range ;
2010-10-20 08:18:03 +04:00
spin_lock ( & ino - > i_lock ) ;
2012-03-01 20:17:47 +04:00
if ( test_bit ( NFS_LAYOUT_BULK_RECALL , & lo - > plh_flags ) ) {
2011-01-06 14:36:30 +03:00
dprintk ( " %s forget reply due to recall \n " , __func__ ) ;
goto out_forget_reply ;
}
2014-09-05 20:53:24 +04:00
if ( pnfs_layoutgets_blocked ( lo , & lgp - > args . range , 1 ) ) {
2011-01-06 14:36:30 +03:00
dprintk ( " %s forget reply due to state \n " , __func__ ) ;
goto out_forget_reply ;
}
2012-10-03 03:38:41 +04:00
2014-08-21 20:09:20 +04:00
if ( nfs4_stateid_match_other ( & lo - > plh_stateid , & res - > stateid ) ) {
/* existing state ID, make sure the sequence number matches. */
if ( pnfs_layout_stateid_blocked ( lo , & res - > stateid ) ) {
dprintk ( " %s forget reply due to sequence \n " , __func__ ) ;
goto out_forget_reply ;
}
pnfs_set_layout_stateid ( lo , & res - > stateid , false ) ;
} else {
/*
* We got an entirely new state ID . Mark all segments for the
* inode invalid , and don ' t bother validating the stateid
* sequence number .
*/
pnfs_mark_matching_lsegs_invalid ( lo , & free_me , NULL ) ;
nfs4_stateid_copy ( & lo - > plh_stateid , & res - > stateid ) ;
lo - > plh_barrier = be32_to_cpu ( res - > stateid . seqid ) ;
}
2012-10-03 03:38:41 +04:00
2014-08-21 20:09:22 +04:00
clear_bit ( NFS_LAYOUT_INVALID_STID , & lo - > plh_flags ) ;
2012-09-19 04:57:08 +04:00
pnfs_get_lseg ( lseg ) ;
2012-09-21 00:33:30 +04:00
pnfs_layout_insert_lseg ( lo , lseg ) ;
2010-10-20 08:18:03 +04:00
2011-01-06 14:36:32 +03:00
if ( res - > return_on_close ) {
set_bit ( NFS_LSEG_ROC , & lseg - > pls_flags ) ;
set_bit ( NFS_LAYOUT_ROC , & lo - > plh_flags ) ;
}
2010-10-20 08:18:03 +04:00
spin_unlock ( & ino - > i_lock ) ;
2014-02-12 19:02:27 +04:00
pnfs_free_lseg_list ( & free_me ) ;
2012-09-18 01:12:15 +04:00
return lseg ;
2010-10-20 08:18:03 +04:00
out :
2012-09-18 01:12:15 +04:00
return ERR_PTR ( status ) ;
2011-01-06 14:36:30 +03:00
out_forget_reply :
spin_unlock ( & ino - > i_lock ) ;
lseg - > pls_layout = lo ;
NFS_SERVER ( ino ) - > pnfs_curr_ld - > free_lseg ( lseg ) ;
goto out ;
2010-10-20 08:18:03 +04:00
}
2014-09-05 20:53:23 +04:00
static void
pnfs_mark_matching_lsegs_return ( struct pnfs_layout_hdr * lo ,
struct list_head * tmp_list ,
struct pnfs_layout_range * return_range )
{
struct pnfs_layout_segment * lseg , * next ;
dprintk ( " %s:Begin lo %p \n " , __func__ , lo ) ;
if ( list_empty ( & lo - > plh_segs ) )
return ;
list_for_each_entry_safe ( lseg , next , & lo - > plh_segs , pls_list )
if ( should_free_lseg ( & lseg - > pls_range , return_range ) ) {
dprintk ( " %s: marking lseg %p iomode %d "
" offset %llu length %llu \n " , __func__ ,
lseg , lseg - > pls_range . iomode ,
lseg - > pls_range . offset ,
lseg - > pls_range . length ) ;
set_bit ( NFS_LSEG_LAYOUTRETURN , & lseg - > pls_flags ) ;
mark_lseg_invalid ( lseg , tmp_list ) ;
}
}
void pnfs_error_mark_layout_for_return ( struct inode * inode ,
struct pnfs_layout_segment * lseg )
{
struct pnfs_layout_hdr * lo = NFS_I ( inode ) - > layout ;
int iomode = pnfs_iomode_to_fail_bit ( lseg - > pls_range . iomode ) ;
struct pnfs_layout_range range = {
. iomode = lseg - > pls_range . iomode ,
. offset = 0 ,
. length = NFS4_MAX_UINT64 ,
} ;
LIST_HEAD ( free_me ) ;
spin_lock ( & inode - > i_lock ) ;
/* set failure bit so that pnfs path will be retried later */
pnfs_layout_set_fail_bit ( lo , iomode ) ;
set_bit ( NFS_LAYOUT_RETURN , & lo - > plh_flags ) ;
if ( lo - > plh_return_iomode = = 0 )
lo - > plh_return_iomode = range . iomode ;
else if ( lo - > plh_return_iomode ! = range . iomode )
lo - > plh_return_iomode = IOMODE_ANY ;
/*
* mark all matching lsegs so that we are sure to have no live
* segments at hand when sending layoutreturn . See pnfs_put_lseg ( )
* for how it works .
*/
pnfs_mark_matching_lsegs_return ( lo , & free_me , & range ) ;
spin_unlock ( & inode - > i_lock ) ;
pnfs_free_lseg_list ( & free_me ) ;
}
EXPORT_SYMBOL_GPL ( pnfs_error_mark_layout_for_return ) ;
2011-06-10 21:30:23 +04:00
void
pnfs_generic_pg_init_read ( struct nfs_pageio_descriptor * pgio , struct nfs_page * req )
{
2012-09-25 10:55:57 +04:00
u64 rd_size = req - > wb_bytes ;
2015-01-24 17:14:52 +03:00
if ( pgio - > pg_lseg = = NULL ) {
if ( pgio - > pg_dreq = = NULL )
rd_size = i_size_read ( pgio - > pg_inode ) - req_offset ( req ) ;
else
rd_size = nfs_dreq_bytes_left ( pgio - > pg_dreq ) ;
pgio - > pg_lseg = pnfs_update_layout ( pgio - > pg_inode ,
req - > wb_context ,
req_offset ( req ) ,
rd_size ,
IOMODE_READ ,
GFP_KERNEL ) ;
}
2011-06-10 21:30:23 +04:00
/* If no lseg, fall back to read through mds */
if ( pgio - > pg_lseg = = NULL )
2011-07-13 23:59:57 +04:00
nfs_pageio_reset_read_mds ( pgio ) ;
2011-06-10 21:30:23 +04:00
2011-06-10 21:30:23 +04:00
}
EXPORT_SYMBOL_GPL ( pnfs_generic_pg_init_read ) ;
void
2012-09-25 10:55:57 +04:00
pnfs_generic_pg_init_write ( struct nfs_pageio_descriptor * pgio ,
struct nfs_page * req , u64 wb_size )
2011-06-10 21:30:23 +04:00
{
2015-01-24 17:14:52 +03:00
if ( pgio - > pg_lseg = = NULL )
pgio - > pg_lseg = pnfs_update_layout ( pgio - > pg_inode ,
req - > wb_context ,
req_offset ( req ) ,
wb_size ,
IOMODE_RW ,
GFP_NOFS ) ;
2011-06-10 21:30:23 +04:00
/* If no lseg, fall back to write through mds */
if ( pgio - > pg_lseg = = NULL )
2011-07-13 23:59:57 +04:00
nfs_pageio_reset_write_mds ( pgio ) ;
2011-06-10 21:30:23 +04:00
}
EXPORT_SYMBOL_GPL ( pnfs_generic_pg_init_write ) ;
2014-09-10 23:48:01 +04:00
void
pnfs_generic_pg_cleanup ( struct nfs_pageio_descriptor * desc )
{
if ( desc - > pg_lseg ) {
pnfs_put_lseg ( desc - > pg_lseg ) ;
desc - > pg_lseg = NULL ;
}
}
EXPORT_SYMBOL_GPL ( pnfs_generic_pg_cleanup ) ;
2014-05-15 19:56:43 +04:00
/*
* Return 0 if @ req cannot be coalesced into @ pgio , otherwise return the number
* of bytes ( maximum @ req - > wb_bytes ) that can be coalesced .
*/
size_t
2014-09-19 18:55:07 +04:00
pnfs_generic_pg_test ( struct nfs_pageio_descriptor * pgio ,
struct nfs_page * prev , struct nfs_page * req )
2011-03-01 04:34:14 +03:00
{
2014-05-15 19:56:51 +04:00
unsigned int size ;
2014-06-10 01:47:26 +04:00
u64 seg_end , req_start , seg_left ;
2014-05-15 19:56:51 +04:00
size = nfs_generic_pg_test ( pgio , prev , req ) ;
if ( ! size )
return 0 ;
2011-03-01 04:34:14 +03:00
2011-06-10 21:30:23 +04:00
/*
2014-06-10 01:47:26 +04:00
* ' size ' contains the number of bytes left in the current page ( up
* to the original size asked for in @ req - > wb_bytes ) .
*
* Calculate how many bytes are left in the layout segment
* and if there are less bytes than ' size ' , return that instead .
2011-06-10 21:30:23 +04:00
*
* Please also note that ' end_offset ' is actually the offset of the
* first byte that lies outside the pnfs_layout_range . FIXME ?
*
*/
2014-05-15 19:56:55 +04:00
if ( pgio - > pg_lseg ) {
2014-06-10 01:47:26 +04:00
seg_end = end_offset ( pgio - > pg_lseg - > pls_range . offset ,
pgio - > pg_lseg - > pls_range . length ) ;
req_start = req_offset ( req ) ;
2015-01-30 19:01:02 +03:00
WARN_ON_ONCE ( req_start > = seg_end ) ;
2014-06-10 01:47:26 +04:00
/* start of request is past the last byte of this segment */
2015-01-30 19:01:02 +03:00
if ( req_start > = seg_end ) {
/* reference the new lseg */
if ( pgio - > pg_ops - > pg_cleanup )
pgio - > pg_ops - > pg_cleanup ( pgio ) ;
if ( pgio - > pg_ops - > pg_init )
pgio - > pg_ops - > pg_init ( pgio , req ) ;
2014-05-15 19:56:55 +04:00
return 0 ;
2015-01-30 19:01:02 +03:00
}
2014-06-10 01:47:26 +04:00
/* adjust 'size' iff there are fewer bytes left in the
* segment than what nfs_generic_pg_test returned */
seg_left = seg_end - req_start ;
if ( seg_left < size )
size = ( unsigned int ) seg_left ;
2014-05-15 19:56:55 +04:00
}
2014-05-15 19:56:51 +04:00
2014-05-15 19:56:55 +04:00
return size ;
2011-03-01 04:34:14 +03:00
}
2011-05-25 21:54:40 +04:00
EXPORT_SYMBOL_GPL ( pnfs_generic_pg_test ) ;
2011-03-01 04:34:14 +03:00
2014-06-09 19:48:38 +04:00
int pnfs_write_done_resend_to_mds ( struct nfs_pgio_header * hdr )
2012-01-06 17:57:46 +04:00
{
struct nfs_pageio_descriptor pgio ;
/* Resend all requests through the MDS */
2014-06-09 19:48:38 +04:00
nfs_pageio_init_write ( & pgio , hdr - > inode , FLUSH_STABLE , true ,
hdr - > completion_ops ) ;
return nfs_pageio_resend ( & pgio , hdr ) ;
2012-01-06 17:57:46 +04:00
}
2012-04-28 01:53:46 +04:00
EXPORT_SYMBOL_GPL ( pnfs_write_done_resend_to_mds ) ;
2012-01-06 17:57:46 +04:00
2014-06-09 19:48:35 +04:00
static void pnfs_ld_handle_write_error ( struct nfs_pgio_header * hdr )
2012-04-20 22:47:37 +04:00
{
2012-04-20 22:47:44 +04:00
dprintk ( " pnfs write error = %d \n " , hdr - > pnfs_error ) ;
if ( NFS_SERVER ( hdr - > inode ) - > pnfs_curr_ld - > flags &
2012-04-20 22:47:37 +04:00
PNFS_LAYOUTRET_ON_ERROR ) {
2012-04-20 22:47:44 +04:00
pnfs_return_layout ( hdr - > inode ) ;
2012-04-20 22:47:37 +04:00
}
2012-04-20 22:47:47 +04:00
if ( ! test_and_set_bit ( NFS_IOHDR_REDO , & hdr - > flags ) )
2014-06-09 19:48:38 +04:00
hdr - > task . tk_status = pnfs_write_done_resend_to_mds ( hdr ) ;
2012-04-20 22:47:37 +04:00
}
2011-05-22 20:52:03 +04:00
/*
* Called by non rpc - based layout drivers
*/
2014-06-09 19:48:35 +04:00
void pnfs_ld_write_done ( struct nfs_pgio_header * hdr )
2011-03-03 18:13:44 +03:00
{
2014-06-09 19:48:35 +04:00
trace_nfs4_pnfs_write ( hdr , hdr - > pnfs_error ) ;
2012-04-20 22:47:44 +04:00
if ( ! hdr - > pnfs_error ) {
2014-06-09 19:48:35 +04:00
pnfs_set_layoutcommit ( hdr ) ;
hdr - > mds_ops - > rpc_call_done ( & hdr - > task , hdr ) ;
2012-04-20 22:47:37 +04:00
} else
2014-06-09 19:48:35 +04:00
pnfs_ld_handle_write_error ( hdr ) ;
hdr - > mds_ops - > rpc_release ( hdr ) ;
2011-03-03 18:13:44 +03:00
}
2011-05-22 20:52:03 +04:00
EXPORT_SYMBOL_GPL ( pnfs_ld_write_done ) ;
2011-03-03 18:13:44 +03:00
2011-07-13 23:59:19 +04:00
static void
pnfs_write_through_mds ( struct nfs_pageio_descriptor * desc ,
2014-06-09 19:48:35 +04:00
struct nfs_pgio_header * hdr )
2011-07-13 23:59:19 +04:00
{
2014-11-10 03:35:35 +03:00
struct nfs_pgio_mirror * mirror = nfs_pgio_current_mirror ( desc ) ;
2014-09-19 18:55:07 +04:00
2012-04-20 22:47:47 +04:00
if ( ! test_and_set_bit ( NFS_IOHDR_REDO , & hdr - > flags ) ) {
2014-09-19 18:55:07 +04:00
list_splice_tail_init ( & hdr - > pages , & mirror - > pg_list ) ;
2012-04-20 22:47:47 +04:00
nfs_pageio_reset_write_mds ( desc ) ;
2014-09-19 18:55:07 +04:00
mirror - > pg_recoalesce = 1 ;
2012-04-20 22:47:47 +04:00
}
2014-06-09 19:48:35 +04:00
nfs_pgio_data_destroy ( hdr ) ;
2011-07-13 23:59:19 +04:00
}
static enum pnfs_try_status
2014-06-09 19:48:35 +04:00
pnfs_try_to_write_data ( struct nfs_pgio_header * hdr ,
2011-07-13 23:59:19 +04:00
const struct rpc_call_ops * call_ops ,
struct pnfs_layout_segment * lseg ,
int how )
2011-03-03 18:13:45 +03:00
{
2012-04-20 22:47:44 +04:00
struct inode * inode = hdr - > inode ;
2011-03-03 18:13:45 +03:00
enum pnfs_try_status trypnfs ;
struct nfs_server * nfss = NFS_SERVER ( inode ) ;
2012-04-20 22:47:44 +04:00
hdr - > mds_ops = call_ops ;
2011-03-03 18:13:45 +03:00
dprintk ( " %s: Writing ino:%lu %u@%llu (how %d) \n " , __func__ ,
2014-06-09 19:48:35 +04:00
inode - > i_ino , hdr - > args . count , hdr - > args . offset , how ) ;
trypnfs = nfss - > pnfs_curr_ld - > write_pagelist ( hdr , how ) ;
2012-04-20 22:47:47 +04:00
if ( trypnfs ! = PNFS_NOT_ATTEMPTED )
2011-03-03 18:13:45 +03:00
nfs_inc_stats ( inode , NFSIOS_PNFS_WRITE ) ;
dprintk ( " %s End (trypnfs:%d) \n " , __func__ , trypnfs ) ;
return trypnfs ;
}
2011-07-13 23:59:19 +04:00
static void
2014-05-15 19:56:53 +04:00
pnfs_do_write ( struct nfs_pageio_descriptor * desc ,
struct nfs_pgio_header * hdr , int how )
2011-07-13 23:59:19 +04:00
{
const struct rpc_call_ops * call_ops = desc - > pg_rpc_callops ;
struct pnfs_layout_segment * lseg = desc - > pg_lseg ;
2014-05-15 19:56:53 +04:00
enum pnfs_try_status trypnfs ;
2011-07-13 23:59:19 +04:00
2014-06-09 19:48:35 +04:00
trypnfs = pnfs_try_to_write_data ( hdr , call_ops , lseg , how ) ;
2014-05-15 19:56:53 +04:00
if ( trypnfs = = PNFS_NOT_ATTEMPTED )
2014-06-09 19:48:35 +04:00
pnfs_write_through_mds ( desc , hdr ) ;
2011-07-13 23:59:19 +04:00
}
2012-04-20 22:47:47 +04:00
static void pnfs_writehdr_free ( struct nfs_pgio_header * hdr )
{
2012-09-19 04:57:08 +04:00
pnfs_put_lseg ( hdr - > lseg ) ;
2014-06-09 19:48:33 +04:00
nfs_pgio_header_free ( hdr ) ;
2012-04-20 22:47:47 +04:00
}
2012-07-31 00:05:25 +04:00
EXPORT_SYMBOL_GPL ( pnfs_writehdr_free ) ;
2012-04-20 22:47:47 +04:00
2011-07-13 23:59:19 +04:00
int
pnfs_generic_pg_writepages ( struct nfs_pageio_descriptor * desc )
{
2014-11-10 03:35:35 +03:00
struct nfs_pgio_mirror * mirror = nfs_pgio_current_mirror ( desc ) ;
2014-09-19 18:55:07 +04:00
2012-04-20 22:47:47 +04:00
struct nfs_pgio_header * hdr ;
2011-07-13 23:59:19 +04:00
int ret ;
2014-06-09 19:48:33 +04:00
hdr = nfs_pgio_header_alloc ( desc - > pg_rw_ops ) ;
if ( ! hdr ) {
2014-09-19 18:55:07 +04:00
desc - > pg_completion_ops - > error_cleanup ( & mirror - > pg_list ) ;
2012-04-20 22:47:47 +04:00
return - ENOMEM ;
2011-07-13 23:59:19 +04:00
}
2012-04-20 22:47:47 +04:00
nfs_pgheader_init ( desc , hdr , pnfs_writehdr_free ) ;
2014-09-10 23:48:01 +04:00
2012-09-19 04:57:08 +04:00
hdr - > lseg = pnfs_get_lseg ( desc - > pg_lseg ) ;
2014-05-06 17:12:36 +04:00
ret = nfs_generic_pgio ( desc , hdr ) ;
2014-09-10 23:48:01 +04:00
if ( ! ret )
2014-05-15 19:56:53 +04:00
pnfs_do_write ( desc , hdr , desc - > pg_ioflags ) ;
2014-09-19 18:55:07 +04:00
2012-04-20 22:47:47 +04:00
return ret ;
2011-07-13 23:59:19 +04:00
}
EXPORT_SYMBOL_GPL ( pnfs_generic_pg_writepages ) ;
2014-06-09 19:48:38 +04:00
int pnfs_read_done_resend_to_mds ( struct nfs_pgio_header * hdr )
2011-11-10 23:30:37 +04:00
{
struct nfs_pageio_descriptor pgio ;
2012-04-20 22:47:37 +04:00
/* Resend all requests through the MDS */
2014-06-09 19:48:38 +04:00
nfs_pageio_init_read ( & pgio , hdr - > inode , true , hdr - > completion_ops ) ;
return nfs_pageio_resend ( & pgio , hdr ) ;
2012-04-20 22:47:37 +04:00
}
2012-04-28 01:53:46 +04:00
EXPORT_SYMBOL_GPL ( pnfs_read_done_resend_to_mds ) ;
2012-04-20 22:47:37 +04:00
2014-06-09 19:48:35 +04:00
static void pnfs_ld_handle_read_error ( struct nfs_pgio_header * hdr )
2012-04-20 22:47:37 +04:00
{
2012-04-20 22:47:44 +04:00
dprintk ( " pnfs read error = %d \n " , hdr - > pnfs_error ) ;
if ( NFS_SERVER ( hdr - > inode ) - > pnfs_curr_ld - > flags &
2012-04-20 22:47:37 +04:00
PNFS_LAYOUTRET_ON_ERROR ) {
2012-04-20 22:47:44 +04:00
pnfs_return_layout ( hdr - > inode ) ;
2012-04-20 22:47:37 +04:00
}
2012-04-20 22:47:46 +04:00
if ( ! test_and_set_bit ( NFS_IOHDR_REDO , & hdr - > flags ) )
2014-06-09 19:48:38 +04:00
hdr - > task . tk_status = pnfs_read_done_resend_to_mds ( hdr ) ;
2011-11-10 23:30:37 +04:00
}
2011-05-22 20:52:03 +04:00
/*
* Called by non rpc - based layout drivers
*/
2014-06-09 19:48:35 +04:00
void pnfs_ld_read_done ( struct nfs_pgio_header * hdr )
2011-05-22 20:52:03 +04:00
{
2014-06-09 19:48:35 +04:00
trace_nfs4_pnfs_read ( hdr , hdr - > pnfs_error ) ;
2012-04-20 22:47:44 +04:00
if ( likely ( ! hdr - > pnfs_error ) ) {
2014-06-09 19:48:35 +04:00
__nfs4_read_done_cb ( hdr ) ;
hdr - > mds_ops - > rpc_call_done ( & hdr - > task , hdr ) ;
2011-11-10 23:30:37 +04:00
} else
2014-06-09 19:48:35 +04:00
pnfs_ld_handle_read_error ( hdr ) ;
hdr - > mds_ops - > rpc_release ( hdr ) ;
2011-05-22 20:52:03 +04:00
}
EXPORT_SYMBOL_GPL ( pnfs_ld_read_done ) ;
2011-07-13 23:58:28 +04:00
static void
pnfs_read_through_mds ( struct nfs_pageio_descriptor * desc ,
2014-06-09 19:48:35 +04:00
struct nfs_pgio_header * hdr )
2011-07-13 23:58:28 +04:00
{
2014-11-10 03:35:35 +03:00
struct nfs_pgio_mirror * mirror = nfs_pgio_current_mirror ( desc ) ;
2014-09-19 18:55:07 +04:00
2012-04-20 22:47:46 +04:00
if ( ! test_and_set_bit ( NFS_IOHDR_REDO , & hdr - > flags ) ) {
2014-09-19 18:55:07 +04:00
list_splice_tail_init ( & hdr - > pages , & mirror - > pg_list ) ;
2012-04-20 22:47:46 +04:00
nfs_pageio_reset_read_mds ( desc ) ;
2014-09-19 18:55:07 +04:00
mirror - > pg_recoalesce = 1 ;
2012-04-20 22:47:46 +04:00
}
2014-06-09 19:48:35 +04:00
nfs_pgio_data_destroy ( hdr ) ;
2011-07-13 23:58:28 +04:00
}
2011-03-01 04:34:16 +03:00
/*
* Call the appropriate parallel I / O subsystem read function .
*/
2011-07-13 23:58:28 +04:00
static enum pnfs_try_status
2014-06-09 19:48:35 +04:00
pnfs_try_to_read_data ( struct nfs_pgio_header * hdr ,
2011-07-13 23:58:28 +04:00
const struct rpc_call_ops * call_ops ,
struct pnfs_layout_segment * lseg )
2011-03-01 04:34:16 +03:00
{
2012-04-20 22:47:44 +04:00
struct inode * inode = hdr - > inode ;
2011-03-01 04:34:16 +03:00
struct nfs_server * nfss = NFS_SERVER ( inode ) ;
enum pnfs_try_status trypnfs ;
2012-04-20 22:47:44 +04:00
hdr - > mds_ops = call_ops ;
2011-03-01 04:34:16 +03:00
dprintk ( " %s: Reading ino:%lu %u@%llu \n " ,
2014-06-09 19:48:35 +04:00
__func__ , inode - > i_ino , hdr - > args . count , hdr - > args . offset ) ;
2011-03-01 04:34:16 +03:00
2014-06-09 19:48:35 +04:00
trypnfs = nfss - > pnfs_curr_ld - > read_pagelist ( hdr ) ;
2012-04-20 22:47:46 +04:00
if ( trypnfs ! = PNFS_NOT_ATTEMPTED )
2011-03-01 04:34:16 +03:00
nfs_inc_stats ( inode , NFSIOS_PNFS_READ ) ;
dprintk ( " %s End (trypnfs:%d) \n " , __func__ , trypnfs ) ;
return trypnfs ;
}
2011-03-23 16:27:54 +03:00
2014-11-10 03:35:38 +03:00
/* Resend all requests through pnfs. */
int pnfs_read_resend_pnfs ( struct nfs_pgio_header * hdr )
{
struct nfs_pageio_descriptor pgio ;
nfs_pageio_init_read ( & pgio , hdr - > inode , false , hdr - > completion_ops ) ;
return nfs_pageio_resend ( & pgio , hdr ) ;
}
EXPORT_SYMBOL_GPL ( pnfs_read_resend_pnfs ) ;
2011-07-13 23:58:28 +04:00
static void
2014-05-15 19:56:53 +04:00
pnfs_do_read ( struct nfs_pageio_descriptor * desc , struct nfs_pgio_header * hdr )
2011-07-13 23:58:28 +04:00
{
const struct rpc_call_ops * call_ops = desc - > pg_rpc_callops ;
struct pnfs_layout_segment * lseg = desc - > pg_lseg ;
2014-05-15 19:56:53 +04:00
enum pnfs_try_status trypnfs ;
2014-11-10 03:35:38 +03:00
int err = 0 ;
2011-07-13 23:58:28 +04:00
2014-06-09 19:48:35 +04:00
trypnfs = pnfs_try_to_read_data ( hdr , call_ops , lseg ) ;
2014-11-10 03:35:38 +03:00
if ( trypnfs = = PNFS_TRY_AGAIN )
err = pnfs_read_resend_pnfs ( hdr ) ;
if ( trypnfs = = PNFS_NOT_ATTEMPTED | | err )
2014-06-09 19:48:35 +04:00
pnfs_read_through_mds ( desc , hdr ) ;
2011-07-13 23:58:28 +04:00
}
2012-04-20 22:47:46 +04:00
static void pnfs_readhdr_free ( struct nfs_pgio_header * hdr )
{
2012-09-19 04:57:08 +04:00
pnfs_put_lseg ( hdr - > lseg ) ;
2014-06-09 19:48:33 +04:00
nfs_pgio_header_free ( hdr ) ;
2012-04-20 22:47:46 +04:00
}
2012-07-31 00:05:25 +04:00
EXPORT_SYMBOL_GPL ( pnfs_readhdr_free ) ;
2012-04-20 22:47:46 +04:00
2011-07-13 23:58:28 +04:00
int
pnfs_generic_pg_readpages ( struct nfs_pageio_descriptor * desc )
{
2014-11-10 03:35:35 +03:00
struct nfs_pgio_mirror * mirror = nfs_pgio_current_mirror ( desc ) ;
2014-09-19 18:55:07 +04:00
2012-04-20 22:47:46 +04:00
struct nfs_pgio_header * hdr ;
2011-07-13 23:58:28 +04:00
int ret ;
2014-06-09 19:48:33 +04:00
hdr = nfs_pgio_header_alloc ( desc - > pg_rw_ops ) ;
if ( ! hdr ) {
2014-09-19 18:55:07 +04:00
desc - > pg_completion_ops - > error_cleanup ( & mirror - > pg_list ) ;
2014-09-10 23:48:01 +04:00
return - ENOMEM ;
2011-07-13 23:58:28 +04:00
}
2012-04-20 22:47:46 +04:00
nfs_pgheader_init ( desc , hdr , pnfs_readhdr_free ) ;
2012-09-19 04:57:08 +04:00
hdr - > lseg = pnfs_get_lseg ( desc - > pg_lseg ) ;
2014-05-06 17:12:36 +04:00
ret = nfs_generic_pgio ( desc , hdr ) ;
2014-09-10 23:48:01 +04:00
if ( ! ret )
2014-05-15 19:56:53 +04:00
pnfs_do_read ( desc , hdr ) ;
2012-04-20 22:47:46 +04:00
return ret ;
2011-07-13 23:58:28 +04:00
}
EXPORT_SYMBOL_GPL ( pnfs_generic_pg_readpages ) ;
2014-01-13 22:34:36 +04:00
static void pnfs_clear_layoutcommitting ( struct inode * inode )
{
unsigned long * bitlock = & NFS_I ( inode ) - > flags ;
clear_bit_unlock ( NFS_INO_LAYOUTCOMMITTING , bitlock ) ;
2014-03-17 21:06:10 +04:00
smp_mb__after_atomic ( ) ;
2014-01-13 22:34:36 +04:00
wake_up_bit ( bitlock , NFS_INO_LAYOUTCOMMITTING ) ;
}
2011-03-23 16:27:54 +03:00
/*
2011-07-31 04:52:33 +04:00
* There can be multiple RW segments .
2011-03-23 16:27:54 +03:00
*/
2011-07-31 04:52:33 +04:00
static void pnfs_list_write_lseg ( struct inode * inode , struct list_head * listp )
2011-03-23 16:27:54 +03:00
{
2011-07-31 04:52:33 +04:00
struct pnfs_layout_segment * lseg ;
2011-03-23 16:27:54 +03:00
2011-07-31 04:52:33 +04:00
list_for_each_entry ( lseg , & NFS_I ( inode ) - > layout - > plh_segs , pls_list ) {
if ( lseg - > pls_range . iomode = = IOMODE_RW & &
2013-03-20 20:34:32 +04:00
test_and_clear_bit ( NFS_LSEG_LAYOUTCOMMIT , & lseg - > pls_flags ) )
2011-07-31 04:52:33 +04:00
list_add ( & lseg - > pls_lc_list , listp ) ;
}
2011-03-23 16:27:54 +03:00
}
2013-03-20 20:34:32 +04:00
/*
 * Drop the segment references collected for a layoutcommit and release
 * the LAYOUTCOMMITTING bit-lock.
 */
static void pnfs_list_write_lseg_done(struct inode *inode,
				      struct list_head *listp)
{
	struct pnfs_layout_segment *lseg, *next;

	/* Matched by references in pnfs_set_layoutcommit */
	list_for_each_entry_safe(lseg, next, listp, pls_lc_list) {
		list_del_init(&lseg->pls_lc_list);
		pnfs_put_lseg(lseg);
	}
	pnfs_clear_layoutcommitting(inode);
}
void pnfs_set_lo_fail ( struct pnfs_layout_segment * lseg )
{
2012-09-19 00:41:18 +04:00
pnfs_layout_io_set_failed ( lseg - > pls_layout , lseg - > pls_range . iomode ) ;
2011-09-23 05:50:12 +04:00
}
EXPORT_SYMBOL_GPL ( pnfs_set_lo_fail ) ;
2011-03-23 16:27:54 +03:00
void
2014-06-09 19:48:35 +04:00
pnfs_set_layoutcommit ( struct nfs_pgio_header * hdr )
2011-03-23 16:27:54 +03:00
{
2012-04-20 22:47:44 +04:00
struct inode * inode = hdr - > inode ;
struct nfs_inode * nfsi = NFS_I ( inode ) ;
2014-06-09 19:48:35 +04:00
loff_t end_pos = hdr - > mds_offset + hdr - > res . count ;
2011-04-13 18:53:51 +04:00
bool mark_as_dirty = false ;
2011-03-23 16:27:54 +03:00
2012-04-20 22:47:44 +04:00
spin_lock ( & inode - > i_lock ) ;
2011-03-23 16:27:54 +03:00
if ( ! test_and_set_bit ( NFS_INO_LAYOUTCOMMIT , & nfsi - > flags ) ) {
2011-04-13 18:53:51 +04:00
mark_as_dirty = true ;
2011-03-23 16:27:54 +03:00
dprintk ( " %s: Set layoutcommit for inode %lu " ,
2012-04-20 22:47:44 +04:00
__func__ , inode - > i_ino ) ;
2011-03-23 16:27:54 +03:00
}
2012-04-20 22:47:44 +04:00
if ( ! test_and_set_bit ( NFS_LSEG_LAYOUTCOMMIT , & hdr - > lseg - > pls_flags ) ) {
2011-07-31 04:52:33 +04:00
/* references matched in nfs4_layoutcommit_release */
2012-09-19 04:57:08 +04:00
pnfs_get_lseg ( hdr - > lseg ) ;
2011-07-31 04:52:33 +04:00
}
2011-07-31 04:52:31 +04:00
if ( end_pos > nfsi - > layout - > plh_lwb )
nfsi - > layout - > plh_lwb = end_pos ;
2012-04-20 22:47:44 +04:00
spin_unlock ( & inode - > i_lock ) ;
2011-07-31 04:52:31 +04:00
dprintk ( " %s: lseg %p end_pos %llu \n " ,
2012-04-20 22:47:44 +04:00
__func__ , hdr - > lseg , nfsi - > layout - > plh_lwb ) ;
2011-04-13 18:53:51 +04:00
/* if pnfs_layoutcommit_inode() runs between inode locks, the next one
* will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
if ( mark_as_dirty )
2012-04-20 22:47:44 +04:00
mark_inode_dirty_sync ( inode ) ;
2011-03-23 16:27:54 +03:00
}
EXPORT_SYMBOL_GPL ( pnfs_set_layoutcommit ) ;
2014-08-07 06:15:02 +04:00
void pnfs_commit_set_layoutcommit ( struct nfs_commit_data * data )
{
struct inode * inode = data - > inode ;
struct nfs_inode * nfsi = NFS_I ( inode ) ;
bool mark_as_dirty = false ;
spin_lock ( & inode - > i_lock ) ;
if ( ! test_and_set_bit ( NFS_INO_LAYOUTCOMMIT , & nfsi - > flags ) ) {
mark_as_dirty = true ;
dprintk ( " %s: Set layoutcommit for inode %lu " ,
__func__ , inode - > i_ino ) ;
}
if ( ! test_and_set_bit ( NFS_LSEG_LAYOUTCOMMIT , & data - > lseg - > pls_flags ) ) {
/* references matched in nfs4_layoutcommit_release */
pnfs_get_lseg ( data - > lseg ) ;
}
if ( data - > lwb > nfsi - > layout - > plh_lwb )
nfsi - > layout - > plh_lwb = data - > lwb ;
spin_unlock ( & inode - > i_lock ) ;
dprintk ( " %s: lseg %p end_pos %llu \n " ,
__func__ , data - > lseg , nfsi - > layout - > plh_lwb ) ;
/* if pnfs_layoutcommit_inode() runs between inode locks, the next one
* will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
if ( mark_as_dirty )
mark_inode_dirty_sync ( inode ) ;
}
EXPORT_SYMBOL_GPL ( pnfs_commit_set_layoutcommit ) ;
2011-07-31 04:52:38 +04:00
void pnfs_cleanup_layoutcommit ( struct nfs4_layoutcommit_data * data )
{
struct nfs_server * nfss = NFS_SERVER ( data - > args . inode ) ;
if ( nfss - > pnfs_curr_ld - > cleanup_layoutcommit )
nfss - > pnfs_curr_ld - > cleanup_layoutcommit ( data ) ;
2013-03-20 20:34:32 +04:00
pnfs_list_write_lseg_done ( data - > args . inode , & data - > lseg_list ) ;
2011-07-31 04:52:38 +04:00
}
2011-03-12 10:58:09 +03:00
/*
* For the LAYOUT4_NFSV4_1_FILES layout type , NFS_DATA_SYNC WRITEs and
* NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
* data to disk to allow the server to recover the data if it crashes .
* LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
* is off , and a COMMIT is sent to a data server , or
* if WRITEs to a data server return NFS_DATA_SYNC .
*/
2011-03-23 16:27:54 +03:00
int
2011-03-12 10:58:10 +03:00
pnfs_layoutcommit_inode ( struct inode * inode , bool sync )
2011-03-23 16:27:54 +03:00
{
2014-08-21 20:09:25 +04:00
struct pnfs_layoutdriver_type * ld = NFS_SERVER ( inode ) - > pnfs_curr_ld ;
2011-03-23 16:27:54 +03:00
struct nfs4_layoutcommit_data * data ;
struct nfs_inode * nfsi = NFS_I ( inode ) ;
loff_t end_pos ;
2014-01-13 22:34:36 +04:00
int status ;
2011-03-23 16:27:54 +03:00
2014-01-13 22:34:36 +04:00
if ( ! pnfs_layoutcommit_outstanding ( inode ) )
2011-03-12 10:58:09 +03:00
return 0 ;
2014-01-13 22:34:36 +04:00
dprintk ( " --> %s inode %lu \n " , __func__ , inode - > i_ino ) ;
2011-10-24 07:21:17 +04:00
2014-01-13 22:34:36 +04:00
status = - EAGAIN ;
2011-10-24 07:21:17 +04:00
if ( test_and_set_bit ( NFS_INO_LAYOUTCOMMITTING , & nfsi - > flags ) ) {
2014-01-13 22:34:36 +04:00
if ( ! sync )
goto out ;
sched: Remove proliferation of wait_on_bit() action functions
The current "wait_on_bit" interface requires an 'action'
function to be provided which does the actual waiting.
There are over 20 such functions, many of them identical.
Most cases can be satisfied by one of just two functions, one
which uses io_schedule() and one which just uses schedule().
So:
Rename wait_on_bit and wait_on_bit_lock to
wait_on_bit_action and wait_on_bit_lock_action
to make it explicit that they need an action function.
Introduce new wait_on_bit{,_lock} and wait_on_bit{,_lock}_io
which are *not* given an action function but implicitly use
a standard one.
The decision to error-out if a signal is pending is now made
based on the 'mode' argument rather than being encoded in the action
function.
All instances of the old wait_on_bit and wait_on_bit_lock which
can use the new version have been changed accordingly and their
action functions have been discarded.
wait_on_bit{_lock} does not return any specific error code in the
event of a signal so the caller must check for non-zero and
interpolate their own error code as appropriate.
The wait_on_bit() call in __fscache_wait_on_invalidate() was
ambiguous as it specified TASK_UNINTERRUPTIBLE but used
fscache_wait_bit_interruptible as an action function.
David Howells confirms this should be uniformly
"uninterruptible"
The main remaining user of wait_on_bit{,_lock}_action is NFS
which needs to use a freezer-aware schedule() call.
A comment in fs/gfs2/glock.c notes that having multiple 'action'
functions is useful as they display differently in the 'wchan'
field of 'ps'. (and /proc/$PID/wchan).
As the new bit_wait{,_io} functions are tagged "__sched", they
will not show up at all, but something higher in the stack. So
the distinction will still be visible, only with different
function names (gds2_glock_wait versus gfs2_glock_dq_wait in the
gfs2/glock.c case).
Since first version of this patch (against 3.15) two new action
functions appeared, on in NFS and one in CIFS. CIFS also now
uses an action function that makes the same freezer aware
schedule call as NFS.
Signed-off-by: NeilBrown <neilb@suse.de>
Acked-by: David Howells <dhowells@redhat.com> (fscache, keys)
Acked-by: Steven Whitehouse <swhiteho@redhat.com> (gfs2)
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Steve French <sfrench@samba.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/20140707051603.28027.72349.stgit@notabene.brown
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2014-07-07 09:16:04 +04:00
status = wait_on_bit_lock_action ( & nfsi - > flags ,
2014-01-13 22:34:36 +04:00
NFS_INO_LAYOUTCOMMITTING ,
nfs_wait_bit_killable ,
TASK_KILLABLE ) ;
2011-10-24 07:21:17 +04:00
if ( status )
2014-01-13 22:34:36 +04:00
goto out ;
2011-10-24 07:21:17 +04:00
}
2014-01-13 22:34:36 +04:00
status = - ENOMEM ;
/* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
data = kzalloc ( sizeof ( * data ) , GFP_NOFS ) ;
if ( ! data )
goto clear_layoutcommitting ;
status = 0 ;
2011-03-12 10:58:09 +03:00
spin_lock ( & inode - > i_lock ) ;
2014-01-13 22:34:36 +04:00
if ( ! test_and_clear_bit ( NFS_INO_LAYOUTCOMMIT , & nfsi - > flags ) )
goto out_unlock ;
2011-07-31 04:52:33 +04:00
2014-01-13 22:34:36 +04:00
INIT_LIST_HEAD ( & data - > lseg_list ) ;
2011-07-31 04:52:33 +04:00
pnfs_list_write_lseg ( inode , & data - > lseg_list ) ;
2011-03-23 16:27:54 +03:00
2011-07-31 04:52:31 +04:00
end_pos = nfsi - > layout - > plh_lwb ;
nfsi - > layout - > plh_lwb = 0 ;
2011-03-23 16:27:54 +03:00
2012-03-05 03:13:56 +04:00
nfs4_stateid_copy ( & data - > args . stateid , & nfsi - > layout - > plh_stateid ) ;
2011-03-23 16:27:54 +03:00
spin_unlock ( & inode - > i_lock ) ;
data - > args . inode = inode ;
2011-07-31 04:52:32 +04:00
data - > cred = get_rpccred ( nfsi - > layout - > plh_lc_cred ) ;
2011-03-23 16:27:54 +03:00
nfs_fattr_init ( & data - > fattr ) ;
data - > args . bitmask = NFS_SERVER ( inode ) - > cache_consistency_bitmask ;
data - > res . fattr = & data - > fattr ;
data - > args . lastbytewritten = end_pos - 1 ;
data - > res . server = NFS_SERVER ( inode ) ;
2014-08-21 20:09:25 +04:00
if ( ld - > prepare_layoutcommit ) {
status = ld - > prepare_layoutcommit ( & data - > args ) ;
if ( status ) {
spin_lock ( & inode - > i_lock ) ;
if ( end_pos < nfsi - > layout - > plh_lwb )
nfsi - > layout - > plh_lwb = end_pos ;
spin_unlock ( & inode - > i_lock ) ;
put_rpccred ( data - > cred ) ;
set_bit ( NFS_INO_LAYOUTCOMMIT , & nfsi - > flags ) ;
goto clear_layoutcommitting ;
}
}
2011-03-23 16:27:54 +03:00
status = nfs4_proc_layoutcommit ( data , sync ) ;
out :
2011-10-24 07:21:17 +04:00
if ( status )
mark_inode_dirty_sync ( inode ) ;
2011-03-23 16:27:54 +03:00
dprintk ( " <-- %s status %d \n " , __func__ , status ) ;
return status ;
2014-01-13 22:34:36 +04:00
out_unlock :
spin_unlock ( & inode - > i_lock ) ;
2011-10-24 07:21:17 +04:00
kfree ( data ) ;
2014-01-13 22:34:36 +04:00
clear_layoutcommitting :
pnfs_clear_layoutcommitting ( inode ) ;
2011-10-24 07:21:17 +04:00
goto out ;
2011-03-23 16:27:54 +03:00
}
2014-08-07 06:12:38 +04:00
EXPORT_SYMBOL_GPL ( pnfs_layoutcommit_inode ) ;
2012-05-23 13:02:35 +04:00
/*
 * Allocate a zeroed nfs4_threshold (mdsthreshold hint container).
 * Returns NULL on allocation failure.
 */
struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
{
	struct nfs4_threshold *thp;

	thp = kzalloc(sizeof(*thp), GFP_NOFS);
	if (thp == NULL)
		dprintk("%s mdsthreshold allocation failed\n", __func__);
	return thp;
}