/*
 *   fs/cifs/misc.c
 *
 *   Copyright (C) International Business Machines Corp., 2002, 2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/mempool.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smberr.h"
#include "nterr.h"
#include "cifs_unicode.h"
#ifdef CONFIG_CIFS_SMB2
#include "smb2pdu.h"
#endif

extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;

/* The xid serves as a useful identifier for each incoming vfs request,
   in a similar way to the mid which is useful to track each sent smb,
   and CurrentXid can also provide a running counter (although it
   will eventually wrap past zero) of the total vfs operations handled
   since the cifs fs was mounted */
unsigned int
_get_xid(void)
{
        unsigned int xid;

        spin_lock(&GlobalMid_Lock);
        GlobalTotalActiveXid++;

        /* keep high water mark for number of simultaneous ops in filesystem */
        if (GlobalTotalActiveXid > GlobalMaxActiveXid)
                GlobalMaxActiveXid = GlobalTotalActiveXid;
        if (GlobalTotalActiveXid > 65000)
                cFYI(1, "warning: more than 65000 requests active");
        xid = GlobalCurrentXid++;
        spin_unlock(&GlobalMid_Lock);
        return xid;
}

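/*
 * Release an xid obtained from _get_xid().  Every vfs operation that
 * takes an xid is expected to drop it when the operation completes, so
 * that GlobalTotalActiveXid reflects the number of operations in flight.
 * A rough usage sketch (assuming the get_xid()/free_xid() wrappers
 * declared elsewhere in the CIFS headers):
 *
 *      xid = get_xid();
 *      ... issue one or more SMBs for this vfs request ...
 *      free_xid(xid);
 */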
void
_free_xid(unsigned int xid)
{
        spin_lock(&GlobalMid_Lock);
        /* if (GlobalTotalActiveXid == 0)
                BUG(); */
        GlobalTotalActiveXid--;
        spin_unlock(&GlobalMid_Lock);
}

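/*
 * Allocate and initialize an SMB session structure: the caller gets a
 * zeroed cifs_ses with status CifsNew, a reference count of one and
 * empty session/tcon lists.  Returns NULL if the allocation fails.
 */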
struct cifs_ses *
sesInfoAlloc(void)
{
        struct cifs_ses *ret_buf;

        ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
        if (ret_buf) {
                atomic_inc(&sesInfoAllocCount);
                ret_buf->status = CifsNew;
                ++ret_buf->ses_count;
                INIT_LIST_HEAD(&ret_buf->smb_ses_list);
                INIT_LIST_HEAD(&ret_buf->tcon_list);
                mutex_init(&ret_buf->session_mutex);
        }
        return ret_buf;
}

void
sesInfoFree(struct cifs_ses *buf_to_free)
{
        if (buf_to_free == NULL) {
                cFYI(1, "Null buffer passed to sesInfoFree");
                return;
        }

        atomic_dec(&sesInfoAllocCount);
        kfree(buf_to_free->serverOS);
        kfree(buf_to_free->serverDomain);
        kfree(buf_to_free->serverNOS);
        if (buf_to_free->password) {
                memset(buf_to_free->password, 0, strlen(buf_to_free->password));
                kfree(buf_to_free->password);
        }
        kfree(buf_to_free->user_name);
        kfree(buf_to_free->domainName);
        kfree(buf_to_free);
}

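/*
 * Allocate and initialize a tree connection structure, mirroring
 * sesInfoAlloc(): status CifsNew, reference count of one, empty open
 * file and tcon lists.  Returns NULL if the allocation fails.
 */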
struct cifs_tcon *
tconInfoAlloc(void)
{
        struct cifs_tcon *ret_buf;
        ret_buf = kzalloc(sizeof(struct cifs_tcon), GFP_KERNEL);
        if (ret_buf) {
                atomic_inc(&tconInfoAllocCount);
                ret_buf->tidStatus = CifsNew;
                ++ret_buf->tc_count;
                INIT_LIST_HEAD(&ret_buf->openFileList);
                INIT_LIST_HEAD(&ret_buf->tcon_list);
#ifdef CONFIG_CIFS_STATS
                spin_lock_init(&ret_buf->stat_lock);
#endif
        }
        return ret_buf;
}

void
tconInfoFree(struct cifs_tcon *buf_to_free)
{
        if (buf_to_free == NULL) {
                cFYI(1, "Null buffer passed to tconInfoFree");
                return;
        }
        atomic_dec(&tconInfoAllocCount);
        kfree(buf_to_free->nativeFileSystem);
        if (buf_to_free->password) {
                memset(buf_to_free->password, 0, strlen(buf_to_free->password));
                kfree(buf_to_free->password);
        }
        kfree(buf_to_free);
}

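/*
 * Get a request buffer for a full sized SMB from the large buffer
 * mempool.  Only the first few header bytes are zeroed here; for most
 * paths the rest is cleared later in header_assemble().  Release the
 * buffer with cifs_buf_release().
 */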
struct smb_hdr *
cifs_buf_get(void)
{
        struct smb_hdr *ret_buf = NULL;
        size_t buf_size = sizeof(struct smb_hdr);

#ifdef CONFIG_CIFS_SMB2
        /*
         * SMB2 header is bigger than the CIFS one, so it does no harm to
         * zero a few extra bytes for CIFS.
         */
        buf_size = sizeof(struct smb2_hdr);
#endif
        /*
         * We could use negotiated size instead of max_msgsize -
         * but it may be more efficient to always alloc same size
         * albeit slightly larger than necessary and maxbuffersize
         * defaults to this and can not be bigger.
         */
        ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);

        /* clear the first few header bytes */
        /* for most paths, more is cleared in header_assemble */
        if (ret_buf) {
                memset(ret_buf, 0, buf_size + 3);
                atomic_inc(&bufAllocCount);
#ifdef CONFIG_CIFS_STATS2
                atomic_inc(&totBufAllocCount);
#endif /* CONFIG_CIFS_STATS2 */
        }

        return ret_buf;
}

void
cifs_buf_release(void *buf_to_free)
{
        if (buf_to_free == NULL) {
                /* cFYI(1, "Null buffer passed to cifs_buf_release");*/
                return;
        }
        mempool_free(buf_to_free, cifs_req_poolp);

        atomic_dec(&bufAllocCount);
        return;
}

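/*
 * Get a small request buffer from the small buffer mempool (used for
 * requests that carry little or no data area).  The buffer is not
 * zeroed here; header_assemble() clears what it needs.  Release it
 * with cifs_small_buf_release().
 */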
struct smb_hdr *
cifs_small_buf_get(void)
{
        struct smb_hdr *ret_buf = NULL;

        /* We could use negotiated size instead of max_msgsize -
           but it may be more efficient to always alloc same size
           albeit slightly larger than necessary and maxbuffersize
           defaults to this and can not be bigger */
        ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS);
        if (ret_buf) {
                /* No need to clear memory here, cleared in header assemble */
                /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
                atomic_inc(&smBufAllocCount);
#ifdef CONFIG_CIFS_STATS2
                atomic_inc(&totSmBufAllocCount);
#endif /* CONFIG_CIFS_STATS2 */
        }
        return ret_buf;
}

void
cifs_small_buf_release(void *buf_to_free)
{
        if (buf_to_free == NULL) {
                cFYI(1, "Null buffer passed to cifs_small_buf_release");
                return;
        }
        mempool_free(buf_to_free, cifs_sm_req_poolp);

        atomic_dec(&smBufAllocCount);
        return;
}

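/*
 * Fill in the fixed portion of an outgoing SMB header: protocol magic,
 * command, flags, pid and, when a tree connection is supplied, the
 * tid/uid/mid plus the unicode, status32, DFS, caseless and signing
 * flags derived from the session and server settings.
 */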
/* NB: MID can not be set if treeCon not passed in, in that
   case it is responsibility of caller to set the mid */
void
header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
                const struct cifs_tcon *treeCon, int word_count
                /* length of fixed section (word count) in two byte units */)
{
        char *temp = (char *) buffer;
        memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */

        buffer->smb_buf_length = cpu_to_be32(
            (2 * word_count) + sizeof(struct smb_hdr) -
            4 /*  RFC 1001 length field does not count */ +
            2 /* for bcc field itself */);

        buffer->Protocol[0] = 0xFF;
        buffer->Protocol[1] = 'S';
        buffer->Protocol[2] = 'M';
        buffer->Protocol[3] = 'B';
        buffer->Command = smb_command;
        buffer->Flags = 0x00;   /* case sensitive */
        buffer->Flags2 = SMBFLG2_KNOWS_LONG_NAMES;
        buffer->Pid = cpu_to_le16((__u16)current->tgid);
        buffer->PidHigh = cpu_to_le16((__u16)(current->tgid >> 16));
        if (treeCon) {
                buffer->Tid = treeCon->tid;
                if (treeCon->ses) {
                        if (treeCon->ses->capabilities & CAP_UNICODE)
                                buffer->Flags2 |= SMBFLG2_UNICODE;
                        if (treeCon->ses->capabilities & CAP_STATUS32)
                                buffer->Flags2 |= SMBFLG2_ERR_STATUS;

                        /* Uid is not converted */
                        buffer->Uid = treeCon->ses->Suid;
                        buffer->Mid = get_next_mid(treeCon->ses->server);
                }
                if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
                        buffer->Flags2 |= SMBFLG2_DFS;
                if (treeCon->nocase)
                        buffer->Flags |= SMBFLG_CASELESS;
                if ((treeCon->ses) && (treeCon->ses->server))
                        if (treeCon->ses->server->sec_mode &
                          (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
                                buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
        }

        /* endian conversion of flags is now done just before sending */
        buffer->WordCount = (char) word_count;
        return;
}

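/*
 * Basic sanity checks on a received frame: correct SMB signature, mid
 * matching what we expect, and either a response or the one request a
 * server may legitimately initiate (oplock break via LOCKING_ANDX).
 * Returns 0 if the header looks sane, 1 otherwise.
 */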
static int
check_smb_hdr(struct smb_hdr *smb, __u16 mid)
{
        /* does it have the right SMB "signature" ? */
        if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) {
                cERROR(1, "Bad protocol string signature header 0x%x",
                        *(unsigned int *)smb->Protocol);
                return 1;
        }

        /* Make sure that message ids match */
        if (mid != smb->Mid) {
                cERROR(1, "Mids do not match. received=%u expected=%u",
                        smb->Mid, mid);
                return 1;
        }

        /* if it's a response then accept */
        if (smb->Flags & SMBFLG_RESPONSE)
                return 0;

        /* only one valid case where server sends us request */
        if (smb->Command == SMB_COM_LOCKING_ANDX)
                return 0;

        cERROR(1, "Server sent request, not response. mid=%u", smb->Mid);
        return 1;
}

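/*
 * Validate a complete received SMB: frame long enough to reach the bcc,
 * header sane per check_smb_hdr(), and RFC1001 length consistent with
 * both the number of bytes read from the socket and the length
 * calculated from the SMB contents (allowing for the large-read bcc
 * wrap and for servers that pad responses by up to 512 bytes).
 * Returns 0 on success or -EIO if the frame should be discarded.
 */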
int
checkSMB(char *buf, unsigned int total_read)
{
        struct smb_hdr *smb = (struct smb_hdr *)buf;
        __u16 mid = smb->Mid;
        __u32 rfclen = be32_to_cpu(smb->smb_buf_length);
        __u32 clc_len;  /* calculated length */
        cFYI(0, "checkSMB Length: 0x%x, smb_buf_length: 0x%x",
                total_read, rfclen);

        /* is this frame too small to even get to a BCC? */
        if (total_read < 2 + sizeof(struct smb_hdr)) {
                if ((total_read >= sizeof(struct smb_hdr) - 1)
                            && (smb->Status.CifsError != 0)) {
                        /* it's an error return */
                        smb->WordCount = 0;
                        /* some error cases do not return wct and bcc */
                        return 0;
                } else if ((total_read == sizeof(struct smb_hdr) + 1) &&
                                (smb->WordCount == 0)) {
                        char *tmp = (char *)smb;
                        /* Need to work around a bug in two servers here */
                        /* First, check if the part of bcc they sent was zero */
                        if (tmp[sizeof(struct smb_hdr)] == 0) {
                                /* some servers return only half of bcc
                                 * on simple responses (wct, bcc both zero)
                                 * in particular have seen this on
                                 * ulogoffX and FindClose. This leaves
                                 * one byte of bcc potentially uninitialized
                                 */
                                /* zero rest of bcc */
                                tmp[sizeof(struct smb_hdr)+1] = 0;
                                return 0;
                        }
                        cERROR(1, "rcvd invalid byte count (bcc)");
                } else {
                        cERROR(1, "Length less than smb header size");
                }
                return -EIO;
        }

        /* otherwise, there is enough to get to the BCC */
        if (check_smb_hdr(smb, mid))
                return -EIO;
        clc_len = smbCalcSize(smb);

        if (4 + rfclen != total_read) {
                cERROR(1, "Length read does not match RFC1001 length %d",
                        rfclen);
                return -EIO;
        }

        if (4 + rfclen != clc_len) {
                /* check if bcc wrapped around for large read responses */
                if ((rfclen > 64 * 1024) && (rfclen > clc_len)) {
                        /* check if lengths match mod 64K */
                        if (((4 + rfclen) & 0xFFFF) == (clc_len & 0xFFFF))
                                return 0; /* bcc wrapped */
                }
                cFYI(1, "Calculated size %u vs length %u mismatch for mid=%u",
                                clc_len, 4 + rfclen, smb->Mid);

                if (4 + rfclen < clc_len) {
                        cERROR(1, "RFC1001 size %u smaller than SMB for mid=%u",
                                        rfclen, smb->Mid);
                        return -EIO;
                } else if (rfclen > clc_len + 512) {
                        /*
                         * Some servers (Windows XP in particular) send more
                         * data than the lengths in the SMB packet would
                         * indicate on certain calls (byte range locks and
                         * trans2 find first calls in particular). While the
                         * client can handle such a frame by ignoring the
                         * trailing data, we choose to limit the amount of
                         * extra data to 512 bytes.
                         */
                        cERROR(1, "RFC1001 size %u more than 512 bytes larger "
                                  "than SMB for mid=%u", rfclen, smb->Mid);
                        return -EIO;
                }
        }
        return 0;
}

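/*
 * Decide whether an unsolicited frame from the server is an oplock
 * break (or dnotify response) and, if it names a file we have open,
 * set the new caching level and queue the oplock break worker for the
 * matching cifsFileInfo.  Returns true when the frame was recognized
 * and dealt with here, false when it should be handled as an ordinary
 * response.
 */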
bool
is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
{
        struct smb_hdr *buf = (struct smb_hdr *)buffer;
        struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
        struct list_head *tmp, *tmp1, *tmp2;
        struct cifs_ses *ses;
        struct cifs_tcon *tcon;
        struct cifsInodeInfo *pCifsInode;
        struct cifsFileInfo *netfile;

        cFYI(1, "Checking for oplock break or dnotify response");
        if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
           (pSMB->hdr.Flags & SMBFLG_RESPONSE)) {
                struct smb_com_transaction_change_notify_rsp *pSMBr =
                        (struct smb_com_transaction_change_notify_rsp *)buf;
                struct file_notify_information *pnotify;
                __u32 data_offset = 0;
                if (get_bcc(buf) > sizeof(struct file_notify_information)) {
                        data_offset = le32_to_cpu(pSMBr->DataOffset);

                        pnotify = (struct file_notify_information *)
                                ((char *)&pSMBr->hdr.Protocol + data_offset);
                        cFYI(1, "dnotify on %s Action: 0x%x",
                                 pnotify->FileName, pnotify->Action);
                        /* cifs_dump_mem("Rcvd notify Data: ",buf,
                                sizeof(struct smb_hdr)+60); */
                        return true;
                }
                if (pSMBr->hdr.Status.CifsError) {
                        cFYI(1, "notify err 0x%d",
                                pSMBr->hdr.Status.CifsError);
                        return true;
                }
                return false;
        }
        if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX)
                return false;
        if (pSMB->hdr.Flags & SMBFLG_RESPONSE) {
                /* no sense logging error on invalid handle on oplock
                   break - harmless race between close request and oplock
                   break response is expected from time to time when writing
                   out large dirty files cached on the client */
                if ((NT_STATUS_INVALID_HANDLE) ==
                   le32_to_cpu(pSMB->hdr.Status.CifsError)) {
                        cFYI(1, "invalid handle on oplock break");
                        return true;
                } else if (ERRbadfid ==
                   le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
                        return true;
                } else {
                        return false; /* on valid oplock brk we get "request" */
                }
        }
        if (pSMB->hdr.WordCount != 8)
                return false;

        cFYI(1, "oplock type 0x%d level 0x%d",
                 pSMB->LockType, pSMB->OplockLevel);
        if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
                return false;

        /* look up tcon based on tid & uid */
        spin_lock(&cifs_tcp_ses_lock);
        list_for_each(tmp, &srv->smb_ses_list) {
                ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
                list_for_each(tmp1, &ses->tcon_list) {
                        tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
                        if (tcon->tid != buf->Tid)
                                continue;

                        cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
                        spin_lock(&cifs_file_list_lock);
                        list_for_each(tmp2, &tcon->openFileList) {
                                netfile = list_entry(tmp2, struct cifsFileInfo,
                                                     tlist);
                                if (pSMB->Fid != netfile->fid.netfid)
                                        continue;

                                cFYI(1, "file id match, oplock break");
                                pCifsInode = CIFS_I(netfile->dentry->d_inode);

                                cifs_set_oplock_level(pCifsInode,
                                        pSMB->OplockLevel ? OPLOCK_READ : 0);
                                queue_work(cifsiod_wq,
                                           &netfile->oplock_break);
                                netfile->oplock_break_cancelled = false;

                                spin_unlock(&cifs_file_list_lock);
                                spin_unlock(&cifs_tcp_ses_lock);
                                return true;
                        }
                        spin_unlock(&cifs_file_list_lock);
                        spin_unlock(&cifs_tcp_ses_lock);
                        cFYI(1, "No matching file for oplock break");
                        return true;
                }
        }
        spin_unlock(&cifs_tcp_ses_lock);
        cFYI(1, "Can not process oplock break for non-existent connection");
        return true;
}

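/*
 * Hex/ASCII dump of an SMB buffer to the kernel log, eight bytes per
 * line, emitted only when the global traceSMB flag is enabled.
 */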
void
dump_smb(void *buf, int smb_buf_length)
{
        int i, j;
        char debug_line[17];
        unsigned char *buffer = buf;

        if (traceSMB == 0)
                return;

        for (i = 0, j = 0; i < smb_buf_length; i++, j++) {
                if (i % 8 == 0) {
                        /* have reached the beginning of line */
                        printk(KERN_DEBUG "| ");
                        j = 0;
                }
                printk("%0#4x ", buffer[i]);
                debug_line[2 * j] = ' ';
                if (isprint(buffer[i]))
                        debug_line[1 + (2 * j)] = buffer[i];
                else
                        debug_line[1 + (2 * j)] = '_';

                if (i % 8 == 7) {
                        /* reached end of line, time to print ascii */
                        debug_line[16] = 0;
                        printk(" | %s\n", debug_line);
                }
        }
        for (; j < 8; j++) {
                printk("     ");
                debug_line[2 * j] = ' ';
                debug_line[1 + (2 * j)] = ' ';
        }
        printk(" | %s\n", debug_line);
        return;
}

void
cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
{
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
                cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
                cERROR(1, "Autodisabling the use of server inode numbers on "
                           "%s. This server doesn't seem to support them "
                           "properly. Hardlinks will not be recognized on this "
                           "mount. Consider mounting with the \"noserverino\" "
                           "option to silence this message.",
                           cifs_sb_master_tcon(cifs_sb)->treeName);
        }
}

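/*
 * Translate an SMB oplock level into the per-inode caching flags:
 * exclusive oplocks allow caching of both reads and writes, level II
 * oplocks allow read caching only, and anything else disables caching.
 */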
void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
{
        oplock &= 0xF;

        if (oplock == OPLOCK_EXCLUSIVE) {
                cinode->clientCanCacheAll = true;
                cinode->clientCanCacheRead = true;
                cFYI(1, "Exclusive Oplock granted on inode %p",
                     &cinode->vfs_inode);
        } else if (oplock == OPLOCK_READ) {
                cinode->clientCanCacheAll = false;
                cinode->clientCanCacheRead = true;
                cFYI(1, "Level II Oplock granted on inode %p",
                     &cinode->vfs_inode);
        } else {
                cinode->clientCanCacheAll = false;
                cinode->clientCanCacheRead = false;
        }
}

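/*
 * True if the current task is allowed to use backup intent on this
 * mount, i.e. it matches the configured backupuid or belongs to the
 * configured backupgid group.
 */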
bool
backup_cred(struct cifs_sb_info *cifs_sb)
{
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) {
                if (cifs_sb->mnt_backupuid == current_fsuid())
                        return true;
        }
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) {
                if (in_group_p(cifs_sb->mnt_backupgid))
                        return true;
        }

        return false;
}

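/*
 * Helpers for tracking opens that are still in flight on a tree
 * connection.  cifs_add_pending_open_locked() expects the caller to
 * already hold cifs_file_list_lock; cifs_del_pending_open() and
 * cifs_add_pending_open() take and drop that lock themselves.
 */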
void
cifs_del_pending_open(struct cifs_pending_open *open)
{
        spin_lock(&cifs_file_list_lock);
        list_del(&open->olist);
        spin_unlock(&cifs_file_list_lock);
}

void
cifs_add_pending_open_locked(struct cifs_fid *fid, struct tcon_link *tlink,
                             struct cifs_pending_open *open)
{
#ifdef CONFIG_CIFS_SMB2
        memcpy(open->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
#endif
        open->oplock = CIFS_OPLOCK_NO_CHANGE;
        open->tlink = tlink;
        fid->pending_open = open;
        list_add_tail(&open->olist, &tlink_tcon(tlink)->pending_opens);
}

void
cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink,
                      struct cifs_pending_open *open)
{
        spin_lock(&cifs_file_list_lock);
        cifs_add_pending_open_locked(fid, tlink, open);
        spin_unlock(&cifs_file_list_lock);
}