// SPDX-License-Identifier: LGPL-2.1
/*
 *
 *   Copyright (C) International Business Machines  Corp., 2002, 2011
 *                 Etersoft, 2012
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Pavel Shilovsky (pshilovsky@samba.org) 2012
 *
 */
#include <linux/ctype.h>
#include "cifsglob.h"
#include "cifsproto.h"
#include "smb2proto.h"
#include "cifs_debug.h"
#include "cifs_unicode.h"
#include "smb2status.h"
#include "smb2glob.h"
#include "nterr.h"
#include "cached_dir.h"

static int
check_smb2_hdr(struct smb2_hdr *shdr, __u64 mid)
{
        __u64 wire_mid = le64_to_cpu(shdr->MessageId);

        /*
         * Make sure that this really is an SMB, that it is a response,
         * and that the message ids match.
         */
        if ((shdr->ProtocolId == SMB2_PROTO_NUMBER) &&
            (mid == wire_mid)) {
                if (shdr->Flags & SMB2_FLAGS_SERVER_TO_REDIR)
                        return 0;
                else {
                        /* only one valid case where server sends us request */
                        if (shdr->Command == SMB2_OPLOCK_BREAK)
                                return 0;
                        else
                                cifs_dbg(VFS, "Received Request not response\n");
                }
        } else { /* bad signature or mid */
                if (shdr->ProtocolId != SMB2_PROTO_NUMBER)
                        cifs_dbg(VFS, "Bad protocol string signature header %x\n",
                                 le32_to_cpu(shdr->ProtocolId));
                if (mid != wire_mid)
                        cifs_dbg(VFS, "Mids do not match: %llu and %llu\n",
                                 mid, wire_mid);
        }
        cifs_dbg(VFS, "Bad SMB detected. The Mid=%llu\n", wire_mid);
        return 1;
}

/*
 * The following table defines the expected "StructureSize" of SMB2 responses
 * in order by SMB2 command. This is similar to "wct" in SMB/CIFS responses.
 *
 * Note that commands are defined in smb2pdu.h in le16 but the array below is
 * indexed by command in host byte order.
 */
static const __le16 smb2_rsp_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
        /* SMB2_NEGOTIATE */ cpu_to_le16(65),
        /* SMB2_SESSION_SETUP */ cpu_to_le16(9),
        /* SMB2_LOGOFF */ cpu_to_le16(4),
        /* SMB2_TREE_CONNECT */ cpu_to_le16(16),
        /* SMB2_TREE_DISCONNECT */ cpu_to_le16(4),
        /* SMB2_CREATE */ cpu_to_le16(89),
        /* SMB2_CLOSE */ cpu_to_le16(60),
        /* SMB2_FLUSH */ cpu_to_le16(4),
        /* SMB2_READ */ cpu_to_le16(17),
        /* SMB2_WRITE */ cpu_to_le16(17),
        /* SMB2_LOCK */ cpu_to_le16(4),
        /* SMB2_IOCTL */ cpu_to_le16(49),
        /* BB CHECK this ... not listed in documentation */
        /* SMB2_CANCEL */ cpu_to_le16(0),
        /* SMB2_ECHO */ cpu_to_le16(4),
        /* SMB2_QUERY_DIRECTORY */ cpu_to_le16(9),
        /* SMB2_CHANGE_NOTIFY */ cpu_to_le16(9),
        /* SMB2_QUERY_INFO */ cpu_to_le16(9),
        /* SMB2_SET_INFO */ cpu_to_le16(2),
        /* BB FIXME can also be 44 for lease break */
        /* SMB2_OPLOCK_BREAK */ cpu_to_le16(24)
};

#define SMB311_NEGPROT_BASE_SIZE (sizeof(struct smb2_hdr) + sizeof(struct smb2_negotiate_rsp))
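
/*
 * Return the total length of the negotiate context area in an SMB3.1.1
 * NEGOTIATE response (including any padding between the security blob and
 * the first context), or 0 if the response carries no contexts or they do
 * not fit within the frame.  non_ctxlen is the length of everything that
 * precedes the contexts, as computed by smb2_calc_size().
 */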
static __u32 get_neg_ctxt_len(struct smb2_hdr *hdr, __u32 len,
                              __u32 non_ctxlen)
{
        __u16 neg_count;
        __u32 nc_offset, size_of_pad_before_neg_ctxts;
        struct smb2_negotiate_rsp *pneg_rsp = (struct smb2_negotiate_rsp *)hdr;

        /* Negotiate contexts are only valid for latest dialect SMB3.11 */
        neg_count = le16_to_cpu(pneg_rsp->NegotiateContextCount);
        if ((neg_count == 0) ||
            (pneg_rsp->DialectRevision != cpu_to_le16(SMB311_PROT_ID)))
                return 0;

        /*
         * if SPNEGO blob present (ie the RFC2478 GSS info which indicates
         * which security mechanisms the server supports) make sure that
         * the negotiate contexts start after it
         */
        nc_offset = le32_to_cpu(pneg_rsp->NegotiateContextOffset);
        /*
         * non_ctxlen is at least shdr->StructureSize + pdu->StructureSize2
         * and the latter is 1 byte bigger than the fix-sized area of the
         * NEGOTIATE response
         */
        if (nc_offset + 1 < non_ctxlen) {
                pr_warn_once("Invalid negotiate context offset %d\n", nc_offset);
                return 0;
        } else if (nc_offset + 1 == non_ctxlen) {
                cifs_dbg(FYI, "no SPNEGO security blob in negprot rsp\n");
                size_of_pad_before_neg_ctxts = 0;
        } else if (non_ctxlen == SMB311_NEGPROT_BASE_SIZE)
                /* has padding, but no SPNEGO blob */
                size_of_pad_before_neg_ctxts = nc_offset - non_ctxlen + 1;
        else
                size_of_pad_before_neg_ctxts = nc_offset - non_ctxlen;

        /* Verify that at least minimal negotiate contexts fit within frame */
        if (len < nc_offset + (neg_count * sizeof(struct smb2_neg_context))) {
                pr_warn_once("negotiate context goes beyond end\n");
                return 0;
        }

        cifs_dbg(FYI, "length of negcontexts %d pad %d\n",
                 len - nc_offset, size_of_pad_before_neg_ctxts);

        /* length of negcontexts including pad from end of sec blob to them */
        return (len - nc_offset) + size_of_pad_before_neg_ctxts;
}
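
/*
 * Sanity check a response frame read off the wire: verify the protocol id,
 * mid, header and per-command structure sizes, and compare the length we
 * calculate from the headers against the number of bytes actually received.
 * Returns 0 if the frame looks valid, 1 otherwise.
 */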
int
smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *server)
{
        struct TCP_Server_Info *pserver;
        struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
        struct smb2_pdu *pdu = (struct smb2_pdu *)shdr;
        int hdr_size = sizeof(struct smb2_hdr);
        int pdu_size = sizeof(struct smb2_pdu);
        int command;
        __u32 calc_len; /* calculated length */
        __u64 mid;

        /* If server is a channel, select the primary channel */
        pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;

        /*
         * Add function to do table lookup of StructureSize by command
         * ie Validate the wct via smb2_struct_sizes table above
         */
        if (shdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) {
                struct smb2_transform_hdr *thdr =
                        (struct smb2_transform_hdr *)buf;
                struct cifs_ses *ses = NULL;
                struct cifs_ses *iter;

                /* decrypt frame now that it is completely read in */
                spin_lock(&cifs_tcp_ses_lock);
                list_for_each_entry(iter, &pserver->smb_ses_list, smb_ses_list) {
                        if (iter->Suid == le64_to_cpu(thdr->SessionId)) {
                                ses = iter;
                                break;
                        }
                }
                spin_unlock(&cifs_tcp_ses_lock);
                if (!ses) {
                        cifs_dbg(VFS, "no decryption - session id not found\n");
                        return 1;
                }
        }

        mid = le64_to_cpu(shdr->MessageId);
        if (len < pdu_size) {
                if ((len >= hdr_size)
                    && (shdr->Status != 0)) {
                        pdu->StructureSize2 = 0;
                        /*
                         * As with SMB/CIFS, on some error cases servers may
                         * not return wct properly
                         */
                        return 0;
                } else {
                        cifs_dbg(VFS, "Length less than SMB header size\n");
                }
                return 1;
        }
        if (len > CIFSMaxBufSize + MAX_SMB2_HDR_SIZE) {
                cifs_dbg(VFS, "SMB length greater than maximum, mid=%llu\n",
                         mid);
                return 1;
        }

        if (check_smb2_hdr(shdr, mid))
                return 1;

        if (shdr->StructureSize != SMB2_HEADER_STRUCTURE_SIZE) {
                cifs_dbg(VFS, "Invalid structure size %u\n",
                         le16_to_cpu(shdr->StructureSize));
                return 1;
        }

        command = le16_to_cpu(shdr->Command);
        if (command >= NUMBER_OF_SMB2_COMMANDS) {
                cifs_dbg(VFS, "Invalid SMB2 command %d\n", command);
                return 1;
        }

        if (smb2_rsp_struct_sizes[command] != pdu->StructureSize2) {
                if (command != SMB2_OPLOCK_BREAK_HE && (shdr->Status == 0 ||
                    pdu->StructureSize2 != SMB2_ERROR_STRUCTURE_SIZE2_LE)) {
                        /* error packets have 9 byte structure size */
                        cifs_dbg(VFS, "Invalid response size %u for command %d\n",
                                 le16_to_cpu(pdu->StructureSize2), command);
                        return 1;
                } else if (command == SMB2_OPLOCK_BREAK_HE
                           && (shdr->Status == 0)
                           && (le16_to_cpu(pdu->StructureSize2) != 44)
                           && (le16_to_cpu(pdu->StructureSize2) != 36)) {
                        /* special case for SMB2.1 lease break message */
                        cifs_dbg(VFS, "Invalid response size %d for oplock break\n",
                                 le16_to_cpu(pdu->StructureSize2));
                        return 1;
                }
        }

        calc_len = smb2_calc_size(buf);

        /* For SMB2_IOCTL, OutputOffset and OutputLength are optional, so might
         * be 0, and not a real miscalculation */
        if (command == SMB2_IOCTL_HE && calc_len == 0)
                return 0;

        if (command == SMB2_NEGOTIATE_HE)
                calc_len += get_neg_ctxt_len(shdr, len, calc_len);

        if (len != calc_len) {
                /* create failed on symlink */
                if (command == SMB2_CREATE_HE &&
                    shdr->Status == STATUS_STOPPED_ON_SYMLINK)
                        return 0;
                /* Windows 7 server returns 24 bytes more */
                if (calc_len + 24 == len && command == SMB2_OPLOCK_BREAK_HE)
                        return 0;
                /* server can return one byte more due to implied bcc[0] */
                if (calc_len == len + 1)
                        return 0;

                /*
                 * Some windows servers (win2016) will pad also the final
                 * PDU in a compound to 8 bytes.
                 */
                if (ALIGN(calc_len, 8) == len)
                        return 0;

                /*
                 * MacOS server pads after SMB2.1 write response with 3 bytes
                 * of junk. Other servers match RFC1001 len to actual
                 * SMB2/SMB3 frame length (header + smb2 response specific data)
                 * Some windows servers also pad up to 8 bytes when compounding.
                 */
                if (calc_len < len)
                        return 0;

                /* Only log a message if len was really miscalculated */
                if (unlikely(cifsFYI))
                        cifs_dbg(FYI, "Server response too short: calculated "
                                 "length %u doesn't match read length %u (cmd=%d, mid=%llu)\n",
                                 calc_len, len, command, mid);
                else
                        pr_warn("Server response too short: calculated length "
                                "%u doesn't match read length %u (cmd=%d, mid=%llu)\n",
                                calc_len, len, command, mid);

                return 1;
        }
        return 0;
}

/*
 * The size of the variable area depends on the offset and length fields
 * located in different fields for various SMB2 responses. SMB2 responses
 * with no variable length info, show an offset of zero for the offset field.
 */
static const bool has_smb2_data_area[NUMBER_OF_SMB2_COMMANDS] = {
        /* SMB2_NEGOTIATE */ true,
        /* SMB2_SESSION_SETUP */ true,
        /* SMB2_LOGOFF */ false,
        /* SMB2_TREE_CONNECT */ false,
        /* SMB2_TREE_DISCONNECT */ false,
        /* SMB2_CREATE */ true,
        /* SMB2_CLOSE */ false,
        /* SMB2_FLUSH */ false,
        /* SMB2_READ */ true,
        /* SMB2_WRITE */ false,
        /* SMB2_LOCK */ false,
        /* SMB2_IOCTL */ true,
        /* SMB2_CANCEL */ false, /* BB CHECK this not listed in documentation */
        /* SMB2_ECHO */ false,
        /* SMB2_QUERY_DIRECTORY */ true,
        /* SMB2_CHANGE_NOTIFY */ true,
        /* SMB2_QUERY_INFO */ true,
        /* SMB2_SET_INFO */ false,
        /* SMB2_OPLOCK_BREAK */ false
};

/*
 * Returns the pointer to the beginning of the data area. Length of the data
 * area and the offset to it (from the beginning of the smb) are also returned.
 */
char *
smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *shdr)
{
        *off = 0;
        *len = 0;

        /* error responses do not have data area */
        if (shdr->Status && shdr->Status != STATUS_MORE_PROCESSING_REQUIRED &&
            (((struct smb2_err_rsp *)shdr)->StructureSize) ==
                                                SMB2_ERROR_STRUCTURE_SIZE2_LE)
                return NULL;

        /*
         * Following commands have data areas so we have to get the location
         * of the data buffer offset and data buffer length for the particular
         * command.
         */
        switch (shdr->Command) {
        case SMB2_NEGOTIATE:
                *off = le16_to_cpu(
                    ((struct smb2_negotiate_rsp *)shdr)->SecurityBufferOffset);
                *len = le16_to_cpu(
                    ((struct smb2_negotiate_rsp *)shdr)->SecurityBufferLength);
                break;
        case SMB2_SESSION_SETUP:
                *off = le16_to_cpu(
                    ((struct smb2_sess_setup_rsp *)shdr)->SecurityBufferOffset);
                *len = le16_to_cpu(
                    ((struct smb2_sess_setup_rsp *)shdr)->SecurityBufferLength);
                break;
        case SMB2_CREATE:
                *off = le32_to_cpu(
                    ((struct smb2_create_rsp *)shdr)->CreateContextsOffset);
                *len = le32_to_cpu(
                    ((struct smb2_create_rsp *)shdr)->CreateContextsLength);
                break;
        case SMB2_QUERY_INFO:
                *off = le16_to_cpu(
                    ((struct smb2_query_info_rsp *)shdr)->OutputBufferOffset);
                *len = le32_to_cpu(
                    ((struct smb2_query_info_rsp *)shdr)->OutputBufferLength);
                break;
        case SMB2_READ:
                /* TODO: is this a bug ? */
                *off = ((struct smb2_read_rsp *)shdr)->DataOffset;
                *len = le32_to_cpu(((struct smb2_read_rsp *)shdr)->DataLength);
                break;
        case SMB2_QUERY_DIRECTORY:
                *off = le16_to_cpu(
                  ((struct smb2_query_directory_rsp *)shdr)->OutputBufferOffset);
                *len = le32_to_cpu(
                  ((struct smb2_query_directory_rsp *)shdr)->OutputBufferLength);
                break;
        case SMB2_IOCTL:
                *off = le32_to_cpu(
                  ((struct smb2_ioctl_rsp *)shdr)->OutputOffset);
                *len = le32_to_cpu(
                  ((struct smb2_ioctl_rsp *)shdr)->OutputCount);
                break;
        case SMB2_CHANGE_NOTIFY:
                *off = le16_to_cpu(
                  ((struct smb2_change_notify_rsp *)shdr)->OutputBufferOffset);
                *len = le32_to_cpu(
                  ((struct smb2_change_notify_rsp *)shdr)->OutputBufferLength);
                break;
        default:
                cifs_dbg(VFS, "no length check for command %d\n", le16_to_cpu(shdr->Command));
                break;
        }

        /*
         * Invalid length or offset probably means data area is invalid, but
         * we have little choice but to ignore the data area in this case.
         */
        if (*off > 4096) {
                cifs_dbg(VFS, "offset %d too large, data area ignored\n", *off);
                *len = 0;
                *off = 0;
        } else if (*off < 0) {
                cifs_dbg(VFS, "negative offset %d to data invalid ignore data area\n",
                         *off);
                *off = 0;
                *len = 0;
        } else if (*len < 0) {
                cifs_dbg(VFS, "negative data length %d invalid, data area ignored\n",
                         *len);
                *len = 0;
        } else if (*len > 128 * 1024) {
                cifs_dbg(VFS, "data area larger than 128K: %d\n", *len);
                *len = 0;
        }

        /* return pointer to beginning of data area, ie offset from SMB start */
        if ((*off != 0) && (*len != 0))
                return (char *)shdr + *off;
        else
                return NULL;
}

/*
 * Calculate the size of the SMB message based on the fixed header
 * portion, the number of word parameters and the data portion of the message.
 */
unsigned int
smb2_calc_size(void *buf)
{
        struct smb2_pdu *pdu = buf;
        struct smb2_hdr *shdr = &pdu->hdr;
        int offset; /* the offset from the beginning of SMB to data area */
        int data_length; /* the length of the variable length data area */
        /* Structure Size has already been checked to make sure it is 64 */
        int len = le16_to_cpu(shdr->StructureSize);

        /*
         * StructureSize2, ie length of fixed parameter area has already
         * been checked to make sure it is the correct length.
         */
        len += le16_to_cpu(pdu->StructureSize2);

        if (has_smb2_data_area[le16_to_cpu(shdr->Command)] == false)
                goto calc_size_exit;

        smb2_get_data_area_len(&offset, &data_length, shdr);
        cifs_dbg(FYI, "SMB2 data length %d offset %d\n", data_length, offset);

        if (data_length > 0) {
                /*
                 * Check to make sure that data area begins after fixed area,
                 * Note that last byte of the fixed area is part of data area
                 * for some commands, typically those with odd StructureSize,
                 * so we must add one to the calculation.
                 */
                if (offset + 1 < len) {
                        cifs_dbg(VFS, "data area offset %d overlaps SMB2 header %d\n",
                                 offset + 1, len);
                        data_length = 0;
                } else {
                        len = offset + data_length;
                }
        }
calc_size_exit:
        cifs_dbg(FYI, "SMB2 len %d\n", len);
        return len;
}
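
/*
 * Convert a path to the UTF-16 form sent on the wire, choosing the
 * reserved-character mapping (SFM, SFU or none) from the mount flags and
 * skipping the leading '\' (or '/' on SMB3.1.1 POSIX mounts), since the
 * share-relative path must not begin with a separator.
 */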
/* Note: caller must free return buffer */
__le16 *
cifs_convert_path_to_utf16(const char *from, struct cifs_sb_info *cifs_sb)
{
        int len;
        const char *start_of_path;
        __le16 *to;
        int map_type;

        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
                map_type = SFM_MAP_UNI_RSVD;
        else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
                map_type = SFU_MAP_UNI_RSVD;
        else
                map_type = NO_MAP_UNI_RSVD;

        /* Windows doesn't allow paths beginning with \ */
        if (from[0] == '\\')
                start_of_path = from + 1;

        /* SMB311 POSIX extensions paths do not include leading slash */
        else if (cifs_sb_master_tlink(cifs_sb) &&
                 cifs_sb_master_tcon(cifs_sb)->posix_extensions &&
                 (from[0] == '/')) {
                start_of_path = from + 1;
        } else
                start_of_path = from;

        to = cifs_strndup_to_utf16(start_of_path, PATH_MAX, &len,
                                   cifs_sb->local_nls, map_type);
        return to;
}
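
/* Build the SMB2 lease state flags matching the caching state we hold */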
__le32
smb2_get_lease_state(struct cifsInodeInfo *cinode)
{
        __le32 lease = 0;

        if (CIFS_CACHE_WRITE(cinode))
                lease |= SMB2_LEASE_WRITE_CACHING_LE;
        if (CIFS_CACHE_HANDLE(cinode))
                lease |= SMB2_LEASE_HANDLE_CACHING_LE;
        if (CIFS_CACHE_READ(cinode))
                lease |= SMB2_LEASE_READ_CACHING_LE;
        return lease;
}

struct smb2_lease_break_work {
        struct work_struct lease_break;
        struct tcon_link *tlink;
        __u8 lease_key[16];
        __le32 lease_state;
};

static void
cifs_ses_oplock_break(struct work_struct *work)
{
        struct smb2_lease_break_work *lw = container_of(work,
                                struct smb2_lease_break_work, lease_break);
        int rc = 0;

        rc = SMB2_lease_break(0, tlink_tcon(lw->tlink), lw->lease_key,
                              lw->lease_state);

        cifs_dbg(FYI, "Lease release rc %d\n", rc);
        cifs_put_tlink(lw->tlink);
        kfree(lw);
}
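
/*
 * Queue deferred work to respond to a lease break that matched a pending
 * open; the work handler sends SMB2_lease_break() and then drops the
 * tcon_link reference taken by the caller.
 */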
static void
smb2_queue_pending_open_break(struct tcon_link *tlink, __u8 *lease_key,
                              __le32 new_lease_state)
{
        struct smb2_lease_break_work *lw;

        lw = kmalloc(sizeof(struct smb2_lease_break_work), GFP_KERNEL);
        if (!lw) {
                cifs_put_tlink(tlink);
                return;
        }

        INIT_WORK(&lw->lease_break, cifs_ses_oplock_break);
        lw->tlink = tlink;
        lw->lease_state = new_lease_state;
        memcpy(lw->lease_key, lease_key, SMB2_LEASE_KEY_SIZE);
        queue_work(cifsiod_wq, &lw->lease_break);
}
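
/*
 * Walk the tcon's open file list looking for a file whose lease key matches
 * the break; if found, record the new lease state and epoch on the open file
 * and queue the oplock break worker.  Caller holds tcon->open_file_lock.
 */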
static bool
smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp)
{
        __u8 lease_state;
        struct cifsFileInfo *cfile;
        struct cifsInodeInfo *cinode;
        int ack_req = le32_to_cpu(rsp->Flags &
                                  SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED);

        lease_state = le32_to_cpu(rsp->NewLeaseState);

        list_for_each_entry(cfile, &tcon->openFileList, tlist) {
                cinode = CIFS_I(d_inode(cfile->dentry));

                if (memcmp(cinode->lease_key, rsp->LeaseKey,
                           SMB2_LEASE_KEY_SIZE))
                        continue;

                cifs_dbg(FYI, "found in the open list\n");
                cifs_dbg(FYI, "lease key match, lease break 0x%x\n",
                         lease_state);

                if (ack_req)
                        cfile->oplock_break_cancelled = false;
                else
                        cfile->oplock_break_cancelled = true;

                set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);

                cfile->oplock_epoch = le16_to_cpu(rsp->Epoch);
                cfile->oplock_level = lease_state;

                cifs_queue_oplock_break(cfile);
                return true;
        }

        return false;
}
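
/*
 * Look for pending opens whose lease key matches the break and update their
 * oplock state; return the first match that requires an acknowledgment so
 * the caller can queue the deferred break response.
 */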
static struct cifs_pending_open *
smb2_tcon_find_pending_open_lease(struct cifs_tcon *tcon,
                                  struct smb2_lease_break *rsp)
{
        __u8 lease_state = le32_to_cpu(rsp->NewLeaseState);
        int ack_req = le32_to_cpu(rsp->Flags &
                                  SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED);
        struct cifs_pending_open *open;
        struct cifs_pending_open *found = NULL;

        list_for_each_entry(open, &tcon->pending_opens, olist) {
                if (memcmp(open->lease_key, rsp->LeaseKey,
                           SMB2_LEASE_KEY_SIZE))
                        continue;

                if (!found && ack_req) {
                        found = open;
                }

                cifs_dbg(FYI, "found in the pending open list\n");
                cifs_dbg(FYI, "lease key match, lease break 0x%x\n",
                         lease_state);

                open->oplock = lease_state;
        }

        return found;
}
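
/*
 * Handle a lease break notification: search every tcon on the primary
 * channel for a matching open file, pending open or cached directory
 * handle.  Returns true if the break was claimed by one of them.
 */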
static bool
smb2_is_valid_lease_break(char *buffer, struct TCP_Server_Info *server)
{
        struct smb2_lease_break *rsp = (struct smb2_lease_break *)buffer;
        struct TCP_Server_Info *pserver;
        struct cifs_ses *ses;
        struct cifs_tcon *tcon;
        struct cifs_pending_open *open;

        cifs_dbg(FYI, "Checking for lease break\n");

        /* If server is a channel, select the primary channel */
        pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;

        /* look up tcon based on tid & uid */
        spin_lock(&cifs_tcp_ses_lock);
        list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
                list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
                        spin_lock(&tcon->open_file_lock);
                        cifs_stats_inc(
                                &tcon->stats.cifs_stats.num_oplock_brks);
                        if (smb2_tcon_has_lease(tcon, rsp)) {
                                spin_unlock(&tcon->open_file_lock);
                                spin_unlock(&cifs_tcp_ses_lock);
                                return true;
                        }
                        open = smb2_tcon_find_pending_open_lease(tcon,
                                                                 rsp);
                        if (open) {
                                __u8 lease_key[SMB2_LEASE_KEY_SIZE];
                                struct tcon_link *tlink;

                                tlink = cifs_get_tlink(open->tlink);
                                memcpy(lease_key, open->lease_key,
                                       SMB2_LEASE_KEY_SIZE);
                                spin_unlock(&tcon->open_file_lock);
                                spin_unlock(&cifs_tcp_ses_lock);
                                smb2_queue_pending_open_break(tlink,
                                                              lease_key,
                                                              rsp->NewLeaseState);
                                return true;
                        }
                        spin_unlock(&tcon->open_file_lock);

                        if (cached_dir_lease_break(tcon, rsp->LeaseKey)) {
                                spin_unlock(&cifs_tcp_ses_lock);
                                return true;
                        }
                }
        }
        spin_unlock(&cifs_tcp_ses_lock);
        cifs_dbg(FYI, "Can not process lease break - no lease matched\n");
        trace_smb3_lease_not_found(le32_to_cpu(rsp->CurrentLeaseState),
                                   le32_to_cpu(rsp->hdr.Id.SyncId.TreeId),
                                   le64_to_cpu(rsp->hdr.SessionId),
                                   *((u64 *)rsp->LeaseKey),
                                   *((u64 *)&rsp->LeaseKey[8]));
        return false;
}
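
/*
 * Check whether an unmatched incoming frame is a server-initiated oplock or
 * lease break and, if so, hand it off to the matching open file or pending
 * open.
 */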
bool
smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
{
        struct smb2_oplock_break *rsp = (struct smb2_oplock_break *)buffer;
        struct TCP_Server_Info *pserver;
        struct cifs_ses *ses;
        struct cifs_tcon *tcon;
        struct cifsInodeInfo *cinode;
        struct cifsFileInfo *cfile;

        cifs_dbg(FYI, "Checking for oplock break\n");

        if (rsp->hdr.Command != SMB2_OPLOCK_BREAK)
                return false;

        if (rsp->StructureSize !=
                                smb2_rsp_struct_sizes[SMB2_OPLOCK_BREAK_HE]) {
                if (le16_to_cpu(rsp->StructureSize) == 44)
                        return smb2_is_valid_lease_break(buffer, server);
                else
                        return false;
        }

        cifs_dbg(FYI, "oplock level 0x%x\n", rsp->OplockLevel);

        /* If server is a channel, select the primary channel */
        pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;

        /* look up tcon based on tid & uid */
        spin_lock(&cifs_tcp_ses_lock);
        list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
                list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {

                        spin_lock(&tcon->open_file_lock);
                        list_for_each_entry(cfile, &tcon->openFileList, tlist) {
                                if (rsp->PersistentFid !=
                                    cfile->fid.persistent_fid ||
                                    rsp->VolatileFid !=
                                    cfile->fid.volatile_fid)
                                        continue;

                                cifs_dbg(FYI, "file id match, oplock break\n");
                                cifs_stats_inc(
                                    &tcon->stats.cifs_stats.num_oplock_brks);
                                cinode = CIFS_I(d_inode(cfile->dentry));
                                spin_lock(&cfile->file_info_lock);
                                if (!CIFS_CACHE_WRITE(cinode) &&
                                    rsp->OplockLevel == SMB2_OPLOCK_LEVEL_NONE)
                                        cfile->oplock_break_cancelled = true;
                                else
                                        cfile->oplock_break_cancelled = false;

                                set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
                                        &cinode->flags);

                                cfile->oplock_epoch = 0;
                                cfile->oplock_level = rsp->OplockLevel;

                                spin_unlock(&cfile->file_info_lock);

                                cifs_queue_oplock_break(cfile);

                                spin_unlock(&tcon->open_file_lock);
                                spin_unlock(&cifs_tcp_ses_lock);
                                return true;
                        }
                        spin_unlock(&tcon->open_file_lock);
                }
        }
        spin_unlock(&cifs_tcp_ses_lock);
        cifs_dbg(FYI, "No file id matched, oplock break ignored\n");
        trace_smb3_oplock_not_found(0 /* no xid */, rsp->PersistentFid,
                                    le32_to_cpu(rsp->hdr.Id.SyncId.TreeId),
                                    le64_to_cpu(rsp->hdr.SessionId));

        return true;
}
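
/*
 * Work handler that closes a handle left open on the server because the
 * original open or close was cancelled; drops the tcon reference taken
 * when the work was queued.
 */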
void
smb2_cancelled_close_fid(struct work_struct *work)
{
        struct close_cancelled_open *cancelled = container_of(work,
                                        struct close_cancelled_open, work);
        struct cifs_tcon *tcon = cancelled->tcon;
        int rc;

        if (cancelled->mid)
                cifs_tcon_dbg(VFS, "Close unmatched open for MID:%llu\n",
                              cancelled->mid);
        else
                cifs_tcon_dbg(VFS, "Close interrupted close\n");

        rc = SMB2_close(0, tcon, cancelled->fid.persistent_fid,
                        cancelled->fid.volatile_fid);
        if (rc)
                cifs_tcon_dbg(VFS, "Close cancelled mid failed rc:%d\n", rc);

        cifs_put_tcon(tcon);
        kfree(cancelled);
}

/*
 * Caller should already hold an extra reference to @tcon.
 * This function is used to queue work to close a handle to prevent leaks
 * on the server.
 * We handle two cases. If an open was interrupted after we sent the
 * SMB2_CREATE to the server but before we processed the reply, and second
 * if a close was interrupted before we sent the SMB2_CLOSE to the server.
 */
static int
__smb2_handle_cancelled_cmd(struct cifs_tcon *tcon, __u16 cmd, __u64 mid,
                            __u64 persistent_fid, __u64 volatile_fid)
{
        struct close_cancelled_open *cancelled;

        cancelled = kzalloc(sizeof(*cancelled), GFP_ATOMIC);
        if (!cancelled)
                return -ENOMEM;

        cancelled->fid.persistent_fid = persistent_fid;
        cancelled->fid.volatile_fid = volatile_fid;
        cancelled->tcon = tcon;
        cancelled->cmd = cmd;
        cancelled->mid = mid;
        INIT_WORK(&cancelled->work, smb2_cancelled_close_fid);
        WARN_ON(queue_work(cifsiod_wq, &cancelled->work) == false);

        return 0;
}
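
/*
 * Called when a close was interrupted before the SMB2_CLOSE was sent.
 * Takes an extra tcon reference (released by the work handler, or here on
 * failure) and queues the deferred close, unless the tcon is already being
 * torn down.
 */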
int
smb2_handle_cancelled_close(struct cifs_tcon *tcon, __u64 persistent_fid,
                            __u64 volatile_fid)
{
        int rc;

        cifs_dbg(FYI, "%s: tc_count=%d\n", __func__, tcon->tc_count);
        spin_lock(&cifs_tcp_ses_lock);
        if (tcon->tc_count <= 0) {
                struct TCP_Server_Info *server = NULL;

                WARN_ONCE(tcon->tc_count < 0, "tcon refcount is negative");
                spin_unlock(&cifs_tcp_ses_lock);

                if (tcon->ses)
                        server = tcon->ses->server;

                cifs_server_dbg(FYI, "tid=0x%x: tcon is closing, skipping async close retry of fid %llu %llu\n",
                                tcon->tid, persistent_fid, volatile_fid);
                return 0;
        }
        tcon->tc_count++;
        spin_unlock(&cifs_tcp_ses_lock);

        rc = __smb2_handle_cancelled_cmd(tcon, SMB2_CLOSE_HE, 0,
                                         persistent_fid, volatile_fid);
        if (rc)
                cifs_put_tcon(tcon);

        return rc;
}
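
/*
 * Called when a request is cancelled after its response was received.  If
 * the response is a successful SMB2_CREATE (and not part of a compound
 * create/close), queue work to close the handle that would otherwise be
 * leaked on the server.
 */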
int
smb2_handle_cancelled_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
        struct smb2_hdr *hdr = mid->resp_buf;
        struct smb2_create_rsp *rsp = mid->resp_buf;
        struct cifs_tcon *tcon;
        int rc;

        if ((mid->optype & CIFS_CP_CREATE_CLOSE_OP) || hdr->Command != SMB2_CREATE ||
            hdr->Status != STATUS_SUCCESS)
                return 0;

        tcon = smb2_find_smb_tcon(server, le64_to_cpu(hdr->SessionId),
                                  le32_to_cpu(hdr->Id.SyncId.TreeId));
        if (!tcon)
                return -ENOENT;

        rc = __smb2_handle_cancelled_cmd(tcon,
                                         le16_to_cpu(hdr->Command),
                                         le64_to_cpu(hdr->MessageId),
                                         rsp->PersistentFileId,
                                         rsp->VolatileFileId);
        if (rc)
                cifs_put_tcon(tcon);

        return rc;
}

/**
 * smb311_update_preauth_hash - update @ses hash with the packet data in @iov
 *
 * Assumes @iov does not contain the rfc1002 length and iov[0] has the
 * SMB2 header.
 *
 * @ses:	server session structure
 * @server:	pointer to server info
 * @iov:	array containing the SMB request we will send to the server
 * @nvec:	number of array entries for the iov
 */
int
smb311_update_preauth_hash(struct cifs_ses *ses, struct TCP_Server_Info *server,
                           struct kvec *iov, int nvec)
{
        int i, rc;
        struct smb2_hdr *hdr;
        struct shash_desc *sha512 = NULL;

        hdr = (struct smb2_hdr *)iov[0].iov_base;
        /* neg prot are always taken */
        if (hdr->Command == SMB2_NEGOTIATE)
                goto ok;

        /*
         * If we process a command which wasn't a negprot it means the
         * neg prot was already done, so the server dialect was set
         * and we can test it. Preauth requires 3.1.1 for now.
         */
        if (server->dialect != SMB311_PROT_ID)
                return 0;

        if (hdr->Command != SMB2_SESSION_SETUP)
                return 0;

        /* skip last sess setup response */
        if ((hdr->Flags & SMB2_FLAGS_SERVER_TO_REDIR)
            && (hdr->Status == NT_STATUS_OK
                || (hdr->Status !=
                    cpu_to_le32(NT_STATUS_MORE_PROCESSING_REQUIRED))))
                return 0;

ok:
        rc = smb311_crypto_shash_allocate(server);
        if (rc)
                return rc;

        sha512 = server->secmech.sha512;
        rc = crypto_shash_init(sha512);
        if (rc) {
                cifs_dbg(VFS, "%s: Could not init sha512 shash\n", __func__);
                return rc;
        }

        rc = crypto_shash_update(sha512, ses->preauth_sha_hash,
                                 SMB2_PREAUTH_HASH_SIZE);
        if (rc) {
                cifs_dbg(VFS, "%s: Could not update sha512 shash\n", __func__);
                return rc;
        }

        for (i = 0; i < nvec; i++) {
                rc = crypto_shash_update(sha512, iov[i].iov_base, iov[i].iov_len);
                if (rc) {
                        cifs_dbg(VFS, "%s: Could not update sha512 shash\n",
                                 __func__);
                        return rc;
                }
        }

        rc = crypto_shash_final(sha512, ses->preauth_sha_hash);
        if (rc) {
                cifs_dbg(VFS, "%s: Could not finalize sha512 shash\n",
                         __func__);
                return rc;
        }

        return 0;
}