cifs: smbd: take an array of requests when sending upper layer data
To support compounding, __smb_send_rqst() now sends an array of requests to the transport layer. Change smbd_send() to take an array of requests, and send them in as few packets as possible.

Signed-off-by: Long Li <longli@microsoft.com>
Signed-off-by: Steve French <stfrench@microsoft.com>
CC: Stable <stable@vger.kernel.org>
commit 4739f23286
parent 46e6661963
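For orientation, here is a minimal user-space sketch of the interface change, using simplified stand-in types (these are not the kernel's struct smb_rqst or TCP_Server_Info definitions): the caller now hands smbd_send() the whole compound chain in one call, as __smb_send_rqst() does in the diff below. The sketch only shows the calling convention; the real function walks each request's iovecs and page array and posts them over RDMA.

/* Illustrative sketch only: simplified stand-ins, not the kernel types. */
#include <stddef.h>
#include <stdio.h>

struct kvec { void *iov_base; size_t iov_len; };
struct smb_rqst { struct kvec *rq_iov; int rq_nvec; };
struct TCP_Server_Info { int unused; };

/* Old shape (one request per call):
 *   int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst);
 * New shape: the caller passes the whole compound chain at once. */
static int smbd_send(struct TCP_Server_Info *server,
		     int num_rqst, struct smb_rqst *rqst_array)
{
	for (int i = 0; i < num_rqst; i++)
		printf("rqst %d: %d iovec(s)\n", i, rqst_array[i].rq_nvec);
	return 0;
}

int main(void)
{
	char hdr_a[64], hdr_b[64];
	struct kvec iov_a = { hdr_a, sizeof(hdr_a) };
	struct kvec iov_b = { hdr_b, sizeof(hdr_b) };
	struct smb_rqst compound[2] = {
		{ .rq_iov = &iov_a, .rq_nvec = 1 },
		{ .rq_iov = &iov_b, .rq_nvec = 1 },
	};
	struct TCP_Server_Info srv = { 0 };

	/* Mirrors the call site in __smb_send_rqst(): the array of
	 * compounded requests is passed down in a single call. */
	return smbd_send(&srv, 2, compound);
}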
@@ -2072,7 +2072,8 @@ out:
  * rqst: the data to write
  * return value: 0 if successfully write, otherwise error code
  */
-int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst)
+int smbd_send(struct TCP_Server_Info *server,
+	int num_rqst, struct smb_rqst *rqst_array)
 {
 	struct smbd_connection *info = server->smbd_conn;
 	struct kvec vec;
@@ -2084,53 +2085,49 @@ int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst)
 		info->max_send_size - sizeof(struct smbd_data_transfer);
 	struct kvec *iov;
 	int rc;
+	struct smb_rqst *rqst;
+	int rqst_idx;
 
 	if (info->transport_status != SMBD_CONNECTED) {
 		rc = -EAGAIN;
 		goto done;
 	}
 
-	/*
-	 * Skip the RFC1002 length defined in MS-SMB2 section 2.1
-	 * It is used only for TCP transport in the iov[0]
-	 * In future we may want to add a transport layer under protocol
-	 * layer so this will only be issued to TCP transport
-	 */
-
-	if (rqst->rq_iov[0].iov_len != 4) {
-		log_write(ERR, "expected the pdu length in 1st iov, but got %zu\n", rqst->rq_iov[0].iov_len);
-		return -EINVAL;
-	}
-
 	/*
 	 * Add in the page array if there is one. The caller needs to set
 	 * rq_tailsz to PAGE_SIZE when the buffer has multiple pages and
 	 * ends at page boundary
 	 */
-	buflen = smb_rqst_len(server, rqst);
+	remaining_data_length = 0;
+	for (i = 0; i < num_rqst; i++)
+		remaining_data_length += smb_rqst_len(server, &rqst_array[i]);
 
-	if (buflen + sizeof(struct smbd_data_transfer) >
+	if (remaining_data_length + sizeof(struct smbd_data_transfer) >
 		info->max_fragmented_send_size) {
 		log_write(ERR, "payload size %d > max size %d\n",
-			buflen, info->max_fragmented_send_size);
+			remaining_data_length, info->max_fragmented_send_size);
 		rc = -EINVAL;
 		goto done;
 	}
 
-	iov = &rqst->rq_iov[1];
+	rqst_idx = 0;
 
-	cifs_dbg(FYI, "Sending smb (RDMA): smb_len=%u\n", buflen);
-	for (i = 0; i < rqst->rq_nvec-1; i++)
+next_rqst:
+	rqst = &rqst_array[rqst_idx];
+	iov = rqst->rq_iov;
+
+	cifs_dbg(FYI, "Sending smb (RDMA): idx=%d smb_len=%lu\n",
+		rqst_idx, smb_rqst_len(server, rqst));
+	for (i = 0; i < rqst->rq_nvec; i++)
 		dump_smb(iov[i].iov_base, iov[i].iov_len);
 
-	remaining_data_length = buflen;
 
-	log_write(INFO, "rqst->rq_nvec=%d rqst->rq_npages=%d rq_pagesz=%d "
-		"rq_tailsz=%d buflen=%d\n",
-		rqst->rq_nvec, rqst->rq_npages, rqst->rq_pagesz,
-		rqst->rq_tailsz, buflen);
+	log_write(INFO, "rqst_idx=%d nvec=%d rqst->rq_npages=%d rq_pagesz=%d "
+		"rq_tailsz=%d buflen=%lu\n",
+		rqst_idx, rqst->rq_nvec, rqst->rq_npages, rqst->rq_pagesz,
+		rqst->rq_tailsz, smb_rqst_len(server, rqst));
 
-	start = i = iov[0].iov_len ? 0 : 1;
+	start = i = 0;
 	buflen = 0;
 	while (true) {
 		buflen += iov[i].iov_len;
@@ -2178,14 +2175,14 @@ int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst)
 					goto done;
 				}
 				i++;
-				if (i == rqst->rq_nvec-1)
+				if (i == rqst->rq_nvec)
 					break;
 			}
 			start = i;
 			buflen = 0;
 		} else {
 			i++;
-			if (i == rqst->rq_nvec-1) {
+			if (i == rqst->rq_nvec) {
 				/* send out all remaining vecs */
 				remaining_data_length -= buflen;
 				log_write(INFO,
@@ -2229,6 +2226,10 @@ int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst)
 		}
 	}
 
+	rqst_idx++;
+	if (rqst_idx < num_rqst)
+		goto next_rqst;
+
 done:
 	/*
 	 * As an optimization, we don't wait for individual I/O to finish
@@ -284,7 +284,8 @@ void smbd_destroy(struct TCP_Server_Info *server);
 
 /* Interface for carrying upper layer I/O through send/recv */
 int smbd_recv(struct smbd_connection *info, struct msghdr *msg);
-int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst);
+int smbd_send(struct TCP_Server_Info *server,
+	int num_rqst, struct smb_rqst *rqst);
 
 enum mr_state {
 	MR_READY,
@@ -324,7 +325,7 @@ static inline void *smbd_get_connection(
 static inline int smbd_reconnect(struct TCP_Server_Info *server) {return -1; }
 static inline void smbd_destroy(struct TCP_Server_Info *server) {}
 static inline int smbd_recv(struct smbd_connection *info, struct msghdr *msg) {return -1; }
-static inline int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst) {return -1; }
+static inline int smbd_send(struct TCP_Server_Info *server, int num_rqst, struct smb_rqst *rqst) {return -1; }
 #endif
 
 #endif
@@ -319,7 +319,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
 	__be32 rfc1002_marker;
 
 	if (cifs_rdma_enabled(server) && server->smbd_conn) {
-		rc = smbd_send(server, rqst);
+		rc = smbd_send(server, num_rqst, rqst);
 		goto smbd_done;
 	}
 
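The "as few packets as possible" behavior comes from the send loop in smbd_send() shown above: iovecs are accumulated until the next one would exceed the per-packet payload budget, and only then is a packet posted. Below is a rough user-space sketch of that greedy packing policy, assuming an arbitrary MAX_PAYLOAD value and a hypothetical flush() helper in place of smbd_post_send_data(); unlike the kernel code, it does not split a single iovec that is larger than the budget.

/* Greedy iovec packing sketch (illustration only, not the kernel code). */
#include <stddef.h>
#include <stdio.h>

#define MAX_PAYLOAD 1364	/* arbitrary stand-in for max_send_size minus headers */

static void flush(size_t start, size_t end, size_t bytes)
{
	/* Stands in for posting one SMB Direct data packet. */
	printf("packet: iov[%zu..%zu), %zu bytes\n", start, end, bytes);
}

int main(void)
{
	/* Lengths of the iovecs of one (or several concatenated) requests. */
	size_t iov_len[] = { 100, 600, 700, 200, 1300, 64 };
	size_t n = sizeof(iov_len) / sizeof(iov_len[0]);
	size_t start = 0, buflen = 0;

	for (size_t i = 0; i < n; i++) {
		if (buflen + iov_len[i] > MAX_PAYLOAD) {
			/* Next iovec would not fit: flush what we have so far. */
			flush(start, i, buflen);
			start = i;
			buflen = 0;
		}
		buflen += iov_len[i];
	}
	if (buflen)
		flush(start, n, buflen);	/* send out the remaining iovecs */
	return 0;
}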