Merge tag 'nvme-5.15-2021-10-28' of git://git.infradead.org/nvme into block-5.15

Pull NVMe fixes from Christoph:

"nvme fixe for Linux 5.15

 - fix nvmet-tcp header digest verification (Amit Engel)
 - fix a memory leak in nvmet-tcp when releasing a queue
   (Maurizio Lombardi)
 - fix nvme-tcp H2CData PDU send accounting again (Sagi Grimberg)
 - fix digest pointer calculation in nvme-tcp and nvmet-tcp
   (Varun Prakash)
 - fix possible nvme-tcp req->offset corruption (Varun Prakash)"

* tag 'nvme-5.15-2021-10-28' of git://git.infradead.org/nvme:
  nvmet-tcp: fix header digest verification
  nvmet-tcp: fix data digest pointer calculation
  nvme-tcp: fix data digest pointer calculation
  nvme-tcp: fix possible req->offset corruption
  nvme-tcp: fix H2CData PDU send accounting (again)
  nvmet-tcp: fix a memory leak when releasing a queue
Committed by Jens Axboe on 2021-10-28 08:34:01 -06:00 in commit f4aaf1fa8b
2 files changed, 11 insertions(+), 5 deletions(-)
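
For context on the two "data digest pointer calculation" fixes: req->ddgst and cmd->exp_ddgst are 32-bit values, and adding the byte offset of a partial send directly to their address performs pointer arithmetic in 4-byte units, overshooting the digest. A standalone userspace sketch of the mistake and of the cast that fixes it (illustrative only, not driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ddgst = 0;	/* stands in for the 4-byte digest field */
	size_t offset = 1;	/* one digest byte already sent earlier */

	/* Wrong: &ddgst has type uint32_t *, so "+ offset" steps in 4-byte
	 * units and skips the rest of the digest entirely. */
	char *bad = (char *)(&ddgst + offset);

	/* Right: cast to a byte pointer first, then add the byte offset. */
	char *good = (char *)&ddgst + offset;

	printf("wrong advance: %td bytes, correct advance: %td bytes\n",
	       bad - (char *)&ddgst, good - (char *)&ddgst);
	return 0;
}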

drivers/nvme/host/tcp.c

@@ -926,12 +926,14 @@ static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
 static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
 {
 	struct nvme_tcp_queue *queue = req->queue;
+	int req_data_len = req->data_len;

 	while (true) {
 		struct page *page = nvme_tcp_req_cur_page(req);
 		size_t offset = nvme_tcp_req_cur_offset(req);
 		size_t len = nvme_tcp_req_cur_length(req);
 		bool last = nvme_tcp_pdu_last_send(req, len);
+		int req_data_sent = req->data_sent;
 		int ret, flags = MSG_DONTWAIT;

 		if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
@@ -958,7 +960,7 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
 		 * in the request where we don't want to modify it as we may
 		 * compete with the RX path completing the request.
 		 */
-		if (req->data_sent + ret < req->data_len)
+		if (req_data_sent + ret < req_data_len)
 			nvme_tcp_advance_req(req, ret);

 		/* fully successful last send in current PDU */
@@ -1048,10 +1050,11 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
 static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
 {
 	struct nvme_tcp_queue *queue = req->queue;
+	size_t offset = req->offset;
 	int ret;
 	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
 	struct kvec iov = {
-		.iov_base = &req->ddgst + req->offset,
+		.iov_base = (u8 *)&req->ddgst + req->offset,
 		.iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
 	};

@@ -1064,7 +1067,7 @@ static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
 	if (unlikely(ret <= 0))
 		return ret;

-	if (req->offset + ret == NVME_TCP_DIGEST_LENGTH) {
+	if (offset + ret == NVME_TCP_DIGEST_LENGTH) {
 		nvme_tcp_done_send_req(queue);
 		return 1;
 	}
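
Two of the changes above (the H2CData send accounting and the req->offset fix) follow the same pattern: snapshot the request field into a local before the socket send, because a fully successful send of the last bytes allows the RX path to complete and recycle the request, after which its fields must not be touched. A rough userspace sketch of that pattern, with hypothetical names (not driver code):

#include <stddef.h>

struct fake_req {
	size_t offset;	/* may be reset by another context once the request completes */
};

/* stand-in for kernel_sendmsg(): pretend the socket accepts every byte */
static int send_bytes(size_t len)
{
	return (int)len;
}

static int try_send_digest(struct fake_req *req, size_t digest_len)
{
	size_t offset = req->offset;	/* snapshot before the send */
	int ret = send_bytes(digest_len - offset);

	if (ret <= 0)
		return ret;

	/* Compare against the snapshot: if this was the final chunk, the
	 * request may already be completed and reused, so req->offset must
	 * not be read (or written) from this point on. */
	if (offset + (size_t)ret == digest_len)
		return 1;	/* fully sent */

	req->offset += ret;	/* partial send: the request is still ours */
	return 0;		/* caller retries later */
}

int main(void)
{
	struct fake_req req = { .offset = 0 };

	return try_send_digest(&req, 4) == 1 ? 0 : 1;
}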

drivers/nvme/target/tcp.c

@@ -702,7 +702,7 @@ static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
 	struct nvmet_tcp_queue *queue = cmd->queue;
 	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
 	struct kvec iov = {
-		.iov_base = &cmd->exp_ddgst + cmd->offset,
+		.iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
 		.iov_len = NVME_TCP_DIGEST_LENGTH - cmd->offset
 	};
 	int ret;
@@ -1096,7 +1096,7 @@ recv:
 	}

 	if (queue->hdr_digest &&
-	    nvmet_tcp_verify_hdgst(queue, &queue->pdu, queue->offset)) {
+	    nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) {
 		nvmet_tcp_fatal_error(queue); /* fatal */
 		return -EPROTO;
 	}
@@ -1428,6 +1428,7 @@ static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)

 static void nvmet_tcp_release_queue_work(struct work_struct *w)
 {
+	struct page *page;
 	struct nvmet_tcp_queue *queue =
 		container_of(w, struct nvmet_tcp_queue, release_work);

@@ -1447,6 +1448,8 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
 	nvmet_tcp_free_crypto(queue);
 	ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);

+	page = virt_to_head_page(queue->pf_cache.va);
+	__page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
 	kfree(queue);
 }
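
On the final hunk: each nvmet-tcp queue owns a page_frag_cache (queue->pf_cache) from which PDU buffers for its commands are carved, and the cache holds its own reference on the current backing page, so freeing the queue without draining that reference leaked the page. A userspace analogue of the missing drain step, using a toy refcounted cache (illustrative only, not kernel code):

#include <stdlib.h>

/* toy stand-in for struct page_frag_cache: one backing page plus the
 * references the cache itself still holds on it */
struct frag_cache {
	void *page;
	int refs;
};

static void cache_drain(struct frag_cache *c)
{
	if (c->page && --c->refs == 0) {
		free(c->page);	/* drop the cache's own reference */
		c->page = NULL;
	}
}

int main(void)
{
	struct frag_cache cache = { .page = malloc(4096), .refs = 1 };

	/* ... command buffers would be carved out of cache.page here ... */

	cache_drain(&cache);	/* without this drain, the backing page leaks */
	return 0;
}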