Merge tag 'nvme-5.15-2021-10-28' of git://git.infradead.org/nvme into block-5.15
Pull NVMe fixes from Christoph:

"nvme fixes for Linux 5.15

 - fix nvmet-tcp header digest verification (Amit Engel)
 - fix a memory leak in nvmet-tcp when releasing a queue (Maurizio Lombardi)
 - fix nvme-tcp H2CData PDU send accounting again (Sagi Grimberg)
 - fix digest pointer calculation in nvme-tcp and nvmet-tcp (Varun Prakash)
 - fix possible nvme-tcp req->offset corruption (Varun Prakash)"

* tag 'nvme-5.15-2021-10-28' of git://git.infradead.org/nvme:
  nvmet-tcp: fix header digest verification
  nvmet-tcp: fix data digest pointer calculation
  nvme-tcp: fix data digest pointer calculation
  nvme-tcp: fix possible req->offset corruption
  nvme-tcp: fix H2CData PDU send accounting (again)
  nvmet-tcp: fix a memory leak when releasing a queue
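The two digest pointer fixes below share one root cause: C pointer arithmetic scales by the pointee size. req->ddgst and cmd->exp_ddgst are 4-byte fields, so "&req->ddgst + req->offset" advances offset * 4 bytes instead of offset bytes, stepping past the digest whenever it goes out in more than one piece. A minimal standalone illustration (plain userspace C, not the driver code):

#include <stdint.h>
#include <stdio.h>

struct req {
	uint32_t ddgst;	/* 4-byte digest, may be sent in several pieces */
};

int main(void)
{
	struct req r = { 0 };
	unsigned offset = 2;	/* two digest bytes already sent */

	/* Buggy form: steps in sizeof(uint32_t) units, 8 bytes past the field. */
	const void *bad = &r.ddgst + offset;
	/* Fixed form: cast to a byte pointer first, so offset counts bytes. */
	const void *good = (const uint8_t *)&r.ddgst + offset;

	printf("bad advances %td bytes, good advances %td bytes\n",
	       (const char *)bad - (const char *)&r.ddgst,
	       (const char *)good - (const char *)&r.ddgst);
	return 0;
}

Casting through u8 * first, as both patches do, makes the offset count in bytes.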
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -926,12 +926,14 @@ static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
 static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
 {
 	struct nvme_tcp_queue *queue = req->queue;
+	int req_data_len = req->data_len;
 
 	while (true) {
 		struct page *page = nvme_tcp_req_cur_page(req);
 		size_t offset = nvme_tcp_req_cur_offset(req);
 		size_t len = nvme_tcp_req_cur_length(req);
 		bool last = nvme_tcp_pdu_last_send(req, len);
+		int req_data_sent = req->data_sent;
 		int ret, flags = MSG_DONTWAIT;
 
 		if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
@@ -958,7 +960,7 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
 		 * in the request where we don't want to modify it as we may
 		 * compete with the RX path completing the request.
 		 */
-		if (req->data_sent + ret < req->data_len)
+		if (req_data_sent + ret < req_data_len)
 			nvme_tcp_advance_req(req, ret);
 
 		/* fully successful last send in current PDU */
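Why the locals in the two hunks above: once the final fragment of a request is handed to the TCP stack, the RX path can complete and recycle the request concurrently, so req->data_sent and req->data_len must not be re-read after the send. A hedged sketch of the pattern, with hypothetical stand-in types and helpers rather than the driver's own:

#include <stdio.h>

/* Hypothetical stand-ins for illustration, not the driver's types. */
struct request_state {
	int data_sent;
	int data_len;
};

/* Pretend the network accepted every byte we offered. */
static int do_send(struct request_state *req, int len)
{
	(void)req;
	return len;
}

static void advance(struct request_state *req, int ret)
{
	req->data_sent += ret;
}

/*
 * Snapshot pattern: copy the fields needed for the post-send check into
 * locals *before* the call after which a concurrent completion path may
 * recycle the request, then test only the locals.
 */
static int send_fragment(struct request_state *req, int len)
{
	int req_data_sent = req->data_sent;	/* snapshot before send */
	int req_data_len = req->data_len;
	int ret = do_send(req, len);	/* req may complete from here on */

	if (ret <= 0)
		return ret;
	if (req_data_sent + ret < req_data_len)	/* not the last byte yet */
		advance(req, ret);	/* safe: req is still ours */
	return ret;
}

int main(void)
{
	struct request_state req = { .data_sent = 0, .data_len = 8 };

	printf("sent %d of %d\n", send_fragment(&req, 4), req.data_len);
	return 0;
}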
@@ -1048,10 +1050,11 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
 static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
 {
 	struct nvme_tcp_queue *queue = req->queue;
+	size_t offset = req->offset;
 	int ret;
 	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
 	struct kvec iov = {
-		.iov_base = &req->ddgst + req->offset,
+		.iov_base = (u8 *)&req->ddgst + req->offset,
 		.iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
 	};
 
@@ -1064,7 +1067,7 @@ static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
 	if (unlikely(ret <= 0))
 		return ret;
 
-	if (req->offset + ret == NVME_TCP_DIGEST_LENGTH) {
+	if (offset + ret == NVME_TCP_DIGEST_LENGTH) {
 		nvme_tcp_done_send_req(queue);
 		return 1;
 	}
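The same race explains the offset snapshot above: the send can push the last digest bytes onto the wire, the RX path completes the request and resets req->offset, and the old "req->offset + ret == NVME_TCP_DIGEST_LENGTH" test then misses the completion. Reading the offset once, before the send, keeps the check coherent. A self-contained sketch under assumed names (fake_send stands in for the socket call):

#include <stdint.h>
#include <stdio.h>

#define DIGEST_LEN 4	/* NVME_TCP_DIGEST_LENGTH is also 4 */

struct dreq {
	uint32_t ddgst;	/* digest on the wire, possibly in pieces */
	int offset;	/* digest bytes already sent */
};

/* Stand-in for the socket send; pretend all offered bytes were taken. */
static int fake_send(const void *buf, int len)
{
	(void)buf;
	return len;
}

static int try_send_ddgst(struct dreq *req)
{
	int offset = req->offset;	/* read once, as in the fix */
	const uint8_t *base = (const uint8_t *)&req->ddgst + offset;
	int ret = fake_send(base, DIGEST_LEN - offset);

	if (ret <= 0)
		return ret;
	if (offset + ret == DIGEST_LEN)	/* compares the snapshot */
		return 1;	/* digest fully sent: request done */
	req->offset += ret;	/* partial send: record progress */
	return -1;	/* come back later for the rest */
}

int main(void)
{
	struct dreq req = { .ddgst = 0xdeadbeef, .offset = 2 };

	printf("resume returned %d\n", try_send_ddgst(&req));
	return 0;
}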
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -702,7 +702,7 @@ static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
 	struct nvmet_tcp_queue *queue = cmd->queue;
 	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
 	struct kvec iov = {
-		.iov_base = &cmd->exp_ddgst + cmd->offset,
+		.iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
 		.iov_len = NVME_TCP_DIGEST_LENGTH - cmd->offset
 	};
 	int ret;
@@ -1096,7 +1096,7 @@ recv:
 	}
 
 	if (queue->hdr_digest &&
-	    nvmet_tcp_verify_hdgst(queue, &queue->pdu, queue->offset)) {
+	    nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) {
 		nvmet_tcp_fatal_error(queue); /* fatal */
 		return -EPROTO;
 	}
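The header digest covers exactly the hlen bytes of the PDU header, with the received digest stored immediately after them. queue->offset counts all PDU bytes received so far, including the digest itself, so it can exceed hlen, and hashing that many bytes yields a bogus expected digest. A rough userspace model of the check (the driver uses the kernel crypto API, not this open-coded CRC32C):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Bitwise CRC32C (Castagnoli), reflected polynomial 0x82F63B78. */
static uint32_t crc32c(const void *buf, size_t len)
{
	const uint8_t *p = buf;
	uint32_t crc = ~0u;

	while (len--) {
		crc ^= *p++;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
	}
	return ~crc;
}

/* pdu points at hlen header bytes followed by the 4-byte header digest. */
static int verify_hdgst(const uint8_t *pdu, uint8_t hlen)
{
	uint32_t recv_digest, exp_digest = crc32c(pdu, hlen);

	memcpy(&recv_digest, pdu + hlen, sizeof(recv_digest));
	return recv_digest == exp_digest ? 0 : -1;	/* 0 = digests match */
}

int main(void)
{
	uint8_t pdu[12] = { 4, 0, 8, 0, 24, 0, 0, 0 };	/* 8-byte header */
	uint32_t digest = crc32c(pdu, 8);

	memcpy(pdu + 8, &digest, sizeof(digest));	/* digest after header */
	printf("verify: %d\n", verify_hdgst(pdu, 8));
	return 0;
}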
@@ -1428,6 +1428,7 @@ static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
 
 static void nvmet_tcp_release_queue_work(struct work_struct *w)
 {
+	struct page *page;
 	struct nvmet_tcp_queue *queue =
 		container_of(w, struct nvmet_tcp_queue, release_work);
 
@@ -1447,6 +1448,8 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
 	nvmet_tcp_free_crypto(queue);
 	ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);
 
+	page = virt_to_head_page(queue->pf_cache.va);
+	__page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
 	kfree(queue);
 }
 
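The leak fixed above: a page_frag cache takes a batch of references on its backing page up front and pays one out per allocated fragment. The release path freed the commands and crypto but never returned the cache's unused references, so the page's refcount could never reach zero; __page_frag_cache_drain() gives the leftover bias back. A toy model of that accounting (plain C, a simplification of the mm code):

#include <stdio.h>

struct frag_cache {
	int refcount;		/* page reference count */
	int pagecnt_bias;	/* references still owned by the cache */
};

static void cache_init(struct frag_cache *c)
{
	c->refcount = 128;	/* take many refs in one atomic op */
	c->pagecnt_bias = 128;
}

static void alloc_frag(struct frag_cache *c)
{
	c->pagecnt_bias--;	/* hand one prepaid ref to the user */
}

static void drain(struct frag_cache *c)
{
	c->refcount -= c->pagecnt_bias;	/* return the unused refs */
	c->pagecnt_bias = 0;
}

int main(void)
{
	struct frag_cache c;

	cache_init(&c);
	alloc_frag(&c);		/* one outstanding fragment */
	drain(&c);		/* without this, 127 refs leak */
	printf("refcount after drain: %d (1 = the outstanding frag)\n",
	       c.refcount);
	return 0;
}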