v6.2 first rc pull request
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "Most noticeable is that Yishai found a big data corruption regression
  due to a change in the scatterlist:

   - Do not wrongly combine non-contiguous pages in scatterlist

   - Fix compilation warnings on gcc 13

   - Oops when using some mlx5 stats

   - Bad enforcement of atomic responder resources in mlx5"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  lib/scatterlist: Fix to merge contiguous pages into the last SG properly
  RDMA/mlx5: Fix validation of max_rd_atomic caps for DC
  RDMA/mlx5: Fix mlx5_ib_get_hw_stats when used for device
  RDMA/srp: Move large values to a new enum for gcc13
commit 4a4dcea083
drivers/infiniband/hw/mlx5/counters.c

@@ -278,7 +278,6 @@ static int do_get_hw_stats(struct ib_device *ibdev,
 	const struct mlx5_ib_counters *cnts = get_counters(dev, port_num - 1);
 	struct mlx5_core_dev *mdev;
 	int ret, num_counters;
-	u32 mdev_port_num;
 
 	if (!stats)
 		return -EINVAL;
@@ -299,8 +298,9 @@ static int do_get_hw_stats(struct ib_device *ibdev,
 	}
 
 	if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
-		mdev = mlx5_ib_get_native_port_mdev(dev, port_num,
-						    &mdev_port_num);
+		if (!port_num)
+			port_num = 1;
+		mdev = mlx5_ib_get_native_port_mdev(dev, port_num, NULL);
 		if (!mdev) {
 			/* If port is not affiliated yet, its in down state
 			 * which doesn't have any counters yet, so it would be
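The oops this hunk fixes comes from the device-wide stats path: the hw-stats hook can be invoked with port_num == 0 to mean "whole device", but the native-port lookup only understands 1-based port numbers. The fix normalizes the selector and drops the now-unused output argument. Below is a minimal, self-contained sketch of that normalization pattern, assuming hypothetical lookup_port()/read_stat() helpers rather than the real mlx5 API:

#include <stdio.h>

#define NUM_PORTS 2

/* Toy per-port state; index 0 corresponds to port 1. */
struct port { long counters[4]; };
static struct port ports[NUM_PORTS];

/* Hypothetical lookup: valid port numbers are 1-based, as in RDMA. */
static struct port *lookup_port(unsigned int port_num)
{
	if (port_num < 1 || port_num > NUM_PORTS)
		return NULL;
	return &ports[port_num - 1];
}

/* port_num == 0 means "device-wide" stats; fall back to the first
 * port before the per-port lookup, mirroring the
 * "if (!port_num) port_num = 1;" hunk above. */
static long read_stat(unsigned int port_num, int idx)
{
	struct port *p;

	if (!port_num)
		port_num = 1;
	p = lookup_port(port_num);
	return p ? p->counters[idx] : -1;
}

int main(void)
{
	ports[0].counters[0] = 42;
	printf("%ld\n", read_stat(0, 0));	/* device-wide -> port 1 -> 42 */
	return 0;
}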
drivers/infiniband/hw/mlx5/qp.c

@@ -4502,6 +4502,40 @@ static bool mlx5_ib_modify_qp_allowed(struct mlx5_ib_dev *dev,
 	return false;
 }
 
+static int validate_rd_atomic(struct mlx5_ib_dev *dev, struct ib_qp_attr *attr,
+			      int attr_mask, enum ib_qp_type qp_type)
+{
+	int log_max_ra_res;
+	int log_max_ra_req;
+
+	if (qp_type == MLX5_IB_QPT_DCI) {
+		log_max_ra_res = 1 << MLX5_CAP_GEN(dev->mdev,
+						   log_max_ra_res_dc);
+		log_max_ra_req = 1 << MLX5_CAP_GEN(dev->mdev,
+						   log_max_ra_req_dc);
+	} else {
+		log_max_ra_res = 1 << MLX5_CAP_GEN(dev->mdev,
+						   log_max_ra_res_qp);
+		log_max_ra_req = 1 << MLX5_CAP_GEN(dev->mdev,
+						   log_max_ra_req_qp);
+	}
+
+	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
+	    attr->max_rd_atomic > log_max_ra_res) {
+		mlx5_ib_dbg(dev, "invalid max_rd_atomic value %d\n",
+			    attr->max_rd_atomic);
+		return false;
+	}
+
+	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
+	    attr->max_dest_rd_atomic > log_max_ra_req) {
+		mlx5_ib_dbg(dev, "invalid max_dest_rd_atomic value %d\n",
+			    attr->max_dest_rd_atomic);
+		return false;
+	}
+	return true;
+}
+
 int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		      int attr_mask, struct ib_udata *udata)
 {
@@ -4589,21 +4623,8 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		goto out;
 	}
 
-	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
-	    attr->max_rd_atomic >
-	    (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp))) {
-		mlx5_ib_dbg(dev, "invalid max_rd_atomic value %d\n",
-			    attr->max_rd_atomic);
+	if (!validate_rd_atomic(dev, attr, attr_mask, qp_type))
 		goto out;
-	}
-
-	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
-	    attr->max_dest_rd_atomic >
-	    (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp))) {
-		mlx5_ib_dbg(dev, "invalid max_dest_rd_atomic value %d\n",
-			    attr->max_dest_rd_atomic);
-		goto out;
-	}
 
 	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
 		err = 0;
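The new helper consolidates both checks and, for DCI QPs, reads the DC-specific caps (log_max_ra_res_dc/log_max_ra_req_dc) instead of the generic QP caps that the old inline checks used unconditionally; the caps are log2-encoded, so 1 << cap yields the actual maximum. A self-contained sketch of the same select-then-compare shape, with hypothetical cap values rather than the mlx5 API:

#include <stdbool.h>
#include <stdio.h>

enum qp_type { QPT_RC, QPT_DCI };

/* Hypothetical device caps, stored log2-encoded as in mlx5. */
struct caps {
	unsigned int log_max_ra_res_qp, log_max_ra_req_qp;
	unsigned int log_max_ra_res_dc, log_max_ra_req_dc;
};

/* Pick the cap set matching the QP type, decode with 1 << cap, then
 * compare -- the shape of validate_rd_atomic() above. */
static bool validate_rd_atomic(const struct caps *c, enum qp_type type,
			       unsigned int max_rd, unsigned int max_dest_rd)
{
	unsigned int max_ra_res, max_ra_req;

	if (type == QPT_DCI) {
		max_ra_res = 1u << c->log_max_ra_res_dc;
		max_ra_req = 1u << c->log_max_ra_req_dc;
	} else {
		max_ra_res = 1u << c->log_max_ra_res_qp;
		max_ra_req = 1u << c->log_max_ra_req_qp;
	}
	return max_rd <= max_ra_res && max_dest_rd <= max_ra_req;
}

int main(void)
{
	/* Assume DC supports fewer responder resources than RC here. */
	struct caps c = { .log_max_ra_res_qp = 4, .log_max_ra_req_qp = 4,
			  .log_max_ra_res_dc = 2, .log_max_ra_req_dc = 2 };

	printf("RC,  16: %d\n", validate_rd_atomic(&c, QPT_RC, 16, 16));  /* ok */
	printf("DCI, 16: %d\n", validate_rd_atomic(&c, QPT_DCI, 16, 16)); /* rejected */
	return 0;
}

Note that the kernel helper keeps the decoded 1 << cap values in variables still named log_max_ra_res/log_max_ra_req; the sketch uses max_* names for clarity.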
drivers/infiniband/ulp/srp/ib_srp.h

@@ -62,9 +62,6 @@ enum {
 	SRP_DEFAULT_CMD_SQ_SIZE = SRP_DEFAULT_QUEUE_SIZE - SRP_RSP_SQ_SIZE -
 				  SRP_TSK_MGMT_SQ_SIZE,
 
-	SRP_TAG_NO_REQ		= ~0U,
-	SRP_TAG_TSK_MGMT	= 1U << 31,
-
 	SRP_MAX_PAGES_PER_MR	= 512,
 
 	SRP_MAX_ADD_CDB_LEN	= 16,
@@ -79,6 +76,11 @@ enum {
 		sizeof(struct srp_imm_buf),
 };
 
+enum {
+	SRP_TAG_NO_REQ		= ~0U,
+	SRP_TAG_TSK_MGMT	= BIT(31),
+};
+
 enum srp_target_state {
 	SRP_TARGET_SCANNING,
 	SRP_TARGET_LIVE,
lib/scatterlist.c

@@ -476,7 +476,7 @@ int sg_alloc_append_table_from_pages(struct sg_append_table *sgt_append,
 		/* Merge contiguous pages into the last SG */
 		prv_len = sgt_append->prv->length;
 		last_pg = sg_page(sgt_append->prv);
-		while (n_pages && pages_are_mergeable(last_pg, pages[0])) {
+		while (n_pages && pages_are_mergeable(pages[0], last_pg)) {
 			if (sgt_append->prv->length + PAGE_SIZE > max_segment)
 				break;
 			sgt_append->prv->length += PAGE_SIZE;
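The one-line scatterlist fix is an argument-order bug: pages_are_mergeable(a, b) answers "does page a immediately follow page b?", so when appending pages[0] behind last_pg the candidate must come first. With the operands reversed, the code could merge a page adjacent in the wrong direction, i.e. not actually contiguous with the tail of the SG, producing the data corruption the pull request describes. A simplified sketch of the predicate's contract, using a fake pfn-only page struct rather than the kernel's struct page:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for a struct page: just a page frame number. */
struct fake_page { unsigned long pfn; };

/* Mirrors the predicate's contract: true when `a` is the page that
 * immediately follows `b` in physical memory (pfn(a) == pfn(b) + 1). */
static bool pages_are_mergeable(struct fake_page *a, struct fake_page *b)
{
	return a->pfn == b->pfn + 1;
}

int main(void)
{
	struct fake_page last = { .pfn = 100 };	/* tail of the last SG */
	struct fake_page next = { .pfn = 101 };	/* candidate to append */
	struct fake_page prev = { .pfn = 99 };	/* page *before* the tail */

	/* Fixed order: does `next` follow `last`? -> 1, safe to merge. */
	printf("fixed: %d\n", pages_are_mergeable(&next, &last));

	/* Buggy order asks whether `last` follows the candidate: false
	 * for `next`, but true for `prev`, wrongly merging memory that
	 * is not contiguous with the tail of the SG. */
	printf("buggy: %d %d\n", pages_are_mergeable(&last, &next),
	       pages_are_mergeable(&last, &prev));
	return 0;
}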