Merge branch '6.8/s/mpi3mr2' into 6.8/scsi-staging

Eight driver updates from Chandrakanth Patil at Broadcom:

  scsi: mpi3mr: Update driver version to 8.5.1.0.0
  scsi: mpi3mr: Support for preallocation of SGL BSG data buffers part-3
  scsi: mpi3mr: Support for preallocation of SGL BSG data buffers part-2
  scsi: mpi3mr: Support for preallocation of SGL BSG data buffers part-1
  scsi: mpi3mr: Fetch correct device dev handle for status reply descriptor
  scsi: mpi3mr: Block PEL Enable Command on Controller Reset and Unrecoverable State
  scsi: mpi3mr: Clean up block devices post controller reset
  scsi: mpi3mr: Refresh sdev queue depth after controller reset

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Committed by Martin K. Petersen on 2023-12-05 21:46:32 -05:00
commit e84d34372e
5 changed files with 583 additions and 118 deletions


@@ -55,8 +55,8 @@ extern struct list_head mrioc_list;
extern int prot_mask;
extern atomic64_t event_counter;
#define MPI3MR_DRIVER_VERSION "8.5.0.0.50"
#define MPI3MR_DRIVER_RELDATE "22-November-2023"
#define MPI3MR_DRIVER_VERSION "8.5.1.0.0"
#define MPI3MR_DRIVER_RELDATE "5-December-2023"
#define MPI3MR_DRIVER_NAME "mpi3mr"
#define MPI3MR_DRIVER_LICENSE "GPL"
@@ -218,14 +218,16 @@ extern atomic64_t event_counter;
* @length: SGE length
* @rsvd: Reserved
* @rsvd1: Reserved
* @sgl_type: sgl type
* @sub_type: sgl sub type
* @type: sgl type
*/
struct mpi3mr_nvme_pt_sge {
u64 base_addr;
u32 length;
__le64 base_addr;
__le32 length;
u16 rsvd;
u8 rsvd1;
u8 sgl_type;
u8 sub_type:4;
u8 type:4;
};
/**
@@ -247,6 +249,8 @@ struct mpi3mr_buf_map {
u32 kern_buf_len;
dma_addr_t kern_buf_dma;
u8 data_dir;
u16 num_dma_desc;
struct dma_memory_desc *dma_desc;
};
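The two new members reference struct dma_memory_desc, which this hunk does not show. As a reader aid, here is its shape as used throughout the rest of this diff (addr/dma_addr/size); it is assumed to match the existing definition in mpi3mr.h:

struct dma_memory_desc {
	u32 size;		/* length of the DMA buffer */
	void *addr;		/* kernel virtual address */
	dma_addr_t dma_addr;	/* DMA/bus address */
};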
/* IOC State definitions */
@@ -477,6 +481,10 @@ struct mpi3mr_throttle_group_info {
/* HBA port flags */
#define MPI3MR_HBA_PORT_FLAG_DIRTY 0x01
/* IOCTL data transfer sge */
#define MPI3MR_NUM_IOCTL_SGE 256
#define MPI3MR_IOCTL_SGE_SIZE (8 * 1024)
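For sizing context (editor's arithmetic, not from the patch): the per-controller data pool preallocated with these values is 256 SGEs x 8 KiB = 2 MiB, and mpi3mr_alloc_ioctl_dma_memory() below adds one 4 KiB chain buffer and one 4 KiB response buffer on top. The macro name here is hypothetical:

#define MPI3MR_IOCTL_DATA_POOL_SZ \
	(MPI3MR_NUM_IOCTL_SGE * MPI3MR_IOCTL_SGE_SIZE)	/* 2 MiB */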
/**
* struct mpi3mr_hba_port - HBA's port information
* @port_id: Port number
@@ -1042,6 +1050,11 @@ struct scmd_priv {
* @sas_node_lock: Lock to protect SAS node list
* @hba_port_table_list: List of HBA Ports
* @enclosure_list: List of Enclosure objects
* @ioctl_dma_pool: DMA pool for IOCTL data buffers
* @ioctl_sge: DMA buffer descriptors for IOCTL data
* @ioctl_chain_sge: DMA buffer descriptor for IOCTL chain
* @ioctl_resp_sge: DMA buffer descriptor for Mgmt cmd response
* @ioctl_sges_allocated: Flag for IOCTL SGEs allocated or not
*/
struct mpi3mr_ioc {
struct list_head list;
@@ -1227,6 +1240,12 @@ struct mpi3mr_ioc {
spinlock_t sas_node_lock;
struct list_head hba_port_table_list;
struct list_head enclosure_list;
struct dma_pool *ioctl_dma_pool;
struct dma_memory_desc ioctl_sge[MPI3MR_NUM_IOCTL_SGE];
struct dma_memory_desc ioctl_chain_sge;
struct dma_memory_desc ioctl_resp_sge;
bool ioctl_sges_allocated;
};
/**


@@ -223,6 +223,22 @@ static long mpi3mr_bsg_pel_enable(struct mpi3mr_ioc *mrioc,
return rval;
}
if (mrioc->unrecoverable) {
dprint_bsg_err(mrioc, "%s: unrecoverable controller\n",
__func__);
return -EFAULT;
}
if (mrioc->reset_in_progress) {
dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
return -EAGAIN;
}
if (mrioc->stop_bsgs) {
dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__);
return -EAGAIN;
}
sg_copy_to_buffer(job->request_payload.sg_list,
job->request_payload.sg_cnt,
&pel_enable, sizeof(pel_enable));
@@ -547,8 +563,37 @@ static long mpi3mr_bsg_process_drv_cmds(struct bsg_job *job)
return rval;
}
/**
* mpi3mr_total_num_ioctl_sges - Count number of SGEs required
* @drv_bufs: list of data buffer descriptors
* @bufcnt: Number of DMA buffers
*
* This function returns the total number of data SGEs required,
* including zero-length SGEs and excluding the management request
* and response buffers, for the given list of data buffer
* descriptors.
*
* Return: Number of SGE elements needed
*/
static inline u16 mpi3mr_total_num_ioctl_sges(struct mpi3mr_buf_map *drv_bufs,
u8 bufcnt)
{
u16 i, sge_count = 0;
for (i = 0; i < bufcnt; i++, drv_bufs++) {
if (drv_bufs->data_dir == DMA_NONE ||
drv_bufs->kern_buf)
continue;
sge_count += drv_bufs->num_dma_desc;
if (!drv_bufs->num_dma_desc)
sge_count++;
}
return sge_count;
}
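For concreteness, a hedged example of the count:

/*
 * Editor's example (not in the patch): a request with one 20 KiB
 * DATA_OUT buffer (three 8 KiB pool descriptors) plus one zero-length
 * DATA_IN buffer counts 3 + 1 = 4 SGEs; DMA_NONE entries and buffers
 * with a dedicated kern_buf are skipped.
 */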
/**
* mpi3mr_bsg_build_sgl - SGL construction for MPI commands
* @mrioc: Adapter instance reference
* @mpi_req: MPI request
* @sgl_offset: offset to start sgl in the MPI request
* @drv_bufs: DMA address of the buffers to be placed in sgl
@@ -560,27 +605,45 @@ static long mpi3mr_bsg_process_drv_cmds(struct bsg_job *job)
* This function places the DMA address of the given buffers in
* proper format as SGEs in the given MPI request.
*
* Return: Nothing
* Return: 0 on success, -1 on failure
*/
static void mpi3mr_bsg_build_sgl(u8 *mpi_req, uint32_t sgl_offset,
struct mpi3mr_buf_map *drv_bufs, u8 bufcnt, u8 is_rmc,
u8 is_rmr, u8 num_datasges)
static int mpi3mr_bsg_build_sgl(struct mpi3mr_ioc *mrioc, u8 *mpi_req,
u32 sgl_offset, struct mpi3mr_buf_map *drv_bufs,
u8 bufcnt, u8 is_rmc, u8 is_rmr, u8 num_datasges)
{
struct mpi3_request_header *mpi_header =
(struct mpi3_request_header *)mpi_req;
u8 *sgl = (mpi_req + sgl_offset), count = 0;
struct mpi3_mgmt_passthrough_request *rmgmt_req =
(struct mpi3_mgmt_passthrough_request *)mpi_req;
struct mpi3mr_buf_map *drv_buf_iter = drv_bufs;
u8 sgl_flags, sgl_flags_last;
u8 flag, sgl_flags, sgl_flag_eob, sgl_flags_last, last_chain_sgl_flag;
u16 available_sges, i, sges_needed;
u32 sge_element_size = sizeof(struct mpi3_sge_common);
bool chain_used = false;
sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
MPI3_SGE_FLAGS_DLAS_SYSTEM | MPI3_SGE_FLAGS_END_OF_BUFFER;
sgl_flags_last = sgl_flags | MPI3_SGE_FLAGS_END_OF_LIST;
MPI3_SGE_FLAGS_DLAS_SYSTEM;
sgl_flag_eob = sgl_flags | MPI3_SGE_FLAGS_END_OF_BUFFER;
sgl_flags_last = sgl_flag_eob | MPI3_SGE_FLAGS_END_OF_LIST;
last_chain_sgl_flag = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN |
MPI3_SGE_FLAGS_DLAS_SYSTEM;
sges_needed = mpi3mr_total_num_ioctl_sges(drv_bufs, bufcnt);
if (is_rmc) {
mpi3mr_add_sg_single(&rmgmt_req->command_sgl,
sgl_flags_last, drv_buf_iter->kern_buf_len,
drv_buf_iter->kern_buf_dma);
sgl = (u8 *)drv_buf_iter->kern_buf + drv_buf_iter->bsg_buf_len;
sgl = (u8 *)drv_buf_iter->kern_buf +
drv_buf_iter->bsg_buf_len;
available_sges = (drv_buf_iter->kern_buf_len -
drv_buf_iter->bsg_buf_len) / sge_element_size;
if (sges_needed > available_sges)
return -1;
chain_used = true;
drv_buf_iter++;
count++;
if (is_rmr) {
@@ -592,23 +655,95 @@ static void mpi3mr_bsg_build_sgl(u8 *mpi_req, uint32_t sgl_offset,
} else
mpi3mr_build_zero_len_sge(
&rmgmt_req->response_sgl);
if (num_datasges) {
i = 0;
goto build_sges;
}
} else {
if (sgl_offset >= MPI3MR_ADMIN_REQ_FRAME_SZ)
return -1;
available_sges = (MPI3MR_ADMIN_REQ_FRAME_SZ - sgl_offset) /
sge_element_size;
if (!available_sges)
return -1;
}
if (!num_datasges) {
mpi3mr_build_zero_len_sge(sgl);
return;
return 0;
}
if (mpi_header->function == MPI3_BSG_FUNCTION_SMP_PASSTHROUGH) {
if ((sges_needed > 2) || (sges_needed > available_sges))
return -1;
for (; count < bufcnt; count++, drv_buf_iter++) {
if (drv_buf_iter->data_dir == DMA_NONE ||
!drv_buf_iter->num_dma_desc)
continue;
mpi3mr_add_sg_single(sgl, sgl_flags_last,
drv_buf_iter->dma_desc[0].size,
drv_buf_iter->dma_desc[0].dma_addr);
sgl += sge_element_size;
}
return 0;
}
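/*
 * Editor's note (not in the patch): SMP passthrough therefore carries
 * at most two simple SGEs, one per data buffer, each covered by a
 * single pool descriptor; the din/dout size checks against
 * MPI3MR_IOCTL_SGE_SIZE later in mpi3mr_bsg_process_mpt_cmds()
 * enforce the matching 8 KiB cap.
 */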
i = 0;
build_sges:
for (; count < bufcnt; count++, drv_buf_iter++) {
if (drv_buf_iter->data_dir == DMA_NONE)
continue;
if (num_datasges == 1 || !is_rmc)
mpi3mr_add_sg_single(sgl, sgl_flags_last,
drv_buf_iter->kern_buf_len, drv_buf_iter->kern_buf_dma);
else
mpi3mr_add_sg_single(sgl, sgl_flags,
drv_buf_iter->kern_buf_len, drv_buf_iter->kern_buf_dma);
sgl += sizeof(struct mpi3_sge_common);
if (!drv_buf_iter->num_dma_desc) {
if (chain_used && !available_sges)
return -1;
if (!chain_used && (available_sges == 1) &&
(sges_needed > 1))
goto setup_chain;
flag = sgl_flag_eob;
if (num_datasges == 1)
flag = sgl_flags_last;
mpi3mr_add_sg_single(sgl, flag, 0, 0);
sgl += sge_element_size;
sges_needed--;
available_sges--;
num_datasges--;
continue;
}
for (; i < drv_buf_iter->num_dma_desc; i++) {
if (chain_used && !available_sges)
return -1;
if (!chain_used && (available_sges == 1) &&
(sges_needed > 1))
goto setup_chain;
flag = sgl_flags;
if (i == (drv_buf_iter->num_dma_desc - 1)) {
if (num_datasges == 1)
flag = sgl_flags_last;
else
flag = sgl_flag_eob;
}
mpi3mr_add_sg_single(sgl, flag,
drv_buf_iter->dma_desc[i].size,
drv_buf_iter->dma_desc[i].dma_addr);
sgl += sge_element_size;
available_sges--;
sges_needed--;
}
num_datasges--;
i = 0;
}
return 0;
setup_chain:
available_sges = mrioc->ioctl_chain_sge.size / sge_element_size;
if (sges_needed > available_sges)
return -1;
mpi3mr_add_sg_single(sgl, last_chain_sgl_flag,
(sges_needed * sge_element_size),
mrioc->ioctl_chain_sge.dma_addr);
memset(mrioc->ioctl_chain_sge.addr, 0, mrioc->ioctl_chain_sge.size);
sgl = (u8 *)mrioc->ioctl_chain_sge.addr;
chain_used = true;
goto build_sges;
}
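To make the chaining thresholds concrete, a sketch of the SGE budget (editor's note; the 128-byte MPI3MR_ADMIN_REQ_FRAME_SZ and the 16-byte struct mpi3_sge_common are assumptions taken from mpi3mr.h, and the helper name is hypothetical):

static inline u16 inline_sge_slots(u32 sgl_offset)
{
	/* e.g. sgl_offset == 64 leaves (128 - 64) / 16 = 4 inline SGEs */
	return (MPI3MR_ADMIN_REQ_FRAME_SZ - sgl_offset) /
		sizeof(struct mpi3_sge_common);
}

Once the inline budget runs out, setup_chain switches to the preallocated 4 KiB chain buffer, which holds 4096 / 16 = 256 SGEs.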
/**
@@ -648,14 +783,20 @@ static int mpi3mr_build_nvme_sgl(struct mpi3mr_ioc *mrioc,
struct mpi3mr_buf_map *drv_bufs, u8 bufcnt)
{
struct mpi3mr_nvme_pt_sge *nvme_sgl;
u64 sgl_ptr;
__le64 sgl_dma;
u8 count;
size_t length = 0;
u16 available_sges = 0, i;
u32 sge_element_size = sizeof(struct mpi3mr_nvme_pt_sge);
struct mpi3mr_buf_map *drv_buf_iter = drv_bufs;
u64 sgemod_mask = ((u64)((mrioc->facts.sge_mod_mask) <<
mrioc->facts.sge_mod_shift) << 32);
u64 sgemod_val = ((u64)(mrioc->facts.sge_mod_value) <<
mrioc->facts.sge_mod_shift) << 32;
u32 size;
nvme_sgl = (struct mpi3mr_nvme_pt_sge *)
((u8 *)(nvme_encap_request->command) + MPI3MR_NVME_CMD_SGL_OFFSET);
/*
* Not all commands require a data transfer. If no data, just return
@@ -664,27 +805,59 @@ static int mpi3mr_build_nvme_sgl(struct mpi3mr_ioc *mrioc,
for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
if (drv_buf_iter->data_dir == DMA_NONE)
continue;
sgl_ptr = (u64)drv_buf_iter->kern_buf_dma;
length = drv_buf_iter->kern_buf_len;
break;
}
if (!length)
if (!length || !drv_buf_iter->num_dma_desc)
return 0;
if (sgl_ptr & sgemod_mask) {
if (drv_buf_iter->num_dma_desc == 1) {
available_sges = 1;
goto build_sges;
}
sgl_dma = cpu_to_le64(mrioc->ioctl_chain_sge.dma_addr);
if (sgl_dma & sgemod_mask) {
dprint_bsg_err(mrioc,
"%s: SGL address collides with SGE modifier\n",
"%s: SGL chain address collides with SGE modifier\n",
__func__);
return -1;
}
sgl_ptr &= ~sgemod_mask;
sgl_ptr |= sgemod_val;
nvme_sgl = (struct mpi3mr_nvme_pt_sge *)
((u8 *)(nvme_encap_request->command) + MPI3MR_NVME_CMD_SGL_OFFSET);
sgl_dma &= ~sgemod_mask;
sgl_dma |= sgemod_val;
memset(mrioc->ioctl_chain_sge.addr, 0, mrioc->ioctl_chain_sge.size);
available_sges = mrioc->ioctl_chain_sge.size / sge_element_size;
if (available_sges < drv_buf_iter->num_dma_desc)
return -1;
memset(nvme_sgl, 0, sizeof(struct mpi3mr_nvme_pt_sge));
nvme_sgl->base_addr = sgl_ptr;
nvme_sgl->length = length;
nvme_sgl->base_addr = sgl_dma;
size = drv_buf_iter->num_dma_desc * sizeof(struct mpi3mr_nvme_pt_sge);
nvme_sgl->length = cpu_to_le32(size);
nvme_sgl->type = MPI3MR_NVMESGL_LAST_SEGMENT;
nvme_sgl = (struct mpi3mr_nvme_pt_sge *)mrioc->ioctl_chain_sge.addr;
build_sges:
for (i = 0; i < drv_buf_iter->num_dma_desc; i++) {
sgl_dma = cpu_to_le64(drv_buf_iter->dma_desc[i].dma_addr);
if (sgl_dma & sgemod_mask) {
dprint_bsg_err(mrioc,
"%s: SGL address collides with SGE modifier\n",
__func__);
return -1;
}
sgl_dma &= ~sgemod_mask;
sgl_dma |= sgemod_val;
nvme_sgl->base_addr = sgl_dma;
nvme_sgl->length = cpu_to_le32(drv_buf_iter->dma_desc[i].size);
nvme_sgl->type = MPI3MR_NVMESGL_DATA_SEGMENT;
nvme_sgl++;
available_sges--;
}
return 0;
}
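A worked example of the resulting two-level SGL:

/*
 * Editor's example (not in the patch): a 20 KiB buffer split into
 * three pool descriptors yields an inline Last Segment descriptor at
 * MPI3MR_NVME_CMD_SGL_OFFSET whose length is
 * 3 * sizeof(struct mpi3mr_nvme_pt_sge) = 48 bytes and whose address
 * is the chain buffer; the chain holds three Data Block descriptors
 * (8 KiB, 8 KiB, 4 KiB). A single-descriptor buffer skips the chain
 * and is written inline.
 */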
@@ -712,7 +885,7 @@ static int mpi3mr_build_nvme_prp(struct mpi3mr_ioc *mrioc,
dma_addr_t prp_entry_dma, prp_page_dma, dma_addr;
u32 offset, entry_len, dev_pgsz;
u32 page_mask_result, page_mask;
size_t length = 0;
size_t length = 0, desc_len;
u8 count;
struct mpi3mr_buf_map *drv_buf_iter = drv_bufs;
u64 sgemod_mask = ((u64)((mrioc->facts.sge_mod_mask) <<
@@ -721,6 +894,7 @@
mrioc->facts.sge_mod_shift) << 32;
u16 dev_handle = nvme_encap_request->dev_handle;
struct mpi3mr_tgt_dev *tgtdev;
u16 desc_count = 0;
tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
if (!tgtdev) {
@@ -739,6 +913,21 @@ static int mpi3mr_build_nvme_prp(struct mpi3mr_ioc *mrioc,
dev_pgsz = 1 << (tgtdev->dev_spec.pcie_inf.pgsz);
mpi3mr_tgtdev_put(tgtdev);
page_mask = dev_pgsz - 1;
if (dev_pgsz > MPI3MR_IOCTL_SGE_SIZE) {
dprint_bsg_err(mrioc,
"%s: NVMe device page size(%d) is greater than ioctl data sge size(%d) for handle 0x%04x\n",
__func__, dev_pgsz, MPI3MR_IOCTL_SGE_SIZE, dev_handle);
return -1;
}
if (MPI3MR_IOCTL_SGE_SIZE % dev_pgsz) {
dprint_bsg_err(mrioc,
"%s: ioctl data sge size(%d) is not a multiple of NVMe device page size(%d) for handle 0x%04x\n",
__func__, MPI3MR_IOCTL_SGE_SIZE, dev_pgsz, dev_handle);
return -1;
}
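/*
 * Editor's note (not in the patch): with the common 4 KiB NVMe page
 * size both checks pass (4096 <= 8192 and 8192 % 4096 == 0); a device
 * reporting 16 KiB pages fails the first check, since one device page
 * would not fit in a single 8 KiB pool descriptor.
 */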
/*
* Not all commands require a data transfer. If no data, just return
@@ -747,14 +936,26 @@ static int mpi3mr_build_nvme_prp(struct mpi3mr_ioc *mrioc,
for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
if (drv_buf_iter->data_dir == DMA_NONE)
continue;
dma_addr = drv_buf_iter->kern_buf_dma;
length = drv_buf_iter->kern_buf_len;
break;
}
if (!length)
if (!length || !drv_buf_iter->num_dma_desc)
return 0;
for (count = 0; count < drv_buf_iter->num_dma_desc; count++) {
dma_addr = drv_buf_iter->dma_desc[count].dma_addr;
if (dma_addr & page_mask) {
dprint_bsg_err(mrioc,
"%s:dma_addr 0x%llx is not aligned with page size 0x%x\n",
__func__, dma_addr, dev_pgsz);
return -1;
}
}
dma_addr = drv_buf_iter->dma_desc[0].dma_addr;
desc_len = drv_buf_iter->dma_desc[0].size;
mrioc->prp_sz = 0;
mrioc->prp_list_virt = dma_alloc_coherent(&mrioc->pdev->dev,
dev_pgsz, &mrioc->prp_list_dma, GFP_KERNEL);
@@ -784,7 +985,6 @@ static int mpi3mr_build_nvme_prp(struct mpi3mr_ioc *mrioc,
* Check if we are within 1 entry of a page boundary we don't
* want our first entry to be a PRP List entry.
*/
page_mask = dev_pgsz - 1;
page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask;
if (!page_mask_result) {
dprint_bsg_err(mrioc, "%s: PRP page is not page aligned\n",
@@ -898,18 +1098,31 @@ static int mpi3mr_build_nvme_prp(struct mpi3mr_ioc *mrioc,
prp_entry_dma += prp_size;
}
/*
* Bump the phys address of the command's data buffer by the
* entry_len.
*/
dma_addr += entry_len;
/* decrement length accounting for last partial page. */
if (entry_len > length)
if (entry_len >= length) {
length = 0;
else
} else {
if (entry_len <= desc_len) {
dma_addr += entry_len;
desc_len -= entry_len;
}
if (!desc_len) {
if ((++desc_count) >=
drv_buf_iter->num_dma_desc) {
dprint_bsg_err(mrioc,
"%s: Invalid len %ld while building PRP\n",
__func__, length);
goto err_out;
}
dma_addr =
drv_buf_iter->dma_desc[desc_count].dma_addr;
desc_len =
drv_buf_iter->dma_desc[desc_count].size;
}
length -= entry_len;
}
}
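/*
 * Editor's sketch of the walk above (not in the patch): with 4 KiB
 * device pages and 8 KiB pool descriptors, each descriptor yields two
 * PRP entries; once desc_len reaches zero the walk reloads dma_addr
 * from the next descriptor, so no PRP entry ever spans a descriptor
 * boundary (the alignment check above guarantees this stays legal).
 */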
return 0;
err_out:
if (mrioc->prp_list_virt) {
@@ -919,10 +1132,66 @@ err_out:
}
return -1;
}
/**
* mpi3mr_map_data_buffer_dma - build dma descriptors for data
* buffers
* @mrioc: Adapter instance reference
* @drv_buf: buffer map descriptor
* @desc_count: Number of already consumed dma descriptors
*
* This function computes how many pre-allocated DMA descriptors
* are required for the given data buffer and, if that many
* descriptors are available, sets up the mapping of the scattered
* DMA addresses to the given data buffer. If the data direction
* of the buffer is DMA_TO_DEVICE, the actual data is copied to
* the DMA buffers.
*
* Return: 0 on success, -1 on failure
*/
static int mpi3mr_map_data_buffer_dma(struct mpi3mr_ioc *mrioc,
struct mpi3mr_buf_map *drv_buf,
u16 desc_count)
{
u16 i, needed_desc = drv_buf->kern_buf_len / MPI3MR_IOCTL_SGE_SIZE;
u32 buf_len = drv_buf->kern_buf_len, copied_len = 0;
if (drv_buf->kern_buf_len % MPI3MR_IOCTL_SGE_SIZE)
needed_desc++;
if ((needed_desc + desc_count) > MPI3MR_NUM_IOCTL_SGE) {
dprint_bsg_err(mrioc, "%s: DMA descriptor mapping error %d:%d:%d\n",
__func__, needed_desc, desc_count, MPI3MR_NUM_IOCTL_SGE);
return -1;
}
drv_buf->dma_desc = kzalloc(sizeof(*drv_buf->dma_desc) * needed_desc,
GFP_KERNEL);
if (!drv_buf->dma_desc)
return -1;
for (i = 0; i < needed_desc; i++, desc_count++) {
drv_buf->dma_desc[i].addr = mrioc->ioctl_sge[desc_count].addr;
drv_buf->dma_desc[i].dma_addr =
mrioc->ioctl_sge[desc_count].dma_addr;
if (buf_len < mrioc->ioctl_sge[desc_count].size)
drv_buf->dma_desc[i].size = buf_len;
else
drv_buf->dma_desc[i].size =
mrioc->ioctl_sge[desc_count].size;
buf_len -= drv_buf->dma_desc[i].size;
memset(drv_buf->dma_desc[i].addr, 0,
mrioc->ioctl_sge[desc_count].size);
if (drv_buf->data_dir == DMA_TO_DEVICE) {
memcpy(drv_buf->dma_desc[i].addr,
drv_buf->bsg_buf + copied_len,
drv_buf->dma_desc[i].size);
copied_len += drv_buf->dma_desc[i].size;
}
}
drv_buf->num_dma_desc = needed_desc;
return 0;
}
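For concreteness, a sketch mirroring the descriptor math above (editor's note; the helper name is hypothetical):

static inline u16 ioctl_descs_needed(u32 buf_len)
{
	u16 n = buf_len / MPI3MR_IOCTL_SGE_SIZE;

	if (buf_len % MPI3MR_IOCTL_SGE_SIZE)
		n++;	/* trailing partial chunk */
	return n;	/* e.g. 20 KiB -> 3 (8 KiB + 8 KiB + 4 KiB) */
}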
/**
* mpi3mr_bsg_process_mpt_cmds - MPI Pass through BSG handler
* @job: BSG job reference
* @reply_payload_rcv_len: length of payload recvd
*
* This function is the top-level handler for MPI pass-through
* commands; it does basic validation of the input data buffers,
@@ -938,10 +1207,9 @@ err_out:
* Return: 0 on success and proper error codes on failure
*/
static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job, unsigned int *reply_payload_rcv_len)
static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job)
{
long rval = -EINVAL;
struct mpi3mr_ioc *mrioc = NULL;
u8 *mpi_req = NULL, *sense_buff_k = NULL;
u8 mpi_msg_size = 0;
@@ -949,9 +1217,10 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job, unsigned int *reply
struct mpi3mr_bsg_mptcmd *karg;
struct mpi3mr_buf_entry *buf_entries = NULL;
struct mpi3mr_buf_map *drv_bufs = NULL, *drv_buf_iter = NULL;
u8 count, bufcnt = 0, is_rmcb = 0, is_rmrb = 0, din_cnt = 0, dout_cnt = 0;
u8 invalid_be = 0, erb_offset = 0xFF, mpirep_offset = 0xFF, sg_entries = 0;
u8 block_io = 0, resp_code = 0, nvme_fmt = 0;
u8 count, bufcnt = 0, is_rmcb = 0, is_rmrb = 0;
u8 din_cnt = 0, dout_cnt = 0;
u8 invalid_be = 0, erb_offset = 0xFF, mpirep_offset = 0xFF;
u8 block_io = 0, nvme_fmt = 0, resp_code = 0;
struct mpi3_request_header *mpi_header = NULL;
struct mpi3_status_reply_descriptor *status_desc;
struct mpi3_scsi_task_mgmt_request *tm_req;
@@ -963,6 +1232,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job, unsigned int *reply
u32 din_size = 0, dout_size = 0;
u8 *din_buf = NULL, *dout_buf = NULL;
u8 *sgl_iter = NULL, *sgl_din_iter = NULL, *sgl_dout_iter = NULL;
u16 rmc_size = 0, desc_count = 0;
bsg_req = job->request;
karg = (struct mpi3mr_bsg_mptcmd *)&bsg_req->cmd.mptcmd;
@@ -971,6 +1241,12 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job, unsigned int *reply
if (!mrioc)
return -ENODEV;
if (!mrioc->ioctl_sges_allocated) {
dprint_bsg_err(mrioc, "%s: DMA memory was not allocated\n",
__func__);
return -ENOMEM;
}
if (karg->timeout < MPI3MR_APP_DEFAULT_TIMEOUT)
karg->timeout = MPI3MR_APP_DEFAULT_TIMEOUT;
@@ -1011,26 +1287,13 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job, unsigned int *reply
for (count = 0; count < bufcnt; count++, buf_entries++, drv_buf_iter++) {
if (sgl_dout_iter > (dout_buf + job->request_payload.payload_len)) {
dprint_bsg_err(mrioc, "%s: data_out buffer length mismatch\n",
__func__);
rval = -EINVAL;
goto out;
}
if (sgl_din_iter > (din_buf + job->reply_payload.payload_len)) {
dprint_bsg_err(mrioc, "%s: data_in buffer length mismatch\n",
__func__);
rval = -EINVAL;
goto out;
}
switch (buf_entries->buf_type) {
case MPI3MR_BSG_BUFTYPE_RAIDMGMT_CMD:
sgl_iter = sgl_dout_iter;
sgl_dout_iter += buf_entries->buf_len;
drv_buf_iter->data_dir = DMA_TO_DEVICE;
is_rmcb = 1;
if (count != 0)
if ((count != 0) || !buf_entries->buf_len)
invalid_be = 1;
break;
case MPI3MR_BSG_BUFTYPE_RAIDMGMT_RESP:
@@ -1038,7 +1301,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job, unsigned int *reply
sgl_din_iter += buf_entries->buf_len;
drv_buf_iter->data_dir = DMA_FROM_DEVICE;
is_rmrb = 1;
if (count != 1 || !is_rmcb)
if (count != 1 || !is_rmcb || !buf_entries->buf_len)
invalid_be = 1;
break;
case MPI3MR_BSG_BUFTYPE_DATA_IN:
@@ -1046,7 +1309,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job, unsigned int *reply
sgl_din_iter += buf_entries->buf_len;
drv_buf_iter->data_dir = DMA_FROM_DEVICE;
din_cnt++;
din_size += drv_buf_iter->bsg_buf_len;
din_size += buf_entries->buf_len;
if ((din_cnt > 1) && !is_rmcb)
invalid_be = 1;
break;
@@ -1055,7 +1318,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job, unsigned int *reply
sgl_dout_iter += buf_entries->buf_len;
drv_buf_iter->data_dir = DMA_TO_DEVICE;
dout_cnt++;
dout_size += drv_buf_iter->bsg_buf_len;
dout_size += buf_entries->buf_len;
if ((dout_cnt > 1) && !is_rmcb)
invalid_be = 1;
break;
@@ -1064,12 +1327,16 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job, unsigned int *reply
sgl_din_iter += buf_entries->buf_len;
drv_buf_iter->data_dir = DMA_NONE;
mpirep_offset = count;
if (!buf_entries->buf_len)
invalid_be = 1;
break;
case MPI3MR_BSG_BUFTYPE_ERR_RESPONSE:
sgl_iter = sgl_din_iter;
sgl_din_iter += buf_entries->buf_len;
drv_buf_iter->data_dir = DMA_NONE;
erb_offset = count;
if (!buf_entries->buf_len)
invalid_be = 1;
break;
case MPI3MR_BSG_BUFTYPE_MPI_REQUEST:
sgl_iter = sgl_dout_iter;
@@ -1096,21 +1363,31 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job, unsigned int *reply
goto out;
}
drv_buf_iter->bsg_buf = sgl_iter;
drv_buf_iter->bsg_buf_len = buf_entries->buf_len;
}
if (!is_rmcb && (dout_cnt || din_cnt)) {
sg_entries = dout_cnt + din_cnt;
if (((mpi_msg_size) + (sg_entries *
sizeof(struct mpi3_sge_common))) > MPI3MR_ADMIN_REQ_FRAME_SZ) {
dprint_bsg_err(mrioc,
"%s:%d: invalid message size passed\n",
__func__, __LINE__);
if (sgl_dout_iter > (dout_buf + job->request_payload.payload_len)) {
dprint_bsg_err(mrioc, "%s: data_out buffer length mismatch\n",
__func__);
rval = -EINVAL;
goto out;
}
if (sgl_din_iter > (din_buf + job->reply_payload.payload_len)) {
dprint_bsg_err(mrioc, "%s: data_in buffer length mismatch\n",
__func__);
rval = -EINVAL;
goto out;
}
drv_buf_iter->bsg_buf = sgl_iter;
drv_buf_iter->bsg_buf_len = buf_entries->buf_len;
}
if (is_rmcb && ((din_size + dout_size) > MPI3MR_MAX_APP_XFER_SIZE)) {
dprint_bsg_err(mrioc, "%s:%d: invalid data transfer size passed for function 0x%x din_size = %d, dout_size = %d\n",
__func__, __LINE__, mpi_header->function, din_size,
dout_size);
rval = -EINVAL;
goto out;
}
if (din_size > MPI3MR_MAX_APP_XFER_SIZE) {
dprint_bsg_err(mrioc,
"%s:%d: invalid data transfer size passed for function 0x%x din_size=%d\n",
@@ -1126,30 +1403,64 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job, unsigned int *reply
goto out;
}
if (mpi_header->function == MPI3_BSG_FUNCTION_SMP_PASSTHROUGH) {
if (din_size > MPI3MR_IOCTL_SGE_SIZE ||
dout_size > MPI3MR_IOCTL_SGE_SIZE) {
dprint_bsg_err(mrioc, "%s:%d: invalid message size passed:%d:%d:%d:%d\n",
__func__, __LINE__, din_cnt, dout_cnt, din_size,
dout_size);
rval = -EINVAL;
goto out;
}
}
drv_buf_iter = drv_bufs;
for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
if (drv_buf_iter->data_dir == DMA_NONE)
continue;
drv_buf_iter->kern_buf_len = drv_buf_iter->bsg_buf_len;
if (is_rmcb && !count)
drv_buf_iter->kern_buf_len += ((dout_cnt + din_cnt) *
sizeof(struct mpi3_sge_common));
if (!drv_buf_iter->kern_buf_len)
continue;
drv_buf_iter->kern_buf = dma_alloc_coherent(&mrioc->pdev->dev,
drv_buf_iter->kern_buf_len, &drv_buf_iter->kern_buf_dma,
GFP_KERNEL);
if (!drv_buf_iter->kern_buf) {
rval = -ENOMEM;
if (is_rmcb && !count) {
drv_buf_iter->kern_buf_len =
mrioc->ioctl_chain_sge.size;
drv_buf_iter->kern_buf =
mrioc->ioctl_chain_sge.addr;
drv_buf_iter->kern_buf_dma =
mrioc->ioctl_chain_sge.dma_addr;
drv_buf_iter->dma_desc = NULL;
drv_buf_iter->num_dma_desc = 0;
memset(drv_buf_iter->kern_buf, 0,
drv_buf_iter->kern_buf_len);
tmplen = min(drv_buf_iter->kern_buf_len,
drv_buf_iter->bsg_buf_len);
rmc_size = tmplen;
memcpy(drv_buf_iter->kern_buf, drv_buf_iter->bsg_buf, tmplen);
} else if (is_rmrb && (count == 1)) {
drv_buf_iter->kern_buf_len =
mrioc->ioctl_resp_sge.size;
drv_buf_iter->kern_buf =
mrioc->ioctl_resp_sge.addr;
drv_buf_iter->kern_buf_dma =
mrioc->ioctl_resp_sge.dma_addr;
drv_buf_iter->dma_desc = NULL;
drv_buf_iter->num_dma_desc = 0;
memset(drv_buf_iter->kern_buf, 0,
drv_buf_iter->kern_buf_len);
tmplen = min(drv_buf_iter->kern_buf_len,
drv_buf_iter->bsg_buf_len);
drv_buf_iter->kern_buf_len = tmplen;
memset(drv_buf_iter->bsg_buf, 0,
drv_buf_iter->bsg_buf_len);
} else {
if (!drv_buf_iter->kern_buf_len)
continue;
if (mpi3mr_map_data_buffer_dma(mrioc, drv_buf_iter, desc_count)) {
rval = -ENOMEM;
dprint_bsg_err(mrioc, "%s:%d: mapping data buffers failed\n",
__func__, __LINE__);
goto out;
}
if (drv_buf_iter->data_dir == DMA_TO_DEVICE) {
tmplen = min(drv_buf_iter->kern_buf_len,
drv_buf_iter->bsg_buf_len);
memcpy(drv_buf_iter->kern_buf, drv_buf_iter->bsg_buf, tmplen);
desc_count += drv_buf_iter->num_dma_desc;
}
}
@@ -1219,9 +1530,14 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job, unsigned int *reply
goto out;
}
} else {
mpi3mr_bsg_build_sgl(mpi_req, (mpi_msg_size),
drv_bufs, bufcnt, is_rmcb, is_rmrb,
(dout_cnt + din_cnt));
if (mpi3mr_bsg_build_sgl(mrioc, mpi_req, mpi_msg_size,
drv_bufs, bufcnt, is_rmcb, is_rmrb,
(dout_cnt + din_cnt))) {
dprint_bsg_err(mrioc, "%s: sgl build failed\n", __func__);
rval = -EAGAIN;
mutex_unlock(&mrioc->bsg_cmds.mutex);
goto out;
}
}
if (mpi_header->function == MPI3_BSG_FUNCTION_SCSI_TASK_MGMT) {
@@ -1257,7 +1573,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job, unsigned int *reply
if (mpi_header->function == MPI3_BSG_FUNCTION_MGMT_PASSTHROUGH) {
drv_buf_iter = &drv_bufs[0];
dprint_dump(drv_buf_iter->kern_buf,
drv_buf_iter->kern_buf_len, "mpi3_mgmt_req");
rmc_size, "mpi3_mgmt_req");
}
}
@@ -1292,10 +1608,9 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job, unsigned int *reply
MPI3_BSG_FUNCTION_MGMT_PASSTHROUGH) {
drv_buf_iter = &drv_bufs[0];
dprint_dump(drv_buf_iter->kern_buf,
drv_buf_iter->kern_buf_len, "mpi3_mgmt_req");
rmc_size, "mpi3_mgmt_req");
}
}
if ((mpi_header->function == MPI3_BSG_FUNCTION_NVME_ENCAPSULATED) ||
(mpi_header->function == MPI3_BSG_FUNCTION_SCSI_IO))
mpi3mr_issue_tm(mrioc,
@@ -1366,17 +1681,27 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job, unsigned int *reply
for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
if (drv_buf_iter->data_dir == DMA_NONE)
continue;
if (drv_buf_iter->data_dir == DMA_FROM_DEVICE) {
tmplen = min(drv_buf_iter->kern_buf_len,
drv_buf_iter->bsg_buf_len);
if ((count == 1) && is_rmrb) {
memcpy(drv_buf_iter->bsg_buf,
drv_buf_iter->kern_buf, tmplen);
drv_buf_iter->kern_buf,
drv_buf_iter->kern_buf_len);
} else if (drv_buf_iter->data_dir == DMA_FROM_DEVICE) {
tmplen = 0;
for (desc_count = 0;
desc_count < drv_buf_iter->num_dma_desc;
desc_count++) {
memcpy(((u8 *)drv_buf_iter->bsg_buf + tmplen),
drv_buf_iter->dma_desc[desc_count].addr,
drv_buf_iter->dma_desc[desc_count].size);
tmplen +=
drv_buf_iter->dma_desc[desc_count].size;
}
}
}
out_unlock:
if (din_buf) {
*reply_payload_rcv_len =
job->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
din_buf, job->reply_payload.payload_len);
@@ -1392,13 +1717,8 @@ out:
kfree(mpi_req);
if (drv_bufs) {
drv_buf_iter = drv_bufs;
for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
if (drv_buf_iter->kern_buf && drv_buf_iter->kern_buf_dma)
dma_free_coherent(&mrioc->pdev->dev,
drv_buf_iter->kern_buf_len,
drv_buf_iter->kern_buf,
drv_buf_iter->kern_buf_dma);
}
for (count = 0; count < bufcnt; count++, drv_buf_iter++)
kfree(drv_buf_iter->dma_desc);
kfree(drv_bufs);
}
kfree(bsg_reply_buf);
@@ -1457,7 +1777,7 @@ static int mpi3mr_bsg_request(struct bsg_job *job)
rval = mpi3mr_bsg_process_drv_cmds(job);
break;
case MPI3MR_MPT_CMD:
rval = mpi3mr_bsg_process_mpt_cmds(job, &reply_payload_rcv_len);
rval = mpi3mr_bsg_process_mpt_cmds(job);
break;
default:
pr_err("%s: unsupported BSG command(0x%08x)\n",


@@ -1058,6 +1058,114 @@ enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc)
return MRIOC_STATE_RESET_REQUESTED;
}
/**
* mpi3mr_free_ioctl_dma_memory - free memory for ioctl dma
* @mrioc: Adapter instance reference
*
* Free the DMA memory allocated for IOCTL handling purposes.
*
* Return: None
*/
static void mpi3mr_free_ioctl_dma_memory(struct mpi3mr_ioc *mrioc)
{
struct dma_memory_desc *mem_desc;
u16 i;
if (!mrioc->ioctl_dma_pool)
return;
for (i = 0; i < MPI3MR_NUM_IOCTL_SGE; i++) {
mem_desc = &mrioc->ioctl_sge[i];
if (mem_desc->addr) {
dma_pool_free(mrioc->ioctl_dma_pool,
mem_desc->addr,
mem_desc->dma_addr);
mem_desc->addr = NULL;
}
}
dma_pool_destroy(mrioc->ioctl_dma_pool);
mrioc->ioctl_dma_pool = NULL;
mem_desc = &mrioc->ioctl_chain_sge;
if (mem_desc->addr) {
dma_free_coherent(&mrioc->pdev->dev, mem_desc->size,
mem_desc->addr, mem_desc->dma_addr);
mem_desc->addr = NULL;
}
mem_desc = &mrioc->ioctl_resp_sge;
if (mem_desc->addr) {
dma_free_coherent(&mrioc->pdev->dev, mem_desc->size,
mem_desc->addr, mem_desc->dma_addr);
mem_desc->addr = NULL;
}
mrioc->ioctl_sges_allocated = false;
}
/**
* mpi3mr_alloc_ioctl_dma_memory - Alloc memory for ioctl dma
* @mrioc: Adapter instance reference
*
* This function allocates the DMA-able memory required to handle
* application-issued MPI3 IOCTL requests.
*
* Return: None
*/
static void mpi3mr_alloc_ioctl_dma_memory(struct mpi3mr_ioc *mrioc)
{
struct dma_memory_desc *mem_desc;
u16 i;
mrioc->ioctl_dma_pool = dma_pool_create("ioctl dma pool",
&mrioc->pdev->dev,
MPI3MR_IOCTL_SGE_SIZE,
MPI3MR_PAGE_SIZE_4K, 0);
if (!mrioc->ioctl_dma_pool) {
ioc_err(mrioc, "ioctl_dma_pool: dma_pool_create failed\n");
goto out_failed;
}
for (i = 0; i < MPI3MR_NUM_IOCTL_SGE; i++) {
mem_desc = &mrioc->ioctl_sge[i];
mem_desc->size = MPI3MR_IOCTL_SGE_SIZE;
mem_desc->addr = dma_pool_zalloc(mrioc->ioctl_dma_pool,
GFP_KERNEL,
&mem_desc->dma_addr);
if (!mem_desc->addr)
goto out_failed;
}
mem_desc = &mrioc->ioctl_chain_sge;
mem_desc->size = MPI3MR_PAGE_SIZE_4K;
mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev,
mem_desc->size,
&mem_desc->dma_addr,
GFP_KERNEL);
if (!mem_desc->addr)
goto out_failed;
mem_desc = &mrioc->ioctl_resp_sge;
mem_desc->size = MPI3MR_PAGE_SIZE_4K;
mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev,
mem_desc->size,
&mem_desc->dma_addr,
GFP_KERNEL);
if (!mem_desc->addr)
goto out_failed;
mrioc->ioctl_sges_allocated = true;
return;
out_failed:
ioc_warn(mrioc, "cannot allocate DMA memory for the mpt commands\n"
"from the applications, application interface for MPT command is disabled\n");
mpi3mr_free_ioctl_dma_memory(mrioc);
}
/**
* mpi3mr_clear_reset_history - clear reset history
* @mrioc: Adapter instance reference
@@ -3874,6 +3982,9 @@ retry_init:
}
}
dprint_init(mrioc, "allocating ioctl dma buffers\n");
mpi3mr_alloc_ioctl_dma_memory(mrioc);
if (!mrioc->init_cmds.reply) {
retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
if (retval) {
@@ -4293,6 +4404,7 @@ void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
struct mpi3mr_intr_info *intr_info;
mpi3mr_free_enclosure_list(mrioc);
mpi3mr_free_ioctl_dma_memory(mrioc);
if (mrioc->sense_buf_pool) {
if (mrioc->sense_buf)


@@ -1047,8 +1047,9 @@ void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
list) {
if ((tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) &&
tgtdev->host_exposed && tgtdev->starget &&
tgtdev->starget->hostdata) {
tgtdev->is_hidden &&
tgtdev->host_exposed && tgtdev->starget &&
tgtdev->starget->hostdata) {
tgt_priv = tgtdev->starget->hostdata;
tgt_priv->dev_removed = 1;
atomic_set(&tgt_priv->block_io, 0);
@@ -1064,14 +1065,24 @@ void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, true);
mpi3mr_tgtdev_put(tgtdev);
} else if (tgtdev->is_hidden && tgtdev->host_exposed) {
dprint_reset(mrioc, "hiding target device with perst_id(%d)\n",
tgtdev->perst_id);
mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
}
}
tgtdev = NULL;
list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
if ((tgtdev->dev_handle != MPI3MR_INVALID_DEV_HANDLE) &&
!tgtdev->is_hidden && !tgtdev->host_exposed)
mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id);
!tgtdev->is_hidden) {
if (!tgtdev->host_exposed)
mpi3mr_report_tgtdev_to_host(mrioc,
tgtdev->perst_id);
else if (tgtdev->starget)
starget_for_each_device(tgtdev->starget,
(void *)tgtdev, mpi3mr_update_sdev);
}
}
}
@@ -3194,6 +3205,7 @@ void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
tg = stgt_priv_data->throttle_group;
throttle_enabled_dev =
stgt_priv_data->io_throttle_enabled;
dev_handle = stgt_priv_data->dev_handle;
}
}
if (unlikely((data_len_blks >= mrioc->io_throttle_data_length) &&


@@ -491,6 +491,8 @@ struct mpi3_nvme_encapsulated_error_reply {
#define MPI3MR_NVME_DATA_FORMAT_PRP 0
#define MPI3MR_NVME_DATA_FORMAT_SGL1 1
#define MPI3MR_NVME_DATA_FORMAT_SGL2 2
#define MPI3MR_NVMESGL_DATA_SEGMENT 0x00
#define MPI3MR_NVMESGL_LAST_SEGMENT 0x03
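/*
 * Editor's note (not in the patch): these values correspond to the
 * NVMe SGL Descriptor Type codes (0h = Data Block, 3h = Last Segment)
 * and are written into the 4-bit 'type' field of
 * struct mpi3mr_nvme_pt_sge.
 */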
/* MPI3: task management related definitions */
struct mpi3_scsi_task_mgmt_request {