skd: Simplify the code for handling data direction
Use DMA_FROM_DEVICE and DMA_TO_DEVICE directly instead of introducing
driver-private constants with the same numerical value.

Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Hannes Reinecke <hare@suse.de>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:

parent 0b2e0c0772
commit b1824eef28
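In short: the request context now carries a standard enum dma_data_direction (DMA_TO_DEVICE / DMA_FROM_DEVICE from <linux/dma-direction.h>) instead of a driver-private u8, so the stored value can be passed straight to pci_map_sg(), pci_unmap_sg() and dma_sync_sg_for_cpu() without a per-call translation. The following is a minimal kernel-style sketch of that pattern, not the driver code itself; the *_sketch names are illustrative only:

#include <linux/dma-direction.h>        /* enum dma_data_direction */
#include <linux/pci.h>                  /* pci_map_sg(), pci_unmap_sg() */

/* Reduced request context: only the direction field matters here. */
struct skd_request_context_sketch {
        enum dma_data_direction data_dir;
};

/*
 * A write moves data host -> card (DMA_TO_DEVICE); a read moves data
 * card -> host (DMA_FROM_DEVICE).  Because the generic enum is stored,
 * the same value is later handed directly to the DMA mapping helpers
 * instead of being translated from SKD_DATA_DIR_* at every call site.
 */
static void skd_set_data_dir_sketch(struct skd_request_context_sketch *skreq,
                                    bool is_write)
{
        skreq->data_dir = is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}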
@@ -212,7 +212,7 @@ struct skd_request_context {
         u8 flush_cmd;
 
         u32 timeout_stamp;
-        u8 sg_data_dir;
+        enum dma_data_direction data_dir;
         struct scatterlist *sg;
         u32 n_sg;
         u32 sg_byte_count;
@@ -225,8 +225,6 @@ struct skd_request_context {
         struct fit_comp_error_info err_info;
 
 };
-#define SKD_DATA_DIR_HOST_TO_CARD 1
-#define SKD_DATA_DIR_CARD_TO_HOST 2
 
 struct skd_special_context {
         struct skd_request_context req;
@@ -615,8 +613,8 @@ static void skd_request_fn(struct request_queue *q)
                 skreq->req = req;
                 skreq->fitmsg_id = 0;
 
-                skreq->sg_data_dir = data_dir == READ ?
-                        SKD_DATA_DIR_CARD_TO_HOST : SKD_DATA_DIR_HOST_TO_CARD;
+                skreq->data_dir = data_dir == READ ? DMA_FROM_DEVICE :
+                        DMA_TO_DEVICE;
 
                 if (req->bio && !skd_preop_sg_list(skdev, skreq)) {
                         dev_dbg(&skdev->pdev->dev, "error Out\n");
@@ -742,16 +740,14 @@ static bool skd_preop_sg_list(struct skd_device *skdev,
                               struct skd_request_context *skreq)
 {
         struct request *req = skreq->req;
-        int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
-        int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
         struct scatterlist *sg = &skreq->sg[0];
         int n_sg;
         int i;
 
         skreq->sg_byte_count = 0;
 
-        /* SKD_ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD ||
-                      skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST); */
+        WARN_ON_ONCE(skreq->data_dir != DMA_TO_DEVICE &&
+                     skreq->data_dir != DMA_FROM_DEVICE);
 
         n_sg = blk_rq_map_sg(skdev->queue, req, sg);
         if (n_sg <= 0)
@@ -761,7 +757,7 @@ static bool skd_preop_sg_list(struct skd_device *skdev,
          * Map scatterlist to PCI bus addresses.
          * Note PCI might change the number of entries.
          */
-        n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir);
+        n_sg = pci_map_sg(skdev->pdev, sg, n_sg, skreq->data_dir);
         if (n_sg <= 0)
                 return false;
 
@@ -804,9 +800,6 @@ static bool skd_preop_sg_list(struct skd_device *skdev,
 static void skd_postop_sg_list(struct skd_device *skdev,
                                struct skd_request_context *skreq)
 {
-        int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
-        int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
-
         /*
          * restore the next ptr for next IO request so we
          * don't have to set it every time.
@@ -814,7 +807,7 @@ static void skd_postop_sg_list(struct skd_device *skdev,
         skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
                 skreq->sksg_dma_address +
                 ((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
-        pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir);
+        pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, skreq->data_dir);
 }
 
 static void skd_request_fn_not_online(struct request_queue *q)
@@ -2506,7 +2499,7 @@ static void skd_process_scsi_inq(struct skd_device *skdev,
         struct skd_scsi_request *scsi_req = &skspcl->msg_buf->scsi[0];
 
         dma_sync_sg_for_cpu(skdev->class_dev, skspcl->req.sg, skspcl->req.n_sg,
-                            skspcl->req.sg_data_dir);
+                            skspcl->req.data_dir);
         buf = skd_sg_1st_page_ptr(skspcl->req.sg);
 
         if (buf)
@@ -4935,7 +4928,7 @@ static void skd_log_skreq(struct skd_device *skdev,
                 skd_skreq_state_to_str(skreq->state), skreq->state, skreq->id,
                 skreq->fitmsg_id);
         dev_dbg(&skdev->pdev->dev, " timo=0x%x sg_dir=%d n_sg=%d\n",
-                skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg);
+                skreq->timeout_stamp, skreq->data_dir, skreq->n_sg);
 
         if (skreq->req != NULL) {
                 struct request *req = skreq->req;