[SCSI] isci: atapi support
Based on original implementation from Jiangbi Liu and Maciej Trela.

ATAPI transfers happen in two-to-three stages.  The two-stage atapi
commands are those that include a dma data transfer.  The data transfer
portion of these operations is handled by the hardware packet-dma
acceleration.  The three-stage commands do not have a data transfer and
are handled without hardware assistance in raw frame mode.

stage1: transmit host-to-device fis to notify the device of an incoming
atapi cdb.  Upon reception of the pio-setup-fis, repost the task_context
to perform the dma transfer of the cdb+data (go to stage 3), or repost
the task_context to transmit the cdb as a raw frame (go to stage 2).

stage2: wait for hardware notification of the cdb transmission and then
go to stage 3.

stage3: wait for the arrival of the terminating device-to-host fis and
terminate the command.

To keep the implementation simple we only support the ATAPI packet-dma
protocol (for commands with data) to avoid needing to handle the data
transfer manually (as we do for SATA-PIO).  This may affect
compatibility for a small number of devices (see
ATA_HORKAGE_ATAPI_MOD16_DMA).

If the data transfer underruns or encounters an error, the
device-to-host fis is expected to arrive in the unsolicited frame queue
to be passed to libata for disposition.  However, in the DONE_UNEXP_FIS
(data underrun) case it appears we need to craft a response.  In the
DONE_REG_ERR case we do receive the UF and propagate it to libsas.

Signed-off-by: Maciej Trela <maciej.trela@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
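The staged flow described above can be summarized as a small state machine. The following is a standalone sketch, not isci driver code; the stage and event names are invented for illustration and only loosely mirror the SCI_REQ_ATAPI_* request states added by this patch.

/* Illustrative sketch of the two/three-stage ATAPI flow described in the
 * commit message.  Names here are hypothetical, not kernel identifiers.
 */
#include <stdbool.h>
#include <stdio.h>

enum atapi_stage {
        STAGE_WAIT_PIO_SETUP,   /* stage 1: H2D fis sent, wait for pio-setup */
        STAGE_WAIT_CDB_TX,      /* stage 2: raw-frame cdb sent, wait for completion */
        STAGE_WAIT_D2H,         /* stage 3: wait for terminating D2H fis */
        STAGE_DONE,
};

enum atapi_event {
        EV_PIO_SETUP_FIS,       /* device is ready to accept the cdb */
        EV_CDB_TX_DONE,         /* hardware finished transmitting the raw cdb frame */
        EV_D2H_FIS,             /* terminating device-to-host fis arrived */
};

static enum atapi_stage atapi_step(enum atapi_stage stage, enum atapi_event ev,
                                   bool has_data)
{
        switch (stage) {
        case STAGE_WAIT_PIO_SETUP:
                if (ev != EV_PIO_SETUP_FIS)
                        break;
                /* data commands: repost the task context for packet-dma of
                 * cdb+data and skip straight to stage 3; non-data commands:
                 * repost to send the cdb as a raw frame (stage 2).
                 */
                return has_data ? STAGE_WAIT_D2H : STAGE_WAIT_CDB_TX;
        case STAGE_WAIT_CDB_TX:
                if (ev == EV_CDB_TX_DONE)
                        return STAGE_WAIT_D2H;
                break;
        case STAGE_WAIT_D2H:
                if (ev == EV_D2H_FIS)
                        return STAGE_DONE;
                break;
        case STAGE_DONE:
                break;
        }
        return stage;
}

int main(void)
{
        /* data command: two stages (pio-setup, then d2h) */
        enum atapi_stage s = STAGE_WAIT_PIO_SETUP;
        s = atapi_step(s, EV_PIO_SETUP_FIS, true);
        s = atapi_step(s, EV_D2H_FIS, true);
        printf("data command done: %d\n", s == STAGE_DONE);

        /* non-data command: three stages (pio-setup, cdb tx, d2h) */
        s = STAGE_WAIT_PIO_SETUP;
        s = atapi_step(s, EV_PIO_SETUP_FIS, false);
        s = atapi_step(s, EV_CDB_TX_DONE, false);
        s = atapi_step(s, EV_D2H_FIS, false);
        printf("non-data command done: %d\n", s == STAGE_DONE);
        return 0;
}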
commit b50102d3e9
parent 4f3f812dd3
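The DONE_UNEXP_FIS workaround mentioned in the description synthesizes a "good status" register D2H fis when the controller drops the terminating frame on a data underrun, recording how many bytes actually moved. The sketch below is a standalone illustration of that construction, assuming a trimmed stand-in struct rather than the kernel's dev_to_host_fis; it only demonstrates the byte-count encoding used by the patch.

/* Hypothetical, simplified model of the underrun workaround; not driver code. */
#include <stdint.h>
#include <stdio.h>

struct d2h_fis_sketch {
        uint8_t fis_type;
        uint8_t flags;
        uint8_t status;
        uint8_t error;
        uint8_t byte_count_low;
        uint8_t byte_count_high;
};

#define FIS_TYPE_REG_D2H 0x34

static void craft_underrun_response(struct d2h_fis_sketch *d2h, uint16_t tx_bytes)
{
        d2h->fis_type = FIS_TYPE_REG_D2H;
        d2h->flags = 1 << 6;            /* interrupt bit, as in the patch */
        d2h->status = 0x50;             /* DRDY | DSC, no ERR */
        d2h->error = 0;
        d2h->byte_count_low = tx_bytes & 0xff;
        d2h->byte_count_high = tx_bytes >> 8;
}

int main(void)
{
        struct d2h_fis_sketch d2h = { 0 };
        unsigned int requested = 4096;
        uint16_t transferred = 512;
        unsigned int reported;

        craft_underrun_response(&d2h, transferred);
        reported = ((unsigned int)d2h.byte_count_high << 8) | d2h.byte_count_low;
        printf("transferred %u of %u bytes, residual %u\n",
               reported, requested, requested - reported);
        return 0;
}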
@@ -386,6 +386,18 @@ static bool is_remote_device_ready(struct isci_remote_device *idev)
        }
}

/*
 * called once the remote node context has transitioned to a ready
 * state (after suspending RX and/or TX due to early D2H fis)
 */
static void atapi_remote_device_resume_done(void *_dev)
{
        struct isci_remote_device *idev = _dev;
        struct isci_request *ireq = idev->working_request;

        sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
}

enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
                                                u32 event_code)
{
@@ -432,6 +444,16 @@ enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
        if (status != SCI_SUCCESS)
                return status;

        if (state == SCI_STP_DEV_ATAPI_ERROR) {
                /* For ATAPI error state resume the RNC right away. */
                if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
                    scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX) {
                        return sci_remote_node_context_resume(&idev->rnc,
                                                              atapi_remote_device_resume_done,
                                                              idev);
                }
        }

        if (state == SCI_STP_DEV_IDLE) {

                /* We pick up suspension events to handle specifically to this
@@ -625,6 +647,7 @@ enum sci_status sci_remote_device_complete_io(struct isci_host *ihost,
        case SCI_STP_DEV_CMD:
        case SCI_STP_DEV_NCQ:
        case SCI_STP_DEV_NCQ_ERROR:
        case SCI_STP_DEV_ATAPI_ERROR:
                status = common_complete_io(iport, idev, ireq);
                if (status != SCI_SUCCESS)
                        break;
@@ -1020,6 +1043,7 @@ static const struct sci_base_state sci_remote_device_state_table[] = {
        [SCI_STP_DEV_NCQ_ERROR] = {
                .enter_state = sci_stp_remote_device_ready_ncq_error_substate_enter,
        },
        [SCI_STP_DEV_ATAPI_ERROR] = { },
        [SCI_STP_DEV_AWAIT_RESET] = { },
        [SCI_SMP_DEV_IDLE] = {
                .enter_state = sci_smp_remote_device_ready_idle_substate_enter,
@@ -243,6 +243,15 @@ enum sci_remote_device_states {
         */
        SCI_STP_DEV_NCQ_ERROR,

        /**
         * This is the ATAPI error state for the STP ATAPI remote device.
         * This state is entered when the ATAPI device sends an error status
         * FIS without data while the device object is in the CMD state.
         * A suspension event is expected in this state.
         * The device object will resume right away.
         */
        SCI_STP_DEV_ATAPI_ERROR,

        /**
         * This is the READY substate indicates the device is waiting for the RESET task
         * coming to be recovered from certain hardware specific error.
@@ -481,7 +481,29 @@ static void sci_stp_optimized_request_construct(struct isci_request *ireq,
        }
}

static void sci_atapi_construct(struct isci_request *ireq)
{
        struct host_to_dev_fis *h2d_fis = &ireq->stp.cmd;
        struct sas_task *task;

        /* To simplify the implementation we take advantage of the
         * silicon's partial acceleration of atapi protocol (dma data
         * transfers), so we promote all commands to dma protocol.  This
         * breaks compatibility with ATA_HORKAGE_ATAPI_MOD16_DMA drives.
         */
        h2d_fis->features |= ATAPI_PKT_DMA;

        scu_stp_raw_request_construct_task_context(ireq);

        task = isci_request_access_task(ireq);
        if (task->data_dir == DMA_NONE)
                task->total_xfer_len = 0;

        /* clear the response so we can detect arrival of an
         * unsolicited d2h fis
         */
        ireq->stp.rsp.fis_type = 0;
}

static enum sci_status
sci_io_request_construct_sata(struct isci_request *ireq,
@@ -491,6 +513,7 @@ sci_io_request_construct_sata(struct isci_request *ireq,
{
        enum sci_status status = SCI_SUCCESS;
        struct sas_task *task = isci_request_access_task(ireq);
        struct domain_device *dev = ireq->target_device->domain_dev;

        /* check for management protocols */
        if (ireq->ttype == tmf_task) {
@@ -519,6 +542,13 @@ sci_io_request_construct_sata(struct isci_request *ireq,

        }

        /* ATAPI */
        if (dev->sata_dev.command_set == ATAPI_COMMAND_SET &&
            task->ata_task.fis.command == ATA_CMD_PACKET) {
                sci_atapi_construct(ireq);
                return SCI_SUCCESS;
        }

        /* non data */
        if (task->data_dir == DMA_NONE) {
                scu_stp_raw_request_construct_task_context(ireq);
@@ -627,7 +657,7 @@ enum sci_status sci_task_request_construct_sata(struct isci_request *ireq)

/**
 * sci_req_tx_bytes - bytes transferred when reply underruns request
 * @sci_req: request that was terminated early
 * @ireq: request that was terminated early
 */
#define SCU_TASK_CONTEXT_SRAM 0x200000
static u32 sci_req_tx_bytes(struct isci_request *ireq)
@@ -729,6 +759,10 @@ sci_io_request_terminate(struct isci_request *ireq)
        case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
        case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
        case SCI_REQ_STP_SOFT_RESET_WAIT_D2H:
        case SCI_REQ_ATAPI_WAIT_H2D:
        case SCI_REQ_ATAPI_WAIT_PIO_SETUP:
        case SCI_REQ_ATAPI_WAIT_D2H:
        case SCI_REQ_ATAPI_WAIT_TC_COMP:
                sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
                return SCI_SUCCESS;
        case SCI_REQ_TASK_WAIT_TC_RESP:
@@ -1194,8 +1228,8 @@ static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_re
{
        struct isci_stp_request *stp_req = &ireq->stp.req;
        struct scu_sgl_element_pair *sgl_pair;
        enum sci_status status = SCI_SUCCESS;
        struct scu_sgl_element *sgl;
        enum sci_status status;
        u32 offset;
        u32 len = 0;

@@ -1249,7 +1283,7 @@ static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_re
 */
static enum sci_status
sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
                                             u8 *data_buf, u32 len)
                                             u8 *data_buf, u32 len)
{
        struct isci_request *ireq;
        u8 *src_addr;
@@ -1423,6 +1457,128 @@ static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_re
        return status;
}

static enum sci_status process_unsolicited_fis(struct isci_request *ireq,
                                               u32 frame_index)
{
        struct isci_host *ihost = ireq->owning_controller;
        enum sci_status status;
        struct dev_to_host_fis *frame_header;
        u32 *frame_buffer;

        status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
                                                          frame_index,
                                                          (void **)&frame_header);

        if (status != SCI_SUCCESS)
                return status;

        if (frame_header->fis_type != FIS_REGD2H) {
                dev_err(&ireq->isci_host->pdev->dev,
                        "%s ERROR: invalid fis type 0x%X\n",
                        __func__, frame_header->fis_type);
                return SCI_FAILURE;
        }

        sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
                                                 frame_index,
                                                 (void **)&frame_buffer);

        sci_controller_copy_sata_response(&ireq->stp.rsp,
                                          (u32 *)frame_header,
                                          frame_buffer);

        /* Frame has been decoded return it to the controller */
        sci_controller_release_frame(ihost, frame_index);

        return status;
}

static enum sci_status atapi_d2h_reg_frame_handler(struct isci_request *ireq,
                                                   u32 frame_index)
{
        struct sas_task *task = isci_request_access_task(ireq);
        enum sci_status status;

        status = process_unsolicited_fis(ireq, frame_index);

        if (status == SCI_SUCCESS) {
                if (ireq->stp.rsp.status & ATA_ERR)
                        status = SCI_IO_FAILURE_RESPONSE_VALID;
        } else {
                status = SCI_IO_FAILURE_RESPONSE_VALID;
        }

        if (status != SCI_SUCCESS) {
                ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
                ireq->sci_status = status;
        } else {
                ireq->scu_status = SCU_TASK_DONE_GOOD;
                ireq->sci_status = SCI_SUCCESS;
        }

        /* the d2h ufi is the end of non-data commands */
        if (task->data_dir == DMA_NONE)
                sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);

        return status;
}

static void scu_atapi_reconstruct_raw_frame_task_context(struct isci_request *ireq)
{
        struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev);
        void *atapi_cdb = ireq->ttype_ptr.io_task_ptr->ata_task.atapi_packet;
        struct scu_task_context *task_context = ireq->tc;

        /* fill in the SCU Task Context for a DATA fis containing CDB in Raw Frame
         * type. The TC for previous Packet fis was already there, we only need to
         * change the H2D fis content.
         */
        memset(&ireq->stp.cmd, 0, sizeof(struct host_to_dev_fis));
        memcpy(((u8 *)&ireq->stp.cmd + sizeof(u32)), atapi_cdb, ATAPI_CDB_LEN);
        memset(&(task_context->type.stp), 0, sizeof(struct stp_task_context));
        task_context->type.stp.fis_type = FIS_DATA;
        task_context->transfer_length_bytes = dev->cdb_len;
}

static void scu_atapi_construct_task_context(struct isci_request *ireq)
{
        struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev);
        struct sas_task *task = isci_request_access_task(ireq);
        struct scu_task_context *task_context = ireq->tc;
        int cdb_len = dev->cdb_len;

        /* reference: SSTL 1.13.4.2
         * task_type, sata_direction
         */
        if (task->data_dir == DMA_TO_DEVICE) {
                task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_OUT;
                task_context->sata_direction = 0;
        } else {
                /* todo: for NO_DATA command, we need to send out raw frame. */
                task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_IN;
                task_context->sata_direction = 1;
        }

        memset(&task_context->type.stp, 0, sizeof(task_context->type.stp));
        task_context->type.stp.fis_type = FIS_DATA;

        memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
        memcpy(&ireq->stp.cmd.lbal, task->ata_task.atapi_packet, cdb_len);
        task_context->ssp_command_iu_length = cdb_len / sizeof(u32);

        /* task phase is set to TX_CMD */
        task_context->task_phase = 0x1;

        /* retry counter */
        task_context->stp_retry_count = 0;

        /* data transfer size. */
        task_context->transfer_length_bytes = task->total_xfer_len;

        /* setup sgl */
        sci_request_build_sgl(ireq);
}

enum sci_status
sci_io_request_frame_handler(struct isci_request *ireq,
                             u32 frame_index)
@@ -1835,6 +1991,24 @@ sci_io_request_frame_handler(struct isci_request *ireq,

                return status;
        }
        case SCI_REQ_ATAPI_WAIT_PIO_SETUP: {
                struct sas_task *task = isci_request_access_task(ireq);

                sci_controller_release_frame(ihost, frame_index);
                ireq->target_device->working_request = ireq;
                if (task->data_dir == DMA_NONE) {
                        sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_TC_COMP);
                        scu_atapi_reconstruct_raw_frame_task_context(ireq);
                } else {
                        sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H);
                        scu_atapi_construct_task_context(ireq);
                }

                sci_controller_continue_io(ireq);
                return SCI_SUCCESS;
        }
        case SCI_REQ_ATAPI_WAIT_D2H:
                return atapi_d2h_reg_frame_handler(ireq, frame_index);
        case SCI_REQ_ABORTING:
                /*
                 * TODO: Is it even possible to get an unsolicited frame in the
@@ -1966,6 +2140,112 @@ stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq,
        return SCI_SUCCESS;
}

static enum sci_status atapi_raw_completion(struct isci_request *ireq, u32 completion_code,
                                            enum sci_base_request_states next)
{
        enum sci_status status = SCI_SUCCESS;

        switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
        case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
                ireq->scu_status = SCU_TASK_DONE_GOOD;
                ireq->sci_status = SCI_SUCCESS;
                sci_change_state(&ireq->sm, next);
                break;
        default:
                /* All other completion status cause the IO to be complete.
                 * If a NAK was received, then it is up to the user to retry
                 * the request.
                 */
                ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
                ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;

                sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
                break;
        }

        return status;
}

static enum sci_status atapi_data_tc_completion_handler(struct isci_request *ireq,
                                                        u32 completion_code)
{
        struct isci_remote_device *idev = ireq->target_device;
        struct dev_to_host_fis *d2h = &ireq->stp.rsp;
        enum sci_status status = SCI_SUCCESS;

        switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
        case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
                sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
                break;

        case (SCU_TASK_DONE_UNEXP_FIS << SCU_COMPLETION_TL_STATUS_SHIFT): {
                u16 len = sci_req_tx_bytes(ireq);

                /* likely non-error data underrun, workaround missing
                 * d2h frame from the controller
                 */
                if (d2h->fis_type != FIS_REGD2H) {
                        d2h->fis_type = FIS_REGD2H;
                        d2h->flags = (1 << 6);
                        d2h->status = 0x50;
                        d2h->error = 0;
                        d2h->lbal = 0;
                        d2h->byte_count_low = len & 0xff;
                        d2h->byte_count_high = len >> 8;
                        d2h->device = 0xa0;
                        d2h->lbal_exp = 0;
                        d2h->lbam_exp = 0;
                        d2h->lbah_exp = 0;
                        d2h->_r_a = 0;
                        d2h->sector_count = 0x3;
                        d2h->sector_count_exp = 0;
                        d2h->_r_b = 0;
                        d2h->_r_c = 0;
                        d2h->_r_d = 0;
                }

                ireq->scu_status = SCU_TASK_DONE_GOOD;
                ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
                status = ireq->sci_status;

                /* the hw will have suspended the rnc, so complete the
                 * request upon pending resume
                 */
                sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR);
                break;
        }
        case (SCU_TASK_DONE_EXCESS_DATA << SCU_COMPLETION_TL_STATUS_SHIFT):
                /* In this case, there is no UF coming after.
                 * complete the IO now.
                 */
                ireq->scu_status = SCU_TASK_DONE_GOOD;
                ireq->sci_status = SCI_SUCCESS;
                sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
                break;

        default:
                if (d2h->fis_type == FIS_REGD2H) {
                        /* UF received change the device state to ATAPI_ERROR */
                        status = ireq->sci_status;
                        sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR);
                } else {
                        /* If receiving any non-success TC status, no UF
                         * received yet, then a UF for the status fis
                         * is coming after (XXX: suspect this is
                         * actually a protocol error or a bug like the
                         * DONE_UNEXP_FIS case)
                         */
                        ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
                        ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;

                        sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H);
                }
                break;
        }

        return status;
}

enum sci_status
sci_io_request_tc_completion(struct isci_request *ireq,
                             u32 completion_code)
@@ -2017,6 +2297,17 @@ sci_io_request_tc_completion(struct isci_request *ireq,
                return request_aborting_state_tc_event(ireq,
                                                       completion_code);

        case SCI_REQ_ATAPI_WAIT_H2D:
                return atapi_raw_completion(ireq, completion_code,
                                            SCI_REQ_ATAPI_WAIT_PIO_SETUP);

        case SCI_REQ_ATAPI_WAIT_TC_COMP:
                return atapi_raw_completion(ireq, completion_code,
                                            SCI_REQ_ATAPI_WAIT_D2H);

        case SCI_REQ_ATAPI_WAIT_D2H:
                return atapi_data_tc_completion_handler(ireq, completion_code);

        default:
                dev_warn(&ihost->pdev->dev,
                         "%s: SCIC IO Request given task completion "
@@ -2423,6 +2714,8 @@ static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_
         */
        if (fis->status & ATA_DF)
                ts->stat = SAS_PROTO_RESPONSE;
        else if (fis->status & ATA_ERR)
                ts->stat = SAM_STAT_CHECK_CONDITION;
        else
                ts->stat = SAM_STAT_GOOD;
@@ -2782,6 +3075,7 @@ static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
{
        struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
        struct domain_device *dev = ireq->target_device->domain_dev;
        enum sci_base_request_states state;
        struct sas_task *task;

        /* XXX as hch said always creating an internal sas_task for tmf
@@ -2793,26 +3087,30 @@ static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
         * substates
         */
        if (!task && dev->dev_type == SAS_END_DEV) {
                sci_change_state(sm, SCI_REQ_TASK_WAIT_TC_COMP);
                state = SCI_REQ_TASK_WAIT_TC_COMP;
        } else if (!task &&
                   (isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high ||
                    isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) {
                sci_change_state(sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED);
                state = SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED;
        } else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
                sci_change_state(sm, SCI_REQ_SMP_WAIT_RESP);
                state = SCI_REQ_SMP_WAIT_RESP;
        } else if (task && sas_protocol_ata(task->task_proto) &&
                   !task->ata_task.use_ncq) {
                u32 state;

                if (task->data_dir == DMA_NONE)
                if (dev->sata_dev.command_set == ATAPI_COMMAND_SET &&
                    task->ata_task.fis.command == ATA_CMD_PACKET) {
                        state = SCI_REQ_ATAPI_WAIT_H2D;
                } else if (task->data_dir == DMA_NONE) {
                        state = SCI_REQ_STP_NON_DATA_WAIT_H2D;
                else if (task->ata_task.dma_xfer)
                } else if (task->ata_task.dma_xfer) {
                        state = SCI_REQ_STP_UDMA_WAIT_TC_COMP;
                else /* PIO */
                } else /* PIO */ {
                        state = SCI_REQ_STP_PIO_WAIT_H2D;

                sci_change_state(sm, state);
                }
        } else {
                /* SSP or NCQ are fully accelerated, no substates */
                return;
        }
        sci_change_state(sm, state);
}

static void sci_request_completed_state_enter(struct sci_base_state_machine *sm)
@@ -2904,6 +3202,10 @@ static const struct sci_base_state sci_request_state_table[] = {
        [SCI_REQ_TASK_WAIT_TC_RESP] = { },
        [SCI_REQ_SMP_WAIT_RESP] = { },
        [SCI_REQ_SMP_WAIT_TC_COMP] = { },
        [SCI_REQ_ATAPI_WAIT_H2D] = { },
        [SCI_REQ_ATAPI_WAIT_PIO_SETUP] = { },
        [SCI_REQ_ATAPI_WAIT_D2H] = { },
        [SCI_REQ_ATAPI_WAIT_TC_COMP] = { },
        [SCI_REQ_COMPLETED] = {
                .enter_state = sci_request_completed_state_enter,
        },
@@ -96,7 +96,6 @@ enum sci_request_protocol {
 * to wait for another fis or if the transfer is complete. Upon
 * receipt of a d2h fis this will be the status field of that fis.
 * @sgl - track pio transfer progress as we iterate through the sgl
 * @device_cdb_len - atapi device advertises it's transfer constraints at setup
 */
struct isci_stp_request {
        u32 pio_len;
@@ -107,7 +106,6 @@ struct isci_stp_request {
                u8 set;
                u32 offset;
        } sgl;
        u32 device_cdb_len;
};

struct isci_request {
@@ -248,6 +246,32 @@ enum sci_base_request_states {
         */
        SCI_REQ_STP_PIO_DATA_OUT,

        /*
         * While in this state the IO request object is waiting for the TC
         * completion notification for the H2D Register FIS
         */
        SCI_REQ_ATAPI_WAIT_H2D,

        /*
         * While in this state the IO request object is waiting for a
         * PIO Setup fis.
         */
        SCI_REQ_ATAPI_WAIT_PIO_SETUP,

        /*
         * The non-data IO transitions to this state after receiving the TC
         * completion.  While in this state the IO request object is waiting
         * for the D2H status frame as a UF.
         */
        SCI_REQ_ATAPI_WAIT_D2H,

        /*
         * When transmitting raw frames hardware reports task context completion
         * after every frame submission, so in the non-accelerated case we need
         * to expect the completion for the "cdb" frame.
         */
        SCI_REQ_ATAPI_WAIT_TC_COMP,

        /*
         * The AWAIT_TC_COMPLETION sub-state indicates that the started raw
         * task management request is waiting for the transmission of the
@@ -801,7 +801,7 @@ void sas_slave_destroy(struct scsi_device *scsi_dev)
        struct domain_device *dev = sdev_to_domain_dev(scsi_dev);

        if (dev_is_sata(dev))
                dev->sata_dev.ap->link.device[0].class = ATA_DEV_NONE;
                sas_to_ata_dev(dev)->class = ATA_DEV_NONE;
}

int sas_change_queue_depth(struct scsi_device *sdev, int depth, int reason)
@@ -389,6 +389,11 @@ sdev_to_domain_dev(struct scsi_device *sdev) {
        return starget_to_domain_dev(sdev->sdev_target);
}

static inline struct ata_device *sas_to_ata_dev(struct domain_device *dev)
{
        return &dev->sata_dev.ap->link.device[0];
}

static inline struct domain_device *
cmd_to_domain_dev(struct scsi_cmnd *cmd)
{