Merge master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (87 commits)
  [SCSI] fusion: fix domain validation loops
  [SCSI] qla2xxx: fix regression on sparc64
  [SCSI] modalias for scsi devices
  [SCSI] sg: cap reserved_size values at max_sectors
  [SCSI] BusLogic: stop using check_region
  [SCSI] tgt: fix rdma transfer bugs
  [SCSI] aacraid: fix aacraid not finding device
  [SCSI] aacraid: Correct SMC products in aacraid.txt
  [SCSI] scsi_error.c: Add EH Start Unit retry
  [SCSI] aacraid: [Fastboot] Panics for AACRAID driver during 'insmod' for kexec test.
  [SCSI] ipr: Driver version to 2.3.2
  [SCSI] ipr: Faster sg list fetch
  [SCSI] ipr: Return better qc_issue errors
  [SCSI] ipr: Disrupt device error
  [SCSI] ipr: Improve async error logging level control
  [SCSI] ipr: PCI unblock config access fix
  [SCSI] ipr: Fix for oops following SATA request sense
  [SCSI] ipr: Log error for SAS dual path switch
  [SCSI] ipr: Enable logging of debug error data for all devices
  [SCSI] ipr: Add new PCI-E IDs to device table
  ...
commit 4f7a307dc6
@ -37,7 +37,11 @@ Supported Cards/Chipsets
9005:0286:9005:029d Adaptec 2420SA (Intruder HP release)
9005:0286:9005:02ac Adaptec 1800 (Typhoon44)
9005:0285:9005:02b5 Adaptec 5445 (Voodoo44)
9005:0285:15d9:02b5 SMC AOC-USAS-S4i
9005:0285:15d9:02c9 SMC AOC-USAS-S4iR
9005:0285:9005:02b6 Adaptec 5805 (Voodoo80)
9005:0285:15d9:02b6 SMC AOC-USAS-S8i
9005:0285:15d9:02ca SMC AOC-USAS-S8iR
9005:0285:9005:02b7 Adaptec 5085 (Voodoo08)
9005:0285:9005:02bb Adaptec 3405 (Marauder40LP)
9005:0285:9005:02bc Adaptec 3805 (Marauder80LP)
@ -93,6 +97,9 @@ Supported Cards/Chipsets
9005:0286:9005:02ae (Aurora Lite ARK)
9005:0285:9005:02b0 (Sunrise Lake ARK)
9005:0285:9005:02b1 Adaptec (Voodoo 8 internal 8 external)
9005:0285:108e:7aac SUN STK RAID REM (Voodoo44 Coyote)
9005:0285:108e:0286 SUN SG-XPCIESAS-R-IN (Cougar)
9005:0285:108e:0287 SUN SG-XPCIESAS-R-EX (Prometheus)

People
-------------------------
@ -562,11 +562,6 @@ if only one has a flaw for some SCSI feature, you can disable the
support by the driver of this feature at linux start-up and enable
this feature after boot-up only for devices that support it safely.

CONFIG_SCSI_NCR53C8XX_PROFILE_SUPPORT (default answer: n)
This option must be set for profiling information to be gathered
and printed out through the proc file system. This features may
impact performances.

CONFIG_SCSI_NCR53C8XX_IOMAPPED (default answer: n)
Answer "y" if you suspect your mother board to not allow memory mapped I/O.
May slow down performance a little. This option is required by
@ -1925,6 +1925,8 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);

q->sg_reserved_size = INT_MAX;

/*
* all done
*/
@ -78,7 +78,9 @@ static int sg_set_timeout(request_queue_t *q, int __user *p)

static int sg_get_reserved_size(request_queue_t *q, int __user *p)
{
return put_user(q->sg_reserved_size, p);
unsigned val = min(q->sg_reserved_size, q->max_sectors << 9);

return put_user(val, p);
}

static int sg_set_reserved_size(request_queue_t *q, int __user *p)
@ -201,7 +201,7 @@ static int iser_post_receive_control(struct iscsi_conn *conn)
* what's common for both schemes is that the connection is not started
*/
if (conn->c_stage != ISCSI_CONN_STARTED)
rx_data_size = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH;
rx_data_size = ISCSI_DEF_MAX_RECV_SEG_LEN;
else /* FIXME till user space sets conn->max_recv_dlength correctly */
rx_data_size = 128;

@ -1531,6 +1531,7 @@ mpt_resume(struct pci_dev *pdev)
MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
u32 device_state = pdev->current_state;
int recovery_state;
int err;

printk(MYIOC_s_INFO_FMT
"pci-resume: pdev=0x%p, slot=%s, Previous operating state [D%d]\n",
@ -1538,7 +1539,9 @@ mpt_resume(struct pci_dev *pdev)

pci_set_power_state(pdev, 0);
pci_restore_state(pdev);
pci_enable_device(pdev);
err = pci_enable_device(pdev);
if (err)
return err;

/* enable interrupts */
CHIPREG_WRITE32(&ioc->chip->IntMask, MPI_HIM_DIM);
@ -4739,12 +4742,8 @@ mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum)
}

/**
* mpt_inactive_raid_list_free
*
* This clears this link list.
*
* @ioc - pointer to per adapter structure
*
* mpt_inactive_raid_list_free - This clears this link list.
* @ioc : pointer to per adapter structure
**/
static void
mpt_inactive_raid_list_free(MPT_ADAPTER *ioc)
@ -4764,15 +4763,11 @@ mpt_inactive_raid_list_free(MPT_ADAPTER *ioc)
}

/**
* mpt_inactive_raid_volumes
*
* This sets up link list of phy_disk_nums for devices belonging in an inactive volume
*
* @ioc - pointer to per adapter structure
* @channel - volume channel
* @id - volume target id
*
* mpt_inactive_raid_volumes - sets up link list of phy_disk_nums for devices belonging in an inactive volume
*
* @ioc : pointer to per adapter structure
* @channel : volume channel
* @id : volume target id
**/
static void
mpt_inactive_raid_volumes(MPT_ADAPTER *ioc, u8 channel, u8 id)
@ -6663,7 +6658,7 @@ union loginfo_type {
/**
* mpt_iocstatus_info_config - IOCSTATUS information for config pages
* @ioc: Pointer to MPT_ADAPTER structure
* ioc_status: U32 IOCStatus word from IOC
* @ioc_status: U32 IOCStatus word from IOC
* @mf: Pointer to MPT request frame
*
* Refer to lsi/mpi.h.
@ -994,6 +994,7 @@ typedef struct _MPT_SCSI_HOST {
int scandv_wait_done;
long last_queue_full;
u16 tm_iocstatus;
u16 spi_pending;
struct list_head target_reset_list;
} MPT_SCSI_HOST;

@ -819,10 +819,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
sc->resid=0;
case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */
case MPI_IOCSTATUS_SUCCESS: /* 0x0000 */
if (scsi_status == MPI_SCSI_STATUS_BUSY)
sc->result = (DID_BUS_BUSY << 16) | scsi_status;
else
sc->result = (DID_OK << 16) | scsi_status;
sc->result = (DID_OK << 16) | scsi_status;
if (scsi_state == 0) {
;
} else if (scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID) {
@ -1188,20 +1185,7 @@ mptscsih_suspend(struct pci_dev *pdev, pm_message_t state)
int
mptscsih_resume(struct pci_dev *pdev)
{
MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
struct Scsi_Host *host = ioc->sh;
MPT_SCSI_HOST *hd;

mpt_resume(pdev);

if(!host)
return 0;

hd = (MPT_SCSI_HOST *)host->hostdata;
if(!hd)
return 0;

return 0;
return mpt_resume(pdev);
}

#endif
@ -1537,21 +1521,23 @@ mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx)
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mptscsih_TMHandler - Generic handler for SCSI Task Management.
* Fall through to mpt_HardResetHandler if: not operational, too many
* failed TM requests or handshake failure.
*
* @ioc: Pointer to MPT_ADAPTER structure
* @hd: Pointer to MPT SCSI HOST structure
* @type: Task Management type
* @channel: channel number for task management
* @id: Logical Target ID for reset (if appropriate)
* @lun: Logical Unit for reset (if appropriate)
* @ctx2abort: Context for the task to be aborted (if appropriate)
* @timeout: timeout for task management control
*
* Fall through to mpt_HardResetHandler if: not operational, too many
* failed TM requests or handshake failure.
*
* Remark: Currently invoked from a non-interrupt thread (_bh).
*
* Remark: With old EH code, at most 1 SCSI TaskMgmt function per IOC
* will be active.
*
* Returns 0 for SUCCESS, or FAILED.
* Returns 0 for SUCCESS, or %FAILED.
**/
int
mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, int ctx2abort, ulong timeout)
@ -1650,9 +1636,11 @@ mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, int c
* mptscsih_IssueTaskMgmt - Generic send Task Management function.
* @hd: Pointer to MPT_SCSI_HOST structure
* @type: Task Management type
* @channel: channel number for task management
* @id: Logical Target ID for reset (if appropriate)
* @lun: Logical Unit for reset (if appropriate)
* @ctx2abort: Context for the task to be aborted (if appropriate)
* @timeout: timeout for task management control
*
* Remark: _HardResetHandler can be invoked from an interrupt thread (timer)
* or a non-interrupt thread. In the former, must not call schedule().
@ -2022,6 +2010,7 @@ mptscsih_tm_pending_wait(MPT_SCSI_HOST * hd)
/**
* mptscsih_tm_wait_for_completion - wait for completion of TM task
* @hd: Pointer to MPT host structure.
* @timeout: timeout value
*
* Returns {SUCCESS,FAILED}.
*/
@ -96,14 +96,13 @@ static int mptspiTaskCtx = -1;
|
||||
static int mptspiInternalCtx = -1; /* Used only for internal commands */
|
||||
|
||||
/**
|
||||
* mptspi_setTargetNegoParms - Update the target negotiation
|
||||
* parameters based on the the Inquiry data, adapter capabilities,
|
||||
* and NVRAM settings
|
||||
*
|
||||
* mptspi_setTargetNegoParms - Update the target negotiation parameters
|
||||
* @hd: Pointer to a SCSI Host Structure
|
||||
* @vtarget: per target private data
|
||||
* @target: per target private data
|
||||
* @sdev: SCSI device
|
||||
*
|
||||
* Update the target negotiation parameters based on the the Inquiry
|
||||
* data, adapter capabilities, and NVRAM settings.
|
||||
**/
|
||||
static void
|
||||
mptspi_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtTarget *target,
|
||||
@ -234,7 +233,7 @@ mptspi_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtTarget *target,
|
||||
/**
|
||||
* mptspi_writeIOCPage4 - write IOC Page 4
|
||||
* @hd: Pointer to a SCSI Host Structure
|
||||
* @channel:
|
||||
* @channel: channel number
|
||||
* @id: write IOC Page4 for this ID & Bus
|
||||
*
|
||||
* Return: -EAGAIN if unable to obtain a Message Frame
|
||||
@ -446,7 +445,7 @@ static int mptspi_target_alloc(struct scsi_target *starget)
|
||||
return 0;
|
||||
}
|
||||
|
||||
void
|
||||
static void
|
||||
mptspi_target_destroy(struct scsi_target *starget)
|
||||
{
|
||||
if (starget->hostdata)
|
||||
@ -677,7 +676,9 @@ static void mptspi_dv_device(struct _MPT_SCSI_HOST *hd,
|
||||
return;
|
||||
}
|
||||
|
||||
hd->spi_pending |= (1 << sdev->id);
|
||||
spi_dv_device(sdev);
|
||||
hd->spi_pending &= ~(1 << sdev->id);
|
||||
|
||||
if (sdev->channel == 1 &&
|
||||
mptscsih_quiesce_raid(hd, 0, vtarget->channel, vtarget->id) < 0)
|
||||
@ -1203,11 +1204,27 @@ mptspi_dv_renegotiate_work(struct work_struct *work)
|
||||
container_of(work, struct work_queue_wrapper, work);
|
||||
struct _MPT_SCSI_HOST *hd = wqw->hd;
|
||||
struct scsi_device *sdev;
|
||||
struct scsi_target *starget;
|
||||
struct _CONFIG_PAGE_SCSI_DEVICE_1 pg1;
|
||||
u32 nego;
|
||||
|
||||
kfree(wqw);
|
||||
|
||||
shost_for_each_device(sdev, hd->ioc->sh)
|
||||
mptspi_dv_device(hd, sdev);
|
||||
if (hd->spi_pending) {
|
||||
shost_for_each_device(sdev, hd->ioc->sh) {
|
||||
if (hd->spi_pending & (1 << sdev->id))
|
||||
continue;
|
||||
starget = scsi_target(sdev);
|
||||
nego = mptspi_getRP(starget);
|
||||
pg1.RequestedParameters = cpu_to_le32(nego);
|
||||
pg1.Reserved = 0;
|
||||
pg1.Configuration = 0;
|
||||
mptspi_write_spi_device_pg1(starget, &pg1);
|
||||
}
|
||||
} else {
|
||||
shost_for_each_device(sdev, hd->ioc->sh)
|
||||
mptspi_dv_device(hd, sdev);
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
@ -1453,6 +1470,7 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
init_waitqueue_head(&hd->scandv_waitq);
|
||||
hd->scandv_wait_done = 0;
|
||||
hd->last_queue_full = 0;
|
||||
hd->spi_pending = 0;
|
||||
|
||||
/* Some versions of the firmware don't support page 0; without
|
||||
* that we can't get the parameters */
|
||||
|
@ -186,7 +186,7 @@ void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req, unsigned long timeout)
|
||||
{
|
||||
fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
|
||||
fsf_req->timer.data = (unsigned long) fsf_req->adapter;
|
||||
fsf_req->timer.expires = timeout;
|
||||
fsf_req->timer.expires = jiffies + timeout;
|
||||
add_timer(&fsf_req->timer);
|
||||
}
|
||||
|
||||
|
@ -299,9 +299,10 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
|
||||
}
|
||||
|
||||
/* log additional information provided by FSF (if any) */
|
||||
if (unlikely(qtcb->header.log_length)) {
|
||||
if (likely(qtcb->header.log_length)) {
|
||||
/* do not trust them ;-) */
|
||||
if (qtcb->header.log_start > sizeof(struct fsf_qtcb)) {
|
||||
if (unlikely(qtcb->header.log_start >
|
||||
sizeof(struct fsf_qtcb))) {
|
||||
ZFCP_LOG_NORMAL
|
||||
("bug: ULP (FSF logging) log data starts "
|
||||
"beyond end of packet header. Ignored. "
|
||||
@ -310,8 +311,9 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
|
||||
sizeof(struct fsf_qtcb));
|
||||
goto forget_log;
|
||||
}
|
||||
if ((size_t) (qtcb->header.log_start + qtcb->header.log_length)
|
||||
> sizeof(struct fsf_qtcb)) {
|
||||
if (unlikely((size_t) (qtcb->header.log_start +
|
||||
qtcb->header.log_length) >
|
||||
sizeof(struct fsf_qtcb))) {
|
||||
ZFCP_LOG_NORMAL("bug: ULP (FSF logging) log data ends "
|
||||
"beyond end of packet header. Ignored. "
|
||||
"(start=%i, length=%i, size=%li)\n",
|
||||
|
@ -579,17 +579,17 @@ static void __init BusLogic_InitializeProbeInfoListISA(struct BusLogic_HostAdapt
|
||||
/*
|
||||
Append the list of standard BusLogic MultiMaster ISA I/O Addresses.
|
||||
*/
|
||||
if (BusLogic_ProbeOptions.LimitedProbeISA ? BusLogic_ProbeOptions.Probe330 : check_region(0x330, BusLogic_MultiMasterAddressCount) == 0)
|
||||
if (!BusLogic_ProbeOptions.LimitedProbeISA || BusLogic_ProbeOptions.Probe330)
|
||||
BusLogic_AppendProbeAddressISA(0x330);
|
||||
if (BusLogic_ProbeOptions.LimitedProbeISA ? BusLogic_ProbeOptions.Probe334 : check_region(0x334, BusLogic_MultiMasterAddressCount) == 0)
|
||||
if (!BusLogic_ProbeOptions.LimitedProbeISA || BusLogic_ProbeOptions.Probe334)
|
||||
BusLogic_AppendProbeAddressISA(0x334);
|
||||
if (BusLogic_ProbeOptions.LimitedProbeISA ? BusLogic_ProbeOptions.Probe230 : check_region(0x230, BusLogic_MultiMasterAddressCount) == 0)
|
||||
if (!BusLogic_ProbeOptions.LimitedProbeISA || BusLogic_ProbeOptions.Probe230)
|
||||
BusLogic_AppendProbeAddressISA(0x230);
|
||||
if (BusLogic_ProbeOptions.LimitedProbeISA ? BusLogic_ProbeOptions.Probe234 : check_region(0x234, BusLogic_MultiMasterAddressCount) == 0)
|
||||
if (!BusLogic_ProbeOptions.LimitedProbeISA || BusLogic_ProbeOptions.Probe234)
|
||||
BusLogic_AppendProbeAddressISA(0x234);
|
||||
if (BusLogic_ProbeOptions.LimitedProbeISA ? BusLogic_ProbeOptions.Probe130 : check_region(0x130, BusLogic_MultiMasterAddressCount) == 0)
|
||||
if (!BusLogic_ProbeOptions.LimitedProbeISA || BusLogic_ProbeOptions.Probe130)
|
||||
BusLogic_AppendProbeAddressISA(0x130);
|
||||
if (BusLogic_ProbeOptions.LimitedProbeISA ? BusLogic_ProbeOptions.Probe134 : check_region(0x134, BusLogic_MultiMasterAddressCount) == 0)
|
||||
if (!BusLogic_ProbeOptions.LimitedProbeISA || BusLogic_ProbeOptions.Probe134)
|
||||
BusLogic_AppendProbeAddressISA(0x134);
|
||||
}
|
||||
|
||||
@ -795,7 +795,9 @@ static int __init BusLogic_InitializeMultiMasterProbeInfo(struct BusLogic_HostAd
|
||||
host adapters are probed.
|
||||
*/
|
||||
if (!BusLogic_ProbeOptions.NoProbeISA)
|
||||
if (PrimaryProbeInfo->IO_Address == 0 && (BusLogic_ProbeOptions.LimitedProbeISA ? BusLogic_ProbeOptions.Probe330 : check_region(0x330, BusLogic_MultiMasterAddressCount) == 0)) {
|
||||
if (PrimaryProbeInfo->IO_Address == 0 &&
|
||||
(!BusLogic_ProbeOptions.LimitedProbeISA ||
|
||||
BusLogic_ProbeOptions.Probe330)) {
|
||||
PrimaryProbeInfo->HostAdapterType = BusLogic_MultiMaster;
|
||||
PrimaryProbeInfo->HostAdapterBusType = BusLogic_ISA_Bus;
|
||||
PrimaryProbeInfo->IO_Address = 0x330;
|
||||
@ -805,15 +807,25 @@ static int __init BusLogic_InitializeMultiMasterProbeInfo(struct BusLogic_HostAd
|
||||
omitting the Primary I/O Address which has already been handled.
|
||||
*/
|
||||
if (!BusLogic_ProbeOptions.NoProbeISA) {
|
||||
if (!StandardAddressSeen[1] && (BusLogic_ProbeOptions.LimitedProbeISA ? BusLogic_ProbeOptions.Probe334 : check_region(0x334, BusLogic_MultiMasterAddressCount) == 0))
|
||||
if (!StandardAddressSeen[1] &&
|
||||
(!BusLogic_ProbeOptions.LimitedProbeISA ||
|
||||
BusLogic_ProbeOptions.Probe334))
|
||||
BusLogic_AppendProbeAddressISA(0x334);
|
||||
if (!StandardAddressSeen[2] && (BusLogic_ProbeOptions.LimitedProbeISA ? BusLogic_ProbeOptions.Probe230 : check_region(0x230, BusLogic_MultiMasterAddressCount) == 0))
|
||||
if (!StandardAddressSeen[2] &&
|
||||
(!BusLogic_ProbeOptions.LimitedProbeISA ||
|
||||
BusLogic_ProbeOptions.Probe230))
|
||||
BusLogic_AppendProbeAddressISA(0x230);
|
||||
if (!StandardAddressSeen[3] && (BusLogic_ProbeOptions.LimitedProbeISA ? BusLogic_ProbeOptions.Probe234 : check_region(0x234, BusLogic_MultiMasterAddressCount) == 0))
|
||||
if (!StandardAddressSeen[3] &&
|
||||
(!BusLogic_ProbeOptions.LimitedProbeISA ||
|
||||
BusLogic_ProbeOptions.Probe234))
|
||||
BusLogic_AppendProbeAddressISA(0x234);
|
||||
if (!StandardAddressSeen[4] && (BusLogic_ProbeOptions.LimitedProbeISA ? BusLogic_ProbeOptions.Probe130 : check_region(0x130, BusLogic_MultiMasterAddressCount) == 0))
|
||||
if (!StandardAddressSeen[4] &&
|
||||
(!BusLogic_ProbeOptions.LimitedProbeISA ||
|
||||
BusLogic_ProbeOptions.Probe130))
|
||||
BusLogic_AppendProbeAddressISA(0x130);
|
||||
if (!StandardAddressSeen[5] && (BusLogic_ProbeOptions.LimitedProbeISA ? BusLogic_ProbeOptions.Probe134 : check_region(0x134, BusLogic_MultiMasterAddressCount) == 0))
|
||||
if (!StandardAddressSeen[5] &&
|
||||
(!BusLogic_ProbeOptions.LimitedProbeISA ||
|
||||
BusLogic_ProbeOptions.Probe134))
|
||||
BusLogic_AppendProbeAddressISA(0x134);
|
||||
}
|
||||
/*
|
||||
@ -2220,22 +2232,35 @@ static int __init BusLogic_init(void)
|
||||
HostAdapter->PCI_Device = ProbeInfo->PCI_Device;
|
||||
HostAdapter->IRQ_Channel = ProbeInfo->IRQ_Channel;
|
||||
HostAdapter->AddressCount = BusLogic_HostAdapterAddressCount[HostAdapter->HostAdapterType];
|
||||
|
||||
/*
|
||||
Make sure region is free prior to probing.
|
||||
*/
|
||||
if (!request_region(HostAdapter->IO_Address, HostAdapter->AddressCount,
|
||||
"BusLogic"))
|
||||
continue;
|
||||
/*
|
||||
Probe the Host Adapter. If unsuccessful, abort further initialization.
|
||||
*/
|
||||
if (!BusLogic_ProbeHostAdapter(HostAdapter))
|
||||
if (!BusLogic_ProbeHostAdapter(HostAdapter)) {
|
||||
release_region(HostAdapter->IO_Address, HostAdapter->AddressCount);
|
||||
continue;
|
||||
}
|
||||
/*
|
||||
Hard Reset the Host Adapter. If unsuccessful, abort further
|
||||
initialization.
|
||||
*/
|
||||
if (!BusLogic_HardwareResetHostAdapter(HostAdapter, true))
|
||||
if (!BusLogic_HardwareResetHostAdapter(HostAdapter, true)) {
|
||||
release_region(HostAdapter->IO_Address, HostAdapter->AddressCount);
|
||||
continue;
|
||||
}
|
||||
/*
|
||||
Check the Host Adapter. If unsuccessful, abort further initialization.
|
||||
*/
|
||||
if (!BusLogic_CheckHostAdapter(HostAdapter))
|
||||
if (!BusLogic_CheckHostAdapter(HostAdapter)) {
|
||||
release_region(HostAdapter->IO_Address, HostAdapter->AddressCount);
|
||||
continue;
|
||||
}
|
||||
/*
|
||||
Initialize the Driver Options field if provided.
|
||||
*/
|
||||
@ -2246,16 +2271,6 @@ static int __init BusLogic_init(void)
|
||||
and Electronic Mail Address.
|
||||
*/
|
||||
BusLogic_AnnounceDriver(HostAdapter);
|
||||
/*
|
||||
Register usage of the I/O Address range. From this point onward, any
|
||||
failure will be assumed to be due to a problem with the Host Adapter,
|
||||
rather than due to having mistakenly identified this port as belonging
|
||||
to a BusLogic Host Adapter. The I/O Address range will not be
|
||||
released, thereby preventing it from being incorrectly identified as
|
||||
any other type of Host Adapter.
|
||||
*/
|
||||
if (!request_region(HostAdapter->IO_Address, HostAdapter->AddressCount, "BusLogic"))
|
||||
continue;
|
||||
/*
|
||||
Register the SCSI Host structure.
|
||||
*/
|
||||
@ -2280,6 +2295,12 @@ static int __init BusLogic_init(void)
|
||||
Acquire the System Resources necessary to use the Host Adapter, then
|
||||
Create the Initial CCBs, Initialize the Host Adapter, and finally
|
||||
perform Target Device Inquiry.
|
||||
|
||||
From this point onward, any failure will be assumed to be due to a
|
||||
problem with the Host Adapter, rather than due to having mistakenly
|
||||
identified this port as belonging to a BusLogic Host Adapter. The
|
||||
I/O Address range will not be released, thereby preventing it from
|
||||
being incorrectly identified as any other type of Host Adapter.
|
||||
*/
|
||||
if (BusLogic_ReadHostAdapterConfiguration(HostAdapter) &&
|
||||
BusLogic_ReportHostAdapterConfiguration(HostAdapter) &&
|
||||
@ -3598,6 +3619,7 @@ static void __exit BusLogic_exit(void)
|
||||
|
||||
__setup("BusLogic=", BusLogic_Setup);
|
||||
|
||||
#ifdef MODULE
|
||||
static struct pci_device_id BusLogic_pci_tbl[] __devinitdata = {
|
||||
{ PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER,
|
||||
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
|
||||
@ -3607,6 +3629,7 @@ static struct pci_device_id BusLogic_pci_tbl[] __devinitdata = {
|
||||
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
|
||||
{ }
|
||||
};
|
||||
#endif
|
||||
MODULE_DEVICE_TABLE(pci, BusLogic_pci_tbl);
|
||||
|
||||
module_init(BusLogic_init);
|
||||
|
@ -241,6 +241,12 @@ config SCSI_SCAN_ASYNC
|
||||
You can override this choice by specifying "scsi_mod.scan=sync"
|
||||
or async on the kernel's command line.
|
||||
|
||||
config SCSI_WAIT_SCAN
|
||||
tristate
|
||||
default m
|
||||
depends on SCSI
|
||||
depends on MODULES
|
||||
|
||||
menu "SCSI Transports"
|
||||
depends on SCSI
|
||||
|
||||
@ -1194,17 +1200,6 @@ config SCSI_NCR53C8XX_SYNC
|
||||
There is no safe option other than using good cabling, right
|
||||
terminations and SCSI conformant devices.
|
||||
|
||||
config SCSI_NCR53C8XX_PROFILE
|
||||
bool "enable profiling"
|
||||
depends on SCSI_ZALON || SCSI_NCR_Q720
|
||||
help
|
||||
This option allows you to enable profiling information gathering.
|
||||
These statistics are not very accurate due to the low frequency
|
||||
of the kernel clock (100 Hz on i386) and have performance impact
|
||||
on systems that use very fast devices.
|
||||
|
||||
The normal answer therefore is N.
|
||||
|
||||
config SCSI_NCR53C8XX_NO_DISCONNECT
|
||||
bool "not allow targets to disconnect"
|
||||
depends on (SCSI_ZALON || SCSI_NCR_Q720) && SCSI_NCR53C8XX_DEFAULT_TAGS=0
|
||||
@ -1334,11 +1329,6 @@ config SCSI_SIM710
|
||||
|
||||
It currently supports Compaq EISA cards and NCR MCA cards
|
||||
|
||||
config 53C700_IO_MAPPED
|
||||
bool
|
||||
depends on SCSI_SIM710
|
||||
default y
|
||||
|
||||
config SCSI_SYM53C416
|
||||
tristate "Symbios 53c416 SCSI support"
|
||||
depends on ISA && SCSI
|
||||
|
@ -146,7 +146,7 @@ obj-$(CONFIG_CHR_DEV_SCH) += ch.o
|
||||
# This goes last, so that "real" scsi devices probe earlier
|
||||
obj-$(CONFIG_SCSI_DEBUG) += scsi_debug.o
|
||||
|
||||
obj-$(CONFIG_SCSI) += scsi_wait_scan.o
|
||||
obj-$(CONFIG_SCSI_WAIT_SCAN) += scsi_wait_scan.o
|
||||
|
||||
scsi_mod-y += scsi.o hosts.o scsi_ioctl.o constants.o \
|
||||
scsicam.o scsi_error.o scsi_lib.o \
|
||||
|
@ -5,7 +5,7 @@
|
||||
* based on the old aacraid driver that is..
|
||||
* Adaptec aacraid device driver for Linux.
|
||||
*
|
||||
* Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
|
||||
* Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
@ -172,6 +172,30 @@ MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB) size.
|
||||
int expose_physicals = -1;
|
||||
module_param(expose_physicals, int, S_IRUGO|S_IWUSR);
|
||||
MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays. -1=protect 0=off, 1=on");
|
||||
|
||||
|
||||
static inline int aac_valid_context(struct scsi_cmnd *scsicmd,
|
||||
struct fib *fibptr) {
|
||||
struct scsi_device *device;
|
||||
|
||||
if (unlikely(!scsicmd || !scsicmd->scsi_done )) {
|
||||
dprintk((KERN_WARNING "aac_valid_context: scsi command corrupt\n"))
|
||||
;
|
||||
aac_fib_complete(fibptr);
|
||||
aac_fib_free(fibptr);
|
||||
return 0;
|
||||
}
|
||||
scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
|
||||
device = scsicmd->device;
|
||||
if (unlikely(!device || !scsi_device_online(device))) {
|
||||
dprintk((KERN_WARNING "aac_valid_context: scsi device corrupt\n"));
|
||||
aac_fib_complete(fibptr);
|
||||
aac_fib_free(fibptr);
|
||||
return 0;
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* aac_get_config_status - check the adapter configuration
|
||||
* @common: adapter to query
|
||||
@ -258,13 +282,10 @@ int aac_get_containers(struct aac_dev *dev)
|
||||
u32 index;
|
||||
int status = 0;
|
||||
struct fib * fibptr;
|
||||
unsigned instance;
|
||||
struct aac_get_container_count *dinfo;
|
||||
struct aac_get_container_count_resp *dresp;
|
||||
int maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
|
||||
|
||||
instance = dev->scsi_host_ptr->unique_id;
|
||||
|
||||
if (!(fibptr = aac_fib_alloc(dev)))
|
||||
return -ENOMEM;
|
||||
|
||||
@ -284,88 +305,35 @@ int aac_get_containers(struct aac_dev *dev)
|
||||
maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries);
|
||||
aac_fib_complete(fibptr);
|
||||
}
|
||||
aac_fib_free(fibptr);
|
||||
|
||||
if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS)
|
||||
maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
|
||||
fsa_dev_ptr = kmalloc(
|
||||
sizeof(*fsa_dev_ptr) * maximum_num_containers, GFP_KERNEL);
|
||||
if (!fsa_dev_ptr) {
|
||||
aac_fib_free(fibptr);
|
||||
fsa_dev_ptr = kmalloc(sizeof(*fsa_dev_ptr) * maximum_num_containers,
|
||||
GFP_KERNEL);
|
||||
if (!fsa_dev_ptr)
|
||||
return -ENOMEM;
|
||||
}
|
||||
memset(fsa_dev_ptr, 0, sizeof(*fsa_dev_ptr) * maximum_num_containers);
|
||||
|
||||
dev->fsa_dev = fsa_dev_ptr;
|
||||
dev->maximum_num_containers = maximum_num_containers;
|
||||
|
||||
for (index = 0; index < dev->maximum_num_containers; index++) {
|
||||
struct aac_query_mount *dinfo;
|
||||
struct aac_mount *dresp;
|
||||
|
||||
for (index = 0; index < dev->maximum_num_containers; ) {
|
||||
fsa_dev_ptr[index].devname[0] = '\0';
|
||||
|
||||
aac_fib_init(fibptr);
|
||||
dinfo = (struct aac_query_mount *) fib_data(fibptr);
|
||||
status = aac_probe_container(dev, index);
|
||||
|
||||
dinfo->command = cpu_to_le32(VM_NameServe);
|
||||
dinfo->count = cpu_to_le32(index);
|
||||
dinfo->type = cpu_to_le32(FT_FILESYS);
|
||||
|
||||
status = aac_fib_send(ContainerCommand,
|
||||
fibptr,
|
||||
sizeof (struct aac_query_mount),
|
||||
FsaNormal,
|
||||
1, 1,
|
||||
NULL, NULL);
|
||||
if (status < 0 ) {
|
||||
if (status < 0) {
|
||||
printk(KERN_WARNING "aac_get_containers: SendFIB failed.\n");
|
||||
break;
|
||||
}
|
||||
dresp = (struct aac_mount *)fib_data(fibptr);
|
||||
|
||||
if ((le32_to_cpu(dresp->status) == ST_OK) &&
|
||||
(le32_to_cpu(dresp->mnt[0].vol) == CT_NONE)) {
|
||||
dinfo->command = cpu_to_le32(VM_NameServe64);
|
||||
dinfo->count = cpu_to_le32(index);
|
||||
dinfo->type = cpu_to_le32(FT_FILESYS);
|
||||
|
||||
if (aac_fib_send(ContainerCommand,
|
||||
fibptr,
|
||||
sizeof(struct aac_query_mount),
|
||||
FsaNormal,
|
||||
1, 1,
|
||||
NULL, NULL) < 0)
|
||||
continue;
|
||||
} else
|
||||
dresp->mnt[0].capacityhigh = 0;
|
||||
|
||||
dprintk ((KERN_DEBUG
|
||||
"VM_NameServe cid=%d status=%d vol=%d state=%d cap=%llu\n",
|
||||
(int)index, (int)le32_to_cpu(dresp->status),
|
||||
(int)le32_to_cpu(dresp->mnt[0].vol),
|
||||
(int)le32_to_cpu(dresp->mnt[0].state),
|
||||
((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
|
||||
(((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32)));
|
||||
if ((le32_to_cpu(dresp->status) == ST_OK) &&
|
||||
(le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
|
||||
(le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
|
||||
fsa_dev_ptr[index].valid = 1;
|
||||
fsa_dev_ptr[index].type = le32_to_cpu(dresp->mnt[0].vol);
|
||||
fsa_dev_ptr[index].size
|
||||
= ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
|
||||
(((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32);
|
||||
if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
|
||||
fsa_dev_ptr[index].ro = 1;
|
||||
}
|
||||
aac_fib_complete(fibptr);
|
||||
/*
|
||||
* If there are no more containers, then stop asking.
|
||||
*/
|
||||
if ((index + 1) >= le32_to_cpu(dresp->count)){
|
||||
if (++index >= status)
|
||||
break;
|
||||
}
|
||||
}
|
||||
aac_fib_free(fibptr);
|
||||
return status;
|
||||
}
|
||||
|
||||
@ -382,8 +350,9 @@ static void aac_internal_transfer(struct scsi_cmnd *scsicmd, void *data, unsigne
|
||||
buf = scsicmd->request_buffer;
|
||||
transfer_len = min(scsicmd->request_bufflen, len + offset);
|
||||
}
|
||||
|
||||
memcpy(buf + offset, data, transfer_len - offset);
|
||||
transfer_len -= offset;
|
||||
if (buf && transfer_len)
|
||||
memcpy(buf + offset, data, transfer_len);
|
||||
|
||||
if (scsicmd->use_sg)
|
||||
kunmap_atomic(buf - sg->offset, KM_IRQ0);
|
||||
@ -396,7 +365,9 @@ static void get_container_name_callback(void *context, struct fib * fibptr)
|
||||
struct scsi_cmnd * scsicmd;
|
||||
|
||||
scsicmd = (struct scsi_cmnd *) context;
|
||||
scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
|
||||
|
||||
if (!aac_valid_context(scsicmd, fibptr))
|
||||
return;
|
||||
|
||||
dprintk((KERN_DEBUG "get_container_name_callback[cpu %d]: t = %ld.\n", smp_processor_id(), jiffies));
|
||||
BUG_ON(fibptr == NULL);
|
||||
@ -431,7 +402,7 @@ static void get_container_name_callback(void *context, struct fib * fibptr)
|
||||
/**
|
||||
* aac_get_container_name - get container name, none blocking.
|
||||
*/
|
||||
static int aac_get_container_name(struct scsi_cmnd * scsicmd, int cid)
|
||||
static int aac_get_container_name(struct scsi_cmnd * scsicmd)
|
||||
{
|
||||
int status;
|
||||
struct aac_get_name *dinfo;
|
||||
@ -448,7 +419,7 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd, int cid)
|
||||
|
||||
dinfo->command = cpu_to_le32(VM_ContainerConfig);
|
||||
dinfo->type = cpu_to_le32(CT_READ_NAME);
|
||||
dinfo->cid = cpu_to_le32(cid);
|
||||
dinfo->cid = cpu_to_le32(scmd_id(scsicmd));
|
||||
dinfo->count = cpu_to_le32(sizeof(((struct aac_get_name_resp *)NULL)->data));
|
||||
|
||||
status = aac_fib_send(ContainerCommand,
|
||||
@ -473,6 +444,153 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd, int cid)
|
||||
return -1;
|
||||
}
|
||||
|
||||
static int aac_probe_container_callback2(struct scsi_cmnd * scsicmd)
|
||||
{
|
||||
struct fsa_dev_info *fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev;
|
||||
|
||||
if (fsa_dev_ptr[scmd_id(scsicmd)].valid)
|
||||
return aac_scsi_cmd(scsicmd);
|
||||
|
||||
scsicmd->result = DID_NO_CONNECT << 16;
|
||||
scsicmd->scsi_done(scsicmd);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int _aac_probe_container2(void * context, struct fib * fibptr)
|
||||
{
|
||||
struct fsa_dev_info *fsa_dev_ptr;
|
||||
int (*callback)(struct scsi_cmnd *);
|
||||
struct scsi_cmnd * scsicmd = (struct scsi_cmnd *)context;
|
||||
|
||||
if (!aac_valid_context(scsicmd, fibptr))
|
||||
return 0;
|
||||
|
||||
fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev;
|
||||
|
||||
scsicmd->SCp.Status = 0;
|
||||
if (fsa_dev_ptr) {
|
||||
struct aac_mount * dresp = (struct aac_mount *) fib_data(fibptr);
|
||||
fsa_dev_ptr += scmd_id(scsicmd);
|
||||
|
||||
if ((le32_to_cpu(dresp->status) == ST_OK) &&
|
||||
(le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
|
||||
(le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
|
||||
fsa_dev_ptr->valid = 1;
|
||||
fsa_dev_ptr->type = le32_to_cpu(dresp->mnt[0].vol);
|
||||
fsa_dev_ptr->size
|
||||
= ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
|
||||
(((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32);
|
||||
fsa_dev_ptr->ro = ((le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY) != 0);
|
||||
}
|
||||
if ((fsa_dev_ptr->valid & 1) == 0)
|
||||
fsa_dev_ptr->valid = 0;
|
||||
scsicmd->SCp.Status = le32_to_cpu(dresp->count);
|
||||
}
|
||||
aac_fib_complete(fibptr);
|
||||
aac_fib_free(fibptr);
|
||||
callback = (int (*)(struct scsi_cmnd *))(scsicmd->SCp.ptr);
|
||||
scsicmd->SCp.ptr = NULL;
|
||||
return (*callback)(scsicmd);
|
||||
}
|
||||
|
||||
static int _aac_probe_container1(void * context, struct fib * fibptr)
|
||||
{
|
||||
struct scsi_cmnd * scsicmd;
|
||||
struct aac_mount * dresp;
|
||||
struct aac_query_mount *dinfo;
|
||||
int status;
|
||||
|
||||
dresp = (struct aac_mount *) fib_data(fibptr);
|
||||
dresp->mnt[0].capacityhigh = 0;
|
||||
if ((le32_to_cpu(dresp->status) != ST_OK) ||
|
||||
(le32_to_cpu(dresp->mnt[0].vol) != CT_NONE))
|
||||
return _aac_probe_container2(context, fibptr);
|
||||
scsicmd = (struct scsi_cmnd *) context;
|
||||
scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
|
||||
|
||||
if (!aac_valid_context(scsicmd, fibptr))
|
||||
return 0;
|
||||
|
||||
aac_fib_init(fibptr);
|
||||
|
||||
dinfo = (struct aac_query_mount *)fib_data(fibptr);
|
||||
|
||||
dinfo->command = cpu_to_le32(VM_NameServe64);
|
||||
dinfo->count = cpu_to_le32(scmd_id(scsicmd));
|
||||
dinfo->type = cpu_to_le32(FT_FILESYS);
|
||||
|
||||
status = aac_fib_send(ContainerCommand,
|
||||
fibptr,
|
||||
sizeof(struct aac_query_mount),
|
||||
FsaNormal,
|
||||
0, 1,
|
||||
(fib_callback) _aac_probe_container2,
|
||||
(void *) scsicmd);
|
||||
/*
|
||||
* Check that the command queued to the controller
|
||||
*/
|
||||
if (status == -EINPROGRESS) {
|
||||
scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
|
||||
return 0;
|
||||
}
|
||||
if (status < 0) {
|
||||
/* Inherit results from VM_NameServe, if any */
|
||||
dresp->status = cpu_to_le32(ST_OK);
|
||||
return _aac_probe_container2(context, fibptr);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(struct scsi_cmnd *))
|
||||
{
|
||||
struct fib * fibptr;
|
||||
int status = -ENOMEM;
|
||||
|
||||
if ((fibptr = aac_fib_alloc((struct aac_dev *)scsicmd->device->host->hostdata))) {
|
||||
struct aac_query_mount *dinfo;
|
||||
|
||||
aac_fib_init(fibptr);
|
||||
|
||||
dinfo = (struct aac_query_mount *)fib_data(fibptr);
|
||||
|
||||
dinfo->command = cpu_to_le32(VM_NameServe);
|
||||
dinfo->count = cpu_to_le32(scmd_id(scsicmd));
|
||||
dinfo->type = cpu_to_le32(FT_FILESYS);
|
||||
scsicmd->SCp.ptr = (char *)callback;
|
||||
|
||||
status = aac_fib_send(ContainerCommand,
|
||||
fibptr,
|
||||
sizeof(struct aac_query_mount),
|
||||
FsaNormal,
|
||||
0, 1,
|
||||
(fib_callback) _aac_probe_container1,
|
||||
(void *) scsicmd);
|
||||
/*
|
||||
* Check that the command queued to the controller
|
||||
*/
|
||||
if (status == -EINPROGRESS) {
|
||||
scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
|
||||
return 0;
|
||||
}
|
||||
if (status < 0) {
|
||||
scsicmd->SCp.ptr = NULL;
|
||||
aac_fib_complete(fibptr);
|
||||
aac_fib_free(fibptr);
|
||||
}
|
||||
}
|
||||
if (status < 0) {
|
||||
struct fsa_dev_info *fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev;
|
||||
if (fsa_dev_ptr) {
|
||||
fsa_dev_ptr += scmd_id(scsicmd);
|
||||
if ((fsa_dev_ptr->valid & 1) == 0) {
|
||||
fsa_dev_ptr->valid = 0;
|
||||
return (*callback)(scsicmd);
|
||||
}
|
||||
}
|
||||
}
|
||||
return status;
|
||||
}
|
||||
|
||||
/**
|
||||
* aac_probe_container - query a logical volume
|
||||
* @dev: device to query
|
||||
@ -481,77 +599,37 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd, int cid)
|
||||
* Queries the controller about the given volume. The volume information
|
||||
* is updated in the struct fsa_dev_info structure rather than returned.
|
||||
*/
|
||||
|
||||
static int aac_probe_container_callback1(struct scsi_cmnd * scsicmd)
|
||||
{
|
||||
scsicmd->device = NULL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int aac_probe_container(struct aac_dev *dev, int cid)
|
||||
{
|
||||
struct fsa_dev_info *fsa_dev_ptr;
|
||||
struct scsi_cmnd *scsicmd = kmalloc(sizeof(*scsicmd), GFP_KERNEL);
|
||||
struct scsi_device *scsidev = kmalloc(sizeof(*scsidev), GFP_KERNEL);
|
||||
int status;
|
||||
struct aac_query_mount *dinfo;
|
||||
struct aac_mount *dresp;
|
||||
struct fib * fibptr;
|
||||
unsigned instance;
|
||||
|
||||
fsa_dev_ptr = dev->fsa_dev;
|
||||
if (!fsa_dev_ptr)
|
||||
if (!scsicmd || !scsidev) {
|
||||
kfree(scsicmd);
|
||||
kfree(scsidev);
|
||||
return -ENOMEM;
|
||||
instance = dev->scsi_host_ptr->unique_id;
|
||||
|
||||
if (!(fibptr = aac_fib_alloc(dev)))
|
||||
return -ENOMEM;
|
||||
|
||||
aac_fib_init(fibptr);
|
||||
|
||||
dinfo = (struct aac_query_mount *)fib_data(fibptr);
|
||||
|
||||
dinfo->command = cpu_to_le32(VM_NameServe);
|
||||
dinfo->count = cpu_to_le32(cid);
|
||||
dinfo->type = cpu_to_le32(FT_FILESYS);
|
||||
|
||||
status = aac_fib_send(ContainerCommand,
|
||||
fibptr,
|
||||
sizeof(struct aac_query_mount),
|
||||
FsaNormal,
|
||||
1, 1,
|
||||
NULL, NULL);
|
||||
if (status < 0) {
|
||||
printk(KERN_WARNING "aacraid: aac_probe_container query failed.\n");
|
||||
goto error;
|
||||
}
|
||||
scsicmd->list.next = NULL;
|
||||
scsicmd->scsi_done = (void (*)(struct scsi_cmnd*))_aac_probe_container1;
|
||||
|
||||
dresp = (struct aac_mount *) fib_data(fibptr);
|
||||
|
||||
if ((le32_to_cpu(dresp->status) == ST_OK) &&
|
||||
(le32_to_cpu(dresp->mnt[0].vol) == CT_NONE)) {
|
||||
dinfo->command = cpu_to_le32(VM_NameServe64);
|
||||
dinfo->count = cpu_to_le32(cid);
|
||||
dinfo->type = cpu_to_le32(FT_FILESYS);
|
||||
|
||||
if (aac_fib_send(ContainerCommand,
|
||||
fibptr,
|
||||
sizeof(struct aac_query_mount),
|
||||
FsaNormal,
|
||||
1, 1,
|
||||
NULL, NULL) < 0)
|
||||
goto error;
|
||||
} else
|
||||
dresp->mnt[0].capacityhigh = 0;
|
||||
|
||||
if ((le32_to_cpu(dresp->status) == ST_OK) &&
|
||||
(le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
|
||||
(le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
|
||||
fsa_dev_ptr[cid].valid = 1;
|
||||
fsa_dev_ptr[cid].type = le32_to_cpu(dresp->mnt[0].vol);
|
||||
fsa_dev_ptr[cid].size
|
||||
= ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
|
||||
(((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32);
|
||||
if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
|
||||
fsa_dev_ptr[cid].ro = 1;
|
||||
}
|
||||
|
||||
error:
|
||||
aac_fib_complete(fibptr);
|
||||
aac_fib_free(fibptr);
|
||||
scsicmd->device = scsidev;
|
||||
scsidev->sdev_state = 0;
|
||||
scsidev->id = cid;
|
||||
scsidev->host = dev->scsi_host_ptr;
|
||||
|
||||
if (_aac_probe_container(scsicmd, aac_probe_container_callback1) == 0)
|
||||
while (scsicmd->device == scsidev)
|
||||
schedule();
|
||||
kfree(scsidev);
|
||||
status = scsicmd->SCp.Status;
|
||||
kfree(scsicmd);
|
||||
return status;
|
||||
}
|
||||
|
||||
@ -1115,6 +1193,12 @@ int aac_get_adapter_info(struct aac_dev* dev)
|
||||
printk(KERN_INFO "%s%d: serial %x\n",
|
||||
dev->name, dev->id,
|
||||
le32_to_cpu(dev->adapter_info.serial[0]));
|
||||
if (dev->supplement_adapter_info.VpdInfo.Tsid[0]) {
|
||||
printk(KERN_INFO "%s%d: TSID %.*s\n",
|
||||
dev->name, dev->id,
|
||||
(int)sizeof(dev->supplement_adapter_info.VpdInfo.Tsid),
|
||||
dev->supplement_adapter_info.VpdInfo.Tsid);
|
||||
}
|
||||
}
|
||||
|
||||
dev->nondasd_support = 0;
|
||||
@ -1241,7 +1325,9 @@ static void io_callback(void *context, struct fib * fibptr)
|
||||
u32 cid;
|
||||
|
||||
scsicmd = (struct scsi_cmnd *) context;
|
||||
scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
|
||||
|
||||
if (!aac_valid_context(scsicmd, fibptr))
|
||||
return;
|
||||
|
||||
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
|
||||
cid = scmd_id(scsicmd);
|
||||
@ -1317,7 +1403,7 @@ static void io_callback(void *context, struct fib * fibptr)
|
||||
scsicmd->scsi_done(scsicmd);
|
||||
}
|
||||
|
||||
static int aac_read(struct scsi_cmnd * scsicmd, int cid)
|
||||
static int aac_read(struct scsi_cmnd * scsicmd)
|
||||
{
|
||||
u64 lba;
|
||||
u32 count;
|
||||
@ -1331,7 +1417,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
|
||||
*/
|
||||
switch (scsicmd->cmnd[0]) {
|
||||
case READ_6:
|
||||
dprintk((KERN_DEBUG "aachba: received a read(6) command on id %d.\n", cid));
|
||||
dprintk((KERN_DEBUG "aachba: received a read(6) command on id %d.\n", scmd_id(scsicmd)));
|
||||
|
||||
lba = ((scsicmd->cmnd[1] & 0x1F) << 16) |
|
||||
(scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
|
||||
@ -1341,7 +1427,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
|
||||
count = 256;
|
||||
break;
|
||||
case READ_16:
|
||||
dprintk((KERN_DEBUG "aachba: received a read(16) command on id %d.\n", cid));
|
||||
dprintk((KERN_DEBUG "aachba: received a read(16) command on id %d.\n", scmd_id(scsicmd)));
|
||||
|
||||
lba = ((u64)scsicmd->cmnd[2] << 56) |
|
||||
((u64)scsicmd->cmnd[3] << 48) |
|
||||
@ -1355,7 +1441,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
|
||||
(scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
|
||||
break;
|
||||
case READ_12:
|
||||
dprintk((KERN_DEBUG "aachba: received a read(12) command on id %d.\n", cid));
|
||||
dprintk((KERN_DEBUG "aachba: received a read(12) command on id %d.\n", scmd_id(scsicmd)));
|
||||
|
||||
lba = ((u64)scsicmd->cmnd[2] << 24) |
|
||||
(scsicmd->cmnd[3] << 16) |
|
||||
@ -1365,7 +1451,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
|
||||
(scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
|
||||
break;
|
||||
default:
|
||||
dprintk((KERN_DEBUG "aachba: received a read(10) command on id %d.\n", cid));
|
||||
dprintk((KERN_DEBUG "aachba: received a read(10) command on id %d.\n", scmd_id(scsicmd)));
|
||||
|
||||
lba = ((u64)scsicmd->cmnd[2] << 24) |
|
||||
(scsicmd->cmnd[3] << 16) |
|
||||
@ -1405,7 +1491,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int aac_write(struct scsi_cmnd * scsicmd, int cid)
|
||||
static int aac_write(struct scsi_cmnd * scsicmd)
|
||||
{
|
||||
u64 lba;
|
||||
u32 count;
|
||||
@ -1424,7 +1510,7 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
|
||||
if (count == 0)
|
||||
count = 256;
|
||||
} else if (scsicmd->cmnd[0] == WRITE_16) { /* 16 byte command */
|
||||
dprintk((KERN_DEBUG "aachba: received a write(16) command on id %d.\n", cid));
|
||||
dprintk((KERN_DEBUG "aachba: received a write(16) command on id %d.\n", scmd_id(scsicmd)));
|
||||
|
||||
lba = ((u64)scsicmd->cmnd[2] << 56) |
|
||||
((u64)scsicmd->cmnd[3] << 48) |
|
||||
@ -1436,14 +1522,14 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
|
||||
count = (scsicmd->cmnd[10] << 24) | (scsicmd->cmnd[11] << 16) |
|
||||
(scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
|
||||
} else if (scsicmd->cmnd[0] == WRITE_12) { /* 12 byte command */
|
||||
dprintk((KERN_DEBUG "aachba: received a write(12) command on id %d.\n", cid));
|
||||
dprintk((KERN_DEBUG "aachba: received a write(12) command on id %d.\n", scmd_id(scsicmd)));
|
||||
|
||||
lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16)
|
||||
| (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
|
||||
count = (scsicmd->cmnd[6] << 24) | (scsicmd->cmnd[7] << 16)
|
||||
| (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
|
||||
} else {
|
||||
dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", cid));
|
||||
dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", scmd_id(scsicmd)));
|
||||
lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
|
||||
count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
|
||||
}
|
||||
@ -1488,7 +1574,9 @@ static void synchronize_callback(void *context, struct fib *fibptr)
|
||||
struct scsi_cmnd *cmd;
|
||||
|
||||
cmd = context;
|
||||
cmd->SCp.phase = AAC_OWNER_MIDLEVEL;
|
||||
|
||||
if (!aac_valid_context(cmd, fibptr))
|
||||
return;
|
||||
|
||||
dprintk((KERN_DEBUG "synchronize_callback[cpu %d]: t = %ld.\n",
|
||||
smp_processor_id(), jiffies));
|
||||
@ -1523,7 +1611,7 @@ static void synchronize_callback(void *context, struct fib *fibptr)
|
||||
cmd->scsi_done(cmd);
|
||||
}
|
||||
|
||||
static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid)
|
||||
static int aac_synchronize(struct scsi_cmnd *scsicmd)
|
||||
{
|
||||
int status;
|
||||
struct fib *cmd_fibcontext;
|
||||
@ -1568,7 +1656,7 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid)
|
||||
synchronizecmd = fib_data(cmd_fibcontext);
|
||||
synchronizecmd->command = cpu_to_le32(VM_ContainerConfig);
|
||||
synchronizecmd->type = cpu_to_le32(CT_FLUSH_CACHE);
|
||||
synchronizecmd->cid = cpu_to_le32(cid);
|
||||
synchronizecmd->cid = cpu_to_le32(scmd_id(scsicmd));
|
||||
synchronizecmd->count =
|
||||
cpu_to_le32(sizeof(((struct aac_synchronize_reply *)NULL)->data));
|
||||
|
||||
@ -1646,29 +1734,12 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
|
||||
case TEST_UNIT_READY:
|
||||
if (dev->in_reset)
|
||||
return -1;
|
||||
spin_unlock_irq(host->host_lock);
|
||||
aac_probe_container(dev, cid);
|
||||
if ((fsa_dev_ptr[cid].valid & 1) == 0)
|
||||
fsa_dev_ptr[cid].valid = 0;
|
||||
spin_lock_irq(host->host_lock);
|
||||
if (fsa_dev_ptr[cid].valid == 0) {
|
||||
scsicmd->result = DID_NO_CONNECT << 16;
|
||||
scsicmd->scsi_done(scsicmd);
|
||||
return 0;
|
||||
}
|
||||
return _aac_probe_container(scsicmd,
|
||||
aac_probe_container_callback2);
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
/*
|
||||
* If the target container still doesn't exist,
|
||||
* return failure
|
||||
*/
|
||||
if (fsa_dev_ptr[cid].valid == 0) {
|
||||
scsicmd->result = DID_BAD_TARGET << 16;
|
||||
scsicmd->scsi_done(scsicmd);
|
||||
return 0;
|
||||
}
|
||||
} else { /* check for physical non-dasd devices */
|
||||
if ((dev->nondasd_support == 1) || expose_physicals) {
|
||||
if (dev->in_reset)
|
||||
@ -1733,7 +1804,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
|
||||
setinqstr(dev, (void *) (inq_data.inqd_vid), fsa_dev_ptr[cid].type);
|
||||
inq_data.inqd_pdt = INQD_PDT_DA; /* Direct/random access device */
|
||||
aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data));
|
||||
return aac_get_container_name(scsicmd, cid);
|
||||
return aac_get_container_name(scsicmd);
|
||||
}
|
||||
case SERVICE_ACTION_IN:
|
||||
if (!(dev->raw_io_interface) ||
|
||||
@ -1899,7 +1970,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
|
||||
min(sizeof(fsa_dev_ptr[cid].devname),
|
||||
sizeof(scsicmd->request->rq_disk->disk_name) + 1));
|
||||
|
||||
return aac_read(scsicmd, cid);
|
||||
return aac_read(scsicmd);
|
||||
|
||||
case WRITE_6:
|
||||
case WRITE_10:
|
||||
@ -1907,11 +1978,11 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
|
||||
case WRITE_16:
|
||||
if (dev->in_reset)
|
||||
return -1;
|
||||
return aac_write(scsicmd, cid);
|
||||
return aac_write(scsicmd);
|
||||
|
||||
case SYNCHRONIZE_CACHE:
|
||||
/* Issue FIB to tell Firmware to flush it's cache */
|
||||
return aac_synchronize(scsicmd, cid);
|
||||
return aac_synchronize(scsicmd);
|
||||
|
||||
default:
|
||||
/*
|
||||
@ -2058,7 +2129,10 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
|
||||
struct scsi_cmnd *scsicmd;
|
||||
|
||||
scsicmd = (struct scsi_cmnd *) context;
|
||||
scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
|
||||
|
||||
if (!aac_valid_context(scsicmd, fibptr))
|
||||
return;
|
||||
|
||||
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
|
||||
|
||||
BUG_ON(fibptr == NULL);
|
||||
|
@ -12,8 +12,8 @@
|
||||
*----------------------------------------------------------------------------*/
|
||||
|
||||
#ifndef AAC_DRIVER_BUILD
|
||||
# define AAC_DRIVER_BUILD 2423
|
||||
# define AAC_DRIVER_BRANCH "-mh3"
|
||||
# define AAC_DRIVER_BUILD 2437
|
||||
# define AAC_DRIVER_BRANCH "-mh4"
|
||||
#endif
|
||||
#define MAXIMUM_NUM_CONTAINERS 32
|
||||
|
||||
@ -48,49 +48,13 @@ struct diskparm
|
||||
|
||||
|
||||
/*
|
||||
* DON'T CHANGE THE ORDER, this is set by the firmware
|
||||
* Firmware constants
|
||||
*/
|
||||
|
||||
#define CT_NONE 0
|
||||
#define CT_VOLUME 1
|
||||
#define CT_MIRROR 2
|
||||
#define CT_STRIPE 3
|
||||
#define CT_RAID5 4
|
||||
#define CT_SSRW 5
|
||||
#define CT_SSRO 6
|
||||
#define CT_MORPH 7
|
||||
#define CT_PASSTHRU 8
|
||||
#define CT_RAID4 9
|
||||
#define CT_RAID10 10 /* stripe of mirror */
|
||||
#define CT_RAID00 11 /* stripe of stripe */
|
||||
#define CT_VOLUME_OF_MIRRORS 12 /* volume of mirror */
|
||||
#define CT_PSEUDO_RAID 13 /* really raid4 */
|
||||
#define CT_LAST_VOLUME_TYPE 14
|
||||
#define CT_OK 218
|
||||
|
||||
/*
|
||||
* Types of objects addressable in some fashion by the client.
|
||||
* This is a superset of those objects handled just by the filesystem
|
||||
* and includes "raw" objects that an administrator would use to
|
||||
* configure containers and filesystems.
|
||||
*/
|
||||
|
||||
#define FT_REG 1 /* regular file */
|
||||
#define FT_DIR 2 /* directory */
|
||||
#define FT_BLK 3 /* "block" device - reserved */
|
||||
#define FT_CHR 4 /* "character special" device - reserved */
|
||||
#define FT_LNK 5 /* symbolic link */
|
||||
#define FT_SOCK 6 /* socket */
|
||||
#define FT_FIFO 7 /* fifo */
|
||||
#define FT_FILESYS 8 /* ADAPTEC's "FSA"(tm) filesystem */
|
||||
#define FT_DRIVE 9 /* physical disk - addressable in scsi by bus/id/lun */
|
||||
#define FT_SLICE 10 /* virtual disk - raw volume - slice */
|
||||
#define FT_PARTITION 11 /* FSA partition - carved out of a slice - building block for containers */
|
||||
#define FT_VOLUME 12 /* Container - Volume Set */
|
||||
#define FT_STRIPE 13 /* Container - Stripe Set */
|
||||
#define FT_MIRROR 14 /* Container - Mirror Set */
|
||||
#define FT_RAID5 15 /* Container - Raid 5 Set */
|
||||
#define FT_DATABASE 16 /* Storage object with "foreign" content manager */
|
||||
|
||||
/*
|
||||
* Host side memory scatter gather list
|
||||
@ -497,6 +461,7 @@ struct adapter_ops
|
||||
void (*adapter_enable_int)(struct aac_dev *dev);
|
||||
int (*adapter_sync_cmd)(struct aac_dev *dev, u32 command, u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4);
|
||||
int (*adapter_check_health)(struct aac_dev *dev);
|
||||
int (*adapter_restart)(struct aac_dev *dev, int bled);
|
||||
/* Transport operations */
|
||||
int (*adapter_ioremap)(struct aac_dev * dev, u32 size);
|
||||
irqreturn_t (*adapter_intr)(int irq, void *dev_id);
|
||||
@ -833,7 +798,7 @@ struct fib {
|
||||
*/
|
||||
struct list_head fiblink;
|
||||
void *data;
|
||||
struct hw_fib *hw_fib; /* Actual shared object */
|
||||
struct hw_fib *hw_fib_va; /* Actual shared object */
|
||||
dma_addr_t hw_fib_pa; /* physical address of hw_fib*/
|
||||
};
|
||||
|
||||
@ -878,10 +843,25 @@ struct aac_supplement_adapter_info
|
||||
__le32 Version;
|
||||
__le32 FeatureBits;
|
||||
u8 SlotNumber;
|
||||
u8 ReservedPad0[0];
|
||||
u8 ReservedPad0[3];
|
||||
u8 BuildDate[12];
|
||||
__le32 CurrentNumberPorts;
|
||||
__le32 ReservedGrowth[24];
|
||||
struct {
|
||||
u8 AssemblyPn[8];
|
||||
u8 FruPn[8];
|
||||
u8 BatteryFruPn[8];
|
||||
u8 EcVersionString[8];
|
||||
u8 Tsid[12];
|
||||
} VpdInfo;
|
||||
__le32 FlashFirmwareRevision;
|
||||
__le32 FlashFirmwareBuild;
|
||||
__le32 RaidTypeMorphOptions;
|
||||
__le32 FlashFirmwareBootRevision;
|
||||
__le32 FlashFirmwareBootBuild;
|
||||
u8 MfgPcbaSerialNo[12];
|
||||
u8 MfgWWNName[8];
|
||||
__le32 MoreFeatureBits;
|
||||
__le32 ReservedGrowth[1];
|
||||
};
|
||||
#define AAC_FEATURE_FALCON 0x00000010
|
||||
#define AAC_SIS_VERSION_V3 3
|
||||
@ -970,7 +950,6 @@ struct aac_dev
|
||||
struct fib *fibs;
|
||||
|
||||
struct fib *free_fib;
|
||||
struct fib *timeout_fib;
|
||||
spinlock_t fib_lock;
|
||||
|
||||
struct aac_queue_block *queues;
|
||||
@ -1060,6 +1039,9 @@ struct aac_dev
|
||||
#define aac_adapter_check_health(dev) \
|
||||
(dev)->a_ops.adapter_check_health(dev)
|
||||
|
||||
#define aac_adapter_restart(dev,bled) \
|
||||
(dev)->a_ops.adapter_restart(dev,bled)
|
||||
|
||||
#define aac_adapter_ioremap(dev, size) \
|
||||
(dev)->a_ops.adapter_ioremap(dev, size)
|
||||
|
||||
@ -1516,8 +1498,7 @@ struct aac_mntent {
|
||||
struct creation_info create_info; /* if applicable */
|
||||
__le32 capacity;
|
||||
__le32 vol; /* substrate structure */
|
||||
__le32 obj; /* FT_FILESYS,
|
||||
FT_DATABASE, etc. */
|
||||
__le32 obj; /* FT_FILESYS, etc. */
|
||||
__le32 state; /* unready for mounting,
|
||||
readonly, etc. */
|
||||
union aac_contentinfo fileinfo; /* Info specific to content
|
||||
@ -1817,7 +1798,7 @@ int aac_fib_send(u16 command, struct fib * context, unsigned long size, int prio
|
||||
int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry);
|
||||
void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum);
|
||||
int aac_fib_complete(struct fib * context);
|
||||
#define fib_data(fibctx) ((void *)(fibctx)->hw_fib->data)
|
||||
#define fib_data(fibctx) ((void *)(fibctx)->hw_fib_va->data)
|
||||
struct aac_dev *aac_init_adapter(struct aac_dev *dev);
|
||||
int aac_get_config_status(struct aac_dev *dev, int commit_flag);
|
||||
int aac_get_containers(struct aac_dev *dev);
|
||||
@ -1840,8 +1821,11 @@ struct aac_driver_ident* aac_get_driver_ident(int devtype);
|
||||
int aac_get_adapter_info(struct aac_dev* dev);
|
||||
int aac_send_shutdown(struct aac_dev *dev);
|
||||
int aac_probe_container(struct aac_dev *dev, int cid);
|
||||
int _aac_rx_init(struct aac_dev *dev);
|
||||
int aac_rx_select_comm(struct aac_dev *dev, int comm);
|
||||
extern int numacb;
|
||||
extern int acbsize;
|
||||
extern char aac_driver_version[];
|
||||
extern int startup_timeout;
|
||||
extern int aif_timeout;
|
||||
extern int expose_physicals;
|
||||
|
@ -5,7 +5,7 @@
|
||||
* based on the old aacraid driver that is..
|
||||
* Adaptec aacraid device driver for Linux.
|
||||
*
|
||||
* Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
|
||||
* Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
@ -64,12 +64,15 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
|
||||
unsigned size;
|
||||
int retval;
|
||||
|
||||
if (dev->in_reset) {
|
||||
return -EBUSY;
|
||||
}
|
||||
fibptr = aac_fib_alloc(dev);
|
||||
if(fibptr == NULL) {
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
kfib = fibptr->hw_fib;
|
||||
kfib = fibptr->hw_fib_va;
|
||||
/*
|
||||
* First copy in the header so that we can check the size field.
|
||||
*/
|
||||
@ -91,9 +94,9 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
|
||||
goto cleanup;
|
||||
}
|
||||
/* Highjack the hw_fib */
|
||||
hw_fib = fibptr->hw_fib;
|
||||
hw_fib = fibptr->hw_fib_va;
|
||||
hw_fib_pa = fibptr->hw_fib_pa;
|
||||
fibptr->hw_fib = kfib = pci_alloc_consistent(dev->pdev, size, &fibptr->hw_fib_pa);
|
||||
fibptr->hw_fib_va = kfib = pci_alloc_consistent(dev->pdev, size, &fibptr->hw_fib_pa);
|
||||
memset(((char *)kfib) + dev->max_fib_size, 0, size - dev->max_fib_size);
|
||||
memcpy(kfib, hw_fib, dev->max_fib_size);
|
||||
}
|
||||
@ -137,7 +140,7 @@ cleanup:
|
||||
if (hw_fib) {
|
||||
pci_free_consistent(dev->pdev, size, kfib, fibptr->hw_fib_pa);
|
||||
fibptr->hw_fib_pa = hw_fib_pa;
|
||||
fibptr->hw_fib = hw_fib;
|
||||
fibptr->hw_fib_va = hw_fib;
|
||||
}
|
||||
if (retval != -EINTR)
|
||||
aac_fib_free(fibptr);
|
||||
@ -282,15 +285,15 @@ return_fib:
|
||||
fib = list_entry(entry, struct fib, fiblink);
fibctx->count--;
spin_unlock_irqrestore(&dev->fib_lock, flags);
if (copy_to_user(f.fib, fib->hw_fib, sizeof(struct hw_fib))) {
kfree(fib->hw_fib);
if (copy_to_user(f.fib, fib->hw_fib_va, sizeof(struct hw_fib))) {
kfree(fib->hw_fib_va);
kfree(fib);
return -EFAULT;
}
/*
* Free the space occupied by this copy of the fib.
*/
kfree(fib->hw_fib);
kfree(fib->hw_fib_va);
kfree(fib);
status = 0;
} else {
@ -340,7 +343,7 @@ int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx)
/*
* Free the space occupied by this copy of the fib.
*/
kfree(fib->hw_fib);
kfree(fib->hw_fib_va);
kfree(fib);
}
/*
@ -388,10 +391,8 @@ static int close_getadapter_fib(struct aac_dev * dev, void __user *arg)
/*
* Extract the fibctx from the input parameters
*/
if (fibctx->unique == (u32)(unsigned long)arg) {
/* We found a winner */
if (fibctx->unique == (u32)(ptrdiff_t)arg) /* We found a winner */
break;
}
entry = entry->next;
fibctx = NULL;
}
@ -465,16 +466,20 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
void *sg_list[32];
u32 sg_indx = 0;
u32 byte_count = 0;
u32 actual_fibsize = 0;
u32 actual_fibsize64, actual_fibsize = 0;
int i;
if (dev->in_reset) {
dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
return -EBUSY;
}
if (!capable(CAP_SYS_ADMIN)){
dprintk((KERN_DEBUG"aacraid: No permission to send raw srb\n"));
return -EPERM;
}
/*
* Allocate and initialize a Fib then setup a BlockWrite command
* Allocate and initialize a Fib then setup a SRB command
*/
if (!(srbfib = aac_fib_alloc(dev))) {
return -ENOMEM;
@ -541,129 +546,183 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
rcode = -EINVAL;
goto cleanup;
}
if (dev->dac_support == 1) {
actual_fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) +
((user_srbcmd->sg.count & 0xff) * sizeof(struct sgentry));
actual_fibsize64 = actual_fibsize + (user_srbcmd->sg.count & 0xff) *
(sizeof(struct sgentry64) - sizeof(struct sgentry));
/* User made a mistake - should not continue */
if ((actual_fibsize != fibsize) && (actual_fibsize64 != fibsize)) {
dprintk((KERN_DEBUG"aacraid: Bad Size specified in "
"Raw SRB command calculated fibsize=%lu;%lu "
"user_srbcmd->sg.count=%d aac_srb=%lu sgentry=%lu;%lu "
"issued fibsize=%d\n",
actual_fibsize, actual_fibsize64, user_srbcmd->sg.count,
sizeof(struct aac_srb), sizeof(struct sgentry),
sizeof(struct sgentry64), fibsize));
rcode = -EINVAL;
goto cleanup;
}
if ((data_dir == DMA_NONE) && user_srbcmd->sg.count) {
dprintk((KERN_DEBUG"aacraid: SG with no direction specified in Raw SRB command\n"));
rcode = -EINVAL;
goto cleanup;
}
byte_count = 0;
if (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64) {
struct user_sgmap64* upsg = (struct user_sgmap64*)&user_srbcmd->sg;
struct sgmap64* psg = (struct sgmap64*)&srbcmd->sg;
struct user_sgmap* usg;
byte_count = 0;
/*
|
||||
* This should also catch if user used the 32 bit sgmap
|
||||
*/
|
||||
actual_fibsize = sizeof(struct aac_srb) -
|
||||
sizeof(struct sgentry) +
|
||||
((upsg->count & 0xff) *
|
||||
sizeof(struct sgentry));
|
||||
if(actual_fibsize != fibsize){ // User made a mistake - should not continue
|
||||
dprintk((KERN_DEBUG"aacraid: Bad Size specified in Raw SRB command\n"));
|
||||
rcode = -EINVAL;
|
||||
goto cleanup;
|
||||
}
|
||||
usg = kmalloc(actual_fibsize - sizeof(struct aac_srb)
|
||||
+ sizeof(struct sgmap), GFP_KERNEL);
|
||||
if (!usg) {
|
||||
dprintk((KERN_DEBUG"aacraid: Allocation error in Raw SRB command\n"));
|
||||
rcode = -ENOMEM;
|
||||
goto cleanup;
|
||||
}
|
||||
memcpy (usg, upsg, actual_fibsize - sizeof(struct aac_srb)
|
||||
+ sizeof(struct sgmap));
|
||||
actual_fibsize = sizeof(struct aac_srb) -
|
||||
sizeof(struct sgentry) + ((usg->count & 0xff) *
|
||||
sizeof(struct sgentry64));
|
||||
if ((data_dir == DMA_NONE) && upsg->count) {
|
||||
kfree (usg);
|
||||
dprintk((KERN_DEBUG"aacraid: SG with no direction specified in Raw SRB command\n"));
|
||||
rcode = -EINVAL;
|
||||
goto cleanup;
|
||||
}
|
||||
if (actual_fibsize64 == fibsize) {
|
||||
actual_fibsize = actual_fibsize64;
|
||||
for (i = 0; i < upsg->count; i++) {
|
||||
u64 addr;
|
||||
void* p;
|
||||
/* Does this really need to be GFP_DMA? */
|
||||
p = kmalloc(upsg->sg[i].count,GFP_KERNEL|__GFP_DMA);
|
||||
if(p == 0) {
|
||||
dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
|
||||
upsg->sg[i].count,i,upsg->count));
|
||||
rcode = -ENOMEM;
|
||||
goto cleanup;
|
||||
}
|
||||
addr = (u64)upsg->sg[i].addr[0];
|
||||
addr += ((u64)upsg->sg[i].addr[1]) << 32;
|
||||
sg_user[i] = (void __user *)(ptrdiff_t)addr;
|
||||
sg_list[i] = p; // save so we can clean up later
|
||||
sg_indx = i;
|
||||
|
||||
for (i = 0; i < usg->count; i++) {
|
||||
u64 addr;
|
||||
void* p;
|
||||
/* Does this really need to be GFP_DMA? */
|
||||
p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
|
||||
if(p == 0) {
|
||||
kfree (usg);
|
||||
dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
|
||||
usg->sg[i].count,i,usg->count));
|
||||
if( flags & SRB_DataOut ){
|
||||
if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){
|
||||
dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
|
||||
rcode = -EFAULT;
|
||||
goto cleanup;
|
||||
}
|
||||
}
|
||||
addr = pci_map_single(dev->pdev, p, upsg->sg[i].count, data_dir);
|
||||
|
||||
psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
|
||||
psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
|
||||
byte_count += upsg->sg[i].count;
|
||||
psg->sg[i].count = cpu_to_le32(upsg->sg[i].count);
|
||||
}
|
||||
} else {
|
||||
struct user_sgmap* usg;
|
||||
usg = kmalloc(actual_fibsize - sizeof(struct aac_srb)
|
||||
+ sizeof(struct sgmap), GFP_KERNEL);
|
||||
if (!usg) {
|
||||
dprintk((KERN_DEBUG"aacraid: Allocation error in Raw SRB command\n"));
|
||||
rcode = -ENOMEM;
|
||||
goto cleanup;
|
||||
}
|
||||
sg_user[i] = (void __user *)(long)usg->sg[i].addr;
|
||||
sg_list[i] = p; // save so we can clean up later
|
||||
sg_indx = i;
|
||||
memcpy (usg, upsg, actual_fibsize - sizeof(struct aac_srb)
|
||||
+ sizeof(struct sgmap));
|
||||
actual_fibsize = actual_fibsize64;
|
||||
|
||||
if( flags & SRB_DataOut ){
|
||||
if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){
|
||||
for (i = 0; i < usg->count; i++) {
|
||||
u64 addr;
|
||||
void* p;
|
||||
/* Does this really need to be GFP_DMA? */
|
||||
p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
|
||||
if(p == 0) {
|
||||
kfree (usg);
|
||||
dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
|
||||
rcode = -EFAULT;
|
||||
dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
|
||||
usg->sg[i].count,i,usg->count));
|
||||
rcode = -ENOMEM;
|
||||
goto cleanup;
|
||||
}
|
||||
sg_user[i] = (void __user *)(ptrdiff_t)usg->sg[i].addr;
|
||||
sg_list[i] = p; // save so we can clean up later
|
||||
sg_indx = i;
|
||||
|
||||
if( flags & SRB_DataOut ){
|
||||
if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){
|
||||
kfree (usg);
|
||||
dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
|
||||
rcode = -EFAULT;
|
||||
goto cleanup;
|
||||
}
|
||||
}
|
||||
addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir);
|
||||
|
||||
psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
|
||||
psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
|
||||
byte_count += usg->sg[i].count;
|
||||
psg->sg[i].count = cpu_to_le32(usg->sg[i].count);
|
||||
}
|
||||
addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir);
|
||||
|
||||
psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
|
||||
psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
|
||||
psg->sg[i].count = cpu_to_le32(usg->sg[i].count);
|
||||
byte_count += usg->sg[i].count;
|
||||
kfree (usg);
|
||||
}
|
||||
kfree (usg);
|
||||
|
||||
srbcmd->count = cpu_to_le32(byte_count);
|
||||
psg->count = cpu_to_le32(sg_indx+1);
|
||||
status = aac_fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1,NULL,NULL);
|
||||
} else {
|
||||
struct user_sgmap* upsg = &user_srbcmd->sg;
|
||||
struct sgmap* psg = &srbcmd->sg;
|
||||
byte_count = 0;
|
||||
|
||||
actual_fibsize = sizeof (struct aac_srb) + (((user_srbcmd->sg.count & 0xff) - 1) * sizeof (struct sgentry));
|
||||
if(actual_fibsize != fibsize){ // User made a mistake - should not continue
|
||||
dprintk((KERN_DEBUG"aacraid: Bad Size specified in "
|
||||
"Raw SRB command calculated fibsize=%d "
|
||||
"user_srbcmd->sg.count=%d aac_srb=%d sgentry=%d "
|
||||
"issued fibsize=%d\n",
|
||||
actual_fibsize, user_srbcmd->sg.count,
|
||||
sizeof(struct aac_srb), sizeof(struct sgentry),
|
||||
fibsize));
|
||||
rcode = -EINVAL;
|
||||
goto cleanup;
|
||||
}
|
||||
if ((data_dir == DMA_NONE) && upsg->count) {
|
||||
dprintk((KERN_DEBUG"aacraid: SG with no direction specified in Raw SRB command\n"));
|
||||
rcode = -EINVAL;
|
||||
goto cleanup;
|
||||
}
|
||||
for (i = 0; i < upsg->count; i++) {
|
||||
dma_addr_t addr;
|
||||
void* p;
|
||||
p = kmalloc(upsg->sg[i].count, GFP_KERNEL);
|
||||
if(p == 0) {
|
||||
dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
|
||||
upsg->sg[i].count, i, upsg->count));
|
||||
rcode = -ENOMEM;
|
||||
goto cleanup;
|
||||
}
|
||||
sg_user[i] = (void __user *)(long)upsg->sg[i].addr;
|
||||
sg_list[i] = p; // save so we can clean up later
|
||||
sg_indx = i;
|
||||
|
||||
if( flags & SRB_DataOut ){
|
||||
if(copy_from_user(p, sg_user[i],
|
||||
upsg->sg[i].count)) {
|
||||
dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
|
||||
rcode = -EFAULT;
|
||||
if (actual_fibsize64 == fibsize) {
|
||||
struct user_sgmap64* usg = (struct user_sgmap64 *)upsg;
|
||||
for (i = 0; i < upsg->count; i++) {
|
||||
u64 addr;
|
||||
void* p;
|
||||
/* Does this really need to be GFP_DMA? */
|
||||
p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
|
||||
if(p == 0) {
|
||||
dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
|
||||
usg->sg[i].count,i,usg->count));
|
||||
rcode = -ENOMEM;
|
||||
goto cleanup;
|
||||
}
|
||||
}
|
||||
addr = pci_map_single(dev->pdev, p,
|
||||
upsg->sg[i].count, data_dir);
|
||||
addr = (u64)usg->sg[i].addr[0];
|
||||
addr += ((u64)usg->sg[i].addr[1]) << 32;
|
||||
sg_user[i] = (void __user *)(ptrdiff_t)addr;
|
||||
sg_list[i] = p; // save so we can clean up later
|
||||
sg_indx = i;
|
||||
|
||||
psg->sg[i].addr = cpu_to_le32(addr);
|
||||
psg->sg[i].count = cpu_to_le32(upsg->sg[i].count);
|
||||
byte_count += upsg->sg[i].count;
|
||||
if( flags & SRB_DataOut ){
|
||||
if(copy_from_user(p,sg_user[i],usg->sg[i].count)){
|
||||
dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
|
||||
rcode = -EFAULT;
|
||||
goto cleanup;
|
||||
}
|
||||
}
|
||||
addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir);
|
||||
|
||||
psg->sg[i].addr = cpu_to_le32(addr & 0xffffffff);
|
||||
byte_count += usg->sg[i].count;
|
||||
psg->sg[i].count = cpu_to_le32(usg->sg[i].count);
|
||||
}
|
||||
} else {
|
||||
for (i = 0; i < upsg->count; i++) {
|
||||
dma_addr_t addr;
|
||||
void* p;
|
||||
p = kmalloc(upsg->sg[i].count, GFP_KERNEL);
|
||||
if(p == 0) {
|
||||
dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
|
||||
upsg->sg[i].count, i, upsg->count));
|
||||
rcode = -ENOMEM;
|
||||
goto cleanup;
|
||||
}
|
||||
sg_user[i] = (void __user *)(ptrdiff_t)upsg->sg[i].addr;
|
||||
sg_list[i] = p; // save so we can clean up later
|
||||
sg_indx = i;
|
||||
|
||||
if( flags & SRB_DataOut ){
|
||||
if(copy_from_user(p, sg_user[i],
|
||||
upsg->sg[i].count)) {
|
||||
dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
|
||||
rcode = -EFAULT;
|
||||
goto cleanup;
|
||||
}
|
||||
}
|
||||
addr = pci_map_single(dev->pdev, p,
|
||||
upsg->sg[i].count, data_dir);
|
||||
|
||||
psg->sg[i].addr = cpu_to_le32(addr);
|
||||
byte_count += upsg->sg[i].count;
|
||||
psg->sg[i].count = cpu_to_le32(upsg->sg[i].count);
|
||||
}
|
||||
}
|
||||
srbcmd->count = cpu_to_le32(byte_count);
|
||||
psg->count = cpu_to_le32(sg_indx+1);
|
||||
@ -682,7 +741,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
|
||||
|
||||
if( flags & SRB_DataIn ) {
|
||||
for(i = 0 ; i <= sg_indx; i++){
|
||||
byte_count = le32_to_cpu((dev->dac_support == 1)
|
||||
byte_count = le32_to_cpu(
|
||||
(dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)
|
||||
? ((struct sgmap64*)&srbcmd->sg)->sg[i].count
|
||||
: srbcmd->sg.sg[i].count);
|
||||
if(copy_to_user(sg_user[i], sg_list[i], byte_count)){
|
||||
|
@ -5,7 +5,7 @@
|
||||
* based on the old aacraid driver that is..
|
||||
* Adaptec aacraid device driver for Linux.
|
||||
*
|
||||
* Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
|
||||
* Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
@ -110,7 +110,7 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
|
||||
/*
|
||||
* Align the beginning of Headers to commalign
|
||||
*/
|
||||
align = (commalign - ((unsigned long)(base) & (commalign - 1)));
|
||||
align = (commalign - ((ptrdiff_t)(base) & (commalign - 1)));
|
||||
base = base + align;
|
||||
phys = phys + align;
|
||||
/*
|
||||
|
@ -5,7 +5,7 @@
|
||||
* based on the old aacraid driver that is..
|
||||
* Adaptec aacraid device driver for Linux.
|
||||
*
|
||||
* Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
|
||||
* Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
@ -94,7 +94,7 @@ void aac_fib_map_free(struct aac_dev *dev)
|
||||
int aac_fib_setup(struct aac_dev * dev)
|
||||
{
|
||||
struct fib *fibptr;
|
||||
struct hw_fib *hw_fib_va;
|
||||
struct hw_fib *hw_fib;
|
||||
dma_addr_t hw_fib_pa;
|
||||
int i;
|
||||
|
||||
@ -106,24 +106,24 @@ int aac_fib_setup(struct aac_dev * dev)
|
||||
if (i<0)
|
||||
return -ENOMEM;
|
||||
|
||||
hw_fib_va = dev->hw_fib_va;
|
||||
hw_fib = dev->hw_fib_va;
|
||||
hw_fib_pa = dev->hw_fib_pa;
|
||||
memset(hw_fib_va, 0, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
|
||||
memset(hw_fib, 0, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
|
||||
/*
|
||||
* Initialise the fibs
|
||||
*/
|
||||
for (i = 0, fibptr = &dev->fibs[i]; i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++, fibptr++)
|
||||
{
|
||||
fibptr->dev = dev;
|
||||
fibptr->hw_fib = hw_fib_va;
|
||||
fibptr->data = (void *) fibptr->hw_fib->data;
|
||||
fibptr->hw_fib_va = hw_fib;
|
||||
fibptr->data = (void *) fibptr->hw_fib_va->data;
|
||||
fibptr->next = fibptr+1; /* Forward chain the fibs */
|
||||
init_MUTEX_LOCKED(&fibptr->event_wait);
|
||||
spin_lock_init(&fibptr->event_lock);
|
||||
hw_fib_va->header.XferState = cpu_to_le32(0xffffffff);
|
||||
hw_fib_va->header.SenderSize = cpu_to_le16(dev->max_fib_size);
|
||||
hw_fib->header.XferState = cpu_to_le32(0xffffffff);
|
||||
hw_fib->header.SenderSize = cpu_to_le16(dev->max_fib_size);
|
||||
fibptr->hw_fib_pa = hw_fib_pa;
|
||||
hw_fib_va = (struct hw_fib *)((unsigned char *)hw_fib_va + dev->max_fib_size);
|
||||
hw_fib = (struct hw_fib *)((unsigned char *)hw_fib + dev->max_fib_size);
|
||||
hw_fib_pa = hw_fib_pa + dev->max_fib_size;
|
||||
}
|
||||
/*
|
||||
@ -166,7 +166,7 @@ struct fib *aac_fib_alloc(struct aac_dev *dev)
|
||||
* Null out fields that depend on being zero at the start of
|
||||
* each I/O
|
||||
*/
|
||||
fibptr->hw_fib->header.XferState = 0;
|
||||
fibptr->hw_fib_va->header.XferState = 0;
|
||||
fibptr->callback = NULL;
|
||||
fibptr->callback_data = NULL;
|
||||
|
||||
@ -178,7 +178,6 @@ struct fib *aac_fib_alloc(struct aac_dev *dev)
|
||||
* @fibptr: fib to free up
|
||||
*
|
||||
* Frees up a fib and places it on the appropriate queue
|
||||
* (either free or timed out)
|
||||
*/
|
||||
|
||||
void aac_fib_free(struct fib *fibptr)
|
||||
@ -186,19 +185,15 @@ void aac_fib_free(struct fib *fibptr)
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
|
||||
if (fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT) {
|
||||
if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
|
||||
aac_config.fib_timeouts++;
|
||||
fibptr->next = fibptr->dev->timeout_fib;
|
||||
fibptr->dev->timeout_fib = fibptr;
|
||||
} else {
|
||||
if (fibptr->hw_fib->header.XferState != 0) {
|
||||
printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
|
||||
(void*)fibptr,
|
||||
le32_to_cpu(fibptr->hw_fib->header.XferState));
|
||||
}
|
||||
fibptr->next = fibptr->dev->free_fib;
|
||||
fibptr->dev->free_fib = fibptr;
|
||||
}
|
||||
if (fibptr->hw_fib_va->header.XferState != 0) {
|
||||
printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
|
||||
(void*)fibptr,
|
||||
le32_to_cpu(fibptr->hw_fib_va->header.XferState));
|
||||
}
|
||||
fibptr->next = fibptr->dev->free_fib;
|
||||
fibptr->dev->free_fib = fibptr;
|
||||
spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
|
||||
}
|
||||
|
||||
@ -211,7 +206,7 @@ void aac_fib_free(struct fib *fibptr)
|
||||
|
||||
void aac_fib_init(struct fib *fibptr)
|
||||
{
|
||||
struct hw_fib *hw_fib = fibptr->hw_fib;
|
||||
struct hw_fib *hw_fib = fibptr->hw_fib_va;
|
||||
|
||||
hw_fib->header.StructType = FIB_MAGIC;
|
||||
hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
|
||||
@ -231,7 +226,7 @@ void aac_fib_init(struct fib *fibptr)
|
||||
|
||||
static void fib_dealloc(struct fib * fibptr)
|
||||
{
|
||||
struct hw_fib *hw_fib = fibptr->hw_fib;
|
||||
struct hw_fib *hw_fib = fibptr->hw_fib_va;
|
||||
BUG_ON(hw_fib->header.StructType != FIB_MAGIC);
|
||||
hw_fib->header.XferState = 0;
|
||||
}
|
||||
@ -386,7 +381,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
|
||||
void *callback_data)
|
||||
{
|
||||
struct aac_dev * dev = fibptr->dev;
|
||||
struct hw_fib * hw_fib = fibptr->hw_fib;
|
||||
struct hw_fib * hw_fib = fibptr->hw_fib_va;
|
||||
unsigned long flags = 0;
|
||||
unsigned long qflags;
|
||||
|
||||
@ -430,7 +425,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
|
||||
*/
|
||||
hw_fib->header.Command = cpu_to_le16(command);
|
||||
hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
|
||||
fibptr->hw_fib->header.Flags = 0; /* 0 the flags field - internal only*/
|
||||
fibptr->hw_fib_va->header.Flags = 0; /* 0 the flags field - internal only*/
|
||||
/*
|
||||
* Set the size of the Fib we want to send to the adapter
|
||||
*/
|
||||
@ -462,7 +457,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
|
||||
dprintk((KERN_DEBUG " Command = %d.\n", le32_to_cpu(hw_fib->header.Command)));
|
||||
dprintk((KERN_DEBUG " SubCommand = %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command)));
|
||||
dprintk((KERN_DEBUG " XferState = %x.\n", le32_to_cpu(hw_fib->header.XferState)));
|
||||
dprintk((KERN_DEBUG " hw_fib va being sent=%p\n",fibptr->hw_fib));
|
||||
dprintk((KERN_DEBUG " hw_fib va being sent=%p\n",fibptr->hw_fib_va));
|
||||
dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
|
||||
dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr));
|
||||
|
||||
@ -513,22 +508,20 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
|
||||
}
|
||||
udelay(5);
|
||||
}
|
||||
} else if (down_interruptible(&fibptr->event_wait)) {
|
||||
spin_lock_irqsave(&fibptr->event_lock, flags);
|
||||
if (fibptr->done == 0) {
|
||||
fibptr->done = 2; /* Tell interrupt we aborted */
|
||||
spin_unlock_irqrestore(&fibptr->event_lock, flags);
|
||||
return -EINTR;
|
||||
}
|
||||
} else
|
||||
(void)down_interruptible(&fibptr->event_wait);
|
||||
spin_lock_irqsave(&fibptr->event_lock, flags);
|
||||
if (fibptr->done == 0) {
|
||||
fibptr->done = 2; /* Tell interrupt we aborted */
|
||||
spin_unlock_irqrestore(&fibptr->event_lock, flags);
|
||||
return -EINTR;
|
||||
}
|
||||
spin_unlock_irqrestore(&fibptr->event_lock, flags);
|
||||
BUG_ON(fibptr->done == 0);
|
||||
|
||||
if((fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)){
|
||||
if(unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
|
||||
return -ETIMEDOUT;
|
||||
} else {
|
||||
return 0;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
/*
|
||||
* If the user does not want a response than return success otherwise
|
||||
@ -624,7 +617,7 @@ void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
|
||||
|
||||
int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
|
||||
{
|
||||
struct hw_fib * hw_fib = fibptr->hw_fib;
|
||||
struct hw_fib * hw_fib = fibptr->hw_fib_va;
|
||||
struct aac_dev * dev = fibptr->dev;
|
||||
struct aac_queue * q;
|
||||
unsigned long nointr = 0;
|
||||
@ -688,7 +681,7 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
|
||||
|
||||
int aac_fib_complete(struct fib *fibptr)
|
||||
{
|
||||
struct hw_fib * hw_fib = fibptr->hw_fib;
|
||||
struct hw_fib * hw_fib = fibptr->hw_fib_va;
|
||||
|
||||
/*
|
||||
* Check for a fib which has already been completed
|
||||
@ -774,9 +767,8 @@ void aac_printf(struct aac_dev *dev, u32 val)
|
||||
#define AIF_SNIFF_TIMEOUT (30*HZ)
|
||||
static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
|
||||
{
|
||||
struct hw_fib * hw_fib = fibptr->hw_fib;
|
||||
struct hw_fib * hw_fib = fibptr->hw_fib_va;
|
||||
struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
|
||||
int busy;
|
||||
u32 container;
|
||||
struct scsi_device *device;
|
||||
enum {
|
||||
@ -988,9 +980,6 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
|
||||
* behind you.
|
||||
*/
|
||||
|
||||
busy = 0;
|
||||
|
||||
|
||||
/*
|
||||
* Find the scsi_device associated with the SCSI address,
|
||||
* and mark it as changed, invalidating the cache. This deals
|
||||
@ -1035,7 +1024,6 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
|
||||
static int _aac_reset_adapter(struct aac_dev *aac)
|
||||
{
|
||||
int index, quirks;
|
||||
u32 ret;
|
||||
int retval;
|
||||
struct Scsi_Host *host;
|
||||
struct scsi_device *dev;
|
||||
@ -1059,35 +1047,29 @@ static int _aac_reset_adapter(struct aac_dev *aac)
|
||||
* If a positive health, means in a known DEAD PANIC
|
||||
* state and the adapter could be reset to `try again'.
|
||||
*/
|
||||
retval = aac_adapter_check_health(aac);
|
||||
if (retval == 0)
|
||||
retval = aac_adapter_sync_cmd(aac, IOP_RESET_ALWAYS,
|
||||
0, 0, 0, 0, 0, 0, &ret, NULL, NULL, NULL, NULL);
|
||||
if (retval)
|
||||
retval = aac_adapter_sync_cmd(aac, IOP_RESET,
|
||||
0, 0, 0, 0, 0, 0, &ret, NULL, NULL, NULL, NULL);
|
||||
retval = aac_adapter_restart(aac, aac_adapter_check_health(aac));
|
||||
|
||||
if (retval)
|
||||
goto out;
|
||||
if (ret != 0x00000001) {
|
||||
retval = -ENODEV;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* Loop through the fibs, close the synchronous FIBS
|
||||
*/
|
||||
for (index = 0; index < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); index++) {
|
||||
for (retval = 1, index = 0; index < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); index++) {
|
||||
struct fib *fib = &aac->fibs[index];
|
||||
if (!(fib->hw_fib->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
|
||||
(fib->hw_fib->header.XferState & cpu_to_le32(ResponseExpected))) {
|
||||
if (!(fib->hw_fib_va->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
|
||||
(fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected))) {
|
||||
unsigned long flagv;
|
||||
spin_lock_irqsave(&fib->event_lock, flagv);
|
||||
up(&fib->event_wait);
|
||||
spin_unlock_irqrestore(&fib->event_lock, flagv);
|
||||
schedule();
|
||||
retval = 0;
|
||||
}
|
||||
}
|
||||
/* Give some extra time for ioctls to complete. */
|
||||
if (retval == 0)
|
||||
ssleep(2);
|
||||
index = aac->cardtype;
|
||||
|
||||
/*
|
||||
@ -1248,7 +1230,7 @@ int aac_check_health(struct aac_dev * aac)
|
||||
|
||||
memset(hw_fib, 0, sizeof(struct hw_fib));
|
||||
memset(fib, 0, sizeof(struct fib));
|
||||
fib->hw_fib = hw_fib;
|
||||
fib->hw_fib_va = hw_fib;
|
||||
fib->dev = aac;
|
||||
aac_fib_init(fib);
|
||||
fib->type = FSAFS_NTC_FIB_CONTEXT;
|
||||
@ -1354,11 +1336,11 @@ int aac_command_thread(void *data)
|
||||
* do anything at this point since we don't have
|
||||
* anything defined for this thread to do.
|
||||
*/
|
||||
hw_fib = fib->hw_fib;
|
||||
hw_fib = fib->hw_fib_va;
|
||||
memset(fib, 0, sizeof(struct fib));
|
||||
fib->type = FSAFS_NTC_FIB_CONTEXT;
|
||||
fib->size = sizeof( struct fib );
|
||||
fib->hw_fib = hw_fib;
|
||||
fib->hw_fib_va = hw_fib;
|
||||
fib->data = hw_fib->data;
|
||||
fib->dev = dev;
|
||||
/*
|
||||
@ -1485,7 +1467,7 @@ int aac_command_thread(void *data)
|
||||
*/
|
||||
memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
|
||||
memcpy(newfib, fib, sizeof(struct fib));
|
||||
newfib->hw_fib = hw_newfib;
|
||||
newfib->hw_fib_va = hw_newfib;
|
||||
/*
|
||||
* Put the FIB onto the
|
||||
* fibctx's fibs
|
||||
|
@ -5,7 +5,7 @@
|
||||
* based on the old aacraid driver that is..
|
||||
* Adaptec aacraid device driver for Linux.
|
||||
*
|
||||
* Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
|
||||
* Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
@ -72,7 +72,7 @@ unsigned int aac_response_normal(struct aac_queue * q)
|
||||
u32 index = le32_to_cpu(entry->addr);
|
||||
fast = index & 0x01;
|
||||
fib = &dev->fibs[index >> 2];
|
||||
hwfib = fib->hw_fib;
|
||||
hwfib = fib->hw_fib_va;
|
||||
|
||||
aac_consumer_free(dev, q, HostNormRespQueue);
|
||||
/*
|
||||
@ -83,11 +83,13 @@ unsigned int aac_response_normal(struct aac_queue * q)
|
||||
* continue. The caller has already been notified that
|
||||
* the fib timed out.
|
||||
*/
|
||||
if (!(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
|
||||
dev->queues->queue[AdapNormCmdQueue].numpending--;
|
||||
else {
|
||||
printk(KERN_WARNING "aacraid: FIB timeout (%x).\n", fib->flags);
|
||||
printk(KERN_DEBUG"aacraid: hwfib=%p fib index=%i fib=%p\n",hwfib, hwfib->header.SenderData,fib);
|
||||
dev->queues->queue[AdapNormCmdQueue].numpending--;
|
||||
|
||||
if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
|
||||
spin_unlock_irqrestore(q->lock, flags);
|
||||
aac_fib_complete(fib);
|
||||
aac_fib_free(fib);
|
||||
spin_lock_irqsave(q->lock, flags);
|
||||
continue;
|
||||
}
|
||||
spin_unlock_irqrestore(q->lock, flags);
|
||||
@ -192,7 +194,7 @@ unsigned int aac_command_normal(struct aac_queue *q)
|
||||
INIT_LIST_HEAD(&fib->fiblink);
|
||||
fib->type = FSAFS_NTC_FIB_CONTEXT;
|
||||
fib->size = sizeof(struct fib);
|
||||
fib->hw_fib = hw_fib;
|
||||
fib->hw_fib_va = hw_fib;
|
||||
fib->data = hw_fib->data;
|
||||
fib->dev = dev;
|
||||
|
||||
@ -253,12 +255,13 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index)
|
||||
return 1;
|
||||
}
|
||||
memset(hw_fib, 0, sizeof(struct hw_fib));
|
||||
memcpy(hw_fib, (struct hw_fib *)(((unsigned long)(dev->regs.sa)) + (index & ~0x00000002L)), sizeof(struct hw_fib));
|
||||
memcpy(hw_fib, (struct hw_fib *)(((ptrdiff_t)(dev->regs.sa)) +
|
||||
(index & ~0x00000002L)), sizeof(struct hw_fib));
|
||||
memset(fib, 0, sizeof(struct fib));
|
||||
INIT_LIST_HEAD(&fib->fiblink);
|
||||
fib->type = FSAFS_NTC_FIB_CONTEXT;
|
||||
fib->size = sizeof(struct fib);
|
||||
fib->hw_fib = hw_fib;
|
||||
fib->hw_fib_va = hw_fib;
|
||||
fib->data = hw_fib->data;
|
||||
fib->dev = dev;
|
||||
|
||||
@ -270,7 +273,7 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index)
|
||||
} else {
|
||||
int fast = index & 0x01;
|
||||
struct fib * fib = &dev->fibs[index >> 2];
|
||||
struct hw_fib * hwfib = fib->hw_fib;
|
||||
struct hw_fib * hwfib = fib->hw_fib_va;
|
||||
|
||||
/*
|
||||
* Remove this fib from the Outstanding I/O queue.
|
||||
@ -280,14 +283,14 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index)
|
||||
* continue. The caller has already been notified that
|
||||
* the fib timed out.
|
||||
*/
|
||||
if ((fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
|
||||
printk(KERN_WARNING "aacraid: FIB timeout (%x).\n", fib->flags);
|
||||
printk(KERN_DEBUG"aacraid: hwfib=%p index=%i fib=%p\n",hwfib, hwfib->header.SenderData,fib);
|
||||
dev->queues->queue[AdapNormCmdQueue].numpending--;
|
||||
|
||||
if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
|
||||
aac_fib_complete(fib);
|
||||
aac_fib_free(fib);
|
||||
return 0;
|
||||
}
|
||||
|
||||
dev->queues->queue[AdapNormCmdQueue].numpending--;
|
||||
|
||||
if (fast) {
|
||||
/*
|
||||
* Doctor the fib
|
||||
|
@ -5,7 +5,7 @@
|
||||
* based on the old aacraid driver that is..
|
||||
* Adaptec aacraid device driver for Linux.
|
||||
*
|
||||
* Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
|
||||
* Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
@ -82,8 +82,6 @@ static LIST_HEAD(aac_devices);
|
||||
static int aac_cfg_major = -1;
|
||||
char aac_driver_version[] = AAC_DRIVER_FULL_VERSION;
|
||||
|
||||
extern int expose_physicals;
|
||||
|
||||
/*
|
||||
* Because of the way Linux names scsi devices, the order in this table has
|
||||
* become important. Check for on-board Raid first, add-in cards second.
|
||||
@ -247,7 +245,19 @@ static struct aac_driver_ident aac_drivers[] = {
|
||||
|
||||
static int aac_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
|
||||
{
|
||||
struct Scsi_Host *host = cmd->device->host;
|
||||
struct aac_dev *dev = (struct aac_dev *)host->hostdata;
|
||||
u32 count = 0;
|
||||
cmd->scsi_done = done;
|
||||
for (; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
|
||||
struct fib * fib = &dev->fibs[count];
|
||||
struct scsi_cmnd * command;
|
||||
if (fib->hw_fib_va->header.XferState &&
|
||||
((command = fib->callback_data)) &&
|
||||
(command == cmd) &&
|
||||
(cmd->SCp.phase == AAC_OWNER_FIRMWARE))
|
||||
return 0; /* Already owned by Adapter */
|
||||
}
|
||||
cmd->SCp.phase = AAC_OWNER_LOWLEVEL;
|
||||
return (aac_scsi_cmd(cmd) ? FAILED : 0);
|
||||
}
|
||||
@ -446,6 +456,40 @@ static int aac_ioctl(struct scsi_device *sdev, int cmd, void __user * arg)
|
||||
return aac_do_ioctl(dev, cmd, arg);
|
||||
}
|
||||
|
||||
static int aac_eh_abort(struct scsi_cmnd* cmd)
|
||||
{
|
||||
struct scsi_device * dev = cmd->device;
|
||||
struct Scsi_Host * host = dev->host;
|
||||
struct aac_dev * aac = (struct aac_dev *)host->hostdata;
|
||||
int count;
|
||||
int ret = FAILED;
|
||||
|
||||
printk(KERN_ERR "%s: Host adapter abort request (%d,%d,%d,%d)\n",
|
||||
AAC_DRIVERNAME,
|
||||
host->host_no, sdev_channel(dev), sdev_id(dev), dev->lun);
|
||||
switch (cmd->cmnd[0]) {
|
||||
case SERVICE_ACTION_IN:
|
||||
if (!(aac->raw_io_interface) ||
|
||||
!(aac->raw_io_64) ||
|
||||
((cmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
|
||||
break;
|
||||
case INQUIRY:
|
||||
case READ_CAPACITY:
|
||||
case TEST_UNIT_READY:
|
||||
/* Mark associated FIB to not complete, eh handler does this */
|
||||
for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
|
||||
struct fib * fib = &aac->fibs[count];
|
||||
if (fib->hw_fib_va->header.XferState &&
|
||||
(fib->callback_data == cmd)) {
|
||||
fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
|
||||
cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
|
||||
ret = SUCCESS;
|
||||
}
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* aac_eh_reset - Reset command handling
|
||||
* @scsi_cmd: SCSI command block causing the reset
|
||||
@ -457,12 +501,20 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
|
||||
struct Scsi_Host * host = dev->host;
|
||||
struct scsi_cmnd * command;
|
||||
int count;
|
||||
struct aac_dev * aac;
|
||||
struct aac_dev * aac = (struct aac_dev *)host->hostdata;
|
||||
unsigned long flags;
|
||||
|
||||
/* Mark the associated FIB to not complete, eh handler does this */
|
||||
for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
|
||||
struct fib * fib = &aac->fibs[count];
|
||||
if (fib->hw_fib_va->header.XferState &&
|
||||
(fib->callback_data == cmd)) {
|
||||
fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
|
||||
cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
|
||||
}
|
||||
}
|
||||
printk(KERN_ERR "%s: Host adapter reset request. SCSI hang ?\n",
|
||||
AAC_DRIVERNAME);
|
||||
aac = (struct aac_dev *)host->hostdata;
|
||||
|
||||
if ((count = aac_check_health(aac)))
|
||||
return count;
|
||||
@ -496,7 +548,7 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
|
||||
ssleep(1);
|
||||
}
|
||||
printk(KERN_ERR "%s: SCSI bus appears hung\n", AAC_DRIVERNAME);
|
||||
return -ETIMEDOUT;
|
||||
return SUCCESS; /* Cause an immediate retry of the command with a ten second delay after successful tur */
|
||||
}
|
||||
|
||||
/**
|
||||
@ -796,6 +848,7 @@ static struct scsi_host_template aac_driver_template = {
|
||||
.bios_param = aac_biosparm,
|
||||
.shost_attrs = aac_attrs,
|
||||
.slave_configure = aac_slave_configure,
|
||||
.eh_abort_handler = aac_eh_abort,
|
||||
.eh_host_reset_handler = aac_eh_reset,
|
||||
.can_queue = AAC_NUM_IO_FIB,
|
||||
.this_id = MAXIMUM_NUM_CONTAINERS,
|
||||
|
@ -74,9 +74,6 @@ static int aac_nark_ioremap(struct aac_dev * dev, u32 size)
|
||||
|
||||
int aac_nark_init(struct aac_dev * dev)
|
||||
{
|
||||
extern int _aac_rx_init(struct aac_dev *dev);
|
||||
extern int aac_rx_select_comm(struct aac_dev *dev, int comm);
|
||||
|
||||
/*
|
||||
* Fill in the function dispatch table.
|
||||
*/
|
||||
|
@ -45,7 +45,6 @@
|
||||
static int aac_rkt_select_comm(struct aac_dev *dev, int comm)
|
||||
{
|
||||
int retval;
|
||||
extern int aac_rx_select_comm(struct aac_dev *dev, int comm);
|
||||
retval = aac_rx_select_comm(dev, comm);
|
||||
if (comm == AAC_COMM_MESSAGE) {
|
||||
/*
|
||||
@ -97,8 +96,6 @@ static int aac_rkt_ioremap(struct aac_dev * dev, u32 size)
|
||||
|
||||
int aac_rkt_init(struct aac_dev *dev)
|
||||
{
|
||||
extern int _aac_rx_init(struct aac_dev *dev);
|
||||
|
||||
/*
|
||||
* Fill in the function dispatch table.
|
||||
*/
|
||||
|
@ -5,7 +5,7 @@
|
||||
* based on the old aacraid driver that is..
|
||||
* Adaptec aacraid device driver for Linux.
|
||||
*
|
||||
* Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
|
||||
* Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
@ -57,25 +57,25 @@ static irqreturn_t aac_rx_intr_producer(int irq, void *dev_id)
|
||||
* been enabled.
|
||||
* Check to see if this is our interrupt. If it isn't just return
|
||||
*/
|
||||
if (intstat & ~(dev->OIMR)) {
|
||||
if (likely(intstat & ~(dev->OIMR))) {
|
||||
bellbits = rx_readl(dev, OutboundDoorbellReg);
|
||||
if (bellbits & DoorBellPrintfReady) {
|
||||
if (unlikely(bellbits & DoorBellPrintfReady)) {
|
||||
aac_printf(dev, readl (&dev->IndexRegs->Mailbox[5]));
|
||||
rx_writel(dev, MUnit.ODR,DoorBellPrintfReady);
|
||||
rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
|
||||
}
|
||||
else if (bellbits & DoorBellAdapterNormCmdReady) {
|
||||
else if (unlikely(bellbits & DoorBellAdapterNormCmdReady)) {
|
||||
rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
|
||||
aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
|
||||
}
|
||||
else if (bellbits & DoorBellAdapterNormRespReady) {
|
||||
else if (likely(bellbits & DoorBellAdapterNormRespReady)) {
|
||||
rx_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady);
|
||||
aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
|
||||
}
|
||||
else if (bellbits & DoorBellAdapterNormCmdNotFull) {
|
||||
else if (unlikely(bellbits & DoorBellAdapterNormCmdNotFull)) {
|
||||
rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
|
||||
}
|
||||
else if (bellbits & DoorBellAdapterNormRespNotFull) {
|
||||
else if (unlikely(bellbits & DoorBellAdapterNormRespNotFull)) {
|
||||
rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
|
||||
rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
|
||||
}
|
||||
@ -88,11 +88,11 @@ static irqreturn_t aac_rx_intr_message(int irq, void *dev_id)
|
||||
{
|
||||
struct aac_dev *dev = dev_id;
|
||||
u32 Index = rx_readl(dev, MUnit.OutboundQueue);
|
||||
if (Index == 0xFFFFFFFFL)
|
||||
if (unlikely(Index == 0xFFFFFFFFL))
|
||||
Index = rx_readl(dev, MUnit.OutboundQueue);
|
||||
if (Index != 0xFFFFFFFFL) {
|
||||
if (likely(Index != 0xFFFFFFFFL)) {
|
||||
do {
|
||||
if (aac_intr_normal(dev, Index)) {
|
||||
if (unlikely(aac_intr_normal(dev, Index))) {
|
||||
rx_writel(dev, MUnit.OutboundQueue, Index);
|
||||
rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespReady);
|
||||
}
|
||||
@ -204,7 +204,7 @@ static int rx_sync_cmd(struct aac_dev *dev, u32 command,
|
||||
*/
|
||||
msleep(1);
|
||||
}
|
||||
if (ok != 1) {
|
||||
if (unlikely(ok != 1)) {
|
||||
/*
|
||||
* Restore interrupt mask even though we timed out
|
||||
*/
|
||||
@ -294,7 +294,7 @@ static void aac_rx_notify_adapter(struct aac_dev *dev, u32 event)
|
||||
* Start up processing on an i960 based AAC adapter
|
||||
*/
|
||||
|
||||
void aac_rx_start_adapter(struct aac_dev *dev)
|
||||
static void aac_rx_start_adapter(struct aac_dev *dev)
|
||||
{
|
||||
struct aac_init *init;
|
||||
|
||||
@ -319,12 +319,12 @@ static int aac_rx_check_health(struct aac_dev *dev)
|
||||
/*
|
||||
* Check to see if the board failed any self tests.
|
||||
*/
|
||||
if (status & SELF_TEST_FAILED)
|
||||
if (unlikely(status & SELF_TEST_FAILED))
|
||||
return -1;
|
||||
/*
|
||||
* Check to see if the board panic'd.
|
||||
*/
|
||||
if (status & KERNEL_PANIC) {
|
||||
if (unlikely(status & KERNEL_PANIC)) {
|
||||
char * buffer;
|
||||
struct POSTSTATUS {
|
||||
__le32 Post_Command;
|
||||
@ -333,15 +333,15 @@ static int aac_rx_check_health(struct aac_dev *dev)
|
||||
dma_addr_t paddr, baddr;
|
||||
int ret;
|
||||
|
||||
if ((status & 0xFF000000L) == 0xBC000000L)
|
||||
if (likely((status & 0xFF000000L) == 0xBC000000L))
|
||||
return (status >> 16) & 0xFF;
|
||||
buffer = pci_alloc_consistent(dev->pdev, 512, &baddr);
|
||||
ret = -2;
|
||||
if (buffer == NULL)
|
||||
if (unlikely(buffer == NULL))
|
||||
return ret;
|
||||
post = pci_alloc_consistent(dev->pdev,
|
||||
sizeof(struct POSTSTATUS), &paddr);
|
||||
if (post == NULL) {
|
||||
if (unlikely(post == NULL)) {
|
||||
pci_free_consistent(dev->pdev, 512, buffer, baddr);
|
||||
return ret;
|
||||
}
|
||||
@ -353,7 +353,7 @@ static int aac_rx_check_health(struct aac_dev *dev)
|
||||
NULL, NULL, NULL, NULL, NULL);
|
||||
pci_free_consistent(dev->pdev, sizeof(struct POSTSTATUS),
|
||||
post, paddr);
|
||||
if ((buffer[0] == '0') && ((buffer[1] == 'x') || (buffer[1] == 'X'))) {
|
||||
if (likely((buffer[0] == '0') && ((buffer[1] == 'x') || (buffer[1] == 'X')))) {
|
||||
ret = (buffer[2] <= '9') ? (buffer[2] - '0') : (buffer[2] - 'A' + 10);
|
||||
ret <<= 4;
|
||||
ret += (buffer[3] <= '9') ? (buffer[3] - '0') : (buffer[3] - 'A' + 10);
|
||||
@ -364,7 +364,7 @@ static int aac_rx_check_health(struct aac_dev *dev)
|
||||
/*
|
||||
* Wait for the adapter to be up and running.
|
||||
*/
|
||||
if (!(status & KERNEL_UP_AND_RUNNING))
|
||||
if (unlikely(!(status & KERNEL_UP_AND_RUNNING)))
|
||||
return -3;
|
||||
/*
|
||||
* Everything is OK
|
||||
@ -387,7 +387,7 @@ static int aac_rx_deliver_producer(struct fib * fib)
|
||||
unsigned long nointr = 0;
|
||||
|
||||
spin_lock_irqsave(q->lock, qflags);
|
||||
aac_queue_get( dev, &Index, AdapNormCmdQueue, fib->hw_fib, 1, fib, &nointr);
|
||||
aac_queue_get( dev, &Index, AdapNormCmdQueue, fib->hw_fib_va, 1, fib, &nointr);
|
||||
|
||||
q->numpending++;
|
||||
*(q->headers.producer) = cpu_to_le32(Index + 1);
|
||||
@ -419,9 +419,9 @@ static int aac_rx_deliver_message(struct fib * fib)
|
||||
spin_unlock_irqrestore(q->lock, qflags);
|
||||
for(;;) {
|
||||
Index = rx_readl(dev, MUnit.InboundQueue);
|
||||
if (Index == 0xFFFFFFFFL)
|
||||
if (unlikely(Index == 0xFFFFFFFFL))
|
||||
Index = rx_readl(dev, MUnit.InboundQueue);
|
||||
if (Index != 0xFFFFFFFFL)
|
||||
if (likely(Index != 0xFFFFFFFFL))
|
||||
break;
|
||||
if (--count == 0) {
|
||||
spin_lock_irqsave(q->lock, qflags);
|
||||
@ -437,7 +437,7 @@ static int aac_rx_deliver_message(struct fib * fib)
|
||||
device += sizeof(u32);
|
||||
writel((u32)(addr >> 32), device);
|
||||
device += sizeof(u32);
|
||||
writel(le16_to_cpu(fib->hw_fib->header.Size), device);
|
||||
writel(le16_to_cpu(fib->hw_fib_va->header.Size), device);
|
||||
rx_writel(dev, MUnit.InboundQueue, Index);
|
||||
return 0;
|
||||
}
|
||||
@ -460,22 +460,34 @@ static int aac_rx_ioremap(struct aac_dev * dev, u32 size)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int aac_rx_restart_adapter(struct aac_dev *dev)
|
||||
static int aac_rx_restart_adapter(struct aac_dev *dev, int bled)
|
||||
{
|
||||
u32 var;
|
||||
|
||||
printk(KERN_ERR "%s%d: adapter kernel panic'd.\n",
|
||||
dev->name, dev->id);
|
||||
if (bled)
|
||||
printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n",
|
||||
dev->name, dev->id, bled);
|
||||
else {
|
||||
bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
|
||||
0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL);
|
||||
if (!bled && (var != 0x00000001))
|
||||
bled = -EINVAL;
|
||||
}
|
||||
if (bled && (bled != -ETIMEDOUT))
|
||||
bled = aac_adapter_sync_cmd(dev, IOP_RESET,
|
||||
0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL);
|
||||
|
||||
if (aac_rx_check_health(dev) <= 0)
|
||||
return 1;
|
||||
if (rx_sync_cmd(dev, IOP_RESET, 0, 0, 0, 0, 0, 0,
|
||||
&var, NULL, NULL, NULL, NULL))
|
||||
return 1;
|
||||
if (bled && (bled != -ETIMEDOUT))
|
||||
return -EINVAL;
|
||||
if (bled || (var == 0x3803000F)) { /* USE_OTHER_METHOD */
|
||||
rx_writel(dev, MUnit.reserved2, 3);
|
||||
msleep(5000); /* Delay 5 seconds */
|
||||
var = 0x00000001;
|
||||
}
|
||||
if (var != 0x00000001)
|
||||
return 1;
|
||||
return -EINVAL;
|
||||
if (rx_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC)
|
||||
return 1;
|
||||
return -ENODEV;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -517,24 +529,29 @@ int _aac_rx_init(struct aac_dev *dev)
|
||||
{
|
||||
unsigned long start;
|
||||
unsigned long status;
|
||||
int instance;
|
||||
const char * name;
|
||||
|
||||
instance = dev->id;
|
||||
name = dev->name;
|
||||
int restart = 0;
|
||||
int instance = dev->id;
|
||||
const char * name = dev->name;
|
||||
|
||||
if (aac_adapter_ioremap(dev, dev->base_size)) {
|
||||
printk(KERN_WARNING "%s: unable to map adapter.\n", name);
|
||||
goto error_iounmap;
|
||||
}
|
||||
|
||||
/* Failure to reset here is an option ... */
|
||||
dev->OIMR = status = rx_readb (dev, MUnit.OIMR);
|
||||
if ((((status & 0xff) != 0xff) || reset_devices) &&
|
||||
!aac_rx_restart_adapter(dev, 0))
|
||||
++restart;
|
||||
/*
|
||||
* Check to see if the board panic'd while booting.
|
||||
*/
|
||||
status = rx_readl(dev, MUnit.OMRx[0]);
|
||||
if (status & KERNEL_PANIC)
|
||||
if (aac_rx_restart_adapter(dev))
|
||||
if (status & KERNEL_PANIC) {
|
||||
if (aac_rx_restart_adapter(dev, aac_rx_check_health(dev)))
|
||||
goto error_iounmap;
|
||||
++restart;
|
||||
}
|
||||
/*
|
||||
* Check to see if the board failed any self tests.
|
||||
*/
|
||||
@ -556,12 +573,23 @@ int _aac_rx_init(struct aac_dev *dev)
|
||||
*/
|
||||
while (!((status = rx_readl(dev, MUnit.OMRx[0])) & KERNEL_UP_AND_RUNNING))
|
||||
{
|
||||
if(time_after(jiffies, start+startup_timeout*HZ))
|
||||
{
|
||||
if ((restart &&
|
||||
(status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
|
||||
time_after(jiffies, start+HZ*startup_timeout)) {
|
||||
printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
|
||||
dev->name, instance, status);
|
||||
goto error_iounmap;
|
||||
}
|
||||
if (!restart &&
|
||||
((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
|
||||
time_after(jiffies, start + HZ *
|
||||
((startup_timeout > 60)
|
||||
? (startup_timeout - 60)
|
||||
: (startup_timeout / 2))))) {
|
||||
if (likely(!aac_rx_restart_adapter(dev, aac_rx_check_health(dev))))
|
||||
start = jiffies;
|
||||
++restart;
|
||||
}
|
||||
msleep(1);
|
||||
}
|
||||
/*
|
||||
@ -572,6 +600,7 @@ int _aac_rx_init(struct aac_dev *dev)
|
||||
dev->a_ops.adapter_notify = aac_rx_notify_adapter;
|
||||
dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
|
||||
dev->a_ops.adapter_check_health = aac_rx_check_health;
|
||||
dev->a_ops.adapter_restart = aac_rx_restart_adapter;
|
||||
|
||||
/*
|
||||
* First clear out all interrupts. Then enable the one's that we
|
||||
|
@ -57,18 +57,6 @@ config AIC79XX_BUILD_FIRMWARE
|
||||
or modify the assembler Makefile or the files it includes if your
|
||||
build environment is different than that of the author.
|
||||
|
||||
config AIC79XX_ENABLE_RD_STRM
|
||||
bool "Enable Read Streaming for All Targets"
|
||||
depends on SCSI_AIC79XX
|
||||
default n
|
||||
help
|
||||
Read Streaming is a U320 protocol option that should enhance
|
||||
performance. Early U320 drive firmware actually performs slower
|
||||
with read streaming enabled so it is disabled by default. Read
|
||||
Streaming can be configured in much the same way as tagged queueing
|
||||
using the "rd_strm" command line option. See
|
||||
drivers/scsi/aic7xxx/README.aic79xx for details.
|
||||
|
||||
config AIC79XX_DEBUG_ENABLE
|
||||
bool "Compile in Debugging Code"
|
||||
depends on SCSI_AIC79XX
|
||||
|
@ -50,16 +50,6 @@ config AIC7XXX_RESET_DELAY_MS
|
||||
|
||||
Default: 5000 (5 seconds)
|
||||
|
||||
config AIC7XXX_PROBE_EISA_VL
|
||||
bool "Probe for EISA and VL AIC7XXX Adapters"
|
||||
depends on SCSI_AIC7XXX && EISA
|
||||
help
|
||||
Probe for EISA and VLB Aic7xxx controllers. In many newer systems,
|
||||
the invasive probes necessary to detect these controllers can cause
|
||||
other devices to fail. For this reason, the non-PCI probe code is
|
||||
disabled by default. The current value of this option can be "toggled"
|
||||
via the no_probe kernel command line option.
|
||||
|
||||
config AIC7XXX_BUILD_FIRMWARE
|
||||
bool "Build Adapter Firmware with Kernel Build"
|
||||
depends on SCSI_AIC7XXX && !PREVENT_FIRMWARE_BUILD
|
||||
|
@ -363,6 +363,8 @@ static int ahd_linux_run_command(struct ahd_softc*,
|
||||
struct scsi_cmnd *);
|
||||
static void ahd_linux_setup_tag_info_global(char *p);
|
||||
static int aic79xx_setup(char *c);
|
||||
static void ahd_freeze_simq(struct ahd_softc *ahd);
|
||||
static void ahd_release_simq(struct ahd_softc *ahd);
|
||||
|
||||
static int ahd_linux_unit;
|
||||
|
||||
@ -2016,13 +2018,13 @@ ahd_linux_queue_cmd_complete(struct ahd_softc *ahd, struct scsi_cmnd *cmd)
|
||||
cmd->scsi_done(cmd);
|
||||
}
|
||||
|
||||
void
|
||||
static void
|
||||
ahd_freeze_simq(struct ahd_softc *ahd)
|
||||
{
|
||||
scsi_block_requests(ahd->platform_data->host);
|
||||
}
|
||||
|
||||
void
|
||||
static void
|
||||
ahd_release_simq(struct ahd_softc *ahd)
|
||||
{
|
||||
scsi_unblock_requests(ahd->platform_data->host);
|
||||
|
@ -837,8 +837,6 @@ int ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg);
|
||||
void ahd_platform_free(struct ahd_softc *ahd);
|
||||
void ahd_platform_init(struct ahd_softc *ahd);
|
||||
void ahd_platform_freeze_devq(struct ahd_softc *ahd, struct scb *scb);
|
||||
void ahd_freeze_simq(struct ahd_softc *ahd);
|
||||
void ahd_release_simq(struct ahd_softc *ahd);
|
||||
|
||||
static __inline void
|
||||
ahd_freeze_scb(struct scb *scb)
|
||||
|
@ -1278,11 +1278,6 @@ typedef enum {
|
||||
AHC_QUEUE_TAGGED
|
||||
} ahc_queue_alg;
|
||||
|
||||
void ahc_set_tags(struct ahc_softc *ahc,
|
||||
struct scsi_cmnd *cmd,
|
||||
struct ahc_devinfo *devinfo,
|
||||
ahc_queue_alg alg);
|
||||
|
||||
/**************************** Target Mode *************************************/
|
||||
#ifdef AHC_TARGET_MODE
|
||||
void ahc_send_lstate_events(struct ahc_softc *,
|
||||
|
@ -2073,7 +2073,7 @@ ahc_set_width(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
|
||||
/*
|
||||
* Update the current state of tagged queuing for a given target.
|
||||
*/
|
||||
void
|
||||
static void
|
||||
ahc_set_tags(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
|
||||
struct ahc_devinfo *devinfo, ahc_queue_alg alg)
|
||||
{
|
||||
|
@ -202,31 +202,29 @@ static const char * get_sa_name(const struct value_name_pair * arr,
|
||||
}
|
||||
|
||||
/* attempt to guess cdb length if cdb_len==0 . No trailing linefeed. */
|
||||
static void print_opcode_name(unsigned char * cdbp, int cdb_len,
|
||||
int start_of_line)
|
||||
static void print_opcode_name(unsigned char * cdbp, int cdb_len)
|
||||
{
|
||||
int sa, len, cdb0;
|
||||
const char * name;
|
||||
const char * leadin = start_of_line ? KERN_INFO : "";
|
||||
|
||||
cdb0 = cdbp[0];
|
||||
switch(cdb0) {
|
||||
case VARIABLE_LENGTH_CMD:
|
||||
len = cdbp[7] + 8;
|
||||
if (len < 10) {
|
||||
printk("%sshort variable length command, "
|
||||
"len=%d ext_len=%d", leadin, len, cdb_len);
|
||||
printk("short variable length command, "
|
||||
"len=%d ext_len=%d", len, cdb_len);
|
||||
break;
|
||||
}
|
||||
sa = (cdbp[8] << 8) + cdbp[9];
|
||||
name = get_sa_name(maint_in_arr, MAINT_IN_SZ, sa);
|
||||
if (name) {
|
||||
printk("%s%s", leadin, name);
|
||||
printk("%s", name);
|
||||
if ((cdb_len > 0) && (len != cdb_len))
|
||||
printk(", in_cdb_len=%d, ext_len=%d",
|
||||
len, cdb_len);
|
||||
} else {
|
||||
printk("%scdb[0]=0x%x, sa=0x%x", leadin, cdb0, sa);
|
||||
printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
|
||||
if ((cdb_len > 0) && (len != cdb_len))
|
||||
printk(", in_cdb_len=%d, ext_len=%d",
|
||||
len, cdb_len);
|
||||
@ -236,83 +234,80 @@ static void print_opcode_name(unsigned char * cdbp, int cdb_len,
|
||||
sa = cdbp[1] & 0x1f;
|
||||
name = get_sa_name(maint_in_arr, MAINT_IN_SZ, sa);
|
||||
if (name)
|
||||
printk("%s%s", leadin, name);
|
||||
printk("%s", name);
|
||||
else
|
||||
printk("%scdb[0]=0x%x, sa=0x%x", leadin, cdb0, sa);
|
||||
printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
|
||||
break;
|
||||
case MAINTENANCE_OUT:
|
||||
sa = cdbp[1] & 0x1f;
|
||||
name = get_sa_name(maint_out_arr, MAINT_OUT_SZ, sa);
|
||||
if (name)
|
||||
printk("%s%s", leadin, name);
|
||||
printk("%s", name);
|
||||
else
|
||||
printk("%scdb[0]=0x%x, sa=0x%x", leadin, cdb0, sa);
|
||||
printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
|
||||
break;
|
||||
case SERVICE_ACTION_IN_12:
|
||||
sa = cdbp[1] & 0x1f;
|
||||
name = get_sa_name(serv_in12_arr, SERV_IN12_SZ, sa);
|
||||
if (name)
|
||||
printk("%s%s", leadin, name);
|
||||
printk("%s", name);
|
||||
else
|
||||
printk("%scdb[0]=0x%x, sa=0x%x", leadin, cdb0, sa);
|
||||
printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
|
||||
break;
|
||||
case SERVICE_ACTION_OUT_12:
|
||||
sa = cdbp[1] & 0x1f;
|
||||
name = get_sa_name(serv_out12_arr, SERV_OUT12_SZ, sa);
|
||||
if (name)
|
||||
printk("%s%s", leadin, name);
|
||||
printk("%s", name);
|
||||
else
|
||||
printk("%scdb[0]=0x%x, sa=0x%x", leadin, cdb0, sa);
|
||||
printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
|
||||
break;
|
||||
case SERVICE_ACTION_IN_16:
|
||||
sa = cdbp[1] & 0x1f;
|
||||
name = get_sa_name(serv_in16_arr, SERV_IN16_SZ, sa);
|
||||
if (name)
|
||||
printk("%s%s", leadin, name);
|
||||
printk("%s", name);
|
||||
else
|
||||
printk("%scdb[0]=0x%x, sa=0x%x", leadin, cdb0, sa);
|
||||
printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
|
||||
break;
|
||||
case SERVICE_ACTION_OUT_16:
|
||||
sa = cdbp[1] & 0x1f;
|
||||
name = get_sa_name(serv_out16_arr, SERV_OUT16_SZ, sa);
|
||||
if (name)
|
||||
printk("%s%s", leadin, name);
|
||||
printk("%s", name);
|
||||
else
|
||||
printk("%scdb[0]=0x%x, sa=0x%x", leadin, cdb0, sa);
|
||||
printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
|
||||
break;
|
||||
default:
|
||||
if (cdb0 < 0xc0) {
|
||||
name = cdb_byte0_names[cdb0];
|
||||
if (name)
|
||||
printk("%s%s", leadin, name);
|
||||
printk("%s", name);
|
||||
else
|
||||
printk("%scdb[0]=0x%x (reserved)",
|
||||
leadin, cdb0);
|
||||
printk("cdb[0]=0x%x (reserved)", cdb0);
|
||||
} else
|
||||
printk("%scdb[0]=0x%x (vendor)", leadin, cdb0);
|
||||
printk("cdb[0]=0x%x (vendor)", cdb0);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
#else /* ifndef CONFIG_SCSI_CONSTANTS */
|
||||
|
||||
static void print_opcode_name(unsigned char * cdbp, int cdb_len,
|
||||
int start_of_line)
|
||||
static void print_opcode_name(unsigned char * cdbp, int cdb_len)
|
||||
{
|
||||
int sa, len, cdb0;
|
||||
const char * leadin = start_of_line ? KERN_INFO : "";
|
||||
|
||||
cdb0 = cdbp[0];
|
||||
switch(cdb0) {
|
||||
case VARIABLE_LENGTH_CMD:
|
||||
len = cdbp[7] + 8;
|
||||
if (len < 10) {
|
||||
printk("%sshort opcode=0x%x command, len=%d "
|
||||
"ext_len=%d", leadin, cdb0, len, cdb_len);
|
||||
printk("short opcode=0x%x command, len=%d "
|
||||
"ext_len=%d", cdb0, len, cdb_len);
|
||||
break;
|
||||
}
|
||||
sa = (cdbp[8] << 8) + cdbp[9];
|
||||
printk("%scdb[0]=0x%x, sa=0x%x", leadin, cdb0, sa);
|
||||
printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
|
||||
if (len != cdb_len)
|
||||
printk(", in_cdb_len=%d, ext_len=%d", len, cdb_len);
|
||||
break;
|
||||
@ -323,49 +318,48 @@ static void print_opcode_name(unsigned char * cdbp, int cdb_len,
|
||||
case SERVICE_ACTION_IN_16:
|
||||
case SERVICE_ACTION_OUT_16:
|
||||
sa = cdbp[1] & 0x1f;
|
||||
printk("%scdb[0]=0x%x, sa=0x%x", leadin, cdb0, sa);
|
||||
printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
|
||||
break;
|
||||
default:
|
||||
if (cdb0 < 0xc0)
|
||||
printk("%scdb[0]=0x%x", leadin, cdb0);
|
||||
printk("cdb[0]=0x%x", cdb0);
|
||||
else
|
||||
printk("%scdb[0]=0x%x (vendor)", leadin, cdb0);
|
||||
printk("cdb[0]=0x%x (vendor)", cdb0);
|
||||
break;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
void __scsi_print_command(unsigned char *command)
|
||||
void __scsi_print_command(unsigned char *cdb)
|
||||
{
|
||||
int k, len;
|
||||
|
||||
print_opcode_name(command, 0, 1);
|
||||
if (VARIABLE_LENGTH_CMD == command[0])
|
||||
len = command[7] + 8;
|
||||
print_opcode_name(cdb, 0);
|
||||
if (VARIABLE_LENGTH_CMD == cdb[0])
|
||||
len = cdb[7] + 8;
|
||||
else
|
||||
len = COMMAND_SIZE(command[0]);
|
||||
len = COMMAND_SIZE(cdb[0]);
|
||||
/* print out all bytes in cdb */
|
||||
for (k = 0; k < len; ++k)
|
||||
printk(" %02x", command[k]);
|
||||
printk(" %02x", cdb[k]);
|
||||
printk("\n");
|
||||
}
|
||||
EXPORT_SYMBOL(__scsi_print_command);
|
||||
|
||||
/* This function (perhaps with the addition of peripheral device type)
|
||||
* is more approriate than __scsi_print_command(). Perhaps that static
|
||||
* can be dropped later if it replaces the __scsi_print_command version.
|
||||
*/
|
||||
static void scsi_print_cdb(unsigned char *cdb, int cdb_len, int start_of_line)
|
||||
void scsi_print_command(struct scsi_cmnd *cmd)
|
||||
{
|
||||
int k;
|
||||
|
||||
print_opcode_name(cdb, cdb_len, start_of_line);
|
||||
scmd_printk(KERN_INFO, cmd, "CDB: ");
|
||||
print_opcode_name(cmd->cmnd, cmd->cmd_len);
|
||||
|
||||
/* print out all bytes in cdb */
|
||||
printk(":");
|
||||
for (k = 0; k < cdb_len; ++k)
|
||||
printk(" %02x", cdb[k]);
|
||||
for (k = 0; k < cmd->cmd_len; ++k)
|
||||
printk(" %02x", cmd->cmnd[k]);
|
||||
printk("\n");
|
||||
}
|
||||
EXPORT_SYMBOL(scsi_print_command);
|
||||
|
||||
/**
*
@ -410,7 +404,11 @@ struct error_info {
const char * text;
};

static struct error_info additional[] =
/*
* The canonical list of T10 Additional Sense Codes is available at:
* http://www.t10.org/lists/asc-num.txt
*/
static const struct error_info additional[] =
{
{0x0000, "No additional sense information"},
{0x0001, "Filemark detected"},
@ -714,6 +712,7 @@ static struct error_info additional[] =

{0x2F00, "Commands cleared by another initiator"},
{0x2F01, "Commands cleared by power loss notification"},
{0x2F02, "Commands cleared by device server"},

{0x3000, "Incompatible medium installed"},
{0x3001, "Cannot read medium - unknown format"},
@ -1176,67 +1175,77 @@ scsi_extd_sense_format(unsigned char asc, unsigned char ascq) {
}
EXPORT_SYMBOL(scsi_extd_sense_format);

/* Print extended sense information; no leadin, no linefeed */
static void
void
scsi_show_extd_sense(unsigned char asc, unsigned char ascq)
{
const char *extd_sense_fmt = scsi_extd_sense_format(asc, ascq);
const char *extd_sense_fmt = scsi_extd_sense_format(asc, ascq);

if (extd_sense_fmt) {
if (strstr(extd_sense_fmt, "%x")) {
printk("Additional sense: ");
printk("Add. Sense: ");
printk(extd_sense_fmt, ascq);
} else
printk("Additional sense: %s", extd_sense_fmt);
printk("Add. Sense: %s", extd_sense_fmt);
} else {
if (asc >= 0x80)
printk("<<vendor>> ASC=0x%x ASCQ=0x%x", asc, ascq);
printk("<<vendor>> ASC=0x%x ASCQ=0x%x", asc,
ascq);
if (ascq >= 0x80)
printk("ASC=0x%x <<vendor>> ASCQ=0x%x", asc, ascq);
printk("ASC=0x%x <<vendor>> ASCQ=0x%x", asc,
ascq);
else
printk("ASC=0x%x ASCQ=0x%x", asc, ascq);
}

printk("\n");
}
EXPORT_SYMBOL(scsi_show_extd_sense);

void
scsi_print_sense_hdr(const char *name, struct scsi_sense_hdr *sshdr)
scsi_show_sense_hdr(struct scsi_sense_hdr *sshdr)
{
const char *sense_txt;
/* An example of deferred is when an earlier write to disk cache
* succeeded, but now the disk discovers that it cannot write the
* data to the magnetic media.
*/
const char *error = scsi_sense_is_deferred(sshdr) ?
"<<DEFERRED>>" : "Current";
printk(KERN_INFO "%s: %s", name, error);
if (sshdr->response_code >= 0x72)
printk(" [descriptor]");

sense_txt = scsi_sense_key_string(sshdr->sense_key);
if (sense_txt)
printk(": sense key: %s\n", sense_txt);
printk("Sense Key : %s ", sense_txt);
else
printk(": sense key=0x%x\n", sshdr->sense_key);
printk(KERN_INFO " ");
scsi_show_extd_sense(sshdr->asc, sshdr->ascq);
printk("Sense Key : 0x%x ", sshdr->sense_key);

printk("%s", scsi_sense_is_deferred(sshdr) ? "[deferred] " :
"[current] ");

if (sshdr->response_code >= 0x72)
printk("[descriptor]");

printk("\n");
}
EXPORT_SYMBOL(scsi_show_sense_hdr);

/*
* Print normalized SCSI sense header with a prefix.
*/
void
scsi_print_sense_hdr(const char *name, struct scsi_sense_hdr *sshdr)
{
printk(KERN_INFO "%s: ", name);
scsi_show_sense_hdr(sshdr);
printk(KERN_INFO "%s: ", name);
scsi_show_extd_sense(sshdr->asc, sshdr->ascq);
}
EXPORT_SYMBOL(scsi_print_sense_hdr);

/* Print sense information */
void
__scsi_print_sense(const char *name, const unsigned char *sense_buffer,
int sense_len)
scsi_decode_sense_buffer(const unsigned char *sense_buffer, int sense_len,
struct scsi_sense_hdr *sshdr)
{
int k, num, res;
unsigned int info;
struct scsi_sense_hdr ssh;

res = scsi_normalize_sense(sense_buffer, sense_len, &ssh);
res = scsi_normalize_sense(sense_buffer, sense_len, sshdr);
if (0 == res) {
/* this may be SCSI-1 sense data */
num = (sense_len < 32) ? sense_len : 32;
printk(KERN_INFO "Unrecognized sense data (in hex):");
printk("Unrecognized sense data (in hex):");
for (k = 0; k < num; ++k) {
if (0 == (k % 16)) {
printk("\n");
@ -1247,11 +1256,20 @@ __scsi_print_sense(const char *name, const unsigned char *sense_buffer,
printk("\n");
return;
}
scsi_print_sense_hdr(name, &ssh);
if (ssh.response_code < 0x72) {
}

void
scsi_decode_sense_extras(const unsigned char *sense_buffer, int sense_len,
struct scsi_sense_hdr *sshdr)
{
int k, num, res;

if (sshdr->response_code < 0x72)
{
/* only decode extras for "fixed" format now */
char buff[80];
int blen, fixed_valid;
unsigned int info;

fixed_valid = sense_buffer[0] & 0x80;
info = ((sense_buffer[3] << 24) | (sense_buffer[4] << 16) |
@ -1281,13 +1299,13 @@ __scsi_print_sense(const char *name, const unsigned char *sense_buffer,
res += snprintf(buff + res, blen - res, "ILI");
}
if (res > 0)
printk(KERN_INFO "%s\n", buff);
} else if (ssh.additional_length > 0) {
printk("%s\n", buff);
} else if (sshdr->additional_length > 0) {
/* descriptor format with sense descriptors */
num = 8 + ssh.additional_length;
num = 8 + sshdr->additional_length;
num = (sense_len < num) ? sense_len : num;
printk(KERN_INFO "Descriptor sense data with sense "
"descriptors (in hex):");
printk("Descriptor sense data with sense descriptors "
"(in hex):");
for (k = 0; k < num; ++k) {
if (0 == (k % 16)) {
printk("\n");
@ -1295,30 +1313,43 @@ __scsi_print_sense(const char *name, const unsigned char *sense_buffer,
}
printk("%02x ", sense_buffer[k]);
}

printk("\n");
}

}

/* Normalize and print sense buffer with name prefix */
void __scsi_print_sense(const char *name, const unsigned char *sense_buffer,
int sense_len)
{
struct scsi_sense_hdr sshdr;

printk(KERN_INFO "%s: ", name);
scsi_decode_sense_buffer(sense_buffer, sense_len, &sshdr);
scsi_show_sense_hdr(&sshdr);
scsi_decode_sense_extras(sense_buffer, sense_len, &sshdr);
printk(KERN_INFO "%s: ", name);
scsi_show_extd_sense(sshdr.asc, sshdr.ascq);
}
EXPORT_SYMBOL(__scsi_print_sense);

void scsi_print_sense(const char *devclass, struct scsi_cmnd *cmd)
/* Normalize and print sense buffer in SCSI command */
void scsi_print_sense(char *name, struct scsi_cmnd *cmd)
{
const char *name = devclass;
struct scsi_sense_hdr sshdr;

if (cmd->request->rq_disk)
name = cmd->request->rq_disk->disk_name;
__scsi_print_sense(name, cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
scmd_printk(KERN_INFO, cmd, "");
scsi_decode_sense_buffer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
&sshdr);
scsi_show_sense_hdr(&sshdr);
scsi_decode_sense_extras(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
&sshdr);
scmd_printk(KERN_INFO, cmd, "");
scsi_show_extd_sense(sshdr.asc, sshdr.ascq);
}
EXPORT_SYMBOL(scsi_print_sense);

void scsi_print_command(struct scsi_cmnd *cmd)
{
/* Assume appended output (i.e. not at start of line) */
sdev_printk("", cmd->device, "\n");
printk(KERN_INFO " command: ");
scsi_print_cdb(cmd->cmnd, cmd->cmd_len, 0);
}
EXPORT_SYMBOL(scsi_print_command);

#ifdef CONFIG_SCSI_CONSTANTS

static const char * const hostbyte_table[]={
@ -1327,25 +1358,6 @@ static const char * const hostbyte_table[]={
"DID_PASSTHROUGH", "DID_SOFT_ERROR", "DID_IMM_RETRY"};
#define NUM_HOSTBYTE_STRS ARRAY_SIZE(hostbyte_table)

void scsi_print_hostbyte(int scsiresult)
{
int hb = host_byte(scsiresult);

printk("Hostbyte=0x%02x", hb);
if (hb < NUM_HOSTBYTE_STRS)
printk("(%s) ", hostbyte_table[hb]);
else
printk("is invalid ");
}
#else
void scsi_print_hostbyte(int scsiresult)
{
printk("Hostbyte=0x%02x ", host_byte(scsiresult));
}
#endif

#ifdef CONFIG_SCSI_CONSTANTS

static const char * const driverbyte_table[]={
"DRIVER_OK", "DRIVER_BUSY", "DRIVER_SOFT", "DRIVER_MEDIA", "DRIVER_ERROR",
"DRIVER_INVALID", "DRIVER_TIMEOUT", "DRIVER_HARD", "DRIVER_SENSE"};
@ -1356,19 +1368,35 @@ static const char * const driversuggest_table[]={"SUGGEST_OK",
"SUGGEST_5", "SUGGEST_6", "SUGGEST_7", "SUGGEST_SENSE"};
#define NUM_SUGGEST_STRS ARRAY_SIZE(driversuggest_table)

void scsi_print_driverbyte(int scsiresult)
void scsi_show_result(int result)
{
int dr = (driver_byte(scsiresult) & DRIVER_MASK);
int su = ((driver_byte(scsiresult) & SUGGEST_MASK) >> 4);
int hb = host_byte(result);
int db = (driver_byte(result) & DRIVER_MASK);
int su = ((driver_byte(result) & SUGGEST_MASK) >> 4);

printk("Driverbyte=0x%02x ", driver_byte(scsiresult));
printk("(%s,%s) ",
(dr < NUM_DRIVERBYTE_STRS ? driverbyte_table[dr] : "invalid"),
printk("Result: hostbyte=%s driverbyte=%s,%s\n",
(hb < NUM_HOSTBYTE_STRS ? hostbyte_table[hb] : "invalid"),
(db < NUM_DRIVERBYTE_STRS ? driverbyte_table[db] : "invalid"),
(su < NUM_SUGGEST_STRS ? driversuggest_table[su] : "invalid"));
}

#else
void scsi_print_driverbyte(int scsiresult)

void scsi_show_result(int result)
{
printk("Driverbyte=0x%02x ", driver_byte(scsiresult));
printk("Result: hostbyte=0x%02x driverbyte=0x%02x\n",
host_byte(result), driver_byte(result));
}

#endif
EXPORT_SYMBOL(scsi_show_result);


void scsi_print_result(struct scsi_cmnd *cmd)
{
scmd_printk(KERN_INFO, cmd, "");
scsi_show_result(cmd->result);
}
EXPORT_SYMBOL(scsi_print_result);

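scsi_show_result() above decodes the host byte and driver byte out of the single 32-bit SCSI result word. A minimal sketch of that decode in plain C follows; the bit positions used here are the conventional midlayer layout (host byte in bits 16-23, driver byte in bits 24-31) and should be treated as an assumption for illustration, not as the kernel's host_byte()/driver_byte() macros themselves.

#include <stdio.h>

/* Illustrative decode of a SCSI result word, mirroring the fallback
 * (non-CONFIG_SCSI_CONSTANTS) branch of scsi_show_result() above. */
static void show_result(unsigned int result)
{
	unsigned int host_byte   = (result >> 16) & 0xff;	/* DID_* code */
	unsigned int driver_byte = (result >> 24) & 0xff;	/* DRIVER_* code */

	printf("Result: hostbyte=0x%02x driverbyte=0x%02x\n",
	       host_byte, driver_byte);
}
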
@ -31,7 +31,7 @@
|
||||
* Tunable parameters first
|
||||
*/
|
||||
|
||||
/* How many different OSM's are we allowing */
|
||||
/* How many different OSM's are we allowing */
|
||||
#define MAX_I2O_MODULES 64
|
||||
|
||||
#define I2O_EVT_CAPABILITY_OTHER 0x01
|
||||
@ -63,7 +63,7 @@ struct i2o_message
|
||||
u16 size;
|
||||
u32 target_tid:12;
|
||||
u32 init_tid:12;
|
||||
u32 function:8;
|
||||
u32 function:8;
|
||||
u32 initiator_context;
|
||||
/* List follows */
|
||||
};
|
||||
@ -77,7 +77,7 @@ struct i2o_device
|
||||
|
||||
char dev_name[8]; /* linux /dev name if available */
|
||||
i2o_lct_entry lct_data;/* Device LCT information */
|
||||
u32 flags;
|
||||
u32 flags;
|
||||
struct proc_dir_entry* proc_entry; /* /proc dir */
|
||||
struct adpt_device *owner;
|
||||
struct _adpt_hba *controller; /* Controlling IOP */
|
||||
@ -86,7 +86,7 @@ struct i2o_device
|
||||
/*
|
||||
* Each I2O controller has one of these objects
|
||||
*/
|
||||
|
||||
|
||||
struct i2o_controller
|
||||
{
|
||||
char name[16];
|
||||
@ -111,9 +111,9 @@ struct i2o_sys_tbl_entry
|
||||
u32 iop_id:12;
|
||||
u32 reserved2:20;
|
||||
u16 seg_num:12;
|
||||
u16 i2o_version:4;
|
||||
u8 iop_state;
|
||||
u8 msg_type;
|
||||
u16 i2o_version:4;
|
||||
u8 iop_state;
|
||||
u8 msg_type;
|
||||
u16 frame_size;
|
||||
u16 reserved3;
|
||||
u32 last_changed;
|
||||
@ -124,14 +124,14 @@ struct i2o_sys_tbl_entry
|
||||
|
||||
struct i2o_sys_tbl
|
||||
{
|
||||
u8 num_entries;
|
||||
u8 version;
|
||||
u16 reserved1;
|
||||
u8 num_entries;
|
||||
u8 version;
|
||||
u16 reserved1;
|
||||
u32 change_ind;
|
||||
u32 reserved2;
|
||||
u32 reserved3;
|
||||
struct i2o_sys_tbl_entry iops[0];
|
||||
};
|
||||
};
|
||||
|
||||
/*
|
||||
* I2O classes / subclasses
|
||||
@ -146,7 +146,7 @@ struct i2o_sys_tbl
|
||||
/* Class code names
|
||||
* (from v1.5 Table 6-1 Class Code Assignments.)
|
||||
*/
|
||||
|
||||
|
||||
#define I2O_CLASS_EXECUTIVE 0x000
|
||||
#define I2O_CLASS_DDM 0x001
|
||||
#define I2O_CLASS_RANDOM_BLOCK_STORAGE 0x010
|
||||
@ -166,7 +166,7 @@ struct i2o_sys_tbl
|
||||
|
||||
/* Rest of 0x092 - 0x09f reserved for peer-to-peer classes
|
||||
*/
|
||||
|
||||
|
||||
#define I2O_CLASS_MATCH_ANYCLASS 0xffffffff
|
||||
|
||||
/* Subclasses
|
||||
@ -175,7 +175,7 @@ struct i2o_sys_tbl
|
||||
#define I2O_SUBCLASS_i960 0x001
|
||||
#define I2O_SUBCLASS_HDM 0x020
|
||||
#define I2O_SUBCLASS_ISM 0x021
|
||||
|
||||
|
||||
/* Operation functions */
|
||||
|
||||
#define I2O_PARAMS_FIELD_GET 0x0001
|
||||
@ -219,7 +219,7 @@ struct i2o_sys_tbl
|
||||
/*
|
||||
* Messaging API values
|
||||
*/
|
||||
|
||||
|
||||
#define I2O_CMD_ADAPTER_ASSIGN 0xB3
|
||||
#define I2O_CMD_ADAPTER_READ 0xB2
|
||||
#define I2O_CMD_ADAPTER_RELEASE 0xB5
|
||||
@ -284,16 +284,16 @@ struct i2o_sys_tbl
|
||||
#define I2O_PRIVATE_MSG 0xFF
|
||||
|
||||
/*
|
||||
* Init Outbound Q status
|
||||
* Init Outbound Q status
|
||||
*/
|
||||
|
||||
|
||||
#define I2O_CMD_OUTBOUND_INIT_IN_PROGRESS 0x01
|
||||
#define I2O_CMD_OUTBOUND_INIT_REJECTED 0x02
|
||||
#define I2O_CMD_OUTBOUND_INIT_FAILED 0x03
|
||||
#define I2O_CMD_OUTBOUND_INIT_COMPLETE 0x04
|
||||
|
||||
/*
|
||||
* I2O Get Status State values
|
||||
* I2O Get Status State values
|
||||
*/
|
||||
|
||||
#define ADAPTER_STATE_INITIALIZING 0x01
|
||||
@ -303,7 +303,7 @@ struct i2o_sys_tbl
|
||||
#define ADAPTER_STATE_OPERATIONAL 0x08
|
||||
#define ADAPTER_STATE_FAILED 0x10
|
||||
#define ADAPTER_STATE_FAULTED 0x11
|
||||
|
||||
|
||||
/* I2O API function return values */
|
||||
|
||||
#define I2O_RTN_NO_ERROR 0
|
||||
@ -321,9 +321,9 @@ struct i2o_sys_tbl
|
||||
|
||||
/* Reply message status defines for all messages */
|
||||
|
||||
#define I2O_REPLY_STATUS_SUCCESS 0x00
|
||||
#define I2O_REPLY_STATUS_ABORT_DIRTY 0x01
|
||||
#define I2O_REPLY_STATUS_ABORT_NO_DATA_TRANSFER 0x02
|
||||
#define I2O_REPLY_STATUS_SUCCESS 0x00
|
||||
#define I2O_REPLY_STATUS_ABORT_DIRTY 0x01
|
||||
#define I2O_REPLY_STATUS_ABORT_NO_DATA_TRANSFER 0x02
|
||||
#define I2O_REPLY_STATUS_ABORT_PARTIAL_TRANSFER 0x03
|
||||
#define I2O_REPLY_STATUS_ERROR_DIRTY 0x04
|
||||
#define I2O_REPLY_STATUS_ERROR_NO_DATA_TRANSFER 0x05
|
||||
@ -338,7 +338,7 @@ struct i2o_sys_tbl
|
||||
|
||||
#define I2O_PARAMS_STATUS_SUCCESS 0x00
|
||||
#define I2O_PARAMS_STATUS_BAD_KEY_ABORT 0x01
|
||||
#define I2O_PARAMS_STATUS_BAD_KEY_CONTINUE 0x02
|
||||
#define I2O_PARAMS_STATUS_BAD_KEY_CONTINUE 0x02
|
||||
#define I2O_PARAMS_STATUS_BUFFER_FULL 0x03
|
||||
#define I2O_PARAMS_STATUS_BUFFER_TOO_SMALL 0x04
|
||||
#define I2O_PARAMS_STATUS_FIELD_UNREADABLE 0x05
|
||||
@ -390,7 +390,7 @@ struct i2o_sys_tbl
|
||||
#define I2O_CLAIM_MANAGEMENT 0x02000000
|
||||
#define I2O_CLAIM_AUTHORIZED 0x03000000
|
||||
#define I2O_CLAIM_SECONDARY 0x04000000
|
||||
|
||||
|
||||
/* Message header defines for VersionOffset */
|
||||
#define I2OVER15 0x0001
|
||||
#define I2OVER20 0x0002
|
||||
|
@ -99,7 +99,7 @@ typedef struct {
|
||||
uCHAR eataVersion; /* EATA Version */
|
||||
uLONG cpLength; /* EATA Command Packet Length */
|
||||
uLONG spLength; /* EATA Status Packet Length */
|
||||
uCHAR drqNum; /* DRQ Index (0,5,6,7) */
|
||||
uCHAR drqNum; /* DRQ Index (0,5,6,7) */
|
||||
uCHAR flag1; /* EATA Flags 1 (Byte 9) */
|
||||
uCHAR flag2; /* EATA Flags 2 (Byte 30) */
|
||||
} CtrlInfo;
|
||||
|
@ -145,8 +145,8 @@ typedef unsigned long sigLONG;
|
||||
#define FT_LOGGER 12 /* Event Logger */
|
||||
#define FT_INSTALL 13 /* An Install Program */
|
||||
#define FT_LIBRARY 14 /* Storage Manager Real-Mode Calls */
|
||||
#define FT_RESOURCE 15 /* Storage Manager Resource File */
|
||||
#define FT_MODEM_DB 16 /* Storage Manager Modem Database */
|
||||
#define FT_RESOURCE 15 /* Storage Manager Resource File */
|
||||
#define FT_MODEM_DB 16 /* Storage Manager Modem Database */
|
||||
|
||||
/* Filetype flags - sigBYTE dsFiletypeFlags; FLAG BITS */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
@ -195,8 +195,6 @@ static int adpt_detect(struct scsi_host_template* sht)
|
||||
pci_dev_get(pDev);
|
||||
}
|
||||
}
|
||||
if (pDev)
|
||||
pci_dev_put(pDev);
|
||||
|
||||
/* In INIT state, Activate IOPs */
|
||||
for (pHba = hba_chain; pHba; pHba = pHba->next) {
|
||||
|
@ -18,13 +18,6 @@
|
||||
* Misc. definitions *
|
||||
*********************************************/
|
||||
|
||||
#ifndef TRUE
|
||||
#define TRUE 1
|
||||
#endif
|
||||
#ifndef FALSE
|
||||
#define FALSE 0
|
||||
#endif
|
||||
|
||||
#define R_LIMIT 0x20000
|
||||
|
||||
#define MAXISA 4
|
||||
|
@ -85,7 +85,7 @@
|
||||
static int max_id = 64;
|
||||
static int max_channel = 3;
|
||||
static int init_timeout = 5;
|
||||
static int max_requests = 50;
|
||||
static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
|
||||
|
||||
#define IBMVSCSI_VERSION "1.5.8"
|
||||
|
||||
@ -538,7 +538,8 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
|
||||
int request_status;
|
||||
int rc;
|
||||
|
||||
/* If we have exhausted our request limit, just fail this request.
/* If we have exhausted our request limit, just fail this request,
* unless it is for a reset or abort.
* Note that there are rare cases involving driver generated requests
* (such as task management requests) that the mid layer may think we
* can handle more requests (can_queue) when we actually can't
@ -551,9 +552,30 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
*/
if (request_status < -1)
goto send_error;
/* Otherwise, if we have run out of requests */
else if (request_status < 0)
goto send_busy;
/* Otherwise, we may have run out of requests. */
/* Abort and reset calls should make it through.
* Nothing except abort and reset should use the last two
* slots unless we had two or less to begin with.
*/
else if (request_status < 2 &&
evt_struct->iu.srp.cmd.opcode != SRP_TSK_MGMT) {
/* In the case that we have less than two requests
* available, check the server limit as a combination
* of the request limit and the number of requests
* in-flight (the size of the send list). If the
* server limit is greater than 2, return busy so
* that the last two are reserved for reset and abort.
*/
int server_limit = request_status;
struct srp_event_struct *tmp_evt;

list_for_each_entry(tmp_evt, &hostdata->sent, list) {
server_limit++;
}

if (server_limit > 2)
goto send_busy;
}
}

/* Copy the IU into the transfer area */

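The comments in the hunk above describe the slot-reservation policy: once fewer than two request-limit slots remain, an ordinary command is refused so that abort and reset task-management IUs can always be sent, unless the server never granted more than two slots in total. A standalone sketch of that admission decision follows; the function and parameter names are invented for illustration and this is not the driver code.

/* 'remaining' mirrors the request-limit value after decrementing for this
 * command, 'in_flight' the number of events already on the sent list, and
 * 'is_tsk_mgmt' whether this IU is an abort/reset (SRP task management)
 * request. */
enum send_verdict { SEND_OK, SEND_BUSY, SEND_ERROR };

static enum send_verdict admission_check(int remaining, int in_flight,
					 int is_tsk_mgmt)
{
	if (remaining < -1)
		return SEND_ERROR;		/* connection is down */
	if (remaining < 2 && !is_tsk_mgmt) {
		/* Fewer than two slots left: keep them for abort/reset,
		 * unless the server limit was never larger than two. */
		if (remaining + in_flight > 2)
			return SEND_BUSY;
	}
	return SEND_OK;
}
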
@ -572,6 +594,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
|
||||
|
||||
printk(KERN_ERR "ibmvscsi: send error %d\n",
|
||||
rc);
|
||||
atomic_inc(&hostdata->request_limit);
|
||||
goto send_error;
|
||||
}
|
||||
|
||||
@ -581,7 +604,8 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
|
||||
unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
|
||||
|
||||
free_event_struct(&hostdata->pool, evt_struct);
|
||||
return SCSI_MLQUEUE_HOST_BUSY;
|
||||
atomic_inc(&hostdata->request_limit);
|
||||
return SCSI_MLQUEUE_HOST_BUSY;
|
||||
|
||||
send_error:
|
||||
unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
|
||||
@ -831,23 +855,16 @@ static void login_rsp(struct srp_event_struct *evt_struct)
|
||||
|
||||
printk(KERN_INFO "ibmvscsi: SRP_LOGIN succeeded\n");
|
||||
|
||||
if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta >
|
||||
(max_requests - 2))
|
||||
evt_struct->xfer_iu->srp.login_rsp.req_lim_delta =
|
||||
max_requests - 2;
|
||||
if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta < 0)
|
||||
printk(KERN_ERR "ibmvscsi: Invalid request_limit.\n");
|
||||
|
||||
/* Now we know what the real request-limit is */
|
||||
/* Now we know what the real request-limit is.
|
||||
* This value is set rather than added to request_limit because
|
||||
* request_limit could have been set to -1 by this client.
|
||||
*/
|
||||
atomic_set(&hostdata->request_limit,
|
||||
evt_struct->xfer_iu->srp.login_rsp.req_lim_delta);
|
||||
|
||||
hostdata->host->can_queue =
|
||||
evt_struct->xfer_iu->srp.login_rsp.req_lim_delta - 2;
|
||||
|
||||
if (hostdata->host->can_queue < 1) {
|
||||
printk(KERN_ERR "ibmvscsi: Invalid request_limit_delta\n");
|
||||
return;
|
||||
}
|
||||
|
||||
/* If we had any pending I/Os, kick them */
|
||||
scsi_unblock_requests(hostdata->host);
|
||||
|
||||
@ -1337,6 +1354,27 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
|
||||
return rc;
|
||||
}
|
||||
|
||||
/**
|
||||
* ibmvscsi_slave_configure: Set the "allow_restart" flag for each disk.
|
||||
* @sdev: struct scsi_device device to configure
|
||||
*
|
||||
* Enable allow_restart for a device if it is a disk. Adjust the
|
||||
* queue_depth here also as is required by the documentation for
|
||||
* struct scsi_host_template.
|
||||
*/
|
||||
static int ibmvscsi_slave_configure(struct scsi_device *sdev)
|
||||
{
|
||||
struct Scsi_Host *shost = sdev->host;
|
||||
unsigned long lock_flags = 0;
|
||||
|
||||
spin_lock_irqsave(shost->host_lock, lock_flags);
|
||||
if (sdev->type == TYPE_DISK)
|
||||
sdev->allow_restart = 1;
|
||||
scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
|
||||
spin_unlock_irqrestore(shost->host_lock, lock_flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------
|
||||
* sysfs attributes
|
||||
*/
|
||||
@ -1482,8 +1520,9 @@ static struct scsi_host_template driver_template = {
|
||||
.queuecommand = ibmvscsi_queuecommand,
|
||||
.eh_abort_handler = ibmvscsi_eh_abort_handler,
|
||||
.eh_device_reset_handler = ibmvscsi_eh_device_reset_handler,
|
||||
.slave_configure = ibmvscsi_slave_configure,
|
||||
.cmd_per_lun = 16,
|
||||
.can_queue = 1, /* Updated after SRP_LOGIN */
|
||||
.can_queue = IBMVSCSI_MAX_REQUESTS_DEFAULT,
|
||||
.this_id = -1,
|
||||
.sg_tablesize = SG_ALL,
|
||||
.use_clustering = ENABLE_CLUSTERING,
|
||||
@ -1503,6 +1542,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
|
||||
|
||||
vdev->dev.driver_data = NULL;
|
||||
|
||||
driver_template.can_queue = max_requests;
|
||||
host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
|
||||
if (!host) {
|
||||
printk(KERN_ERR "ibmvscsi: couldn't allocate host data\n");
|
||||
|
@ -44,6 +44,8 @@ struct Scsi_Host;
|
||||
*/
|
||||
#define MAX_INDIRECT_BUFS 10
|
||||
|
||||
#define IBMVSCSI_MAX_REQUESTS_DEFAULT 100
|
||||
|
||||
/* ------------------------------------------------------------
|
||||
* Data Structures
|
||||
*/
|
||||
|
@ -35,7 +35,7 @@
|
||||
#include "ibmvscsi.h"
|
||||
|
||||
#define INITIAL_SRP_LIMIT 16
|
||||
#define DEFAULT_MAX_SECTORS 512
|
||||
#define DEFAULT_MAX_SECTORS 256
|
||||
|
||||
#define TGT_NAME "ibmvstgt"
|
||||
|
||||
@ -248,8 +248,8 @@ static int ibmvstgt_rdma(struct scsi_cmnd *sc, struct scatterlist *sg, int nsg,
|
||||
md[i].va + mdone);
|
||||
|
||||
if (err != H_SUCCESS) {
|
||||
eprintk("rdma error %d %d\n", dir, slen);
|
||||
goto out;
|
||||
eprintk("rdma error %d %d %ld\n", dir, slen, err);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
mlen -= slen;
|
||||
@ -265,45 +265,35 @@ static int ibmvstgt_rdma(struct scsi_cmnd *sc, struct scatterlist *sg, int nsg,
|
||||
if (sidx > nsg) {
|
||||
eprintk("out of sg %p %d %d\n",
|
||||
iue, sidx, nsg);
|
||||
goto out;
|
||||
return -EIO;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
rest -= mlen;
|
||||
}
|
||||
out:
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ibmvstgt_transfer_data(struct scsi_cmnd *sc,
|
||||
void (*done)(struct scsi_cmnd *))
|
||||
{
|
||||
struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
|
||||
int err;
|
||||
|
||||
err = srp_transfer_data(sc, &vio_iu(iue)->srp.cmd, ibmvstgt_rdma, 1, 1);
|
||||
|
||||
done(sc);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int ibmvstgt_cmd_done(struct scsi_cmnd *sc,
|
||||
void (*done)(struct scsi_cmnd *))
|
||||
{
|
||||
unsigned long flags;
|
||||
struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
|
||||
struct srp_target *target = iue->target;
|
||||
int err = 0;
|
||||
|
||||
dprintk("%p %p %x\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0]);
|
||||
dprintk("%p %p %x %u\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0],
|
||||
cmd->usg_sg);
|
||||
|
||||
if (sc->use_sg)
|
||||
err = srp_transfer_data(sc, &vio_iu(iue)->srp.cmd, ibmvstgt_rdma, 1, 1);
|
||||
|
||||
spin_lock_irqsave(&target->lock, flags);
|
||||
list_del(&iue->ilist);
|
||||
spin_unlock_irqrestore(&target->lock, flags);
|
||||
|
||||
if (sc->result != SAM_STAT_GOOD) {
|
||||
if (err|| sc->result != SAM_STAT_GOOD) {
|
||||
eprintk("operation failed %p %d %x\n",
|
||||
iue, sc->result, vio_iu(iue)->srp.cmd.cdb[0]);
|
||||
send_rsp(iue, sc, HARDWARE_ERROR, 0x00);
|
||||
@ -503,7 +493,8 @@ static void process_iu(struct viosrp_crq *crq, struct srp_target *target)
|
||||
{
|
||||
struct vio_port *vport = target_to_port(target);
|
||||
struct iu_entry *iue;
|
||||
long err, done;
|
||||
long err;
|
||||
int done = 1;
|
||||
|
||||
iue = srp_iu_get(target);
|
||||
if (!iue) {
|
||||
@ -518,7 +509,6 @@ static void process_iu(struct viosrp_crq *crq, struct srp_target *target)
|
||||
|
||||
if (err != H_SUCCESS) {
|
||||
eprintk("%ld transferring data error %p\n", err, iue);
|
||||
done = 1;
|
||||
goto out;
|
||||
}
|
||||
|
||||
@ -794,7 +784,6 @@ static struct scsi_host_template ibmvstgt_sht = {
|
||||
.use_clustering = DISABLE_CLUSTERING,
|
||||
.max_sectors = DEFAULT_MAX_SECTORS,
|
||||
.transfer_response = ibmvstgt_cmd_done,
|
||||
.transfer_data = ibmvstgt_transfer_data,
|
||||
.eh_abort_handler = ibmvstgt_eh_abort_handler,
|
||||
.tsk_mgmt_response = ibmvstgt_tsk_mgmt_response,
|
||||
.shost_attrs = ibmvstgt_attrs,
|
||||
|
@ -89,10 +89,9 @@ static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
|
||||
static unsigned int ipr_max_speed = 1;
|
||||
static int ipr_testmode = 0;
|
||||
static unsigned int ipr_fastfail = 0;
|
||||
static unsigned int ipr_transop_timeout = IPR_OPERATIONAL_TIMEOUT;
|
||||
static unsigned int ipr_transop_timeout = 0;
|
||||
static unsigned int ipr_enable_cache = 1;
|
||||
static unsigned int ipr_debug = 0;
|
||||
static int ipr_auto_create = 1;
|
||||
static DEFINE_SPINLOCK(ipr_driver_lock);
|
||||
|
||||
/* This table describes the differences between DMA controller chips */
|
||||
@ -159,15 +158,13 @@ module_param_named(enable_cache, ipr_enable_cache, int, 0);
|
||||
MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
|
||||
module_param_named(debug, ipr_debug, int, 0);
|
||||
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
|
||||
module_param_named(auto_create, ipr_auto_create, int, 0);
|
||||
MODULE_PARM_DESC(auto_create, "Auto-create single device RAID 0 arrays when initialized (default: 1)");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_VERSION(IPR_DRIVER_VERSION);
|
||||
|
||||
/* A constant array of IOASCs/URCs/Error Messages */
|
||||
static const
|
||||
struct ipr_error_table_t ipr_error_table[] = {
|
||||
{0x00000000, 1, 1,
|
||||
{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
|
||||
"8155: An unknown error was received"},
|
||||
{0x00330000, 0, 0,
|
||||
"Soft underlength error"},
|
||||
@ -175,37 +172,37 @@ struct ipr_error_table_t ipr_error_table[] = {
|
||||
"Command to be cancelled not found"},
|
||||
{0x00808000, 0, 0,
|
||||
"Qualified success"},
|
||||
{0x01080000, 1, 1,
|
||||
{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
|
||||
"FFFE: Soft device bus error recovered by the IOA"},
|
||||
{0x01088100, 0, 1,
|
||||
{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"4101: Soft device bus fabric error"},
|
||||
{0x01170600, 0, 1,
|
||||
{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"FFF9: Device sector reassign successful"},
|
||||
{0x01170900, 0, 1,
|
||||
{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"FFF7: Media error recovered by device rewrite procedures"},
|
||||
{0x01180200, 0, 1,
|
||||
{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"7001: IOA sector reassignment successful"},
|
||||
{0x01180500, 0, 1,
|
||||
{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"FFF9: Soft media error. Sector reassignment recommended"},
|
||||
{0x01180600, 0, 1,
|
||||
{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"FFF7: Media error recovered by IOA rewrite procedures"},
|
||||
{0x01418000, 0, 1,
|
||||
{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"FF3D: Soft PCI bus error recovered by the IOA"},
|
||||
{0x01440000, 1, 1,
|
||||
{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
|
||||
"FFF6: Device hardware error recovered by the IOA"},
|
||||
{0x01448100, 0, 1,
|
||||
{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"FFF6: Device hardware error recovered by the device"},
|
||||
{0x01448200, 1, 1,
|
||||
{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
|
||||
"FF3D: Soft IOA error recovered by the IOA"},
|
||||
{0x01448300, 0, 1,
|
||||
{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"FFFA: Undefined device response recovered by the IOA"},
|
||||
{0x014A0000, 1, 1,
|
||||
{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
|
||||
"FFF6: Device bus error, message or command phase"},
|
||||
{0x014A8000, 0, 1,
|
||||
{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"FFFE: Task Management Function failed"},
|
||||
{0x015D0000, 0, 1,
|
||||
{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"FFF6: Failure prediction threshold exceeded"},
|
||||
{0x015D9200, 0, 1,
|
||||
{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"8009: Impending cache battery pack failure"},
|
||||
{0x02040400, 0, 0,
|
||||
"34FF: Disk device format in progress"},
|
||||
@ -215,85 +212,85 @@ struct ipr_error_table_t ipr_error_table[] = {
|
||||
"No ready, IOA shutdown"},
|
||||
{0x025A0000, 0, 0,
|
||||
"Not ready, IOA has been shutdown"},
|
||||
{0x02670100, 0, 1,
|
||||
{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"3020: Storage subsystem configuration error"},
|
||||
{0x03110B00, 0, 0,
|
||||
"FFF5: Medium error, data unreadable, recommend reassign"},
|
||||
{0x03110C00, 0, 0,
|
||||
"7000: Medium error, data unreadable, do not reassign"},
|
||||
{0x03310000, 0, 1,
|
||||
{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"FFF3: Disk media format bad"},
|
||||
{0x04050000, 0, 1,
|
||||
{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"3002: Addressed device failed to respond to selection"},
|
||||
{0x04080000, 1, 1,
|
||||
{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
|
||||
"3100: Device bus error"},
|
||||
{0x04080100, 0, 1,
|
||||
{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"3109: IOA timed out a device command"},
|
||||
{0x04088000, 0, 0,
|
||||
"3120: SCSI bus is not operational"},
|
||||
{0x04088100, 0, 1,
|
||||
{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"4100: Hard device bus fabric error"},
|
||||
{0x04118000, 0, 1,
|
||||
{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"9000: IOA reserved area data check"},
|
||||
{0x04118100, 0, 1,
|
||||
{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"9001: IOA reserved area invalid data pattern"},
|
||||
{0x04118200, 0, 1,
|
||||
{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"9002: IOA reserved area LRC error"},
|
||||
{0x04320000, 0, 1,
|
||||
{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"102E: Out of alternate sectors for disk storage"},
|
||||
{0x04330000, 1, 1,
|
||||
{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
|
||||
"FFF4: Data transfer underlength error"},
|
||||
{0x04338000, 1, 1,
|
||||
{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
|
||||
"FFF4: Data transfer overlength error"},
|
||||
{0x043E0100, 0, 1,
|
||||
{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"3400: Logical unit failure"},
|
||||
{0x04408500, 0, 1,
|
||||
{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"FFF4: Device microcode is corrupt"},
|
||||
{0x04418000, 1, 1,
|
||||
{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
|
||||
"8150: PCI bus error"},
|
||||
{0x04430000, 1, 0,
|
||||
"Unsupported device bus message received"},
|
||||
{0x04440000, 1, 1,
|
||||
{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
|
||||
"FFF4: Disk device problem"},
|
||||
{0x04448200, 1, 1,
|
||||
{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
|
||||
"8150: Permanent IOA failure"},
|
||||
{0x04448300, 0, 1,
|
||||
{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"3010: Disk device returned wrong response to IOA"},
|
||||
{0x04448400, 0, 1,
|
||||
{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"8151: IOA microcode error"},
|
||||
{0x04448500, 0, 0,
|
||||
"Device bus status error"},
|
||||
{0x04448600, 0, 1,
|
||||
{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"8157: IOA error requiring IOA reset to recover"},
|
||||
{0x04448700, 0, 0,
|
||||
"ATA device status error"},
|
||||
{0x04490000, 0, 0,
|
||||
"Message reject received from the device"},
|
||||
{0x04449200, 0, 1,
|
||||
{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"8008: A permanent cache battery pack failure occurred"},
|
||||
{0x0444A000, 0, 1,
|
||||
{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"9090: Disk unit has been modified after the last known status"},
|
||||
{0x0444A200, 0, 1,
|
||||
{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"9081: IOA detected device error"},
|
||||
{0x0444A300, 0, 1,
|
||||
{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"9082: IOA detected device error"},
|
||||
{0x044A0000, 1, 1,
|
||||
{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
|
||||
"3110: Device bus error, message or command phase"},
|
||||
{0x044A8000, 1, 1,
|
||||
{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
|
||||
"3110: SAS Command / Task Management Function failed"},
|
||||
{0x04670400, 0, 1,
|
||||
{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"9091: Incorrect hardware configuration change has been detected"},
|
||||
{0x04678000, 0, 1,
|
||||
{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"9073: Invalid multi-adapter configuration"},
|
||||
{0x04678100, 0, 1,
|
||||
{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"4010: Incorrect connection between cascaded expanders"},
|
||||
{0x04678200, 0, 1,
|
||||
{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"4020: Connections exceed IOA design limits"},
|
||||
{0x04678300, 0, 1,
|
||||
{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"4030: Incorrect multipath connection"},
|
||||
{0x04679000, 0, 1,
|
||||
{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"4110: Unsupported enclosure function"},
|
||||
{0x046E0000, 0, 1,
|
||||
{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"FFF4: Command to logical unit failed"},
|
||||
{0x05240000, 1, 0,
|
||||
"Illegal request, invalid request type or request packet"},
|
||||
@ -313,101 +310,103 @@ struct ipr_error_table_t ipr_error_table[] = {
|
||||
"Illegal request, command sequence error"},
|
||||
{0x052C8000, 1, 0,
|
||||
"Illegal request, dual adapter support not enabled"},
|
||||
{0x06040500, 0, 1,
|
||||
{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"9031: Array protection temporarily suspended, protection resuming"},
|
||||
{0x06040600, 0, 1,
|
||||
{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"9040: Array protection temporarily suspended, protection resuming"},
|
||||
{0x06288000, 0, 1,
|
||||
{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"3140: Device bus not ready to ready transition"},
|
||||
{0x06290000, 0, 1,
|
||||
{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"FFFB: SCSI bus was reset"},
|
||||
{0x06290500, 0, 0,
|
||||
"FFFE: SCSI bus transition to single ended"},
|
||||
{0x06290600, 0, 0,
|
||||
"FFFE: SCSI bus transition to LVD"},
|
||||
{0x06298000, 0, 1,
|
||||
{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"FFFB: SCSI bus was reset by another initiator"},
|
||||
{0x063F0300, 0, 1,
|
||||
{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"3029: A device replacement has occurred"},
|
||||
{0x064C8000, 0, 1,
|
||||
{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"9051: IOA cache data exists for a missing or failed device"},
|
||||
{0x064C8100, 0, 1,
|
||||
{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
|
||||
{0x06670100, 0, 1,
|
||||
{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"9025: Disk unit is not supported at its physical location"},
|
||||
{0x06670600, 0, 1,
|
||||
{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"3020: IOA detected a SCSI bus configuration error"},
|
||||
{0x06678000, 0, 1,
|
||||
{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"3150: SCSI bus configuration error"},
|
||||
{0x06678100, 0, 1,
|
||||
{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"9074: Asymmetric advanced function disk configuration"},
|
||||
{0x06678300, 0, 1,
|
||||
{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"4040: Incomplete multipath connection between IOA and enclosure"},
|
||||
{0x06678400, 0, 1,
|
||||
{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"4041: Incomplete multipath connection between enclosure and device"},
|
||||
{0x06678500, 0, 1,
|
||||
{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"9075: Incomplete multipath connection between IOA and remote IOA"},
|
||||
{0x06678600, 0, 1,
|
||||
{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"9076: Configuration error, missing remote IOA"},
|
||||
{0x06679100, 0, 1,
|
||||
{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"4050: Enclosure does not support a required multipath function"},
|
||||
{0x06690200, 0, 1,
|
||||
{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"9041: Array protection temporarily suspended"},
|
||||
{0x06698200, 0, 1,
|
||||
{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"9042: Corrupt array parity detected on specified device"},
|
||||
{0x066B0200, 0, 1,
|
||||
{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"9030: Array no longer protected due to missing or failed disk unit"},
|
||||
{0x066B8000, 0, 1,
|
||||
{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"9071: Link operational transition"},
|
||||
{0x066B8100, 0, 1,
|
||||
{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"9072: Link not operational transition"},
|
||||
{0x066B8200, 0, 1,
|
||||
{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"9032: Array exposed but still protected"},
|
||||
{0x066B9100, 0, 1,
|
||||
{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
|
||||
"70DD: Device forced failed by disrupt device command"},
|
||||
{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"4061: Multipath redundancy level got better"},
|
||||
{0x066B9200, 0, 1,
|
||||
{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"4060: Multipath redundancy level got worse"},
|
||||
{0x07270000, 0, 0,
|
||||
"Failure due to other device"},
|
||||
{0x07278000, 0, 1,
|
||||
{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"9008: IOA does not support functions expected by devices"},
|
||||
{0x07278100, 0, 1,
|
||||
{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"9010: Cache data associated with attached devices cannot be found"},
|
||||
{0x07278200, 0, 1,
|
||||
{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"9011: Cache data belongs to devices other than those attached"},
|
||||
{0x07278400, 0, 1,
|
||||
{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"9020: Array missing 2 or more devices with only 1 device present"},
|
||||
{0x07278500, 0, 1,
|
||||
{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"9021: Array missing 2 or more devices with 2 or more devices present"},
|
||||
{0x07278600, 0, 1,
|
||||
{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"9022: Exposed array is missing a required device"},
|
||||
{0x07278700, 0, 1,
|
||||
{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"9023: Array member(s) not at required physical locations"},
|
||||
{0x07278800, 0, 1,
|
||||
{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"9024: Array not functional due to present hardware configuration"},
|
||||
{0x07278900, 0, 1,
|
||||
{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"9026: Array not functional due to present hardware configuration"},
|
||||
{0x07278A00, 0, 1,
|
||||
{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"9027: Array is missing a device and parity is out of sync"},
|
||||
{0x07278B00, 0, 1,
|
||||
{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"9028: Maximum number of arrays already exist"},
|
||||
{0x07278C00, 0, 1,
|
||||
{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"9050: Required cache data cannot be located for a disk unit"},
|
||||
{0x07278D00, 0, 1,
|
||||
{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"9052: Cache data exists for a device that has been modified"},
|
||||
{0x07278F00, 0, 1,
|
||||
{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"9054: IOA resources not available due to previous problems"},
|
||||
{0x07279100, 0, 1,
|
||||
{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"9092: Disk unit requires initialization before use"},
|
||||
{0x07279200, 0, 1,
|
||||
{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"9029: Incorrect hardware configuration change has been detected"},
|
||||
{0x07279600, 0, 1,
|
||||
{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"9060: One or more disk pairs are missing from an array"},
|
||||
{0x07279700, 0, 1,
|
||||
{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"9061: One or more disks are missing from an array"},
|
||||
{0x07279800, 0, 1,
|
||||
{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"9062: One or more disks are missing from an array"},
|
||||
{0x07279900, 0, 1,
|
||||
{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
|
||||
"9063: Maximum number of functional arrays has been exceeded"},
|
||||
{0x0B260000, 0, 0,
|
||||
"Aborted command, invalid descriptor"},
|
||||
@ -481,12 +480,16 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
|
||||
{
|
||||
struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
|
||||
struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
|
||||
dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr);
|
||||
|
||||
memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
|
||||
ioarcb->write_data_transfer_length = 0;
|
||||
ioarcb->read_data_transfer_length = 0;
|
||||
ioarcb->write_ioadl_len = 0;
|
||||
ioarcb->read_ioadl_len = 0;
|
||||
ioarcb->write_ioadl_addr =
|
||||
cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
|
||||
ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
|
||||
ioasa->ioasc = 0;
|
||||
ioasa->residual_data_len = 0;
|
||||
ioasa->u.gata.status = 0;
|
||||
@ -1610,7 +1613,7 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
|
||||
/* Set indication we have logged an error */
|
||||
ioa_cfg->errors_logged++;
|
||||
|
||||
if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
|
||||
if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
|
||||
return;
|
||||
if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
|
||||
hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
|
||||
@ -3850,6 +3853,8 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
|
||||
if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
|
||||
if (ipr_cmd->scsi_cmd)
|
||||
ipr_cmd->done = ipr_scsi_eh_done;
|
||||
if (ipr_cmd->qc)
|
||||
ipr_cmd->done = ipr_sata_eh_done;
|
||||
if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
|
||||
ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
|
||||
ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
|
||||
@ -4230,6 +4235,14 @@ static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
|
||||
|
||||
sglist = scsi_cmd->request_buffer;
|
||||
|
||||
if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) {
|
||||
ioadl = ioarcb->add_data.u.ioadl;
|
||||
ioarcb->write_ioadl_addr =
|
||||
cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
|
||||
offsetof(struct ipr_ioarcb, add_data));
|
||||
ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
|
||||
}
|
||||
|
||||
for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
|
||||
ioadl[i].flags_and_data_len =
|
||||
cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
|
||||
@ -4260,6 +4273,11 @@ static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
|
||||
scsi_cmd->sc_data_direction);
|
||||
|
||||
if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
|
||||
ioadl = ioarcb->add_data.u.ioadl;
|
||||
ioarcb->write_ioadl_addr =
|
||||
cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
|
||||
offsetof(struct ipr_ioarcb, add_data));
|
||||
ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
|
||||
ipr_cmd->dma_use_sg = 1;
|
||||
ioadl[0].flags_and_data_len =
|
||||
cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
|
||||
@ -4346,11 +4364,9 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
|
||||
**/
|
||||
static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
|
||||
{
|
||||
struct ipr_ioarcb *ioarcb;
|
||||
struct ipr_ioasa *ioasa;
|
||||
|
||||
ioarcb = &ipr_cmd->ioarcb;
|
||||
ioasa = &ipr_cmd->ioasa;
|
||||
struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
|
||||
struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
|
||||
dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr);
|
||||
|
||||
memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
|
||||
ioarcb->write_data_transfer_length = 0;
|
||||
@ -4359,6 +4375,9 @@ static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
|
||||
ioarcb->read_ioadl_len = 0;
|
||||
ioasa->ioasc = 0;
|
||||
ioasa->residual_data_len = 0;
|
||||
ioarcb->write_ioadl_addr =
|
||||
cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
|
||||
ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -4457,12 +4476,13 @@ static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
|
||||
{
|
||||
int i;
|
||||
u16 data_len;
|
||||
u32 ioasc;
|
||||
u32 ioasc, fd_ioasc;
|
||||
struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
|
||||
__be32 *ioasa_data = (__be32 *)ioasa;
|
||||
int error_index;
|
||||
|
||||
ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
|
||||
fd_ioasc = be32_to_cpu(ioasa->fd_ioasc) & IPR_IOASC_IOASC_MASK;
|
||||
|
||||
if (0 == ioasc)
|
||||
return;
|
||||
@ -4470,13 +4490,19 @@ static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
|
||||
if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
|
||||
return;
|
||||
|
||||
error_index = ipr_get_error(ioasc);
|
||||
if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
|
||||
error_index = ipr_get_error(fd_ioasc);
|
||||
else
|
||||
error_index = ipr_get_error(ioasc);
|
||||
|
||||
if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
|
||||
/* Don't log an error if the IOA already logged one */
|
||||
if (ioasa->ilid != 0)
|
||||
return;
|
||||
|
||||
if (!ipr_is_gscsi(res))
|
||||
return;
|
||||
|
||||
if (ipr_error_table[error_index].log_ioasa == 0)
|
||||
return;
|
||||
}
|
||||
@ -4636,11 +4662,11 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
|
||||
return;
|
||||
}
|
||||
|
||||
if (ipr_is_gscsi(res))
|
||||
ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
|
||||
else
|
||||
if (!ipr_is_gscsi(res))
|
||||
ipr_gen_sense(ipr_cmd);
|
||||
|
||||
ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
|
||||
|
||||
switch (ioasc & IPR_IOASC_IOASC_MASK) {
|
||||
case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
|
||||
if (ipr_is_naca_model(res))
|
||||
@ -5121,7 +5147,7 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
|
||||
struct ipr_ioarcb_ata_regs *regs;
|
||||
|
||||
if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
|
||||
return -EIO;
|
||||
return AC_ERR_SYSTEM;
|
||||
|
||||
ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
|
||||
ioarcb = &ipr_cmd->ioarcb;
|
||||
@ -5166,7 +5192,7 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
|
||||
|
||||
default:
|
||||
WARN_ON(1);
|
||||
return -1;
|
||||
return AC_ERR_INVALID;
|
||||
}
|
||||
|
||||
mb();
|
||||
@ -6188,7 +6214,7 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
|
||||
dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
|
||||
|
||||
ipr_cmd->timer.data = (unsigned long) ipr_cmd;
|
||||
ipr_cmd->timer.expires = jiffies + (ipr_transop_timeout * HZ);
|
||||
ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
|
||||
ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
|
||||
ipr_cmd->done = ipr_reset_ioa_job;
|
||||
add_timer(&ipr_cmd->timer);
|
||||
@ -6385,6 +6411,7 @@ static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
|
||||
rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
|
||||
|
||||
if (rc != PCIBIOS_SUCCESSFUL) {
|
||||
pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
|
||||
ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
|
||||
rc = IPR_RC_JOB_CONTINUE;
|
||||
} else {
|
||||
@ -7117,8 +7144,6 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
|
||||
ioa_cfg->pdev = pdev;
|
||||
ioa_cfg->log_level = ipr_log_level;
|
||||
ioa_cfg->doorbell = IPR_DOORBELL;
|
||||
if (!ipr_auto_create)
|
||||
ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
|
||||
sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
|
||||
sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
|
||||
sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
|
||||
@ -7233,6 +7258,13 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
|
||||
goto out_scsi_host_put;
|
||||
}
|
||||
|
||||
if (ipr_transop_timeout)
|
||||
ioa_cfg->transop_timeout = ipr_transop_timeout;
|
||||
else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
|
||||
ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
|
||||
else
|
||||
ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
|
||||
|
||||
ipr_regs_pci = pci_resource_start(pdev, 0);
|
||||
|
||||
rc = pci_request_regions(pdev, IPR_NAME);
|
||||
@ -7540,29 +7572,45 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
|
||||
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
|
||||
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
|
||||
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
|
||||
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0, 0 },
|
||||
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
|
||||
IPR_USE_LONG_TRANSOP_TIMEOUT },
|
||||
{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
|
||||
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
|
||||
{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
|
||||
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, 0 },
|
||||
{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
|
||||
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0, 0 },
|
||||
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
|
||||
IPR_USE_LONG_TRANSOP_TIMEOUT },
|
||||
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
|
||||
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
|
||||
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
|
||||
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, 0 },
|
||||
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
|
||||
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0, 0 },
|
||||
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
|
||||
IPR_USE_LONG_TRANSOP_TIMEOUT },
|
||||
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
|
||||
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0, 0 },
|
||||
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0, 0 },
|
||||
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
|
||||
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0,
|
||||
IPR_USE_LONG_TRANSOP_TIMEOUT },
|
||||
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
|
||||
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
|
||||
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
|
||||
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
|
||||
IPR_USE_LONG_TRANSOP_TIMEOUT },
|
||||
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
|
||||
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
|
||||
{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
|
||||
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
|
||||
{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
|
||||
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0, 0 },
|
||||
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
|
||||
IPR_USE_LONG_TRANSOP_TIMEOUT },
|
||||
{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
|
||||
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0, 0 },
|
||||
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
|
||||
IPR_USE_LONG_TRANSOP_TIMEOUT },
|
||||
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SCAMP_E,
|
||||
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0,
|
||||
IPR_USE_LONG_TRANSOP_TIMEOUT },
|
||||
{ }
|
||||
};
|
||||
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
|
||||
|
@ -37,8 +37,8 @@
|
||||
/*
|
||||
* Literals
|
||||
*/
|
||||
#define IPR_DRIVER_VERSION "2.3.1"
|
||||
#define IPR_DRIVER_DATE "(January 23, 2007)"
|
||||
#define IPR_DRIVER_VERSION "2.3.2"
|
||||
#define IPR_DRIVER_DATE "(March 23, 2007)"
|
||||
|
||||
/*
|
||||
* IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
|
||||
@ -55,6 +55,7 @@
|
||||
#define IPR_NUM_BASE_CMD_BLKS 100
|
||||
|
||||
#define PCI_DEVICE_ID_IBM_OBSIDIAN_E 0x0339
|
||||
#define PCI_DEVICE_ID_IBM_SCAMP_E 0x034A
|
||||
|
||||
#define IPR_SUBS_DEV_ID_2780 0x0264
|
||||
#define IPR_SUBS_DEV_ID_5702 0x0266
|
||||
@ -69,8 +70,12 @@
|
||||
#define IPR_SUBS_DEV_ID_572A 0x02C1
|
||||
#define IPR_SUBS_DEV_ID_572B 0x02C2
|
||||
#define IPR_SUBS_DEV_ID_572F 0x02C3
|
||||
#define IPR_SUBS_DEV_ID_574D 0x030B
|
||||
#define IPR_SUBS_DEV_ID_574E 0x030A
|
||||
#define IPR_SUBS_DEV_ID_575B 0x030D
|
||||
#define IPR_SUBS_DEV_ID_575C 0x0338
|
||||
#define IPR_SUBS_DEV_ID_575D 0x033E
|
||||
#define IPR_SUBS_DEV_ID_57B3 0x033A
|
||||
#define IPR_SUBS_DEV_ID_57B7 0x0360
|
||||
#define IPR_SUBS_DEV_ID_57B8 0x02C2
|
||||
|
||||
@ -104,6 +109,9 @@
|
||||
#define IPR_IOASC_IOA_WAS_RESET 0x10000001
|
||||
#define IPR_IOASC_PCI_ACCESS_ERROR 0x10000002
|
||||
|
||||
/* Driver data flags */
|
||||
#define IPR_USE_LONG_TRANSOP_TIMEOUT 0x00000001
|
||||
|
||||
#define IPR_DEFAULT_MAX_ERROR_DUMP 984
|
||||
#define IPR_NUM_LOG_HCAMS 2
|
||||
#define IPR_NUM_CFG_CHG_HCAMS 2
|
||||
@ -179,6 +187,7 @@
|
||||
#define IPR_SET_SUP_DEVICE_TIMEOUT (2 * 60 * HZ)
|
||||
#define IPR_REQUEST_SENSE_TIMEOUT (10 * HZ)
|
||||
#define IPR_OPERATIONAL_TIMEOUT (5 * 60)
|
||||
#define IPR_LONG_OPERATIONAL_TIMEOUT (12 * 60)
|
||||
#define IPR_WAIT_FOR_RESET_TIMEOUT (2 * HZ)
|
||||
#define IPR_CHECK_FOR_RESET_TIMEOUT (HZ / 10)
|
||||
#define IPR_WAIT_FOR_BIST_TIMEOUT (2 * HZ)
|
||||
@ -413,9 +422,25 @@ struct ipr_ioarcb_ata_regs {
|
||||
u8 ctl;
|
||||
}__attribute__ ((packed, aligned(4)));
|
||||
|
||||
struct ipr_ioadl_desc {
|
||||
__be32 flags_and_data_len;
|
||||
#define IPR_IOADL_FLAGS_MASK 0xff000000
|
||||
#define IPR_IOADL_GET_FLAGS(x) (be32_to_cpu(x) & IPR_IOADL_FLAGS_MASK)
|
||||
#define IPR_IOADL_DATA_LEN_MASK 0x00ffffff
|
||||
#define IPR_IOADL_GET_DATA_LEN(x) (be32_to_cpu(x) & IPR_IOADL_DATA_LEN_MASK)
|
||||
#define IPR_IOADL_FLAGS_READ 0x48000000
|
||||
#define IPR_IOADL_FLAGS_READ_LAST 0x49000000
|
||||
#define IPR_IOADL_FLAGS_WRITE 0x68000000
|
||||
#define IPR_IOADL_FLAGS_WRITE_LAST 0x69000000
|
||||
#define IPR_IOADL_FLAGS_LAST 0x01000000
|
||||
|
||||
__be32 address;
|
||||
}__attribute__((packed, aligned (8)));
|
||||
|
||||
struct ipr_ioarcb_add_data {
|
||||
union {
|
||||
struct ipr_ioarcb_ata_regs regs;
|
||||
struct ipr_ioadl_desc ioadl[5];
|
||||
__be32 add_cmd_parms[10];
|
||||
}u;
|
||||
}__attribute__ ((packed, aligned(4)));
|
||||
@ -447,21 +472,6 @@ struct ipr_ioarcb {
|
||||
struct ipr_ioarcb_add_data add_data;
|
||||
}__attribute__((packed, aligned (4)));
|
||||
|
||||
struct ipr_ioadl_desc {
|
||||
__be32 flags_and_data_len;
|
||||
#define IPR_IOADL_FLAGS_MASK 0xff000000
|
||||
#define IPR_IOADL_GET_FLAGS(x) (be32_to_cpu(x) & IPR_IOADL_FLAGS_MASK)
|
||||
#define IPR_IOADL_DATA_LEN_MASK 0x00ffffff
|
||||
#define IPR_IOADL_GET_DATA_LEN(x) (be32_to_cpu(x) & IPR_IOADL_DATA_LEN_MASK)
|
||||
#define IPR_IOADL_FLAGS_READ 0x48000000
|
||||
#define IPR_IOADL_FLAGS_READ_LAST 0x49000000
|
||||
#define IPR_IOADL_FLAGS_WRITE 0x68000000
|
||||
#define IPR_IOADL_FLAGS_WRITE_LAST 0x69000000
|
||||
#define IPR_IOADL_FLAGS_LAST 0x01000000
|
||||
|
||||
__be32 address;
|
||||
}__attribute__((packed, aligned (8)));
|
||||
|
||||
struct ipr_ioasa_vset {
|
||||
__be32 failing_lba_hi;
|
||||
__be32 failing_lba_lo;
|
||||
@ -1119,6 +1129,7 @@ struct ipr_ioa_cfg {
|
||||
|
||||
struct ipr_bus_attributes bus_attr[IPR_MAX_NUM_BUSES];
|
||||
|
||||
unsigned int transop_timeout;
|
||||
const struct ipr_chip_cfg_t *chip_cfg;
|
||||
|
||||
void __iomem *hdw_dma_regs; /* iomapped PCI memory space */
|
||||
|
@ -527,12 +527,12 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
* than 8K, but there are no targets that currently do this.
* For now we fail until we find a vendor that needs it
*/
if (DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH <
if (ISCSI_DEF_MAX_RECV_SEG_LEN <
tcp_conn->in.datalen) {
printk(KERN_ERR "iscsi_tcp: received buffer of len %u "
"but conn buffer is only %u (opcode %0x)\n",
tcp_conn->in.datalen,
DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH, opcode);
ISCSI_DEF_MAX_RECV_SEG_LEN, opcode);
rc = ISCSI_ERR_PROTO;
break;
}
@ -1762,7 +1762,7 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
* due to strange issues with iser these are not set
* in iscsi_conn_setup
*/
conn->max_recv_dlength = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH;
conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN;

tcp_conn = kzalloc(sizeof(*tcp_conn), GFP_KERNEL);
if (!tcp_conn)
@ -1777,14 +1777,24 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
CRYPTO_ALG_ASYNC);
tcp_conn->tx_hash.flags = 0;
if (IS_ERR(tcp_conn->tx_hash.tfm))
if (IS_ERR(tcp_conn->tx_hash.tfm)) {
printk(KERN_ERR "Could not create connection due to crc32c "
"loading error %ld. Make sure the crc32c module is "
"built as a module or into the kernel\n",
PTR_ERR(tcp_conn->tx_hash.tfm));
goto free_tcp_conn;
}

tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
CRYPTO_ALG_ASYNC);
tcp_conn->rx_hash.flags = 0;
if (IS_ERR(tcp_conn->rx_hash.tfm))
if (IS_ERR(tcp_conn->rx_hash.tfm)) {
printk(KERN_ERR "Could not create connection due to crc32c "
"loading error %ld. Make sure the crc32c module is "
"built as a module or into the kernel\n",
PTR_ERR(tcp_conn->rx_hash.tfm));
goto free_tx_tfm;
}

return cls_conn;

@ -2138,6 +2148,7 @@ static struct scsi_host_template iscsi_sht = {
.change_queue_depth = iscsi_change_queue_depth,
.can_queue = ISCSI_XMIT_CMDS_MAX - 1,
.sg_tablesize = ISCSI_SG_TABLESIZE,
.max_sectors = 0xFFFF,
.cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
.eh_abort_handler = iscsi_eh_abort,
.eh_host_reset_handler = iscsi_eh_host_reset,
|
@ -25,6 +25,7 @@
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/kfifo.h>
|
||||
#include <linux/delay.h>
|
||||
#include <asm/unaligned.h>
|
||||
#include <net/tcp.h>
|
||||
#include <scsi/scsi_cmnd.h>
|
||||
#include <scsi/scsi_device.h>
|
||||
@ -269,14 +270,14 @@ invalid_datalen:
|
||||
goto out;
|
||||
}
|
||||
|
||||
senselen = be16_to_cpu(*(__be16 *)data);
|
||||
senselen = be16_to_cpu(get_unaligned((__be16 *) data));
|
||||
if (datalen < senselen)
|
||||
goto invalid_datalen;
|
||||
|
||||
memcpy(sc->sense_buffer, data + 2,
|
||||
min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE));
|
||||
debug_scsi("copied %d bytes of sense\n",
|
||||
min(senselen, SCSI_SENSE_BUFFERSIZE));
|
||||
min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE));
|
||||
}
|
||||
|
||||
if (sc->sc_data_direction == DMA_TO_DEVICE)
|
||||
@ -577,7 +578,7 @@ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(iscsi_conn_failure);
|
||||
|
||||
static int iscsi_xmit_imm_task(struct iscsi_conn *conn)
|
||||
static int iscsi_xmit_mtask(struct iscsi_conn *conn)
|
||||
{
|
||||
struct iscsi_hdr *hdr = conn->mtask->hdr;
|
||||
int rc, was_logout = 0;
|
||||
@ -591,6 +592,9 @@ static int iscsi_xmit_imm_task(struct iscsi_conn *conn)
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
/* done with this in-progress mtask */
|
||||
conn->mtask = NULL;
|
||||
|
||||
if (was_logout) {
|
||||
set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
|
||||
return -ENODATA;
|
||||
@ -643,11 +647,9 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
|
||||
conn->ctask = NULL;
|
||||
}
|
||||
if (conn->mtask) {
|
||||
rc = iscsi_xmit_imm_task(conn);
|
||||
rc = iscsi_xmit_mtask(conn);
|
||||
if (rc)
|
||||
goto again;
|
||||
/* done with this in-progress mtask */
|
||||
conn->mtask = NULL;
|
||||
}
|
||||
|
||||
/* process immediate first */
|
||||
@ -658,12 +660,10 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
|
||||
list_add_tail(&conn->mtask->running,
|
||||
&conn->mgmt_run_list);
|
||||
spin_unlock_bh(&conn->session->lock);
|
||||
rc = iscsi_xmit_imm_task(conn);
|
||||
rc = iscsi_xmit_mtask(conn);
|
||||
if (rc)
|
||||
goto again;
|
||||
}
|
||||
/* done with this mtask */
|
||||
conn->mtask = NULL;
|
||||
}
|
||||
|
||||
/* process command queue */
|
||||
@ -701,12 +701,10 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
|
||||
list_add_tail(&conn->mtask->running,
|
||||
&conn->mgmt_run_list);
|
||||
spin_unlock_bh(&conn->session->lock);
|
||||
rc = tt->xmit_mgmt_task(conn, conn->mtask);
|
||||
if (rc)
|
||||
rc = iscsi_xmit_mtask(conn);
|
||||
if (rc)
|
||||
goto again;
|
||||
}
|
||||
/* done with this mtask */
|
||||
conn->mtask = NULL;
|
||||
}
|
||||
|
||||
return -ENODATA;
|
||||
@ -1523,7 +1521,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
|
||||
}
|
||||
spin_unlock_bh(&session->lock);
|
||||
|
||||
data = kmalloc(DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH, GFP_KERNEL);
|
||||
data = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
|
||||
if (!data)
|
||||
goto login_mtask_data_alloc_fail;
|
||||
conn->login_mtask->data = conn->data = data;
|
||||
@ -1597,6 +1595,9 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
|
||||
wake_up(&conn->ehwait);
|
||||
}
|
||||
|
||||
/* flush queued up work because we free the connection below */
|
||||
scsi_flush_work(session->host);
|
||||
|
||||
spin_lock_bh(&session->lock);
|
||||
kfree(conn->data);
|
||||
kfree(conn->persistent_address);
|
||||
|
@ -224,8 +224,7 @@ static int srp_indirect_data(struct scsi_cmnd *sc, struct srp_cmd *cmd,
struct srp_direct_buf *md = NULL;
struct scatterlist dummy, *sg = NULL;
dma_addr_t token = 0;
long err;
unsigned int done = 0;
int err = 0;
int nmd, nsg = 0, len;

if (dma_map || ext_desc) {
@ -257,8 +256,8 @@ static int srp_indirect_data(struct scsi_cmnd *sc, struct srp_cmd *cmd,
sg_dma_address(&dummy) = token;
err = rdma_io(sc, &dummy, 1, &id->table_desc, 1, DMA_TO_DEVICE,
id->table_desc.len);
if (err < 0) {
eprintk("Error copying indirect table %ld\n", err);
if (err) {
eprintk("Error copying indirect table %d\n", err);
goto free_mem;
}
} else {
@ -271,6 +270,7 @@ rdma:
nsg = dma_map_sg(iue->target->dev, sg, sc->use_sg, DMA_BIDIRECTIONAL);
if (!nsg) {
eprintk("fail to map %p %d\n", iue, sc->use_sg);
err = -EIO;
goto free_mem;
}
len = min(sc->request_bufflen, id->len);
@ -286,7 +286,7 @@ free_mem:
if (token && dma_map)
dma_free_coherent(iue->target->dev, id->table_desc.len, md, token);

return done;
return err;
}

static int data_out_desc_size(struct srp_cmd *cmd)
@ -351,7 +351,7 @@ int srp_transfer_data(struct scsi_cmnd *sc, struct srp_cmd *cmd,
break;
default:
eprintk("Unknown format %d %x\n", dir, format);
break;
err = -EINVAL;
}

return err;
|
@ -671,7 +671,7 @@ static int
lpfc_parse_vpd(struct lpfc_hba * phba, uint8_t * vpd, int len)
{
uint8_t lenlo, lenhi;
uint32_t Length;
int Length;
int i, j;
int finished = 0;
int index = 0;
|
@ -87,6 +87,7 @@ MODULE_AUTHOR("Willem Riede");
MODULE_DESCRIPTION("OnStream {DI-|FW-|SC-|USB}{30|50} Tape Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(OSST_MAJOR);
MODULE_ALIAS_SCSI_DEVICE(TYPE_TAPE);

module_param(max_dev, int, 0444);
MODULE_PARM_DESC(max_dev, "Maximum number of OnStream Tape Drives to attach (4)");
|
@ -1,197 +0,0 @@
|
||||
/****************************************************************************
|
||||
* Perceptive Solutions, Inc. PCI-2000 device driver for Linux.
|
||||
*
|
||||
* pci2000.h - Linux Host Driver for PCI-2000 IntelliCache SCSI Adapters
|
||||
*
|
||||
* Copyright (c) 1997-1999 Perceptive Solutions, Inc.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that redistributions of source
|
||||
* code retain the above copyright notice and this comment without
|
||||
* modification.
|
||||
*
|
||||
* Technical updates and product information at:
|
||||
* http://www.psidisk.com
|
||||
*
|
||||
* Please send questions, comments, bug reports to:
|
||||
* tech@psidisk.com Technical Support
|
||||
*
|
||||
****************************************************************************/
|
||||
#ifndef _PCI2000_H
|
||||
#define _PCI2000_H
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
#ifndef PSI_EIDE_SCSIOP
|
||||
#define PSI_EIDE_SCSIOP 1
|
||||
|
||||
#define LINUXVERSION(v,p,s) (((v)<<16) + ((p)<<8) + (s))
|
||||
|
||||
/************************************************/
|
||||
/* definition of standard data types */
|
||||
/************************************************/
|
||||
#define CHAR char
|
||||
#define UCHAR unsigned char
|
||||
#define SHORT short
|
||||
#define USHORT unsigned short
|
||||
#define BOOL long
|
||||
#define LONG long
|
||||
#define ULONG unsigned long
|
||||
#define VOID void
|
||||
|
||||
typedef CHAR *PCHAR;
|
||||
typedef UCHAR *PUCHAR;
|
||||
typedef SHORT *PSHORT;
|
||||
typedef USHORT *PUSHORT;
|
||||
typedef BOOL *PBOOL;
|
||||
typedef LONG *PLONG;
|
||||
typedef ULONG *PULONG;
|
||||
typedef VOID *PVOID;
|
||||
|
||||
|
||||
/************************************************/
|
||||
/* Misc. macros */
|
||||
/************************************************/
|
||||
#define ANY2SCSI(up, p) \
|
||||
((UCHAR *)up)[0] = (((ULONG)(p)) >> 8); \
|
||||
((UCHAR *)up)[1] = ((ULONG)(p));
|
||||
|
||||
#define SCSI2LONG(up) \
|
||||
( (((long)*(((UCHAR *)up))) << 16) \
|
||||
+ (((long)(((UCHAR *)up)[1])) << 8) \
|
||||
+ ((long)(((UCHAR *)up)[2])) )
|
||||
|
||||
#define XANY2SCSI(up, p) \
|
||||
((UCHAR *)up)[0] = ((long)(p)) >> 24; \
|
||||
((UCHAR *)up)[1] = ((long)(p)) >> 16; \
|
||||
((UCHAR *)up)[2] = ((long)(p)) >> 8; \
|
||||
((UCHAR *)up)[3] = ((long)(p));
|
||||
|
||||
#define XSCSI2LONG(up) \
|
||||
( (((long)(((UCHAR *)up)[0])) << 24) \
|
||||
+ (((long)(((UCHAR *)up)[1])) << 16) \
|
||||
+ (((long)(((UCHAR *)up)[2])) << 8) \
|
||||
+ ((long)(((UCHAR *)up)[3])) )
|
||||
|
||||
/************************************************/
|
||||
/* SCSI CDB operation codes */
|
||||
/************************************************/
|
||||
#define SCSIOP_TEST_UNIT_READY 0x00
|
||||
#define SCSIOP_REZERO_UNIT 0x01
|
||||
#define SCSIOP_REWIND 0x01
|
||||
#define SCSIOP_REQUEST_BLOCK_ADDR 0x02
|
||||
#define SCSIOP_REQUEST_SENSE 0x03
|
||||
#define SCSIOP_FORMAT_UNIT 0x04
|
||||
#define SCSIOP_READ_BLOCK_LIMITS 0x05
|
||||
#define SCSIOP_REASSIGN_BLOCKS 0x07
|
||||
#define SCSIOP_READ6 0x08
|
||||
#define SCSIOP_RECEIVE 0x08
|
||||
#define SCSIOP_WRITE6 0x0A
|
||||
#define SCSIOP_PRINT 0x0A
|
||||
#define SCSIOP_SEND 0x0A
|
||||
#define SCSIOP_SEEK6 0x0B
|
||||
#define SCSIOP_TRACK_SELECT 0x0B
|
||||
#define SCSIOP_SLEW_PRINT 0x0B
|
||||
#define SCSIOP_SEEK_BLOCK 0x0C
|
||||
#define SCSIOP_PARTITION 0x0D
|
||||
#define SCSIOP_READ_REVERSE 0x0F
|
||||
#define SCSIOP_WRITE_FILEMARKS 0x10
|
||||
#define SCSIOP_FLUSH_BUFFER 0x10
|
||||
#define SCSIOP_SPACE 0x11
|
||||
#define SCSIOP_INQUIRY 0x12
|
||||
#define SCSIOP_VERIFY6 0x13
|
||||
#define SCSIOP_RECOVER_BUF_DATA 0x14
|
||||
#define SCSIOP_MODE_SELECT 0x15
|
||||
#define SCSIOP_RESERVE_UNIT 0x16
|
||||
#define SCSIOP_RELEASE_UNIT 0x17
|
||||
#define SCSIOP_COPY 0x18
|
||||
#define SCSIOP_ERASE 0x19
|
||||
#define SCSIOP_MODE_SENSE 0x1A
|
||||
#define SCSIOP_START_STOP_UNIT 0x1B
|
||||
#define SCSIOP_STOP_PRINT 0x1B
|
||||
#define SCSIOP_LOAD_UNLOAD 0x1B
|
||||
#define SCSIOP_RECEIVE_DIAGNOSTIC 0x1C
|
||||
#define SCSIOP_SEND_DIAGNOSTIC 0x1D
|
||||
#define SCSIOP_MEDIUM_REMOVAL 0x1E
|
||||
#define SCSIOP_READ_CAPACITY 0x25
|
||||
#define SCSIOP_READ 0x28
|
||||
#define SCSIOP_WRITE 0x2A
|
||||
#define SCSIOP_SEEK 0x2B
|
||||
#define SCSIOP_LOCATE 0x2B
|
||||
#define SCSIOP_WRITE_VERIFY 0x2E
|
||||
#define SCSIOP_VERIFY 0x2F
|
||||
#define SCSIOP_SEARCH_DATA_HIGH 0x30
|
||||
#define SCSIOP_SEARCH_DATA_EQUAL 0x31
|
||||
#define SCSIOP_SEARCH_DATA_LOW 0x32
|
||||
#define SCSIOP_SET_LIMITS 0x33
|
||||
#define SCSIOP_READ_POSITION 0x34
|
||||
#define SCSIOP_SYNCHRONIZE_CACHE 0x35
|
||||
#define SCSIOP_COMPARE 0x39
|
||||
#define SCSIOP_COPY_COMPARE 0x3A
|
||||
#define SCSIOP_WRITE_DATA_BUFF 0x3B
|
||||
#define SCSIOP_READ_DATA_BUFF 0x3C
|
||||
#define SCSIOP_CHANGE_DEFINITION 0x40
|
||||
#define SCSIOP_READ_SUB_CHANNEL 0x42
|
||||
#define SCSIOP_READ_TOC 0x43
|
||||
#define SCSIOP_READ_HEADER 0x44
|
||||
#define SCSIOP_PLAY_AUDIO 0x45
|
||||
#define SCSIOP_PLAY_AUDIO_MSF 0x47
|
||||
#define SCSIOP_PLAY_TRACK_INDEX 0x48
|
||||
#define SCSIOP_PLAY_TRACK_RELATIVE 0x49
|
||||
#define SCSIOP_PAUSE_RESUME 0x4B
|
||||
#define SCSIOP_LOG_SELECT 0x4C
|
||||
#define SCSIOP_LOG_SENSE 0x4D
|
||||
#define SCSIOP_MODE_SELECT10 0x55
|
||||
#define SCSIOP_MODE_SENSE10 0x5A
|
||||
#define SCSIOP_LOAD_UNLOAD_SLOT 0xA6
|
||||
#define SCSIOP_MECHANISM_STATUS 0xBD
|
||||
#define SCSIOP_READ_CD 0xBE
|
||||
|
||||
// SCSI read capacity structure
|
||||
typedef struct _READ_CAPACITY_DATA
|
||||
{
|
||||
ULONG blks; /* total blocks (converted to little endian) */
|
||||
ULONG blksiz; /* size of each (converted to little endian) */
|
||||
} READ_CAPACITY_DATA, *PREAD_CAPACITY_DATA;
|
||||
|
||||
// SCSI inquiry data
|
||||
typedef struct _INQUIRYDATA
|
||||
{
|
||||
UCHAR DeviceType :5;
|
||||
UCHAR DeviceTypeQualifier :3;
|
||||
UCHAR DeviceTypeModifier :7;
|
||||
UCHAR RemovableMedia :1;
|
||||
UCHAR Versions;
|
||||
UCHAR ResponseDataFormat;
|
||||
UCHAR AdditionalLength;
|
||||
UCHAR Reserved[2];
|
||||
UCHAR SoftReset :1;
|
||||
UCHAR CommandQueue :1;
|
||||
UCHAR Reserved2 :1;
|
||||
UCHAR LinkedCommands :1;
|
||||
UCHAR Synchronous :1;
|
||||
UCHAR Wide16Bit :1;
|
||||
UCHAR Wide32Bit :1;
|
||||
UCHAR RelativeAddressing :1;
|
||||
UCHAR VendorId[8];
|
||||
UCHAR ProductId[16];
|
||||
UCHAR ProductRevisionLevel[4];
|
||||
UCHAR VendorSpecific[20];
|
||||
UCHAR Reserved3[40];
|
||||
} INQUIRYDATA, *PINQUIRYDATA;
|
||||
|
||||
#endif
|
||||
|
||||
// function prototypes
|
||||
int Pci2000_Detect (struct scsi_host_template *tpnt);
|
||||
int Pci2000_Command (Scsi_Cmnd *SCpnt);
|
||||
int Pci2000_QueueCommand (Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *));
|
||||
int Pci2000_Abort (Scsi_Cmnd *SCpnt);
|
||||
int Pci2000_Reset (Scsi_Cmnd *SCpnt, unsigned int flags);
|
||||
int Pci2000_Release (struct Scsi_Host *pshost);
|
||||
int Pci2000_BiosParam (struct scsi_device *sdev,
|
||||
struct block_device *bdev,
|
||||
sector_t capacity, int geom[]);
|
||||
|
||||
#endif
|
@ -3,11 +3,11 @@
#

menu "PCMCIA SCSI adapter support"
depends on SCSI!=n && PCMCIA!=n && MODULES
depends on SCSI!=n && PCMCIA!=n

config PCMCIA_AHA152X
tristate "Adaptec AHA152X PCMCIA support"
depends on m && !64BIT
depends on !64BIT
select SCSI_SPI_ATTRS
help
Say Y here if you intend to attach this type of PCMCIA SCSI host
@ -18,7 +18,6 @@ config PCMCIA_AHA152X

config PCMCIA_FDOMAIN
tristate "Future Domain PCMCIA support"
depends on m
help
Say Y here if you intend to attach this type of PCMCIA SCSI host
adapter to your computer.
@ -28,7 +27,7 @@ config PCMCIA_FDOMAIN

config PCMCIA_NINJA_SCSI
tristate "NinjaSCSI-3 / NinjaSCSI-32Bi (16bit) PCMCIA support"
depends on m && !64BIT
depends on !64BIT
help
If you intend to attach this type of PCMCIA SCSI host adapter to
your computer, say Y here and read
@ -62,7 +61,6 @@ config PCMCIA_NINJA_SCSI

config PCMCIA_QLOGIC
tristate "Qlogic PCMCIA support"
depends on m
help
Say Y here if you intend to attach this type of PCMCIA SCSI host
adapter to your computer.
@ -72,7 +70,6 @@ config PCMCIA_QLOGIC

config PCMCIA_SYM53C500
tristate "Symbios 53c500 PCMCIA support"
depends on m
help
Say Y here if you have a New Media Bus Toaster or other PCMCIA
SCSI adapter based on the Symbios 53c500 controller.
|
@ -1478,14 +1478,17 @@ typedef union {
uint32_t b24 : 24;

struct {
uint8_t d_id[3];
uint8_t rsvd_1;
} r;

struct {
#ifdef __BIG_ENDIAN
uint8_t domain;
uint8_t area;
uint8_t al_pa;
#elif __LITTLE_ENDIAN
uint8_t al_pa;
uint8_t area;
uint8_t domain;
#else
#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!"
#endif
uint8_t rsvd_1;
} b;
} port_id_t;
|
@ -11,6 +11,11 @@

#include "qla_devtbl.h"

#ifdef CONFIG_SPARC
#include <asm/prom.h>
#include <asm/pbm.h>
#endif
|
||||
|
||||
/* XXX(hch): this is ugly, but we don't want to pull in exioctl.h */
|
||||
#ifndef EXT_IS_LUN_BIT_SET
|
||||
#define EXT_IS_LUN_BIT_SET(P,L) \
|
||||
@ -88,12 +93,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
|
||||
|
||||
qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n");
|
||||
|
||||
rval = ha->isp_ops.nvram_config(ha);
|
||||
if (rval) {
|
||||
DEBUG2(printk("scsi(%ld): Unable to verify NVRAM data.\n",
|
||||
ha->host_no));
|
||||
return rval;
|
||||
}
|
||||
ha->isp_ops.nvram_config(ha);
|
||||
|
||||
if (ha->flags.disable_serdes) {
|
||||
/* Mask HBA via NVRAM settings? */
|
||||
@ -1393,6 +1393,28 @@ qla2x00_set_model_info(scsi_qla_host_t *ha, uint8_t *model, size_t len, char *de
|
||||
}
|
||||
}
|
||||
|
||||
/* On sparc systems, obtain port and node WWN from firmware
|
||||
* properties.
|
||||
*/
|
||||
static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *ha, nvram_t *nv)
|
||||
{
|
||||
#ifdef CONFIG_SPARC
|
||||
struct pci_dev *pdev = ha->pdev;
|
||||
struct pcidev_cookie *pcp = pdev->sysdata;
|
||||
struct device_node *dp = pcp->prom_node;
|
||||
u8 *val;
|
||||
int len;
|
||||
|
||||
val = of_get_property(dp, "port-wwn", &len);
|
||||
if (val && len >= WWN_SIZE)
|
||||
memcpy(nv->port_name, val, WWN_SIZE);
|
||||
|
||||
val = of_get_property(dp, "node-wwn", &len);
|
||||
if (val && len >= WWN_SIZE)
|
||||
memcpy(nv->node_name, val, WWN_SIZE);
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
* NVRAM configuration for ISP 2xxx
|
||||
*
|
||||
@ -1409,6 +1431,7 @@ qla2x00_set_model_info(scsi_qla_host_t *ha, uint8_t *model, size_t len, char *de
|
||||
int
|
||||
qla2x00_nvram_config(scsi_qla_host_t *ha)
|
||||
{
|
||||
int rval;
|
||||
uint8_t chksum = 0;
|
||||
uint16_t cnt;
|
||||
uint8_t *dptr1, *dptr2;
|
||||
@ -1417,6 +1440,8 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
|
||||
uint8_t *ptr = (uint8_t *)ha->request_ring;
|
||||
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
|
||||
|
||||
rval = QLA_SUCCESS;
|
||||
|
||||
/* Determine NVRAM starting address. */
|
||||
ha->nvram_size = sizeof(nvram_t);
|
||||
ha->nvram_base = 0;
|
||||
@ -1440,7 +1465,57 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
|
||||
qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
|
||||
"checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
|
||||
nv->nvram_version);
|
||||
return QLA_FUNCTION_FAILED;
|
||||
qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
|
||||
"invalid -- WWPN) defaults.\n");
|
||||
|
||||
/*
|
||||
* Set default initialization control block.
|
||||
*/
|
||||
memset(nv, 0, ha->nvram_size);
|
||||
nv->parameter_block_version = ICB_VERSION;
|
||||
|
||||
if (IS_QLA23XX(ha)) {
|
||||
nv->firmware_options[0] = BIT_2 | BIT_1;
|
||||
nv->firmware_options[1] = BIT_7 | BIT_5;
|
||||
nv->add_firmware_options[0] = BIT_5;
|
||||
nv->add_firmware_options[1] = BIT_5 | BIT_4;
|
||||
nv->frame_payload_size = __constant_cpu_to_le16(2048);
|
||||
nv->special_options[1] = BIT_7;
|
||||
} else if (IS_QLA2200(ha)) {
|
||||
nv->firmware_options[0] = BIT_2 | BIT_1;
|
||||
nv->firmware_options[1] = BIT_7 | BIT_5;
|
||||
nv->add_firmware_options[0] = BIT_5;
|
||||
nv->add_firmware_options[1] = BIT_5 | BIT_4;
|
||||
nv->frame_payload_size = __constant_cpu_to_le16(1024);
|
||||
} else if (IS_QLA2100(ha)) {
|
||||
nv->firmware_options[0] = BIT_3 | BIT_1;
|
||||
nv->firmware_options[1] = BIT_5;
|
||||
nv->frame_payload_size = __constant_cpu_to_le16(1024);
|
||||
}
|
||||
|
||||
nv->max_iocb_allocation = __constant_cpu_to_le16(256);
|
||||
nv->execution_throttle = __constant_cpu_to_le16(16);
|
||||
nv->retry_count = 8;
|
||||
nv->retry_delay = 1;
|
||||
|
||||
nv->port_name[0] = 33;
|
||||
nv->port_name[3] = 224;
|
||||
nv->port_name[4] = 139;
|
||||
|
||||
qla2xxx_nvram_wwn_from_ofw(ha, nv);
|
||||
|
||||
nv->login_timeout = 4;
|
||||
|
||||
/*
|
||||
* Set default host adapter parameters
|
||||
*/
|
||||
nv->host_p[1] = BIT_2;
|
||||
nv->reset_delay = 5;
|
||||
nv->port_down_retry_count = 8;
|
||||
nv->max_luns_per_target = __constant_cpu_to_le16(8);
|
||||
nv->link_down_timeout = 60;
|
||||
|
||||
rval = 1;
|
||||
}
|
||||
|
||||
#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
|
||||
@ -1653,7 +1728,11 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
|
||||
}
|
||||
}
|
||||
|
||||
return QLA_SUCCESS;
|
||||
if (rval) {
|
||||
DEBUG2_3(printk(KERN_WARNING
|
||||
"scsi(%ld): NVRAM configuration failed!\n", ha->host_no));
|
||||
}
|
||||
return (rval);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -3071,9 +3150,7 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
|
||||
|
||||
ha->isp_ops.get_flash_version(ha, ha->request_ring);
|
||||
|
||||
rval = ha->isp_ops.nvram_config(ha);
|
||||
if (rval)
|
||||
goto isp_abort_retry;
|
||||
ha->isp_ops.nvram_config(ha);
|
||||
|
||||
if (!qla2x00_restart_isp(ha)) {
|
||||
clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
|
||||
@ -3103,7 +3180,6 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
|
||||
}
|
||||
}
|
||||
} else { /* failed the ISP abort */
|
||||
isp_abort_retry:
|
||||
ha->flags.online = 1;
|
||||
if (test_bit(ISP_ABORT_RETRY, &ha->dpc_flags)) {
|
||||
if (ha->isp_abort_cnt == 0) {
|
||||
@ -3290,9 +3366,32 @@ qla24xx_reset_adapter(scsi_qla_host_t *ha)
|
||||
spin_unlock_irqrestore(&ha->hardware_lock, flags);
|
||||
}
|
||||
|
||||
/* On sparc systems, obtain port and node WWN from firmware
|
||||
* properties.
|
||||
*/
|
||||
static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *ha, struct nvram_24xx *nv)
|
||||
{
|
||||
#ifdef CONFIG_SPARC
|
||||
struct pci_dev *pdev = ha->pdev;
|
||||
struct pcidev_cookie *pcp = pdev->sysdata;
|
||||
struct device_node *dp = pcp->prom_node;
|
||||
u8 *val;
|
||||
int len;
|
||||
|
||||
val = of_get_property(dp, "port-wwn", &len);
|
||||
if (val && len >= WWN_SIZE)
|
||||
memcpy(nv->port_name, val, WWN_SIZE);
|
||||
|
||||
val = of_get_property(dp, "node-wwn", &len);
|
||||
if (val && len >= WWN_SIZE)
|
||||
memcpy(nv->node_name, val, WWN_SIZE);
|
||||
#endif
|
||||
}
|
||||
|
||||
int
|
||||
qla24xx_nvram_config(scsi_qla_host_t *ha)
|
||||
{
|
||||
int rval;
|
||||
struct init_cb_24xx *icb;
|
||||
struct nvram_24xx *nv;
|
||||
uint32_t *dptr;
|
||||
@ -3300,6 +3399,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
|
||||
uint32_t chksum;
|
||||
uint16_t cnt;
|
||||
|
||||
rval = QLA_SUCCESS;
|
||||
icb = (struct init_cb_24xx *)ha->init_cb;
|
||||
nv = (struct nvram_24xx *)ha->request_ring;
|
||||
|
||||
@ -3332,7 +3432,52 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
|
||||
qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
|
||||
"checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
|
||||
le16_to_cpu(nv->nvram_version));
|
||||
return QLA_FUNCTION_FAILED;
|
||||
qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
|
||||
"invalid -- WWPN) defaults.\n");
|
||||
|
||||
/*
|
||||
* Set default initialization control block.
|
||||
*/
|
||||
memset(nv, 0, ha->nvram_size);
|
||||
nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
|
||||
nv->version = __constant_cpu_to_le16(ICB_VERSION);
|
||||
nv->frame_payload_size = __constant_cpu_to_le16(2048);
|
||||
nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
|
||||
nv->exchange_count = __constant_cpu_to_le16(0);
|
||||
nv->hard_address = __constant_cpu_to_le16(124);
|
||||
nv->port_name[0] = 0x21;
|
||||
nv->port_name[1] = 0x00 + PCI_FUNC(ha->pdev->devfn);
|
||||
nv->port_name[2] = 0x00;
|
||||
nv->port_name[3] = 0xe0;
|
||||
nv->port_name[4] = 0x8b;
|
||||
nv->port_name[5] = 0x1c;
|
||||
nv->port_name[6] = 0x55;
|
||||
nv->port_name[7] = 0x86;
|
||||
nv->node_name[0] = 0x20;
|
||||
nv->node_name[1] = 0x00;
|
||||
nv->node_name[2] = 0x00;
|
||||
nv->node_name[3] = 0xe0;
|
||||
nv->node_name[4] = 0x8b;
|
||||
nv->node_name[5] = 0x1c;
|
||||
nv->node_name[6] = 0x55;
|
||||
nv->node_name[7] = 0x86;
|
||||
qla24xx_nvram_wwn_from_ofw(ha, nv);
|
||||
nv->login_retry_count = __constant_cpu_to_le16(8);
|
||||
nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
|
||||
nv->login_timeout = __constant_cpu_to_le16(0);
|
||||
nv->firmware_options_1 =
|
||||
__constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
|
||||
nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4);
|
||||
nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
|
||||
nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13);
|
||||
nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10);
|
||||
nv->efi_parameters = __constant_cpu_to_le32(0);
|
||||
nv->reset_delay = 5;
|
||||
nv->max_luns_per_target = __constant_cpu_to_le16(128);
|
||||
nv->port_down_retry_count = __constant_cpu_to_le16(30);
|
||||
nv->link_down_timeout = __constant_cpu_to_le16(30);
|
||||
|
||||
rval = 1;
|
||||
}
|
||||
|
||||
/* Reset Initialization control block */
|
||||
@ -3479,7 +3624,11 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
|
||||
ha->flags.process_response_queue = 1;
|
||||
}
|
||||
|
||||
return QLA_SUCCESS;
|
||||
if (rval) {
|
||||
DEBUG2_3(printk(KERN_WARNING
|
||||
"scsi(%ld): NVRAM configuration failed!\n", ha->host_no));
|
||||
}
|
||||
return (rval);
|
||||
}
|
||||
|
||||
static int
|
||||
|
@ -1280,14 +1280,14 @@ qla2x00_get_port_name(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t *name,
} else {
if (name != NULL) {
/* This function returns name in big endian. */
name[0] = LSB(mcp->mb[2]);
name[1] = MSB(mcp->mb[2]);
name[2] = LSB(mcp->mb[3]);
name[3] = MSB(mcp->mb[3]);
name[4] = LSB(mcp->mb[6]);
name[5] = MSB(mcp->mb[6]);
name[6] = LSB(mcp->mb[7]);
name[7] = MSB(mcp->mb[7]);
name[0] = MSB(mcp->mb[2]);
name[1] = LSB(mcp->mb[2]);
name[2] = MSB(mcp->mb[3]);
name[3] = LSB(mcp->mb[3]);
name[4] = MSB(mcp->mb[6]);
name[5] = LSB(mcp->mb[6]);
name[6] = MSB(mcp->mb[7]);
name[7] = LSB(mcp->mb[7]);
}

DEBUG11(printk("qla2x00_get_port_name(%ld): done.\n",
|
@ -62,7 +62,7 @@ MODULE_PARM_DESC(ql2xallocfwdump,
"vary by ISP type. Default is 1 - allocate memory.");

int ql2xextended_error_logging;
module_param(ql2xextended_error_logging, int, S_IRUGO|S_IRUSR);
module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xextended_error_logging,
"Option to enable extended error logging, "
"Default is 0 - no logging. 1 - log errors.");
@ -157,6 +157,8 @@ static struct scsi_host_template qla24xx_driver_template = {

.slave_alloc = qla2xxx_slave_alloc,
.slave_destroy = qla2xxx_slave_destroy,
.scan_finished = qla2xxx_scan_finished,
.scan_start = qla2xxx_scan_start,
.change_queue_depth = qla2x00_change_queue_depth,
.change_queue_type = qla2x00_change_queue_type,
.this_id = -1,
@ -1705,6 +1707,7 @@ qla2x00_remove_one(struct pci_dev *pdev)

scsi_host_put(ha->host);

pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}

@ -1747,8 +1750,6 @@ qla2x00_free_device(scsi_qla_host_t *ha)
if (ha->iobase)
iounmap(ha->iobase);
pci_release_regions(ha->pdev);

pci_disable_device(ha->pdev);
}

static inline void
|
@ -466,6 +466,7 @@ qla24xx_read_flash_dword(scsi_qla_host_t *ha, uint32_t addr)
|
||||
udelay(10);
|
||||
else
|
||||
rval = QLA_FUNCTION_TIMEOUT;
|
||||
cond_resched();
|
||||
}
|
||||
|
||||
/* TODO: What happens if we time out? */
|
||||
@ -508,6 +509,7 @@ qla24xx_write_flash_dword(scsi_qla_host_t *ha, uint32_t addr, uint32_t data)
|
||||
udelay(10);
|
||||
else
|
||||
rval = QLA_FUNCTION_TIMEOUT;
|
||||
cond_resched();
|
||||
}
|
||||
return rval;
|
||||
}
|
||||
@ -1255,6 +1257,7 @@ qla2x00_poll_flash(scsi_qla_host_t *ha, uint32_t addr, uint8_t poll_data,
|
||||
}
|
||||
udelay(10);
|
||||
barrier();
|
||||
cond_resched();
|
||||
}
|
||||
return status;
|
||||
}
|
||||
@ -1403,6 +1406,7 @@ qla2x00_read_flash_data(scsi_qla_host_t *ha, uint8_t *tmp_buf, uint32_t saddr,
|
||||
if (saddr % 100)
|
||||
udelay(10);
|
||||
*tmp_buf = data;
|
||||
cond_resched();
|
||||
}
|
||||
}
|
||||
|
||||
@ -1449,7 +1453,6 @@ uint8_t *
|
||||
qla2x00_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
|
||||
uint32_t offset, uint32_t length)
|
||||
{
|
||||
unsigned long flags;
|
||||
uint32_t addr, midpoint;
|
||||
uint8_t *data;
|
||||
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
|
||||
@ -1458,7 +1461,6 @@ qla2x00_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
|
||||
qla2x00_suspend_hba(ha);
|
||||
|
||||
/* Go with read. */
|
||||
spin_lock_irqsave(&ha->hardware_lock, flags);
|
||||
midpoint = ha->optrom_size / 2;
|
||||
|
||||
qla2x00_flash_enable(ha);
|
||||
@ -1473,7 +1475,6 @@ qla2x00_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
|
||||
*data = qla2x00_read_flash_byte(ha, addr);
|
||||
}
|
||||
qla2x00_flash_disable(ha);
|
||||
spin_unlock_irqrestore(&ha->hardware_lock, flags);
|
||||
|
||||
/* Resume HBA. */
|
||||
qla2x00_resume_hba(ha);
|
||||
@ -1487,7 +1488,6 @@ qla2x00_write_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
|
||||
{
|
||||
|
||||
int rval;
|
||||
unsigned long flags;
|
||||
uint8_t man_id, flash_id, sec_number, data;
|
||||
uint16_t wd;
|
||||
uint32_t addr, liter, sec_mask, rest_addr;
|
||||
@ -1500,7 +1500,6 @@ qla2x00_write_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
|
||||
sec_number = 0;
|
||||
|
||||
/* Reset ISP chip. */
|
||||
spin_lock_irqsave(&ha->hardware_lock, flags);
|
||||
WRT_REG_WORD(®->ctrl_status, CSR_ISP_SOFT_RESET);
|
||||
pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
|
||||
|
||||
@ -1689,10 +1688,10 @@ update_flash:
|
||||
rval = QLA_FUNCTION_FAILED;
|
||||
break;
|
||||
}
|
||||
cond_resched();
|
||||
}
|
||||
} while (0);
|
||||
qla2x00_flash_disable(ha);
|
||||
spin_unlock_irqrestore(&ha->hardware_lock, flags);
|
||||
|
||||
/* Resume HBA. */
|
||||
qla2x00_resume_hba(ha);
|
||||
|
@ -7,7 +7,7 @@
/*
* Driver version
*/
#define QLA2XXX_VERSION "8.01.07-k5"
#define QLA2XXX_VERSION "8.01.07-k6"

#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 1
|
@ -344,7 +344,6 @@ void scsi_destroy_command_freelist(struct Scsi_Host *shost)
|
||||
void scsi_log_send(struct scsi_cmnd *cmd)
|
||||
{
|
||||
unsigned int level;
|
||||
struct scsi_device *sdev;
|
||||
|
||||
/*
|
||||
* If ML QUEUE log level is greater than or equal to:
|
||||
@ -361,22 +360,17 @@ void scsi_log_send(struct scsi_cmnd *cmd)
|
||||
level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT,
|
||||
SCSI_LOG_MLQUEUE_BITS);
|
||||
if (level > 1) {
|
||||
sdev = cmd->device;
|
||||
sdev_printk(KERN_INFO, sdev, "send ");
|
||||
scmd_printk(KERN_INFO, cmd, "Send: ");
|
||||
if (level > 2)
|
||||
printk("0x%p ", cmd);
|
||||
/*
|
||||
* spaces to match disposition and cmd->result
|
||||
* output in scsi_log_completion.
|
||||
*/
|
||||
printk(" ");
|
||||
printk("\n");
|
||||
scsi_print_command(cmd);
|
||||
if (level > 3) {
|
||||
printk(KERN_INFO "buffer = 0x%p, bufflen = %d,"
|
||||
" done = 0x%p, queuecommand 0x%p\n",
|
||||
cmd->request_buffer, cmd->request_bufflen,
|
||||
cmd->done,
|
||||
sdev->host->hostt->queuecommand);
|
||||
cmd->device->host->hostt->queuecommand);
|
||||
|
||||
}
|
||||
}
|
||||
@ -386,7 +380,6 @@ void scsi_log_send(struct scsi_cmnd *cmd)
|
||||
void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
|
||||
{
|
||||
unsigned int level;
|
||||
struct scsi_device *sdev;
|
||||
|
||||
/*
|
||||
* If ML COMPLETE log level is greater than or equal to:
|
||||
@ -405,8 +398,7 @@ void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
|
||||
SCSI_LOG_MLCOMPLETE_BITS);
|
||||
if (((level > 0) && (cmd->result || disposition != SUCCESS)) ||
|
||||
(level > 1)) {
|
||||
sdev = cmd->device;
|
||||
sdev_printk(KERN_INFO, sdev, "done ");
|
||||
scmd_printk(KERN_INFO, cmd, "Done: ");
|
||||
if (level > 2)
|
||||
printk("0x%p ", cmd);
|
||||
/*
|
||||
@ -415,40 +407,35 @@ void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
|
||||
*/
|
||||
switch (disposition) {
|
||||
case SUCCESS:
|
||||
printk("SUCCESS");
|
||||
printk("SUCCESS\n");
|
||||
break;
|
||||
case NEEDS_RETRY:
|
||||
printk("RETRY ");
|
||||
printk("RETRY\n");
|
||||
break;
|
||||
case ADD_TO_MLQUEUE:
|
||||
printk("MLQUEUE");
|
||||
printk("MLQUEUE\n");
|
||||
break;
|
||||
case FAILED:
|
||||
printk("FAILED ");
|
||||
printk("FAILED\n");
|
||||
break;
|
||||
case TIMEOUT_ERROR:
|
||||
/*
|
||||
* If called via scsi_times_out.
|
||||
*/
|
||||
printk("TIMEOUT");
|
||||
printk("TIMEOUT\n");
|
||||
break;
|
||||
default:
|
||||
printk("UNKNOWN");
|
||||
printk("UNKNOWN\n");
|
||||
}
|
||||
printk(" %8x ", cmd->result);
|
||||
scsi_print_result(cmd);
|
||||
scsi_print_command(cmd);
|
||||
if (status_byte(cmd->result) & CHECK_CONDITION) {
|
||||
/*
|
||||
* XXX The scsi_print_sense formatting/prefix
|
||||
* doesn't match this function.
|
||||
*/
|
||||
if (status_byte(cmd->result) & CHECK_CONDITION)
|
||||
scsi_print_sense("", cmd);
|
||||
}
|
||||
if (level > 3) {
|
||||
printk(KERN_INFO "scsi host busy %d failed %d\n",
|
||||
sdev->host->host_busy,
|
||||
sdev->host->host_failed);
|
||||
}
|
||||
if (level > 3)
|
||||
scmd_printk(KERN_INFO, cmd,
|
||||
"scsi host busy %d failed %d\n",
|
||||
cmd->device->host->host_busy,
|
||||
cmd->device->host->host_failed);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -184,10 +184,19 @@ int scsi_delete_timer(struct scsi_cmnd *scmd)
|
||||
**/
|
||||
void scsi_times_out(struct scsi_cmnd *scmd)
|
||||
{
|
||||
enum scsi_eh_timer_return (* eh_timed_out)(struct scsi_cmnd *);
|
||||
|
||||
scsi_log_completion(scmd, TIMEOUT_ERROR);
|
||||
|
||||
if (scmd->device->host->transportt->eh_timed_out)
|
||||
switch (scmd->device->host->transportt->eh_timed_out(scmd)) {
|
||||
eh_timed_out = scmd->device->host->transportt->eh_timed_out;
|
||||
else if (scmd->device->host->hostt->eh_timed_out)
|
||||
eh_timed_out = scmd->device->host->hostt->eh_timed_out;
|
||||
else
|
||||
eh_timed_out = NULL;
|
||||
|
||||
if (eh_timed_out)
|
||||
switch (eh_timed_out(scmd)) {
|
||||
case EH_HANDLED:
|
||||
__scsi_done(scmd);
|
||||
return;
|
||||
@ -923,10 +932,12 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
|
||||
static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0};
|
||||
|
||||
if (scmd->device->allow_restart) {
|
||||
int rtn;
|
||||
int i, rtn = NEEDS_RETRY;
|
||||
|
||||
for (i = 0; rtn == NEEDS_RETRY && i < 2; i++)
|
||||
rtn = scsi_send_eh_cmnd(scmd, stu_command, 6,
|
||||
START_UNIT_TIMEOUT, 0);
|
||||
|
||||
rtn = scsi_send_eh_cmnd(scmd, stu_command, 6,
|
||||
START_UNIT_TIMEOUT, 0);
|
||||
if (rtn == SUCCESS)
|
||||
return 0;
|
||||
}
|
||||
|
@ -848,8 +848,8 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
|
||||
memcpy(req->sense, cmd->sense_buffer, len);
|
||||
req->sense_len = len;
|
||||
}
|
||||
} else
|
||||
req->data_len = cmd->resid;
|
||||
}
|
||||
req->data_len = cmd->resid;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -968,9 +968,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
|
||||
}
|
||||
if (result) {
|
||||
if (!(req->cmd_flags & REQ_QUIET)) {
|
||||
scmd_printk(KERN_INFO, cmd,
|
||||
"SCSI error: return code = 0x%08x\n",
|
||||
result);
|
||||
scsi_print_result(cmd);
|
||||
if (driver_byte(result) & DRIVER_SENSE)
|
||||
scsi_print_sense("", cmd);
|
||||
}
|
||||
|
@ -181,10 +181,8 @@ int scsi_complete_async_scans(void)
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifdef MODULE
|
||||
/* Only exported for the benefit of scsi_wait_scan */
|
||||
EXPORT_SYMBOL_GPL(scsi_complete_async_scans);
|
||||
#endif
|
||||
|
||||
/**
|
||||
* scsi_unlock_floptical - unlock device via a special MODE SENSE command
|
||||
|
@ -276,8 +276,22 @@ static int scsi_bus_match(struct device *dev, struct device_driver *gendrv)
|
||||
return (sdp->inq_periph_qual == SCSI_INQ_PQ_CON)? 1: 0;
|
||||
}
|
||||
|
||||
static int scsi_bus_uevent(struct device *dev, char **envp, int num_envp,
|
||||
char *buffer, int buffer_size)
|
||||
{
|
||||
struct scsi_device *sdev = to_scsi_device(dev);
|
||||
int i = 0;
|
||||
int length = 0;
|
||||
|
||||
add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
|
||||
"MODALIAS=" SCSI_DEVICE_MODALIAS_FMT, sdev->type);
|
||||
envp[i] = NULL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int scsi_bus_suspend(struct device * dev, pm_message_t state)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
struct scsi_device *sdev = to_scsi_device(dev);
|
||||
struct scsi_host_template *sht = sdev->host->hostt;
|
||||
int err;
|
||||
@ -286,28 +300,51 @@ static int scsi_bus_suspend(struct device * dev, pm_message_t state)
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (sht->suspend)
|
||||
err = sht->suspend(sdev, state);
|
||||
/* call HLD suspend first */
|
||||
if (drv && drv->suspend) {
|
||||
err = drv->suspend(dev, state);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
||||
return err;
|
||||
/* then, call host suspend */
|
||||
if (sht->suspend) {
|
||||
err = sht->suspend(sdev, state);
|
||||
if (err) {
|
||||
if (drv && drv->resume)
|
||||
drv->resume(dev);
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int scsi_bus_resume(struct device * dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
struct scsi_device *sdev = to_scsi_device(dev);
|
||||
struct scsi_host_template *sht = sdev->host->hostt;
|
||||
int err = 0;
|
||||
int err = 0, err2 = 0;
|
||||
|
||||
/* call host resume first */
|
||||
if (sht->resume)
|
||||
err = sht->resume(sdev);
|
||||
|
||||
/* then, call HLD resume */
|
||||
if (drv && drv->resume)
|
||||
err2 = drv->resume(dev);
|
||||
|
||||
scsi_device_resume(sdev);
|
||||
return err;
|
||||
|
||||
/* favor LLD failure */
return err ? err : err2;
}
|
||||
|
||||
struct bus_type scsi_bus_type = {
|
||||
.name = "scsi",
|
||||
.match = scsi_bus_match,
|
||||
.uevent = scsi_bus_uevent,
|
||||
.suspend = scsi_bus_suspend,
|
||||
.resume = scsi_bus_resume,
|
||||
};
|
||||
@ -547,6 +584,14 @@ show_sdev_iostat(iorequest_cnt);
|
||||
show_sdev_iostat(iodone_cnt);
|
||||
show_sdev_iostat(ioerr_cnt);
|
||||
|
||||
static ssize_t
|
||||
sdev_show_modalias(struct device *dev, struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct scsi_device *sdev;
|
||||
sdev = to_scsi_device(dev);
|
||||
return snprintf (buf, 20, SCSI_DEVICE_MODALIAS_FMT "\n", sdev->type);
|
||||
}
|
||||
static DEVICE_ATTR(modalias, S_IRUGO, sdev_show_modalias, NULL);
|
||||
|
||||
/* Default template for device attributes. May NOT be modified */
|
||||
static struct device_attribute *scsi_sysfs_sdev_attrs[] = {
|
||||
@ -566,6 +611,7 @@ static struct device_attribute *scsi_sysfs_sdev_attrs[] = {
|
||||
&dev_attr_iorequest_cnt,
|
||||
&dev_attr_iodone_cnt,
|
||||
&dev_attr_ioerr_cnt,
|
||||
&dev_attr_modalias,
|
||||
NULL
|
||||
};
|
||||
|
||||
|
@ -179,10 +179,12 @@ static int event_recv_msg(struct tgt_event *ev)
|
||||
switch (ev->hdr.type) {
|
||||
case TGT_UEVENT_CMD_RSP:
|
||||
err = scsi_tgt_kspace_exec(ev->p.cmd_rsp.host_no,
|
||||
ev->p.cmd_rsp.tag,
|
||||
ev->p.cmd_rsp.result,
|
||||
ev->p.cmd_rsp.len,
|
||||
ev->p.cmd_rsp.tag,
|
||||
ev->p.cmd_rsp.uaddr,
|
||||
ev->p.cmd_rsp.len,
|
||||
ev->p.cmd_rsp.sense_uaddr,
|
||||
ev->p.cmd_rsp.sense_len,
|
||||
ev->p.cmd_rsp.rw);
|
||||
break;
|
||||
case TGT_UEVENT_TSK_MGMT_RSP:
|
||||
|
@ -28,7 +28,6 @@
|
||||
#include <scsi/scsi_device.h>
|
||||
#include <scsi/scsi_host.h>
|
||||
#include <scsi/scsi_tgt.h>
|
||||
#include <../drivers/md/dm-bio-list.h>
|
||||
|
||||
#include "scsi_tgt_priv.h"
|
||||
|
||||
@ -42,16 +41,12 @@ static struct kmem_cache *scsi_tgt_cmd_cache;
|
||||
struct scsi_tgt_cmd {
|
||||
/* TODO replace work with James b's code */
|
||||
struct work_struct work;
|
||||
/* TODO replace the lists with a large bio */
|
||||
struct bio_list xfer_done_list;
|
||||
struct bio_list xfer_list;
|
||||
/* TODO fix limits of some drivers */
|
||||
struct bio *bio;
|
||||
|
||||
struct list_head hash_list;
|
||||
struct request *rq;
|
||||
u64 tag;
|
||||
|
||||
void *buffer;
|
||||
unsigned bufflen;
|
||||
};
|
||||
|
||||
#define TGT_HASH_ORDER 4
|
||||
@ -93,7 +88,12 @@ struct scsi_cmnd *scsi_host_get_command(struct Scsi_Host *shost,
|
||||
if (!tcmd)
|
||||
goto put_dev;
|
||||
|
||||
rq = blk_get_request(shost->uspace_req_q, write, gfp_mask);
|
||||
/*
|
||||
* The blk helpers are used for READ/WRITE requests
* transferring data from an initiator's point of view. Since
* we are in target mode we want the opposite.
|
||||
*/
|
||||
rq = blk_get_request(shost->uspace_req_q, !write, gfp_mask);
|
||||
if (!rq)
|
||||
goto free_tcmd;
|
||||
|
||||
@ -111,8 +111,6 @@ struct scsi_cmnd *scsi_host_get_command(struct Scsi_Host *shost,
|
||||
rq->cmd_flags |= REQ_TYPE_BLOCK_PC;
|
||||
rq->end_io_data = tcmd;
|
||||
|
||||
bio_list_init(&tcmd->xfer_list);
|
||||
bio_list_init(&tcmd->xfer_done_list);
|
||||
tcmd->rq = rq;
|
||||
|
||||
return cmd;
|
||||
@ -157,22 +155,6 @@ void scsi_host_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(scsi_host_put_command);
|
||||
|
||||
static void scsi_unmap_user_pages(struct scsi_tgt_cmd *tcmd)
|
||||
{
|
||||
struct bio *bio;
|
||||
|
||||
/* must call bio_endio in case bio was bounced */
|
||||
while ((bio = bio_list_pop(&tcmd->xfer_done_list))) {
|
||||
bio_endio(bio, bio->bi_size, 0);
|
||||
bio_unmap_user(bio);
|
||||
}
|
||||
|
||||
while ((bio = bio_list_pop(&tcmd->xfer_list))) {
|
||||
bio_endio(bio, bio->bi_size, 0);
|
||||
bio_unmap_user(bio);
|
||||
}
|
||||
}
|
||||
|
||||
static void cmd_hashlist_del(struct scsi_cmnd *cmd)
|
||||
{
|
||||
struct request_queue *q = cmd->request->q;
|
||||
@ -185,6 +167,11 @@ static void cmd_hashlist_del(struct scsi_cmnd *cmd)
|
||||
spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
|
||||
}
|
||||
|
||||
static void scsi_unmap_user_pages(struct scsi_tgt_cmd *tcmd)
|
||||
{
|
||||
blk_rq_unmap_user(tcmd->bio);
|
||||
}
|
||||
|
||||
static void scsi_tgt_cmd_destroy(struct work_struct *work)
|
||||
{
|
||||
struct scsi_tgt_cmd *tcmd =
|
||||
@ -193,16 +180,6 @@ static void scsi_tgt_cmd_destroy(struct work_struct *work)
|
||||
|
||||
dprintk("cmd %p %d %lu\n", cmd, cmd->sc_data_direction,
|
||||
rq_data_dir(cmd->request));
|
||||
/*
|
||||
* We fix rq->cmd_flags here since when we told bio_map_user
|
||||
* to write vm for WRITE commands, blk_rq_bio_prep set
|
||||
* rq_data_dir the flags to READ.
|
||||
*/
|
||||
if (cmd->sc_data_direction == DMA_TO_DEVICE)
|
||||
cmd->request->cmd_flags |= REQ_RW;
|
||||
else
|
||||
cmd->request->cmd_flags &= ~REQ_RW;
|
||||
|
||||
scsi_unmap_user_pages(tcmd);
|
||||
scsi_host_put_command(scsi_tgt_cmd_to_host(cmd), cmd);
|
||||
}
|
||||
@ -215,6 +192,7 @@ static void init_scsi_tgt_cmd(struct request *rq, struct scsi_tgt_cmd *tcmd,
|
||||
struct list_head *head;
|
||||
|
||||
tcmd->tag = tag;
|
||||
tcmd->bio = NULL;
|
||||
INIT_WORK(&tcmd->work, scsi_tgt_cmd_destroy);
|
||||
spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
|
||||
head = &qdata->cmd_hash[cmd_hashfn(tag)];
|
||||
@ -349,10 +327,14 @@ static void scsi_tgt_cmd_done(struct scsi_cmnd *cmd)
|
||||
dprintk("cmd %p %lu\n", cmd, rq_data_dir(cmd->request));
|
||||
|
||||
scsi_tgt_uspace_send_status(cmd, tcmd->tag);
|
||||
|
||||
if (cmd->request_buffer)
|
||||
scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
|
||||
|
||||
queue_work(scsi_tgtd, &tcmd->work);
|
||||
}
|
||||
|
||||
static int __scsi_tgt_transfer_response(struct scsi_cmnd *cmd)
|
||||
static int scsi_tgt_transfer_response(struct scsi_cmnd *cmd)
|
||||
{
|
||||
struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd);
|
||||
int err;
|
||||
@ -365,30 +347,12 @@ static int __scsi_tgt_transfer_response(struct scsi_cmnd *cmd)
|
||||
case SCSI_MLQUEUE_DEVICE_BUSY:
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void scsi_tgt_transfer_response(struct scsi_cmnd *cmd)
|
||||
{
|
||||
struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
|
||||
int err;
|
||||
|
||||
err = __scsi_tgt_transfer_response(cmd);
|
||||
if (!err)
|
||||
return;
|
||||
|
||||
cmd->result = DID_BUS_BUSY << 16;
|
||||
err = scsi_tgt_uspace_send_status(cmd, tcmd->tag);
|
||||
if (err <= 0)
|
||||
/* the eh will have to pick this up */
|
||||
printk(KERN_ERR "Could not send cmd %p status\n", cmd);
|
||||
}
|
||||
|
||||
static int scsi_tgt_init_cmd(struct scsi_cmnd *cmd, gfp_t gfp_mask)
|
||||
{
|
||||
struct request *rq = cmd->request;
|
||||
struct scsi_tgt_cmd *tcmd = rq->end_io_data;
|
||||
int count;
|
||||
|
||||
cmd->use_sg = rq->nr_phys_segments;
|
||||
@ -398,143 +362,54 @@ static int scsi_tgt_init_cmd(struct scsi_cmnd *cmd, gfp_t gfp_mask)
|
||||
|
||||
cmd->request_bufflen = rq->data_len;
|
||||
|
||||
dprintk("cmd %p addr %p cnt %d %lu\n", cmd, tcmd->buffer, cmd->use_sg,
|
||||
rq_data_dir(rq));
|
||||
dprintk("cmd %p cnt %d %lu\n", cmd, cmd->use_sg, rq_data_dir(rq));
|
||||
count = blk_rq_map_sg(rq->q, rq, cmd->request_buffer);
|
||||
if (likely(count <= cmd->use_sg)) {
|
||||
cmd->use_sg = count;
|
||||
return 0;
|
||||
}
|
||||
|
||||
eprintk("cmd %p addr %p cnt %d\n", cmd, tcmd->buffer, cmd->use_sg);
|
||||
eprintk("cmd %p cnt %d\n", cmd, cmd->use_sg);
|
||||
scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* TODO: test this crap and replace bio_map_user with new interface maybe */
|
||||
static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
|
||||
int rw)
|
||||
unsigned long uaddr, unsigned int len, int rw)
|
||||
{
|
||||
struct request_queue *q = cmd->request->q;
|
||||
struct request *rq = cmd->request;
|
||||
void *uaddr = tcmd->buffer;
|
||||
unsigned int len = tcmd->bufflen;
|
||||
struct bio *bio;
|
||||
int err;
|
||||
|
||||
while (len > 0) {
|
||||
dprintk("%lx %u\n", (unsigned long) uaddr, len);
|
||||
bio = bio_map_user(q, NULL, (unsigned long) uaddr, len, rw);
|
||||
if (IS_ERR(bio)) {
|
||||
err = PTR_ERR(bio);
|
||||
dprintk("fail to map %lx %u %d %x\n",
|
||||
(unsigned long) uaddr, len, err, cmd->cmnd[0]);
|
||||
goto unmap_bios;
|
||||
}
|
||||
|
||||
uaddr += bio->bi_size;
|
||||
len -= bio->bi_size;
|
||||
|
||||
dprintk("%lx %u\n", uaddr, len);
|
||||
err = blk_rq_map_user(q, rq, (void *)uaddr, len);
|
||||
if (err) {
|
||||
/*
|
||||
* The first bio is added and merged. We could probably
|
||||
* try to add others using scsi_merge_bio() but for now
|
||||
* we keep it simple. The first bio should be pretty large
|
||||
* (either hitting the 1 MB bio pages limit or a queue limit)
|
||||
* already but for really large IO we may want to try and
|
||||
* merge these.
|
||||
* TODO: need to fixup sg_tablesize, max_segment_size,
|
||||
* max_sectors, etc for modern HW and software drivers
|
||||
* where this value is bogus.
|
||||
*
|
||||
* TODO2: we can alloc a reserve buffer of max size
|
||||
* we can handle and do the slow copy path for really large
|
||||
* IO.
|
||||
*/
|
||||
if (!rq->bio) {
|
||||
blk_rq_bio_prep(q, rq, bio);
|
||||
rq->data_len = bio->bi_size;
|
||||
} else
|
||||
/* put list of bios to transfer in next go around */
|
||||
bio_list_add(&tcmd->xfer_list, bio);
|
||||
eprintk("Could not handle request of size %u.\n", len);
|
||||
return err;
|
||||
}
|
||||
|
||||
cmd->offset = 0;
|
||||
tcmd->bio = rq->bio;
|
||||
err = scsi_tgt_init_cmd(cmd, GFP_KERNEL);
|
||||
if (err)
|
||||
goto unmap_bios;
|
||||
goto unmap_rq;
|
||||
|
||||
return 0;
|
||||
|
||||
unmap_bios:
|
||||
if (rq->bio) {
|
||||
bio_unmap_user(rq->bio);
|
||||
while ((bio = bio_list_pop(&tcmd->xfer_list)))
|
||||
bio_unmap_user(bio);
|
||||
}
|
||||
|
||||
unmap_rq:
|
||||
scsi_unmap_user_pages(tcmd);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int scsi_tgt_transfer_data(struct scsi_cmnd *);
|
||||
|
||||
static void scsi_tgt_data_transfer_done(struct scsi_cmnd *cmd)
|
||||
{
|
||||
struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
|
||||
struct bio *bio;
|
||||
int err;
|
||||
|
||||
/* should we free resources here on error ? */
|
||||
if (cmd->result) {
|
||||
send_uspace_err:
|
||||
err = scsi_tgt_uspace_send_status(cmd, tcmd->tag);
|
||||
if (err <= 0)
|
||||
/* the tgt uspace eh will have to pick this up */
|
||||
printk(KERN_ERR "Could not send cmd %p status\n", cmd);
|
||||
return;
|
||||
}
|
||||
|
||||
dprintk("cmd %p request_bufflen %u bufflen %u\n",
|
||||
cmd, cmd->request_bufflen, tcmd->bufflen);
|
||||
|
||||
scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
|
||||
bio_list_add(&tcmd->xfer_done_list, cmd->request->bio);
|
||||
|
||||
tcmd->buffer += cmd->request_bufflen;
|
||||
cmd->offset += cmd->request_bufflen;
|
||||
|
||||
if (!tcmd->xfer_list.head) {
|
||||
scsi_tgt_transfer_response(cmd);
|
||||
return;
|
||||
}
|
||||
|
||||
dprintk("cmd2 %p request_bufflen %u bufflen %u\n",
|
||||
cmd, cmd->request_bufflen, tcmd->bufflen);
|
||||
|
||||
bio = bio_list_pop(&tcmd->xfer_list);
|
||||
BUG_ON(!bio);
|
||||
|
||||
blk_rq_bio_prep(cmd->request->q, cmd->request, bio);
|
||||
cmd->request->data_len = bio->bi_size;
|
||||
err = scsi_tgt_init_cmd(cmd, GFP_ATOMIC);
|
||||
if (err) {
|
||||
cmd->result = DID_ERROR << 16;
|
||||
goto send_uspace_err;
|
||||
}
|
||||
|
||||
if (scsi_tgt_transfer_data(cmd)) {
|
||||
cmd->result = DID_NO_CONNECT << 16;
|
||||
goto send_uspace_err;
|
||||
}
|
||||
}
|
||||
|
||||
static int scsi_tgt_transfer_data(struct scsi_cmnd *cmd)
|
||||
{
|
||||
int err;
|
||||
struct Scsi_Host *host = scsi_tgt_cmd_to_host(cmd);
|
||||
|
||||
err = host->hostt->transfer_data(cmd, scsi_tgt_data_transfer_done);
|
||||
switch (err) {
|
||||
case SCSI_MLQUEUE_HOST_BUSY:
|
||||
case SCSI_MLQUEUE_DEVICE_BUSY:
|
||||
return -EAGAIN;
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
static int scsi_tgt_copy_sense(struct scsi_cmnd *cmd, unsigned long uaddr,
|
||||
unsigned len)
|
||||
{
|
||||
@ -584,8 +459,9 @@ static struct request *tgt_cmd_hash_lookup(struct request_queue *q, u64 tag)
|
||||
return rq;
|
||||
}
|
||||
|
||||
int scsi_tgt_kspace_exec(int host_no, u64 tag, int result, u32 len,
|
||||
unsigned long uaddr, u8 rw)
|
||||
int scsi_tgt_kspace_exec(int host_no, int result, u64 tag,
|
||||
unsigned long uaddr, u32 len, unsigned long sense_uaddr,
|
||||
u32 sense_len, u8 rw)
|
||||
{
|
||||
struct Scsi_Host *shost;
|
||||
struct scsi_cmnd *cmd;
|
||||
@ -617,8 +493,9 @@ int scsi_tgt_kspace_exec(int host_no, u64 tag, int result, u32 len,
|
||||
}
|
||||
cmd = rq->special;
|
||||
|
||||
dprintk("cmd %p result %d len %d bufflen %u %lu %x\n", cmd,
|
||||
result, len, cmd->request_bufflen, rq_data_dir(rq), cmd->cmnd[0]);
|
||||
dprintk("cmd %p scb %x result %d len %d bufflen %u %lu %x\n",
|
||||
cmd, cmd->cmnd[0], result, len, cmd->request_bufflen,
|
||||
rq_data_dir(rq), cmd->cmnd[0]);
|
||||
|
||||
if (result == TASK_ABORTED) {
|
||||
scsi_tgt_abort_cmd(shost, cmd);
|
||||
@ -629,36 +506,36 @@ int scsi_tgt_kspace_exec(int host_no, u64 tag, int result, u32 len,
|
||||
* in the request_* values
|
||||
*/
|
||||
tcmd = cmd->request->end_io_data;
|
||||
tcmd->buffer = (void *)uaddr;
|
||||
tcmd->bufflen = len;
|
||||
cmd->result = result;
|
||||
|
||||
if (!tcmd->bufflen || cmd->request_buffer) {
|
||||
err = __scsi_tgt_transfer_response(cmd);
|
||||
goto done;
|
||||
}
|
||||
if (cmd->result == SAM_STAT_CHECK_CONDITION)
|
||||
scsi_tgt_copy_sense(cmd, sense_uaddr, sense_len);
|
||||
|
||||
/*
|
||||
* TODO: Do we need to handle case where request does not
|
||||
* align with LLD.
|
||||
*/
|
||||
err = scsi_map_user_pages(rq->end_io_data, cmd, rw);
|
||||
if (err) {
|
||||
eprintk("%p %d\n", cmd, err);
|
||||
err = -EAGAIN;
|
||||
goto done;
|
||||
}
|
||||
if (len) {
|
||||
err = scsi_map_user_pages(rq->end_io_data, cmd, uaddr, len, rw);
|
||||
if (err) {
|
||||
/*
|
||||
* user-space daemon bugs or OOM
|
||||
* TODO: we can do better for OOM.
|
||||
*/
|
||||
struct scsi_tgt_queuedata *qdata;
|
||||
struct list_head *head;
|
||||
unsigned long flags;
|
||||
|
||||
/* userspace failure */
|
||||
if (cmd->result) {
|
||||
if (status_byte(cmd->result) == CHECK_CONDITION)
|
||||
scsi_tgt_copy_sense(cmd, uaddr, len);
|
||||
err = __scsi_tgt_transfer_response(cmd);
|
||||
goto done;
|
||||
}
|
||||
/* ask the target LLD to transfer the data to the buffer */
|
||||
err = scsi_tgt_transfer_data(cmd);
|
||||
eprintk("cmd %p ret %d uaddr %lx len %d rw %d\n",
|
||||
cmd, err, uaddr, len, rw);
|
||||
|
||||
qdata = shost->uspace_req_q->queuedata;
|
||||
head = &qdata->cmd_hash[cmd_hashfn(tcmd->tag)];
|
||||
|
||||
spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
|
||||
list_add(&tcmd->hash_list, head);
|
||||
spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
|
||||
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
err = scsi_tgt_transfer_response(cmd);
|
||||
done:
|
||||
scsi_host_put(shost);
|
||||
return err;
|
||||
|
@ -18,8 +18,9 @@ extern int scsi_tgt_if_init(void);
|
||||
extern int scsi_tgt_uspace_send_cmd(struct scsi_cmnd *cmd, struct scsi_lun *lun,
|
||||
u64 tag);
|
||||
extern int scsi_tgt_uspace_send_status(struct scsi_cmnd *cmd, u64 tag);
|
||||
extern int scsi_tgt_kspace_exec(int host_no, u64 tag, int result, u32 len,
|
||||
unsigned long uaddr, u8 rw);
|
||||
extern int scsi_tgt_kspace_exec(int host_no, int result, u64 tag,
|
||||
unsigned long uaddr, u32 len, unsigned long sense_uaddr,
|
||||
u32 sense_len, u8 rw);
|
||||
extern int scsi_tgt_uspace_send_tsk_mgmt(int host_no, int function, u64 tag,
|
||||
struct scsi_lun *scsilun, void *data);
|
||||
extern int scsi_tgt_kspace_tsk_mgmt(int host_no, u64 mid, int result);
|
||||
|
@ -200,6 +200,8 @@ static const struct {
|
||||
{ FC_PORTSPEED_2GBIT, "2 Gbit" },
|
||||
{ FC_PORTSPEED_4GBIT, "4 Gbit" },
|
||||
{ FC_PORTSPEED_10GBIT, "10 Gbit" },
|
||||
{ FC_PORTSPEED_8GBIT, "8 Gbit" },
|
||||
{ FC_PORTSPEED_16GBIT, "16 Gbit" },
|
||||
{ FC_PORTSPEED_NOT_NEGOTIATED, "Not Negotiated" },
|
||||
};
|
||||
fc_bitfield_name_search(port_speed, fc_port_speed_names)
|
||||
|
@ -49,7 +49,7 @@ struct iscsi_internal {
|
||||
struct class_device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
|
||||
};
|
||||
|
||||
static int iscsi_session_nr; /* sysfs session id for next new session */
|
||||
static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
|
||||
|
||||
/*
|
||||
* list of registered transports and lock that must
|
||||
@ -300,7 +300,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
|
||||
int err;
|
||||
|
||||
ihost = shost->shost_data;
|
||||
session->sid = iscsi_session_nr++;
|
||||
session->sid = atomic_add_return(1, &iscsi_session_nr);
|
||||
session->target_id = target_id;
|
||||
|
||||
snprintf(session->dev.bus_id, BUS_ID_SIZE, "session%u",
|
||||
@ -1419,6 +1419,8 @@ static __init int iscsi_transport_init(void)
|
||||
printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
|
||||
ISCSI_TRANSPORT_VERSION);
|
||||
|
||||
atomic_set(&iscsi_session_nr, 0);
|
||||
|
||||
err = class_register(&iscsi_transport_class);
|
||||
if (err)
|
||||
return err;
|
||||
|
@ -58,16 +58,10 @@
|
||||
#include <scsi/scsi_host.h>
|
||||
#include <scsi/scsi_ioctl.h>
|
||||
#include <scsi/scsicam.h>
|
||||
#include <scsi/sd.h>
|
||||
|
||||
#include "scsi_logging.h"
|
||||
|
||||
/*
|
||||
* More than enough for everybody ;) The huge number of majors
|
||||
* is a leftover from 16bit dev_t days, we don't really need that
|
||||
* much numberspace.
|
||||
*/
|
||||
#define SD_MAJORS 16
|
||||
|
||||
MODULE_AUTHOR("Eric Youngdale");
|
||||
MODULE_DESCRIPTION("SCSI disk (sd) driver");
|
||||
MODULE_LICENSE("GPL");
|
||||
@ -88,45 +82,9 @@ MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK12_MAJOR);
|
||||
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK13_MAJOR);
|
||||
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK14_MAJOR);
|
||||
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK15_MAJOR);
|
||||
|
||||
/*
|
||||
* This is limited by the naming scheme enforced in sd_probe,
|
||||
* add another character to it if you really need more disks.
|
||||
*/
|
||||
#define SD_MAX_DISKS (((26 * 26) + 26 + 1) * 26)
|
||||
|
||||
/*
|
||||
* Time out in seconds for disks and Magneto-opticals (which are slower).
|
||||
*/
|
||||
#define SD_TIMEOUT (30 * HZ)
|
||||
#define SD_MOD_TIMEOUT (75 * HZ)
|
||||
|
||||
/*
|
||||
* Number of allowed retries
|
||||
*/
|
||||
#define SD_MAX_RETRIES 5
|
||||
#define SD_PASSTHROUGH_RETRIES 1
|
||||
|
||||
/*
|
||||
* Size of the initial data buffer for mode and read capacity data
|
||||
*/
|
||||
#define SD_BUF_SIZE 512
|
||||
|
||||
struct scsi_disk {
|
||||
struct scsi_driver *driver; /* always &sd_template */
|
||||
struct scsi_device *device;
|
||||
struct class_device cdev;
|
||||
struct gendisk *disk;
|
||||
unsigned int openers; /* protected by BKL for now, yuck */
|
||||
sector_t capacity; /* size in 512-byte sectors */
|
||||
u32 index;
|
||||
u8 media_present;
|
||||
u8 write_prot;
|
||||
unsigned WCE : 1; /* state of disk WCE bit */
|
||||
unsigned RCD : 1; /* state of disk RCD bit, unused */
|
||||
unsigned DPOFUA : 1; /* state of disk DPOFUA bit */
|
||||
};
|
||||
#define to_scsi_disk(obj) container_of(obj,struct scsi_disk,cdev)
|
||||
MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK);
|
||||
MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD);
|
||||
MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
|
||||
|
||||
static DEFINE_IDR(sd_index_idr);
|
||||
static DEFINE_SPINLOCK(sd_index_lock);
|
||||
@ -136,20 +94,6 @@ static DEFINE_SPINLOCK(sd_index_lock);
|
||||
* object after last put) */
|
||||
static DEFINE_MUTEX(sd_ref_mutex);
|
||||
|
||||
static int sd_revalidate_disk(struct gendisk *disk);
|
||||
static void sd_rw_intr(struct scsi_cmnd * SCpnt);
|
||||
|
||||
static int sd_probe(struct device *);
|
||||
static int sd_remove(struct device *);
|
||||
static void sd_shutdown(struct device *dev);
|
||||
static void sd_rescan(struct device *);
|
||||
static int sd_init_command(struct scsi_cmnd *);
|
||||
static int sd_issue_flush(struct device *, sector_t *);
|
||||
static void sd_prepare_flush(request_queue_t *, struct request *);
|
||||
static void sd_read_capacity(struct scsi_disk *sdkp, char *diskname,
|
||||
unsigned char *buffer);
|
||||
static void scsi_disk_release(struct class_device *cdev);
|
||||
|
||||
static const char *sd_cache_types[] = {
|
||||
"write through", "none", "write back",
|
||||
"write back, no read (daft)"
|
||||
@ -199,13 +143,27 @@ static ssize_t sd_store_cache_type(struct class_device *cdev, const char *buf,
|
||||
if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
|
||||
SD_MAX_RETRIES, &data, &sshdr)) {
|
||||
if (scsi_sense_valid(&sshdr))
|
||||
scsi_print_sense_hdr(sdkp->disk->disk_name, &sshdr);
|
||||
sd_print_sense_hdr(sdkp, &sshdr);
|
||||
return -EINVAL;
|
||||
}
|
||||
sd_revalidate_disk(sdkp->disk);
|
||||
return count;
|
||||
}
|
||||
|
||||
static ssize_t sd_store_manage_start_stop(struct class_device *cdev,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct scsi_disk *sdkp = to_scsi_disk(cdev);
|
||||
struct scsi_device *sdp = sdkp->device;
|
||||
|
||||
if (!capable(CAP_SYS_ADMIN))
|
||||
return -EACCES;
|
||||
|
||||
sdp->manage_start_stop = simple_strtoul(buf, NULL, 10);
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
static ssize_t sd_store_allow_restart(struct class_device *cdev, const char *buf,
|
||||
size_t count)
|
||||
{
|
||||
@ -238,6 +196,14 @@ static ssize_t sd_show_fua(struct class_device *cdev, char *buf)
|
||||
return snprintf(buf, 20, "%u\n", sdkp->DPOFUA);
|
||||
}
|
||||
|
||||
static ssize_t sd_show_manage_start_stop(struct class_device *cdev, char *buf)
|
||||
{
|
||||
struct scsi_disk *sdkp = to_scsi_disk(cdev);
|
||||
struct scsi_device *sdp = sdkp->device;
|
||||
|
||||
return snprintf(buf, 20, "%u\n", sdp->manage_start_stop);
|
||||
}
|
||||
|
||||
static ssize_t sd_show_allow_restart(struct class_device *cdev, char *buf)
|
||||
{
|
||||
struct scsi_disk *sdkp = to_scsi_disk(cdev);
|
||||
@ -251,6 +217,8 @@ static struct class_device_attribute sd_disk_attrs[] = {
|
||||
__ATTR(FUA, S_IRUGO, sd_show_fua, NULL),
|
||||
__ATTR(allow_restart, S_IRUGO|S_IWUSR, sd_show_allow_restart,
|
||||
sd_store_allow_restart),
|
||||
__ATTR(manage_start_stop, S_IRUGO|S_IWUSR, sd_show_manage_start_stop,
|
||||
sd_store_manage_start_stop),
|
||||
__ATTR_NULL,
|
||||
};
|
||||
|
||||
@ -267,6 +235,8 @@ static struct scsi_driver sd_template = {
|
||||
.name = "sd",
|
||||
.probe = sd_probe,
|
||||
.remove = sd_remove,
|
||||
.suspend = sd_suspend,
|
||||
.resume = sd_resume,
|
||||
.shutdown = sd_shutdown,
|
||||
},
|
||||
.rescan = sd_rescan,
|
||||
@ -371,15 +341,19 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
|
||||
unsigned int this_count = SCpnt->request_bufflen >> 9;
|
||||
unsigned int timeout = sdp->timeout;
|
||||
|
||||
SCSI_LOG_HLQUEUE(1, printk("sd_init_command: disk=%s, block=%llu, "
|
||||
"count=%d\n", disk->disk_name,
|
||||
(unsigned long long)block, this_count));
|
||||
SCSI_LOG_HLQUEUE(1, scmd_printk(KERN_INFO, SCpnt,
|
||||
"sd_init_command: block=%llu, "
|
||||
"count=%d\n",
|
||||
(unsigned long long)block,
|
||||
this_count));
|
||||
|
||||
if (!sdp || !scsi_device_online(sdp) ||
|
||||
block + rq->nr_sectors > get_capacity(disk)) {
|
||||
SCSI_LOG_HLQUEUE(2, printk("Finishing %ld sectors\n",
|
||||
rq->nr_sectors));
|
||||
SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt));
|
||||
SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
|
||||
"Finishing %ld sectors\n",
|
||||
rq->nr_sectors));
|
||||
SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
|
||||
"Retry with 0x%p\n", SCpnt));
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -391,8 +365,8 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
|
||||
/* printk("SCSI disk has been changed. Prohibiting further I/O.\n"); */
|
||||
return 0;
|
||||
}
|
||||
SCSI_LOG_HLQUEUE(2, printk("%s : block=%llu\n",
|
||||
disk->disk_name, (unsigned long long)block));
|
||||
SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, "block=%llu\n",
|
||||
(unsigned long long)block));
|
||||
|
||||
/*
|
||||
* If we have a 1K hardware sectorsize, prevent access to single
|
||||
@ -407,7 +381,8 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
|
||||
*/
|
||||
if (sdp->sector_size == 1024) {
|
||||
if ((block & 1) || (rq->nr_sectors & 1)) {
|
||||
printk(KERN_ERR "sd: Bad block number requested");
|
||||
scmd_printk(KERN_ERR, SCpnt,
|
||||
"Bad block number requested\n");
|
||||
return 0;
|
||||
} else {
|
||||
block = block >> 1;
|
||||
@ -416,7 +391,8 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
|
||||
}
|
||||
if (sdp->sector_size == 2048) {
|
||||
if ((block & 3) || (rq->nr_sectors & 3)) {
|
||||
printk(KERN_ERR "sd: Bad block number requested");
|
||||
scmd_printk(KERN_ERR, SCpnt,
|
||||
"Bad block number requested\n");
|
||||
return 0;
|
||||
} else {
|
||||
block = block >> 2;
|
||||
@ -425,7 +401,8 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
|
||||
}
|
||||
if (sdp->sector_size == 4096) {
|
||||
if ((block & 7) || (rq->nr_sectors & 7)) {
|
||||
printk(KERN_ERR "sd: Bad block number requested");
|
||||
scmd_printk(KERN_ERR, SCpnt,
|
||||
"Bad block number requested\n");
|
||||
return 0;
|
||||
} else {
|
||||
block = block >> 3;
|
||||
@ -442,13 +419,15 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
|
||||
SCpnt->cmnd[0] = READ_6;
|
||||
SCpnt->sc_data_direction = DMA_FROM_DEVICE;
|
||||
} else {
|
||||
printk(KERN_ERR "sd: Unknown command %x\n", rq->cmd_flags);
|
||||
scmd_printk(KERN_ERR, SCpnt, "Unknown command %x\n", rq->cmd_flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%ld 512 byte blocks.\n",
|
||||
disk->disk_name, (rq_data_dir(rq) == WRITE) ?
|
||||
"writing" : "reading", this_count, rq->nr_sectors));
|
||||
SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
|
||||
"%s %d/%ld 512 byte blocks.\n",
|
||||
(rq_data_dir(rq) == WRITE) ?
|
||||
"writing" : "reading", this_count,
|
||||
rq->nr_sectors));
|
||||
|
||||
SCpnt->cmnd[1] = 0;
|
||||
|
||||
@ -490,7 +469,8 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
|
||||
* during operation and thus turned off
|
||||
* use_10_for_rw.
|
||||
*/
|
||||
printk(KERN_ERR "sd: FUA write on READ/WRITE(6) drive\n");
|
||||
scmd_printk(KERN_ERR, SCpnt,
|
||||
"FUA write on READ/WRITE(6) drive\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -549,7 +529,7 @@ static int sd_open(struct inode *inode, struct file *filp)
|
||||
return -ENXIO;
|
||||
|
||||
|
||||
SCSI_LOG_HLQUEUE(3, printk("sd_open: disk=%s\n", disk->disk_name));
|
||||
SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_open\n"));
|
||||
|
||||
sdev = sdkp->device;
|
||||
|
||||
@ -619,7 +599,7 @@ static int sd_release(struct inode *inode, struct file *filp)
|
||||
struct scsi_disk *sdkp = scsi_disk(disk);
|
||||
struct scsi_device *sdev = sdkp->device;
|
||||
|
||||
SCSI_LOG_HLQUEUE(3, printk("sd_release: disk=%s\n", disk->disk_name));
|
||||
SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n"));
|
||||
|
||||
if (!--sdkp->openers && sdev->removable) {
|
||||
if (scsi_block_when_processing_errors(sdev))
|
||||
@ -732,8 +712,7 @@ static int sd_media_changed(struct gendisk *disk)
|
||||
struct scsi_device *sdp = sdkp->device;
|
||||
int retval;
|
||||
|
||||
SCSI_LOG_HLQUEUE(3, printk("sd_media_changed: disk=%s\n",
|
||||
disk->disk_name));
|
||||
SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_media_changed\n"));
|
||||
|
||||
if (!sdp->removable)
|
||||
return 0;
|
||||
@ -786,9 +765,10 @@ not_present:
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int sd_sync_cache(struct scsi_device *sdp)
|
||||
static int sd_sync_cache(struct scsi_disk *sdkp)
|
||||
{
|
||||
int retries, res;
|
||||
struct scsi_device *sdp = sdkp->device;
|
||||
struct scsi_sense_hdr sshdr;
|
||||
|
||||
if (!scsi_device_online(sdp))
|
||||
@ -809,28 +789,27 @@ static int sd_sync_cache(struct scsi_device *sdp)
|
||||
break;
|
||||
}
|
||||
|
||||
if (res) { printk(KERN_WARNING "FAILED\n status = %x, message = %02x, "
|
||||
"host = %d, driver = %02x\n ",
|
||||
status_byte(res), msg_byte(res),
|
||||
host_byte(res), driver_byte(res));
|
||||
if (driver_byte(res) & DRIVER_SENSE)
|
||||
scsi_print_sense_hdr("sd", &sshdr);
|
||||
if (res) {
|
||||
sd_print_result(sdkp, res);
|
||||
if (driver_byte(res) & DRIVER_SENSE)
|
||||
sd_print_sense_hdr(sdkp, &sshdr);
|
||||
}
|
||||
|
||||
return res;
|
||||
if (res)
|
||||
return -EIO;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int sd_issue_flush(struct device *dev, sector_t *error_sector)
|
||||
{
|
||||
int ret = 0;
|
||||
struct scsi_device *sdp = to_scsi_device(dev);
|
||||
struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
|
||||
|
||||
if (!sdkp)
|
||||
return -ENODEV;
|
||||
|
||||
if (sdkp->WCE)
|
||||
ret = sd_sync_cache(sdp);
|
||||
ret = sd_sync_cache(sdkp);
|
||||
scsi_disk_put(sdkp);
|
||||
return ret;
|
||||
}
|
||||
@ -928,12 +907,14 @@ static void sd_rw_intr(struct scsi_cmnd * SCpnt)
|
||||
sense_deferred = scsi_sense_is_deferred(&sshdr);
|
||||
}
|
||||
#ifdef CONFIG_SCSI_LOGGING
|
||||
SCSI_LOG_HLCOMPLETE(1, printk("sd_rw_intr: %s: res=0x%x\n",
|
||||
SCpnt->request->rq_disk->disk_name, result));
|
||||
SCSI_LOG_HLCOMPLETE(1, scsi_print_result(SCpnt));
|
||||
if (sense_valid) {
|
||||
SCSI_LOG_HLCOMPLETE(1, printk("sd_rw_intr: sb[respc,sk,asc,"
|
||||
"ascq]=%x,%x,%x,%x\n", sshdr.response_code,
|
||||
sshdr.sense_key, sshdr.asc, sshdr.ascq));
|
||||
SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, SCpnt,
|
||||
"sd_rw_intr: sb[respc,sk,asc,"
|
||||
"ascq]=%x,%x,%x,%x\n",
|
||||
sshdr.response_code,
|
||||
sshdr.sense_key, sshdr.asc,
|
||||
sshdr.ascq));
|
||||
}
|
||||
#endif
|
||||
if (driver_byte(result) != DRIVER_SENSE &&
|
||||
@ -1025,7 +1006,7 @@ static int media_not_present(struct scsi_disk *sdkp,
|
||||
* spinup disk - called only in sd_revalidate_disk()
|
||||
*/
|
||||
static void
|
||||
sd_spinup_disk(struct scsi_disk *sdkp, char *diskname)
|
||||
sd_spinup_disk(struct scsi_disk *sdkp)
|
||||
{
|
||||
unsigned char cmd[10];
|
||||
unsigned long spintime_expire = 0;
|
||||
@ -1069,9 +1050,10 @@ sd_spinup_disk(struct scsi_disk *sdkp, char *diskname)
|
||||
if ((driver_byte(the_result) & DRIVER_SENSE) == 0) {
|
||||
/* no sense, TUR either succeeded or failed
|
||||
* with a status error */
|
||||
if(!spintime && !scsi_status_is_good(the_result))
|
||||
printk(KERN_NOTICE "%s: Unit Not Ready, "
|
||||
"error = 0x%x\n", diskname, the_result);
|
||||
if(!spintime && !scsi_status_is_good(the_result)) {
|
||||
sd_printk(KERN_NOTICE, sdkp, "Unit Not Ready\n");
|
||||
sd_print_result(sdkp, the_result);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
@ -1096,8 +1078,7 @@ sd_spinup_disk(struct scsi_disk *sdkp, char *diskname)
|
||||
*/
|
||||
} else if (sense_valid && sshdr.sense_key == NOT_READY) {
|
||||
if (!spintime) {
|
||||
printk(KERN_NOTICE "%s: Spinning up disk...",
|
||||
diskname);
|
||||
sd_printk(KERN_NOTICE, sdkp, "Spinning up disk...");
|
||||
cmd[0] = START_STOP;
|
||||
cmd[1] = 1; /* Return immediately */
|
||||
memset((void *) &cmd[2], 0, 8);
|
||||
@ -1130,9 +1111,8 @@ sd_spinup_disk(struct scsi_disk *sdkp, char *diskname)
|
||||
/* we don't understand the sense code, so it's
|
||||
* probably pointless to loop */
|
||||
if(!spintime) {
|
||||
printk(KERN_NOTICE "%s: Unit Not Ready, "
|
||||
"sense:\n", diskname);
|
||||
scsi_print_sense_hdr("", &sshdr);
|
||||
sd_printk(KERN_NOTICE, sdkp, "Unit Not Ready\n");
|
||||
sd_print_sense_hdr(sdkp, &sshdr);
|
||||
}
|
||||
break;
|
||||
}
|
||||
@ -1151,8 +1131,7 @@ sd_spinup_disk(struct scsi_disk *sdkp, char *diskname)
|
||||
* read disk capacity
|
||||
*/
|
||||
static void
|
||||
sd_read_capacity(struct scsi_disk *sdkp, char *diskname,
|
||||
unsigned char *buffer)
|
||||
sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer)
|
||||
{
|
||||
unsigned char cmd[16];
|
||||
int the_result, retries;
|
||||
@ -1191,18 +1170,12 @@ repeat:
|
||||
} while (the_result && retries);
|
||||
|
||||
if (the_result && !longrc) {
|
||||
printk(KERN_NOTICE "%s : READ CAPACITY failed.\n"
|
||||
"%s : status=%x, message=%02x, host=%d, driver=%02x \n",
|
||||
diskname, diskname,
|
||||
status_byte(the_result),
|
||||
msg_byte(the_result),
|
||||
host_byte(the_result),
|
||||
driver_byte(the_result));
|
||||
|
||||
sd_printk(KERN_NOTICE, sdkp, "READ CAPACITY failed\n");
|
||||
sd_print_result(sdkp, the_result);
|
||||
if (driver_byte(the_result) & DRIVER_SENSE)
|
||||
scsi_print_sense_hdr("sd", &sshdr);
|
||||
sd_print_sense_hdr(sdkp, &sshdr);
|
||||
else
|
||||
printk("%s : sense not available. \n", diskname);
|
||||
sd_printk(KERN_NOTICE, sdkp, "Sense not available.\n");
|
||||
|
||||
/* Set dirty bit for removable devices if not ready -
|
||||
* sometimes drives will not report this properly. */
|
||||
@ -1218,16 +1191,10 @@ repeat:
|
||||
return;
|
||||
} else if (the_result && longrc) {
|
||||
/* READ CAPACITY(16) has been failed */
|
||||
printk(KERN_NOTICE "%s : READ CAPACITY(16) failed.\n"
|
||||
"%s : status=%x, message=%02x, host=%d, driver=%02x \n",
|
||||
diskname, diskname,
|
||||
status_byte(the_result),
|
||||
msg_byte(the_result),
|
||||
host_byte(the_result),
|
||||
driver_byte(the_result));
|
||||
printk(KERN_NOTICE "%s : use 0xffffffff as device size\n",
|
||||
diskname);
|
||||
|
||||
sd_printk(KERN_NOTICE, sdkp, "READ CAPACITY(16) failed\n");
|
||||
sd_print_result(sdkp, the_result);
|
||||
sd_printk(KERN_NOTICE, sdkp, "Use 0xffffffff as device size\n");
|
||||
|
||||
sdkp->capacity = 1 + (sector_t) 0xffffffff;
|
||||
goto got_data;
|
||||
}
|
||||
@ -1238,14 +1205,14 @@ repeat:
|
||||
if (buffer[0] == 0xff && buffer[1] == 0xff &&
|
||||
buffer[2] == 0xff && buffer[3] == 0xff) {
|
||||
if(sizeof(sdkp->capacity) > 4) {
|
||||
printk(KERN_NOTICE "%s : very big device. try to use"
|
||||
" READ CAPACITY(16).\n", diskname);
|
||||
sd_printk(KERN_NOTICE, sdkp, "Very big device. "
|
||||
"Trying to use READ CAPACITY(16).\n");
|
||||
longrc = 1;
|
||||
goto repeat;
|
||||
}
|
||||
printk(KERN_ERR "%s: too big for this kernel. Use a "
|
||||
"kernel compiled with support for large block "
|
||||
"devices.\n", diskname);
|
||||
sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use "
|
||||
"a kernel compiled with support for large "
|
||||
"block devices.\n");
|
||||
sdkp->capacity = 0;
|
||||
goto got_data;
|
||||
}
|
||||
@ -1284,8 +1251,8 @@ repeat:
|
||||
got_data:
|
||||
if (sector_size == 0) {
|
||||
sector_size = 512;
|
||||
printk(KERN_NOTICE "%s : sector size 0 reported, "
|
||||
"assuming 512.\n", diskname);
|
||||
sd_printk(KERN_NOTICE, sdkp, "Sector size 0 reported, "
|
||||
"assuming 512.\n");
|
||||
}
|
||||
|
||||
if (sector_size != 512 &&
|
||||
@ -1293,8 +1260,8 @@ got_data:
|
||||
sector_size != 2048 &&
|
||||
sector_size != 4096 &&
|
||||
sector_size != 256) {
|
||||
printk(KERN_NOTICE "%s : unsupported sector size "
|
||||
"%d.\n", diskname, sector_size);
|
||||
sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n",
|
||||
sector_size);
|
||||
/*
|
||||
* The user might want to re-format the drive with
|
||||
* a supported sectorsize. Once this happens, it
|
||||
@ -1327,10 +1294,10 @@ got_data:
|
||||
mb -= sz - 974;
|
||||
sector_div(mb, 1950);
|
||||
|
||||
printk(KERN_NOTICE "SCSI device %s: "
|
||||
"%llu %d-byte hdwr sectors (%llu MB)\n",
|
||||
diskname, (unsigned long long)sdkp->capacity,
|
||||
hard_sector, (unsigned long long)mb);
|
||||
sd_printk(KERN_NOTICE, sdkp,
|
||||
"%llu %d-byte hardware sectors (%llu MB)\n",
|
||||
(unsigned long long)sdkp->capacity,
|
||||
hard_sector, (unsigned long long)mb);
|
||||
}
|
||||
|
||||
/* Rescale capacity to 512-byte units */
|
||||
@ -1362,8 +1329,7 @@ sd_do_mode_sense(struct scsi_device *sdp, int dbd, int modepage,
|
||||
* called with buffer of length SD_BUF_SIZE
|
||||
*/
|
||||
static void
|
||||
sd_read_write_protect_flag(struct scsi_disk *sdkp, char *diskname,
|
||||
unsigned char *buffer)
|
||||
sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
|
||||
{
|
||||
int res;
|
||||
struct scsi_device *sdp = sdkp->device;
|
||||
@ -1371,7 +1337,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, char *diskname,
|
||||
|
||||
set_disk_ro(sdkp->disk, 0);
|
||||
if (sdp->skip_ms_page_3f) {
|
||||
printk(KERN_NOTICE "%s: assuming Write Enabled\n", diskname);
|
||||
sd_printk(KERN_NOTICE, sdkp, "Assuming Write Enabled\n");
|
||||
return;
|
||||
}
|
||||
|
||||
@ -1403,15 +1369,16 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, char *diskname,
|
||||
}
|
||||
|
||||
if (!scsi_status_is_good(res)) {
|
||||
printk(KERN_WARNING
|
||||
"%s: test WP failed, assume Write Enabled\n", diskname);
|
||||
sd_printk(KERN_WARNING, sdkp,
|
||||
"Test WP failed, assume Write Enabled\n");
|
||||
} else {
|
||||
sdkp->write_prot = ((data.device_specific & 0x80) != 0);
|
||||
set_disk_ro(sdkp->disk, sdkp->write_prot);
|
||||
printk(KERN_NOTICE "%s: Write Protect is %s\n", diskname,
|
||||
sdkp->write_prot ? "on" : "off");
|
||||
printk(KERN_DEBUG "%s: Mode Sense: %02x %02x %02x %02x\n",
|
||||
diskname, buffer[0], buffer[1], buffer[2], buffer[3]);
|
||||
sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
|
||||
sdkp->write_prot ? "on" : "off");
|
||||
sd_printk(KERN_DEBUG, sdkp,
|
||||
"Mode Sense: %02x %02x %02x %02x\n",
|
||||
buffer[0], buffer[1], buffer[2], buffer[3]);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1420,8 +1387,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, char *diskname,
|
||||
* called with buffer of length SD_BUF_SIZE
|
||||
*/
|
||||
static void
|
||||
sd_read_cache_type(struct scsi_disk *sdkp, char *diskname,
|
||||
unsigned char *buffer)
|
||||
sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
|
||||
{
|
||||
int len = 0, res;
|
||||
struct scsi_device *sdp = sdkp->device;
|
||||
@ -1450,8 +1416,7 @@ sd_read_cache_type(struct scsi_disk *sdkp, char *diskname,
|
||||
|
||||
if (!data.header_length) {
|
||||
modepage = 6;
|
||||
printk(KERN_ERR "%s: missing header in MODE_SENSE response\n",
|
||||
diskname);
|
||||
sd_printk(KERN_ERR, sdkp, "Missing header in MODE_SENSE response\n");
|
||||
}
|
||||
|
||||
/* that went OK, now ask for the proper length */
|
||||
@ -1478,13 +1443,12 @@ sd_read_cache_type(struct scsi_disk *sdkp, char *diskname,
|
||||
int offset = data.header_length + data.block_descriptor_length;
|
||||
|
||||
if (offset >= SD_BUF_SIZE - 2) {
|
||||
printk(KERN_ERR "%s: malformed MODE SENSE response",
|
||||
diskname);
|
||||
sd_printk(KERN_ERR, sdkp, "Malformed MODE SENSE response\n");
|
||||
goto defaults;
|
||||
}
|
||||
|
||||
if ((buffer[offset] & 0x3f) != modepage) {
|
||||
printk(KERN_ERR "%s: got wrong page\n", diskname);
|
||||
sd_printk(KERN_ERR, sdkp, "Got wrong page\n");
|
||||
goto defaults;
|
||||
}
|
||||
|
||||
@ -1498,14 +1462,13 @@ sd_read_cache_type(struct scsi_disk *sdkp, char *diskname,
|
||||
|
||||
sdkp->DPOFUA = (data.device_specific & 0x10) != 0;
|
||||
if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) {
|
||||
printk(KERN_NOTICE "SCSI device %s: uses "
|
||||
"READ/WRITE(6), disabling FUA\n", diskname);
|
||||
sd_printk(KERN_NOTICE, sdkp,
|
||||
"Uses READ/WRITE(6), disabling FUA\n");
|
||||
sdkp->DPOFUA = 0;
|
||||
}
|
||||
|
||||
printk(KERN_NOTICE "SCSI device %s: "
|
||||
"write cache: %s, read cache: %s, %s\n",
|
||||
diskname,
|
||||
sd_printk(KERN_NOTICE, sdkp,
|
||||
"Write cache: %s, read cache: %s, %s\n",
|
||||
sdkp->WCE ? "enabled" : "disabled",
|
||||
sdkp->RCD ? "disabled" : "enabled",
|
||||
sdkp->DPOFUA ? "supports DPO and FUA"
|
||||
@ -1518,15 +1481,13 @@ bad_sense:
|
||||
if (scsi_sense_valid(&sshdr) &&
|
||||
sshdr.sense_key == ILLEGAL_REQUEST &&
|
||||
sshdr.asc == 0x24 && sshdr.ascq == 0x0)
|
||||
printk(KERN_NOTICE "%s: cache data unavailable\n",
|
||||
diskname); /* Invalid field in CDB */
|
||||
/* Invalid field in CDB */
|
||||
sd_printk(KERN_NOTICE, sdkp, "Cache data unavailable\n");
|
||||
else
|
||||
printk(KERN_ERR "%s: asking for cache data failed\n",
|
||||
diskname);
|
||||
sd_printk(KERN_ERR, sdkp, "Asking for cache data failed\n");
|
||||
|
||||
defaults:
|
||||
printk(KERN_ERR "%s: assuming drive cache: write through\n",
|
||||
diskname);
|
||||
sd_printk(KERN_ERR, sdkp, "Assuming drive cache: write through\n");
|
||||
sdkp->WCE = 0;
|
||||
sdkp->RCD = 0;
|
||||
sdkp->DPOFUA = 0;
|
||||
@ -1544,7 +1505,8 @@ static int sd_revalidate_disk(struct gendisk *disk)
|
||||
unsigned char *buffer;
|
||||
unsigned ordered;
|
||||
|
||||
SCSI_LOG_HLQUEUE(3, printk("sd_revalidate_disk: disk=%s\n", disk->disk_name));
|
||||
SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp,
|
||||
"sd_revalidate_disk\n"));
|
||||
|
||||
/*
|
||||
* If the device is offline, don't try and read capacity or any
|
||||
@ -1555,8 +1517,8 @@ static int sd_revalidate_disk(struct gendisk *disk)
|
||||
|
||||
buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL | __GFP_DMA);
|
||||
if (!buffer) {
|
||||
printk(KERN_WARNING "(sd_revalidate_disk:) Memory allocation "
|
||||
"failure.\n");
|
||||
sd_printk(KERN_WARNING, sdkp, "sd_revalidate_disk: Memory "
|
||||
"allocation failure.\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
@ -1568,16 +1530,16 @@ static int sd_revalidate_disk(struct gendisk *disk)
|
||||
sdkp->WCE = 0;
|
||||
sdkp->RCD = 0;
|
||||
|
||||
sd_spinup_disk(sdkp, disk->disk_name);
|
||||
sd_spinup_disk(sdkp);
|
||||
|
||||
/*
|
||||
* Without media there is no reason to ask; moreover, some devices
|
||||
* react badly if we do.
|
||||
*/
|
||||
if (sdkp->media_present) {
|
||||
sd_read_capacity(sdkp, disk->disk_name, buffer);
|
||||
sd_read_write_protect_flag(sdkp, disk->disk_name, buffer);
|
||||
sd_read_cache_type(sdkp, disk->disk_name, buffer);
|
||||
sd_read_capacity(sdkp, buffer);
|
||||
sd_read_write_protect_flag(sdkp, buffer);
|
||||
sd_read_cache_type(sdkp, buffer);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1709,8 +1671,8 @@ static int sd_probe(struct device *dev)
|
||||
dev_set_drvdata(dev, sdkp);
|
||||
add_disk(gd);
|
||||
|
||||
sdev_printk(KERN_NOTICE, sdp, "Attached scsi %sdisk %s\n",
|
||||
sdp->removable ? "removable " : "", gd->disk_name);
|
||||
sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
|
||||
sdp->removable ? "removable " : "");
|
||||
|
||||
return 0;
|
||||
|
||||
@ -1774,6 +1736,31 @@ static void scsi_disk_release(struct class_device *cdev)
|
||||
kfree(sdkp);
|
||||
}
|
||||
|
||||
static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
|
||||
{
|
||||
unsigned char cmd[6] = { START_STOP }; /* START_VALID */
|
||||
struct scsi_sense_hdr sshdr;
|
||||
struct scsi_device *sdp = sdkp->device;
|
||||
int res;
|
||||
|
||||
if (start)
|
||||
cmd[4] |= 1; /* START */
|
||||
|
||||
if (!scsi_device_online(sdp))
|
||||
return -ENODEV;
|
||||
|
||||
res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
|
||||
SD_TIMEOUT, SD_MAX_RETRIES);
|
||||
if (res) {
|
||||
sd_printk(KERN_WARNING, sdkp, "START_STOP FAILED\n");
|
||||
sd_print_result(sdkp, res);
|
||||
if (driver_byte(res) & DRIVER_SENSE)
|
||||
sd_print_sense_hdr(sdkp, &sshdr);
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
/*
|
||||
* Send a SYNCHRONIZE CACHE instruction down to the device through
|
||||
* the normal SCSI command structure. Wait for the command to
|
||||
@ -1781,20 +1768,62 @@ static void scsi_disk_release(struct class_device *cdev)
|
||||
*/
|
||||
static void sd_shutdown(struct device *dev)
|
||||
{
|
||||
struct scsi_device *sdp = to_scsi_device(dev);
|
||||
struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
|
||||
|
||||
if (!sdkp)
|
||||
return; /* this can happen */
|
||||
|
||||
if (sdkp->WCE) {
|
||||
printk(KERN_NOTICE "Synchronizing SCSI cache for disk %s: \n",
|
||||
sdkp->disk->disk_name);
|
||||
sd_sync_cache(sdp);
|
||||
sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
|
||||
sd_sync_cache(sdkp);
|
||||
}
|
||||
|
||||
if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) {
|
||||
sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
|
||||
sd_start_stop_device(sdkp, 0);
|
||||
}
|
||||
|
||||
scsi_disk_put(sdkp);
|
||||
}
|
||||
|
||||
static int sd_suspend(struct device *dev, pm_message_t mesg)
|
||||
{
|
||||
struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
|
||||
int ret;
|
||||
|
||||
if (!sdkp)
|
||||
return 0; /* this can happen */
|
||||
|
||||
if (sdkp->WCE) {
|
||||
sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
|
||||
ret = sd_sync_cache(sdkp);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (mesg.event == PM_EVENT_SUSPEND &&
|
||||
sdkp->device->manage_start_stop) {
|
||||
sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
|
||||
ret = sd_start_stop_device(sdkp, 0);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int sd_resume(struct device *dev)
|
||||
{
|
||||
struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
|
||||
|
||||
if (!sdkp->device->manage_start_stop)
|
||||
return 0;
|
||||
|
||||
sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
|
||||
|
||||
return sd_start_stop_device(sdkp, 1);
|
||||
}
|
||||
|
||||
/**
|
||||
* init_sd - entry point for this driver (both when built in or when
|
||||
* a module).
|
||||
@ -1852,3 +1881,19 @@ static void __exit exit_sd(void)
|
||||
|
||||
module_init(init_sd);
|
||||
module_exit(exit_sd);
|
||||
|
||||
static void sd_print_sense_hdr(struct scsi_disk *sdkp,
|
||||
struct scsi_sense_hdr *sshdr)
|
||||
{
|
||||
sd_printk(KERN_INFO, sdkp, "");
|
||||
scsi_show_sense_hdr(sshdr);
|
||||
sd_printk(KERN_INFO, sdkp, "");
|
||||
scsi_show_extd_sense(sshdr->asc, sshdr->ascq);
|
||||
}
|
||||
|
||||
static void sd_print_result(struct scsi_disk *sdkp, int result)
|
||||
{
|
||||
sd_printk(KERN_INFO, sdkp, "");
|
||||
scsi_show_result(result);
|
||||
}
|
||||
|
||||
|
@ -917,6 +917,8 @@ sg_ioctl(struct inode *inode, struct file *filp,
|
||||
return result;
|
||||
if (val < 0)
|
||||
return -EINVAL;
|
||||
val = min_t(int, val,
|
||||
sdp->device->request_queue->max_sectors * 512);
|
||||
if (val != sfp->reserve.bufflen) {
|
||||
if (sg_res_in_use(sfp) || sfp->mmap_called)
|
||||
return -EBUSY;
|
||||
@ -925,7 +927,8 @@ sg_ioctl(struct inode *inode, struct file *filp,
|
||||
}
|
||||
return 0;
|
||||
case SG_GET_RESERVED_SIZE:
|
||||
val = (int) sfp->reserve.bufflen;
|
||||
val = min_t(int, sfp->reserve.bufflen,
|
||||
sdp->device->request_queue->max_sectors * 512);
|
||||
return put_user(val, ip);
|
||||
case SG_SET_COMMAND_Q:
|
||||
result = get_user(val, ip);
|
||||
@ -1061,6 +1064,9 @@ sg_ioctl(struct inode *inode, struct file *filp,
|
||||
if (sdp->detached)
|
||||
return -ENODEV;
|
||||
return scsi_ioctl(sdp->device, cmd_in, p);
|
||||
case BLKSECTGET:
|
||||
return put_user(sdp->device->request_queue->max_sectors * 512,
|
||||
ip);
|
||||
default:
|
||||
if (read_only)
|
||||
return -EPERM; /* don't know so take safe approach */
|
||||
@ -2339,6 +2345,7 @@ sg_add_sfp(Sg_device * sdp, int dev)
|
||||
{
|
||||
Sg_fd *sfp;
|
||||
unsigned long iflags;
|
||||
int bufflen;
|
||||
|
||||
sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN);
|
||||
if (!sfp)
|
||||
@ -2369,7 +2376,9 @@ sg_add_sfp(Sg_device * sdp, int dev)
|
||||
if (unlikely(sg_big_buff != def_reserved_size))
|
||||
sg_big_buff = def_reserved_size;
|
||||
|
||||
sg_build_reserve(sfp, sg_big_buff);
|
||||
bufflen = min_t(int, sg_big_buff,
|
||||
sdp->device->request_queue->max_sectors * 512);
|
||||
sg_build_reserve(sfp, bufflen);
|
||||
SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
|
||||
sfp->reserve.bufflen, sfp->reserve.k_use_sg));
|
||||
return sfp;
|
||||
|
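All of the sg changes above apply one rule: the reserved buffer can never exceed the queue limit of max_sectors * 512 bytes, whether the size is being set, reported, or allocated at open time. A small stand-alone sketch of that clamp, with invented example numbers:

/* Sketch of the capping rule added in this series: reserved_size is never
 * allowed to exceed max_sectors * 512 bytes. Numbers below are examples. */
#include <stdio.h>

static int cap_reserved_size(int requested, unsigned max_sectors)
{
	int limit = (int)(max_sectors * 512);

	return requested < limit ? requested : limit;   /* like min_t(int, ...) */
}

int main(void)
{
	unsigned max_sectors = 256;                /* e.g. a 128 KiB per-request limit */

	printf("%d\n", cap_reserved_size(1 << 20, max_sectors));  /* -> 131072 */
	printf("%d\n", cap_reserved_size(65536,  max_sectors));   /* -> 65536  */
	return 0;
}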
@ -62,6 +62,8 @@
|
||||
MODULE_DESCRIPTION("SCSI cdrom (sr) driver");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_CDROM_MAJOR);
|
||||
MODULE_ALIAS_SCSI_DEVICE(TYPE_ROM);
|
||||
MODULE_ALIAS_SCSI_DEVICE(TYPE_WORM);
|
||||
|
||||
#define SR_DISKS 256
|
||||
|
||||
|
@ -89,6 +89,7 @@ MODULE_AUTHOR("Kai Makisara");
|
||||
MODULE_DESCRIPTION("SCSI tape (st) driver");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_ALIAS_CHARDEV_MAJOR(SCSI_TAPE_MAJOR);
|
||||
MODULE_ALIAS_SCSI_DEVICE(TYPE_TAPE);
|
||||
|
||||
/* Set 'perm' (4th argument) to 0 to disable module_param's definition
|
||||
* of sysfs parameters (which module_param doesn't yet support).
|
||||
|
@ -588,7 +588,17 @@ struct iscsi_reject {
|
||||
#define VALUE_MAXLEN 255
|
||||
#define TARGET_NAME_MAXLEN VALUE_MAXLEN
|
||||
|
||||
#define DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH 8192
|
||||
#define ISCSI_DEF_MAX_RECV_SEG_LEN 8192
|
||||
#define ISCSI_MIN_MAX_RECV_SEG_LEN 512
|
||||
#define ISCSI_MAX_MAX_RECV_SEG_LEN 16777215
|
||||
|
||||
#define ISCSI_DEF_FIRST_BURST_LEN 65536
|
||||
#define ISCSI_MIN_FIRST_BURST_LEN 512
|
||||
#define ISCSI_MAX_FIRST_BURST_LEN 16777215
|
||||
|
||||
#define ISCSI_DEF_MAX_BURST_LEN 262144
|
||||
#define ISCSI_MIN_MAX_BURST_LEN 512
|
||||
#define ISCSI_MAX_MAX_BURST_LEN 16777215
|
||||
|
||||
/************************* RFC 3720 End *****************************/
|
||||
|
||||
|
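The renamed ISCSI_DEF_/MIN_/MAX_ constants above give each negotiable parameter a default plus a hard range. The sketch below shows how a proposed FirstBurstLength could be clamped into that range; the helper is illustrative only and is not taken from the iSCSI code.

/* Illustrative only: clamps a proposed FirstBurstLength into the
 * [min, max] range defined above; real negotiation code differs. */
#include <stdio.h>

#define ISCSI_DEF_FIRST_BURST_LEN 65536
#define ISCSI_MIN_FIRST_BURST_LEN 512
#define ISCSI_MAX_FIRST_BURST_LEN 16777215

static unsigned clamp_first_burst(unsigned proposed)
{
	if (proposed < ISCSI_MIN_FIRST_BURST_LEN)
		return ISCSI_MIN_FIRST_BURST_LEN;
	if (proposed > ISCSI_MAX_FIRST_BURST_LEN)
		return ISCSI_MAX_FIRST_BURST_LEN;
	return proposed;
}

int main(void)
{
	printf("%u %u %u\n",
	       clamp_first_burst(0),                          /* -> 512      */
	       clamp_first_burst(ISCSI_DEF_FIRST_BURST_LEN),  /* -> 65536    */
	       clamp_first_burst(1u << 31));                  /* -> 16777215 */
	return 0;
}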
@ -203,6 +203,7 @@ static inline int scsi_status_is_good(int status)
|
||||
|
||||
/*
|
||||
* DEVICE TYPES
|
||||
* Please keep them in 0x%02x format for $MODALIAS to work
|
||||
*/
|
||||
|
||||
#define TYPE_DISK 0x00
|
||||
|
@ -73,9 +73,6 @@ struct scsi_cmnd {
|
||||
unsigned short use_sg; /* Number of pieces of scatter-gather */
|
||||
unsigned short sglist_len; /* size of malloc'd scatter-gather list */
|
||||
|
||||
/* offset in cmd we are at (for multi-transfer tgt cmds) */
|
||||
unsigned offset;
|
||||
|
||||
unsigned underflow; /* Return error if less than
|
||||
this amount is transferred */
|
||||
|
||||
|
@ -5,14 +5,16 @@ struct scsi_cmnd;
|
||||
struct scsi_sense_hdr;
|
||||
|
||||
extern void scsi_print_command(struct scsi_cmnd *);
|
||||
extern void scsi_print_sense_hdr(const char *, struct scsi_sense_hdr *);
|
||||
extern void __scsi_print_command(unsigned char *);
|
||||
extern void scsi_print_sense(const char *, struct scsi_cmnd *);
|
||||
extern void scsi_show_extd_sense(unsigned char, unsigned char);
|
||||
extern void scsi_show_sense_hdr(struct scsi_sense_hdr *);
|
||||
extern void scsi_print_sense_hdr(const char *, struct scsi_sense_hdr *);
|
||||
extern void scsi_print_sense(char *, struct scsi_cmnd *);
|
||||
extern void __scsi_print_sense(const char *name,
|
||||
const unsigned char *sense_buffer,
|
||||
int sense_len);
|
||||
extern void scsi_print_driverbyte(int);
|
||||
extern void scsi_print_hostbyte(int);
|
||||
extern void scsi_show_result(int);
|
||||
extern void scsi_print_result(struct scsi_cmnd *);
|
||||
extern void scsi_print_status(unsigned char);
|
||||
extern const char *scsi_sense_key_string(unsigned char);
|
||||
extern const char *scsi_extd_sense_format(unsigned char, unsigned char);
|
||||
|
@ -5,6 +5,7 @@
|
||||
#include <linux/list.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/workqueue.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <asm/atomic.h>
|
||||
|
||||
struct request_queue;
|
||||
@ -119,6 +120,7 @@ struct scsi_device {
|
||||
unsigned use_192_bytes_for_3f:1; /* ask for 192 bytes from page 0x3f */
|
||||
unsigned no_start_on_add:1; /* do not issue start on add */
|
||||
unsigned allow_restart:1; /* issue START_UNIT in error handler */
|
||||
unsigned manage_start_stop:1; /* Let HLD (sd) manage start/stop */
|
||||
unsigned no_uld_attach:1; /* disable connecting to upper level drivers */
|
||||
unsigned select_no_atn:1;
|
||||
unsigned fix_capacity:1; /* READ_CAPACITY is too high by 1 */
|
||||
@ -154,8 +156,11 @@ struct scsi_device {
|
||||
#define sdev_printk(prefix, sdev, fmt, a...) \
|
||||
dev_printk(prefix, &(sdev)->sdev_gendev, fmt, ##a)
|
||||
|
||||
#define scmd_printk(prefix, scmd, fmt, a...) \
|
||||
dev_printk(prefix, &(scmd)->device->sdev_gendev, fmt, ##a)
|
||||
#define scmd_printk(prefix, scmd, fmt, a...) \
|
||||
(scmd)->request->rq_disk ? \
|
||||
sdev_printk(prefix, (scmd)->device, "[%s] " fmt, \
|
||||
(scmd)->request->rq_disk->disk_name, ##a) : \
|
||||
sdev_printk(prefix, (scmd)->device, fmt, ##a)
|
||||
|
||||
enum scsi_target_state {
|
||||
STARGET_RUNNING = 1,
|
||||
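The reworked scmd_printk above adds a "[diskname]" prefix when the request has a gendisk attached and falls back to the plain device prefix otherwise. A user-space model of that conditional-prefix pattern, with stand-in types rather than the kernel's:

/* Sketch only: mirrors the new scmd_printk behaviour of prefixing messages
 * with the disk name when one is attached; types are simplified stand-ins. */
#include <stdarg.h>
#include <stdio.h>

struct fake_cmd {
	const char *dev_name;      /* stands in for the scsi_device prefix  */
	const char *disk_name;     /* NULL when request->rq_disk is not set */
};

static void scmd_print(const struct fake_cmd *cmd, const char *fmt, ...)
{
	va_list ap;

	if (cmd->disk_name)
		printf("%s: [%s] ", cmd->dev_name, cmd->disk_name);
	else
		printf("%s: ", cmd->dev_name);

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

int main(void)
{
	struct fake_cmd with_disk = { "0:0:0:0", "sda" };
	struct fake_cmd no_disk   = { "0:0:1:0", NULL };

	scmd_print(&with_disk, "Unknown command %x\n", 0x2a);
	scmd_print(&no_disk, "Unknown command %x\n", 0x2a);
	return 0;
}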
@ -353,4 +358,9 @@ static inline int scsi_device_qas(struct scsi_device *sdev)
|
||||
return 0;
|
||||
return sdev->inquiry[56] & 0x02;
|
||||
}
|
||||
|
||||
#define MODULE_ALIAS_SCSI_DEVICE(type) \
|
||||
MODULE_ALIAS("scsi:t-" __stringify(type) "*")
|
||||
#define SCSI_DEVICE_MODALIAS_FMT "scsi:t-0x%02x"
|
||||
|
||||
#endif /* _SCSI_SCSI_DEVICE_H */
|
||||
|
@ -129,6 +129,11 @@ struct scsi_host_template {
|
||||
* the LLD. When the driver is finished processing the command
|
||||
* the done callback is invoked.
|
||||
*
|
||||
* This is called to inform the LLD to transfer
|
||||
* cmd->request_bufflen bytes. The cmd->use_sg specifies the
|
||||
* number of scatterlist entries in the command and
|
||||
* cmd->request_buffer contains the scatterlist.
|
||||
*
|
||||
* return values: see queuecommand
|
||||
*
|
||||
* If the LLD accepts the cmd, it should set the result to an
|
||||
@ -139,20 +144,6 @@ struct scsi_host_template {
|
||||
/* TODO: rename */
|
||||
int (* transfer_response)(struct scsi_cmnd *,
|
||||
void (*done)(struct scsi_cmnd *));
|
||||
/*
|
||||
* This is called to inform the LLD to transfer cmd->request_bufflen
|
||||
* bytes of the cmd at cmd->offset in the cmd. The cmd->use_sg
|
||||
* speciefies the number of scatterlist entried in the command
|
||||
* and cmd->request_buffer contains the scatterlist.
|
||||
*
|
||||
* If the command cannot be processed in one transfer_data call
|
||||
* becuase a scatterlist within the LLD's limits cannot be
|
||||
* created then transfer_data will be called multiple times.
|
||||
* It is initially called from process context, and later
|
||||
* calls are from the interrup context.
|
||||
*/
|
||||
int (* transfer_data)(struct scsi_cmnd *,
|
||||
void (*done)(struct scsi_cmnd *));
|
||||
|
||||
/* Used as callback for the completion of task management request. */
|
||||
int (* tsk_mgmt_response)(u64 mid, int result);
|
||||
@ -334,6 +325,19 @@ struct scsi_host_template {
|
||||
*/
|
||||
int (*proc_info)(struct Scsi_Host *, char *, char **, off_t, int, int);
|
||||
|
||||
/*
|
||||
* This is an optional routine that allows the transport to become
|
||||
* involved when a scsi io timer fires. The return value tells the
|
||||
* timer routine how to finish the io timeout handling:
|
||||
* EH_HANDLED: I fixed the error, please complete the command
|
||||
* EH_RESET_TIMER: I need more time, reset the timer and
|
||||
* begin counting again
|
||||
* EH_NOT_HANDLED Begin normal error recovery
|
||||
*
|
||||
* Status: OPTIONAL
|
||||
*/
|
||||
enum scsi_eh_timer_return (* eh_timed_out)(struct scsi_cmnd *);
|
||||
|
||||
/*
|
||||
* suspend support
|
||||
*/
|
||||
|
@ -45,11 +45,13 @@ struct tgt_event {
|
||||
/* user-> kernel */
|
||||
struct {
|
||||
int host_no;
|
||||
uint32_t len;
|
||||
int result;
|
||||
aligned_u64 uaddr;
|
||||
uint8_t rw;
|
||||
aligned_u64 tag;
|
||||
aligned_u64 uaddr;
|
||||
aligned_u64 sense_uaddr;
|
||||
uint32_t len;
|
||||
uint32_t sense_len;
|
||||
uint8_t rw;
|
||||
} cmd_rsp;
|
||||
struct {
|
||||
int host_no;
|
||||
|
@ -108,6 +108,8 @@ enum fc_port_state {
|
||||
#define FC_PORTSPEED_2GBIT 2
|
||||
#define FC_PORTSPEED_4GBIT 4
|
||||
#define FC_PORTSPEED_10GBIT 8
|
||||
#define FC_PORTSPEED_8GBIT 0x10
|
||||
#define FC_PORTSPEED_16GBIT 0x20
|
||||
#define FC_PORTSPEED_NOT_NEGOTIATED (1 << 15) /* Speed not established */
|
||||
|
||||
/*
|
||||
|
72
include/scsi/sd.h
Normal file
@ -0,0 +1,72 @@
|
||||
#ifndef _SCSI_DISK_H
|
||||
#define _SCSI_DISK_H
|
||||
|
||||
/*
|
||||
* More than enough for everybody ;) The huge number of majors
|
||||
* is a leftover from 16bit dev_t days, we don't really need that
|
||||
* much numberspace.
|
||||
*/
|
||||
#define SD_MAJORS 16
|
||||
|
||||
/*
|
||||
* This is limited by the naming scheme enforced in sd_probe,
|
||||
* add another character to it if you really need more disks.
|
||||
*/
|
||||
#define SD_MAX_DISKS (((26 * 26) + 26 + 1) * 26)
|
||||
|
||||
/*
|
||||
* Time out in seconds for disks and Magneto-opticals (which are slower).
|
||||
*/
|
||||
#define SD_TIMEOUT (30 * HZ)
|
||||
#define SD_MOD_TIMEOUT (75 * HZ)
|
||||
|
||||
/*
|
||||
* Number of allowed retries
|
||||
*/
|
||||
#define SD_MAX_RETRIES 5
|
||||
#define SD_PASSTHROUGH_RETRIES 1
|
||||
|
||||
/*
|
||||
* Size of the initial data buffer for mode and read capacity data
|
||||
*/
|
||||
#define SD_BUF_SIZE 512
|
||||
|
||||
struct scsi_disk {
|
||||
struct scsi_driver *driver; /* always &sd_template */
|
||||
struct scsi_device *device;
|
||||
struct class_device cdev;
|
||||
struct gendisk *disk;
|
||||
unsigned int openers; /* protected by BKL for now, yuck */
|
||||
sector_t capacity; /* size in 512-byte sectors */
|
||||
u32 index;
|
||||
u8 media_present;
|
||||
u8 write_prot;
|
||||
unsigned WCE : 1; /* state of disk WCE bit */
|
||||
unsigned RCD : 1; /* state of disk RCD bit, unused */
|
||||
unsigned DPOFUA : 1; /* state of disk DPOFUA bit */
|
||||
};
|
||||
#define to_scsi_disk(obj) container_of(obj,struct scsi_disk,cdev)
|
||||
|
||||
static int sd_revalidate_disk(struct gendisk *disk);
|
||||
static void sd_rw_intr(struct scsi_cmnd * SCpnt);
|
||||
static int sd_probe(struct device *);
|
||||
static int sd_remove(struct device *);
|
||||
static void sd_shutdown(struct device *dev);
|
||||
static int sd_suspend(struct device *dev, pm_message_t state);
|
||||
static int sd_resume(struct device *dev);
|
||||
static void sd_rescan(struct device *);
|
||||
static int sd_init_command(struct scsi_cmnd *);
|
||||
static int sd_issue_flush(struct device *, sector_t *);
|
||||
static void sd_prepare_flush(request_queue_t *, struct request *);
|
||||
static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
|
||||
static void scsi_disk_release(struct class_device *cdev);
|
||||
static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *);
|
||||
static void sd_print_result(struct scsi_disk *, int);
|
||||
|
||||
#define sd_printk(prefix, sdsk, fmt, a...) \
|
||||
(sdsk)->disk ? \
|
||||
sdev_printk(prefix, (sdsk)->device, "[%s] " fmt, \
|
||||
(sdsk)->disk->disk_name, ##a) : \
|
||||
sdev_printk(prefix, (sdsk)->device, fmt, ##a)
|
||||
|
||||
#endif /* _SCSI_DISK_H */
|