SCSI misc on 20191130

This is mostly update of the usual drivers: aacraid, ufs, zfcp,
 NCR5380, lpfc, qla2xxx, smartpqi, hisi_sas, target, mpt3sas, pm80xx
 plus a whole load of minor updates and fixes.  The three major core
 changes are Al Viro's reworking of sg's handling of copy to/from user,
 Ming Lei's removal of the host busy counter to avoid contention in the
 multiqueue case and Damien Le Moal's fixing of residual tracking
 across error handling.
 
 Signed-off-by: James E.J. Bottomley <jejb@linux.ibm.com>
 -----BEGIN PGP SIGNATURE-----
 
 iJwEABMIAEQWIQTnYEDbdso9F2cI+arnQslM7pishQUCXeKvHCYcamFtZXMuYm90
 dG9tbGV5QGhhbnNlbnBhcnRuZXJzaGlwLmNvbQAKCRDnQslM7pishQJMAQDAjlAi
 SNfbyndMqyf+rZGWufDI+43Up1VvW9GeWJHeDwEAxfO5XZsCks2uT8UxXhpEp9L7
 HkiUww3zbcgl0FWFkUM=
 =cdVU
 -----END PGP SIGNATURE-----

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI updates from James Bottomley:
 "This is mostly update of the usual drivers: aacraid, ufs, zfcp,
  NCR5380, lpfc, qla2xxx, smartpqi, hisi_sas, target, mpt3sas, pm80xx
  plus a whole load of minor updates and fixes.

  The major core changes are Al Viro's reworking of sg's handling of
  copy to/from user, Ming Lei's removal of the host busy counter to
  avoid contention in the multiqueue case and Damien Le Moal's fixing of
  residual tracking across error handling"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (251 commits)
  scsi: bnx2fc: timeout calculation invalid for bnx2fc_eh_abort()
  scsi: target: core: Fix a pr_debug() argument
  scsi: iscsi: Don't send data to unbound connection
  scsi: target: iscsi: Wait for all commands to finish before freeing a session
  scsi: target: core: Release SPC-2 reservations when closing a session
  scsi: target: core: Document target_cmd_size_check()
  scsi: bnx2i: fix potential use after free
  Revert "scsi: qla2xxx: Fix memory leak when sending I/O fails"
  scsi: NCR5380: Add disconnect_mask module parameter
  scsi: NCR5380: Unconditionally clear ICR after do_abort()
  scsi: NCR5380: Call scsi_set_resid() on command completion
  scsi: scsi_debug: num_tgts must be >= 0
  scsi: lpfc: use hdwq assigned cpu for allocation
  scsi: arcmsr: fix indentation issues
  scsi: qla4xxx: fix double free bug
  scsi: pm80xx: Modified the logic to collect fatal dump
  scsi: pm80xx: Tie the interrupt name to the module instance
  scsi: pm80xx: Controller fatal error through sysfs
  scsi: pm80xx: Do not request 12G sas speeds
  scsi: pm80xx: Cleanup command when a reset times out
  ...
commit ef2cc88e2a
Merged by: Linus Torvalds, 2019-12-02 13:37:02 -08:00
163 changed files with 5655 additions and 1986 deletions
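A note for readers on the host-busy change mentioned above: in the multiqueue case every submission and completion path previously touched one host-wide busy counter, so all CPUs bounced the same cache line. A minimal userspace sketch of the idea (illustrative only, not the kernel code; all names here are invented):

    #include <stdatomic.h>
    #include <stddef.h>

    #define NR_QUEUES 8

    /* One shared counter: every queue bounces the same cache line. */
    static atomic_int shared_busy;

    /* Per-queue counters: submissions stay queue-local. */
    static atomic_int per_queue_busy[NR_QUEUES];

    static void submit_shared(void)        { atomic_fetch_add(&shared_busy, 1); }
    static void submit_per_queue(size_t q) { atomic_fetch_add(&per_queue_busy[q], 1); }

    /* The rare reader (e.g. error handling) pays the cost instead. */
    static int total_busy(void)
    {
            int sum = 0;

            for (size_t q = 0; q < NR_QUEUES; q++)
                    sum += atomic_load(&per_queue_busy[q]);
            return sum;
    }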

Documentation/devicetree/bindings/ufs/ti,j721e-ufs.yaml (new file)

@@ -0,0 +1,68 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
$id: http://devicetree.org/schemas/ufs/ti,j721e-ufs.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: TI J721e UFS Host Controller Glue Driver

maintainers:
  - Vignesh Raghavendra <vigneshr@ti.com>

properties:
  compatible:
    items:
      - const: ti,j721e-ufs

  reg:
    maxItems: 1
    description: address of TI UFS glue registers

  clocks:
    maxItems: 1
    description: phandle to the M-PHY clock

  power-domains:
    maxItems: 1

required:
  - compatible
  - reg
  - clocks
  - power-domains

patternProperties:
  "^ufs@[0-9a-f]+$":
    type: object
    description: |
      Cadence UFS controller node must be the child node. Refer
      Documentation/devicetree/bindings/ufs/cdns,ufshc.txt for binding
      documentation of child node

examples:
  - |
    #include <dt-bindings/interrupt-controller/irq.h>
    #include <dt-bindings/interrupt-controller/arm-gic.h>

    ufs_wrapper: ufs-wrapper@4e80000 {
        compatible = "ti,j721e-ufs";
        reg = <0x0 0x4e80000 0x0 0x100>;
        power-domains = <&k3_pds 277>;
        clocks = <&k3_clks 277 1>;
        assigned-clocks = <&k3_clks 277 1>;
        assigned-clock-parents = <&k3_clks 277 4>;
        #address-cells = <2>;
        #size-cells = <2>;

        ufs@4e84000 {
            compatible = "cdns,ufshc-m31-16nm", "jedec,ufs-2.0";
            reg = <0x0 0x4e84000 0x0 0x10000>;
            interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
            freq-table-hz = <19200000 19200000>;
            power-domains = <&k3_pds 277>;
            clocks = <&k3_clks 277 1>;
            assigned-clocks = <&k3_clks 277 1>;
            assigned-clock-parents = <&k3_clks 277 4>;
            clock-names = "core_clk";
        };
    };

Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt

@@ -13,6 +13,7 @@ Required properties:
 			  "qcom,msm8996-ufshc", "qcom,ufshc", "jedec,ufs-2.0"
 			  "qcom,msm8998-ufshc", "qcom,ufshc", "jedec,ufs-2.0"
 			  "qcom,sdm845-ufshc", "qcom,ufshc", "jedec,ufs-2.0"
+			  "qcom,sm8150-ufshc", "qcom,ufshc", "jedec,ufs-2.0"
 - interrupts        : <interrupt mapping for UFS host controller IRQ>
 - reg               : <registers mapping>

Documentation/scsi/scsi_mid_low_api.txt

@@ -1084,7 +1084,8 @@ of interest:
                     commands to the adapter.
     this_id      - scsi id of host (scsi initiator) or -1 if not known
     sg_tablesize - maximum scatter gather elements allowed by host.
-                   0 implies scatter gather not supported by host
+                   Set this to SG_ALL or less to avoid chained SG lists.
+                   Must be at least 1.
     max_sectors  - maximum number of sectors (usually 512 bytes) allowed
                    in a single SCSI command. The default value of 0 leads
                    to a setting of SCSI_DEFAULT_MAX_SECTORS (defined in

drivers/ata/pata_arasan_cf.c

@@ -219,7 +219,6 @@ struct arasan_cf_dev {
 static struct scsi_host_template arasan_cf_sht = {
 	ATA_BASE_SHT(DRIVER_NAME),
-	.sg_tablesize = SG_NONE,
 	.dma_boundary = 0xFFFFFFFFUL,
 };

drivers/s390/scsi/Makefile

@@ -5,6 +5,6 @@

 zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_dbf.o zfcp_erp.o \
 	     zfcp_fc.o zfcp_fsf.o zfcp_qdio.o zfcp_scsi.o zfcp_sysfs.o \
-	     zfcp_unit.o
+	     zfcp_unit.o zfcp_diag.o

 obj-$(CONFIG_ZFCP) += zfcp.o

drivers/s390/scsi/zfcp_aux.c

@@ -4,7 +4,7 @@
  *
  * Module interface and handling of zfcp data structures.
  *
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2018
  */

 /*
@@ -25,6 +25,7 @@
  *            Martin Petermann
  *            Sven Schuetz
  *            Steffen Maier
+ *            Benjamin Block
  */

 #define KMSG_COMPONENT "zfcp"
@@ -36,6 +37,7 @@
 #include "zfcp_ext.h"
 #include "zfcp_fc.h"
 #include "zfcp_reqlist.h"
+#include "zfcp_diag.h"

 #define ZFCP_BUS_ID_SIZE	20
@@ -356,6 +358,9 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
 	adapter->erp_action.adapter = adapter;

+	if (zfcp_diag_adapter_setup(adapter))
+		goto failed;
+
 	if (zfcp_qdio_setup(adapter))
 		goto failed;
@@ -402,6 +407,9 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
 			       &zfcp_sysfs_adapter_attrs))
 		goto failed;

+	if (zfcp_diag_sysfs_setup(adapter))
+		goto failed;
+
 	/* report size limit per scatter-gather segment */
 	adapter->ccw_device->dev.dma_parms = &adapter->dma_parms;
@@ -426,6 +434,7 @@ void zfcp_adapter_unregister(struct zfcp_adapter *adapter)
 	zfcp_fc_wka_ports_force_offline(adapter->gs);
 	zfcp_scsi_adapter_unregister(adapter);
+	zfcp_diag_sysfs_destroy(adapter);
 	sysfs_remove_group(&cdev->dev.kobj, &zfcp_sysfs_adapter_attrs);

 	zfcp_erp_thread_kill(adapter);
@@ -449,6 +458,7 @@ void zfcp_adapter_release(struct kref *ref)
 	dev_set_drvdata(&adapter->ccw_device->dev, NULL);
 	zfcp_fc_gs_destroy(adapter);
 	zfcp_free_low_mem_buffers(adapter);
+	zfcp_diag_adapter_free(adapter);
 	kfree(adapter->req_list);
 	kfree(adapter->fc_stats);
 	kfree(adapter->stats_reset_data);

drivers/s390/scsi/zfcp_dbf.c

@@ -95,11 +95,9 @@ void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
 	memcpy(rec->u.res.fsf_status_qual, &q_head->fsf_status_qual,
 	       FSF_STATUS_QUALIFIER_SIZE);

-	if (q_head->fsf_command != FSF_QTCB_FCP_CMND) {
-		rec->pl_len = q_head->log_length;
-		zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start,
-				  rec->pl_len, "fsf_res", req->req_id);
-	}
+	rec->pl_len = q_head->log_length;
+	zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start,
+			  rec->pl_len, "fsf_res", req->req_id);

 	debug_event(dbf->hba, level, rec, sizeof(*rec));
 	spin_unlock_irqrestore(&dbf->hba_lock, flags);

drivers/s390/scsi/zfcp_def.h

@@ -4,7 +4,7 @@
  *
  * Global definitions for the zfcp device driver.
  *
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2018
  */

 #ifndef ZFCP_DEF_H
@@ -86,6 +86,7 @@
 #define ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED	0x00000080
 #define ZFCP_STATUS_FSFREQ_TMFUNCFAILED		0x00000200
 #define ZFCP_STATUS_FSFREQ_DISMISSED		0x00001000
+#define ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE	0x00020000

 /************************* STRUCTURE DEFINITIONS *****************************/

@@ -197,6 +198,7 @@ struct zfcp_adapter {
 	struct device_dma_parameters dma_parms;
 	struct zfcp_fc_events events;
 	unsigned long next_port_scan;
+	struct zfcp_diag_adapter *diagnostics;
 };

 struct zfcp_port {

drivers/s390/scsi/zfcp_diag.c (new file)

@@ -0,0 +1,305 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * zfcp device driver
 *
 * Functions to handle diagnostics.
 *
 * Copyright IBM Corp. 2018
 */

#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/kernfs.h>
#include <linux/sysfs.h>
#include <linux/errno.h>
#include <linux/slab.h>

#include "zfcp_diag.h"
#include "zfcp_ext.h"
#include "zfcp_def.h"

static DECLARE_WAIT_QUEUE_HEAD(__zfcp_diag_publish_wait);

/**
 * zfcp_diag_adapter_setup() - Setup storage for adapter diagnostics.
 * @adapter: the adapter to setup diagnostics for.
 *
 * Creates the data-structures to store the diagnostics for an adapter. This
 * overwrites whatever was stored before at &zfcp_adapter->diagnostics!
 *
 * Return:
 * * 0       - Everything is OK
 * * -ENOMEM - Could not allocate all/parts of the data-structures;
 *             &zfcp_adapter->diagnostics remains unchanged
 */
int zfcp_diag_adapter_setup(struct zfcp_adapter *const adapter)
{
	struct zfcp_diag_adapter *diag;
	struct zfcp_diag_header *hdr;

	diag = kzalloc(sizeof(*diag), GFP_KERNEL);
	if (diag == NULL)
		return -ENOMEM;

	diag->max_age = (5 * 1000); /* default value: 5 s */

	/* setup header for port_data */
	hdr = &diag->port_data.header;

	spin_lock_init(&hdr->access_lock);
	hdr->buffer = &diag->port_data.data;
	hdr->buffer_size = sizeof(diag->port_data.data);
	/* set the timestamp so that the first test on age will always fail */
	hdr->timestamp = jiffies - msecs_to_jiffies(diag->max_age);

	/* setup header for config_data */
	hdr = &diag->config_data.header;

	spin_lock_init(&hdr->access_lock);
	hdr->buffer = &diag->config_data.data;
	hdr->buffer_size = sizeof(diag->config_data.data);
	/* set the timestamp so that the first test on age will always fail */
	hdr->timestamp = jiffies - msecs_to_jiffies(diag->max_age);

	adapter->diagnostics = diag;
	return 0;
}

/**
 * zfcp_diag_adapter_free() - Frees all adapter diagnostics allocations.
 * @adapter: the adapter whose diagnostic structures should be freed.
 *
 * Frees all data-structures in the given adapter that store diagnostics
 * information. Can safely be called with partially setup diagnostics.
 */
void zfcp_diag_adapter_free(struct zfcp_adapter *const adapter)
{
	kfree(adapter->diagnostics);
	adapter->diagnostics = NULL;
}

/**
 * zfcp_diag_sysfs_setup() - Setup the sysfs-group for adapter-diagnostics.
 * @adapter: target adapter to which the group should be added.
 *
 * Return: 0 on success; Something else otherwise (see sysfs_create_group()).
 */
int zfcp_diag_sysfs_setup(struct zfcp_adapter *const adapter)
{
	int rc = sysfs_create_group(&adapter->ccw_device->dev.kobj,
				    &zfcp_sysfs_diag_attr_group);
	if (rc == 0)
		adapter->diagnostics->sysfs_established = 1;

	return rc;
}

/**
 * zfcp_diag_sysfs_destroy() - Remove the sysfs-group for adapter-diagnostics.
 * @adapter: target adapter from which the group should be removed.
 */
void zfcp_diag_sysfs_destroy(struct zfcp_adapter *const adapter)
{
	if (adapter->diagnostics == NULL ||
	    !adapter->diagnostics->sysfs_established)
		return;

	/*
	 * We need this state-handling so we can prevent warnings being printed
	 * on the kernel-console in case we have to abort a halfway done
	 * zfcp_adapter_enqueue(), in which the sysfs-group was not yet
	 * established. sysfs_remove_group() does this checking as well, but
	 * still prints a warning in case we try to remove a group that has not
	 * been established before
	 */
	adapter->diagnostics->sysfs_established = 0;
	sysfs_remove_group(&adapter->ccw_device->dev.kobj,
			   &zfcp_sysfs_diag_attr_group);
}

/**
 * zfcp_diag_update_xdata() - Update a diagnostics buffer.
 * @hdr: the meta data to update.
 * @data: data to use for the update.
 * @incomplete: flag stating whether the data in @data is incomplete.
 */
void zfcp_diag_update_xdata(struct zfcp_diag_header *const hdr,
			    const void *const data, const bool incomplete)
{
	const unsigned long capture_timestamp = jiffies;
	unsigned long flags;

	spin_lock_irqsave(&hdr->access_lock, flags);

	/* make sure we never go into the past with an update */
	if (!time_after_eq(capture_timestamp, hdr->timestamp))
		goto out;

	hdr->timestamp = capture_timestamp;
	hdr->incomplete = incomplete;
	memcpy(hdr->buffer, data, hdr->buffer_size);

out:
	spin_unlock_irqrestore(&hdr->access_lock, flags);
}

/**
 * zfcp_diag_update_port_data_buffer() - Implementation of
 *					 &typedef zfcp_diag_update_buffer_func
 *					 to collect and update Port Data.
 * @adapter: Adapter to collect Port Data from.
 *
 * This call is SYNCHRONOUS ! It blocks till the respective command has
 * finished completely, or has failed in some way.
 *
 * Return:
 * * 0 - Successfully retrieved new Diagnostics and Updated the buffer;
 *       this also includes cases where data was retrieved, but
 *       incomplete; you'll have to check the flag ``incomplete``
 *       of &struct zfcp_diag_header.
 * * see zfcp_fsf_exchange_port_data_sync() for possible error-codes (
 *   excluding -EAGAIN)
 */
int zfcp_diag_update_port_data_buffer(struct zfcp_adapter *const adapter)
{
	int rc;

	rc = zfcp_fsf_exchange_port_data_sync(adapter->qdio, NULL);
	if (rc == -EAGAIN)
		rc = 0; /* signaling incomplete via struct zfcp_diag_header */

	/* buffer-data was updated in zfcp_fsf_exchange_port_data_handler() */

	return rc;
}

/**
 * zfcp_diag_update_config_data_buffer() - Implementation of
 *					   &typedef zfcp_diag_update_buffer_func
 *					   to collect and update Config Data.
 * @adapter: Adapter to collect Config Data from.
 *
 * This call is SYNCHRONOUS ! It blocks till the respective command has
 * finished completely, or has failed in some way.
 *
 * Return:
 * * 0 - Successfully retrieved new Diagnostics and Updated the buffer;
 *       this also includes cases where data was retrieved, but
 *       incomplete; you'll have to check the flag ``incomplete``
 *       of &struct zfcp_diag_header.
 * * see zfcp_fsf_exchange_config_data_sync() for possible error-codes (
 *   excluding -EAGAIN)
 */
int zfcp_diag_update_config_data_buffer(struct zfcp_adapter *const adapter)
{
	int rc;

	rc = zfcp_fsf_exchange_config_data_sync(adapter->qdio, NULL);
	if (rc == -EAGAIN)
		rc = 0; /* signaling incomplete via struct zfcp_diag_header */

	/* buffer-data was updated in zfcp_fsf_exchange_config_data_handler() */

	return rc;
}

static int __zfcp_diag_update_buffer(struct zfcp_adapter *const adapter,
				     struct zfcp_diag_header *const hdr,
				     zfcp_diag_update_buffer_func buffer_update,
				     unsigned long *const flags)
	__must_hold(hdr->access_lock)
{
	int rc;

	if (hdr->updating == 1) {
		rc = wait_event_interruptible_lock_irq(__zfcp_diag_publish_wait,
						       hdr->updating == 0,
						       hdr->access_lock);
		rc = (rc == 0 ? -EAGAIN : -EINTR);
	} else {
		hdr->updating = 1;
		spin_unlock_irqrestore(&hdr->access_lock, *flags);

		/* unlocked, because update function sleeps */
		rc = buffer_update(adapter);

		spin_lock_irqsave(&hdr->access_lock, *flags);
		hdr->updating = 0;

		/*
		 * every thread waiting here went via an interruptible wait,
		 * so it's fine to only wake those
		 */
		wake_up_interruptible_all(&__zfcp_diag_publish_wait);
	}

	return rc;
}

static bool
__zfcp_diag_test_buffer_age_isfresh(const struct zfcp_diag_adapter *const diag,
				    const struct zfcp_diag_header *const hdr)
	__must_hold(hdr->access_lock)
{
	const unsigned long now = jiffies;

	/*
	 * Should not happen (data is from the future).. if it does, still
	 * signal that it needs refresh
	 */
	if (!time_after_eq(now, hdr->timestamp))
		return false;

	if (jiffies_to_msecs(now - hdr->timestamp) >= diag->max_age)
		return false;

	return true;
}

/**
 * zfcp_diag_update_buffer_limited() - Collect diagnostics and update a
 *				       diagnostics buffer rate limited.
 * @adapter: Adapter to collect the diagnostics from.
 * @hdr: buffer-header for which to update with the collected diagnostics.
 * @buffer_update: Specific implementation for collecting and updating.
 *
 * This function will cause an update of the given @hdr by calling the also
 * given @buffer_update function. If called by multiple sources at the same
 * time, it will synchronize the update by only allowing one source to call
 * @buffer_update and the others to wait for that source to complete instead
 * (the wait is interruptible).
 *
 * Additionally this version is rate-limited and will only exit if either the
 * buffer is fresh enough (within the limit) - it will do nothing if the buffer
 * is fresh enough to begin with -, or if the source/thread that started this
 * update is the one that made the update (to prevent endless loops).
 *
 * Return:
 * * 0      - If the update was successfully published and/or the buffer is
 *            fresh enough
 * * -EINTR - If the thread went into the wait-state and was interrupted
 * * whatever @buffer_update returns
 */
int zfcp_diag_update_buffer_limited(struct zfcp_adapter *const adapter,
				    struct zfcp_diag_header *const hdr,
				    zfcp_diag_update_buffer_func buffer_update)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&hdr->access_lock, flags);

	for (rc = 0;
	     !__zfcp_diag_test_buffer_age_isfresh(adapter->diagnostics, hdr);
	     rc = 0) {
		rc = __zfcp_diag_update_buffer(adapter, hdr, buffer_update,
					       &flags);
		if (rc != -EAGAIN)
			break;
	}

	spin_unlock_irqrestore(&hdr->access_lock, flags);

	return rc;
}
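The update scheme in __zfcp_diag_update_buffer() is a general pattern: one caller becomes the updater and drops the lock while the slow fetch runs; everyone else sleeps and retries, so the adapter command is issued at most once per refresh. A rough userspace analogue with pthreads (a sketch under those assumptions, not the driver code):

    #include <pthread.h>
    #include <stdbool.h>

    struct cached_buf {
            pthread_mutex_t lock;
            pthread_cond_t done;
            bool updating;
            /* ... payload and timestamp would live here ... */
    };

    /* Returns once the buffer has been refreshed, by us or by someone else. */
    static int refresh(struct cached_buf *b, int (*fetch)(void))
    {
            int rc = 0;

            pthread_mutex_lock(&b->lock);
            if (b->updating) {
                    while (b->updating)             /* someone else updates */
                            pthread_cond_wait(&b->done, &b->lock);
            } else {
                    b->updating = true;
                    pthread_mutex_unlock(&b->lock); /* fetch() may sleep */
                    rc = fetch();
                    pthread_mutex_lock(&b->lock);
                    b->updating = false;
                    pthread_cond_broadcast(&b->done);
            }
            pthread_mutex_unlock(&b->lock);
            return rc;
    }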

drivers/s390/scsi/zfcp_diag.h (new file)

@@ -0,0 +1,101 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * zfcp device driver
 *
 * Definitions for handling diagnostics in the zfcp device driver.
 *
 * Copyright IBM Corp. 2018
 */

#ifndef ZFCP_DIAG_H
#define ZFCP_DIAG_H

#include <linux/spinlock.h>

#include "zfcp_fsf.h"
#include "zfcp_def.h"

/**
 * struct zfcp_diag_header - general part of a diagnostic buffer.
 * @access_lock: lock protecting all the data in this buffer.
 * @updating: flag showing that an update for this buffer is currently running.
 * @incomplete: flag showing that the data in @buffer is incomplete.
 * @timestamp: time in jiffies when the data of this buffer was last captured.
 * @buffer: implementation-depending data of this buffer
 * @buffer_size: size of @buffer
 */
struct zfcp_diag_header {
	spinlock_t	access_lock;

	/* Flags */
	u64		updating	:1;
	u64		incomplete	:1;

	unsigned long	timestamp;

	void		*buffer;
	size_t		buffer_size;
};

/**
 * struct zfcp_diag_adapter - central storage for all diagnostics concerning an
 *			      adapter.
 * @sysfs_established: flag showing that the associated sysfs-group was created
 *		       during run of zfcp_adapter_enqueue().
 * @max_age: maximum age of data in diagnostic buffers before they need to be
 *	     refreshed (in ms).
 * @port_data: data retrieved using exchange port data.
 * @port_data.header: header with metadata for the cache in @port_data.data.
 * @port_data.data: cached QTCB Bottom of command exchange port data.
 * @config_data: data retrieved using exchange config data.
 * @config_data.header: header with metadata for the cache in
 *			@config_data.data.
 * @config_data.data: cached QTCB Bottom of command exchange config data.
 */
struct zfcp_diag_adapter {
	u64	sysfs_established	:1;

	unsigned long	max_age;

	struct {
		struct zfcp_diag_header		header;
		struct fsf_qtcb_bottom_port	data;
	} port_data;
	struct {
		struct zfcp_diag_header		header;
		struct fsf_qtcb_bottom_config	data;
	} config_data;
};

int zfcp_diag_adapter_setup(struct zfcp_adapter *const adapter);
void zfcp_diag_adapter_free(struct zfcp_adapter *const adapter);

int zfcp_diag_sysfs_setup(struct zfcp_adapter *const adapter);
void zfcp_diag_sysfs_destroy(struct zfcp_adapter *const adapter);

void zfcp_diag_update_xdata(struct zfcp_diag_header *const hdr,
			    const void *const data, const bool incomplete);

/*
 * Function-Type used in zfcp_diag_update_buffer_limited() for the function
 * that does the buffer-implementation dependent work.
 */
typedef int (*zfcp_diag_update_buffer_func)(struct zfcp_adapter *const adapter);

int zfcp_diag_update_config_data_buffer(struct zfcp_adapter *const adapter);
int zfcp_diag_update_port_data_buffer(struct zfcp_adapter *const adapter);
int zfcp_diag_update_buffer_limited(struct zfcp_adapter *const adapter,
				    struct zfcp_diag_header *const hdr,
				    zfcp_diag_update_buffer_func buffer_update);

/**
 * zfcp_diag_support_sfp() - Return %true if the @adapter supports reporting
 *			     SFP Data.
 * @adapter: adapter to test the availability of SFP Data reporting for.
 */
static inline bool
zfcp_diag_support_sfp(const struct zfcp_adapter *const adapter)
{
	return !!(adapter->adapter_features & FSF_FEATURE_REPORT_SFP_DATA);
}

#endif /* ZFCP_DIAG_H */

drivers/s390/scsi/zfcp_erp.c

@@ -174,7 +174,7 @@ static enum zfcp_erp_act_type zfcp_erp_required_act(enum zfcp_erp_act_type want,
 			return 0;
 		p_status = atomic_read(&port->status);
 		if (!(p_status & ZFCP_STATUS_COMMON_RUNNING) ||
-		      p_status & ZFCP_STATUS_COMMON_ERP_FAILED)
+		    p_status & ZFCP_STATUS_COMMON_ERP_FAILED)
 			return 0;
 		if (!(p_status & ZFCP_STATUS_COMMON_UNBLOCKED))
 			need = ZFCP_ERP_ACTION_REOPEN_PORT;
@@ -190,7 +190,7 @@ static enum zfcp_erp_act_type zfcp_erp_required_act(enum zfcp_erp_act_type want,
 			return 0;
 		a_status = atomic_read(&adapter->status);
 		if (!(a_status & ZFCP_STATUS_COMMON_RUNNING) ||
-		      a_status & ZFCP_STATUS_COMMON_ERP_FAILED)
+		    a_status & ZFCP_STATUS_COMMON_ERP_FAILED)
 			return 0;
 		if (p_status & ZFCP_STATUS_COMMON_NOESC)
 			return need;

drivers/s390/scsi/zfcp_ext.h

@@ -167,6 +167,7 @@ extern const struct attribute_group *zfcp_port_attr_groups[];
 extern struct mutex zfcp_sysfs_port_units_mutex;
 extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
 extern struct device_attribute *zfcp_sysfs_shost_attrs[];
+extern const struct attribute_group zfcp_sysfs_diag_attr_group;
 bool zfcp_sysfs_port_is_removing(const struct zfcp_port *const port);

 /* zfcp_unit.c */

drivers/s390/scsi/zfcp_fsf.c

@@ -11,6 +11,7 @@
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

 #include <linux/blktrace_api.h>
+#include <linux/jiffies.h>
 #include <linux/types.h>
 #include <linux/slab.h>
 #include <scsi/fc/fc_els.h>
@@ -19,6 +20,7 @@
 #include "zfcp_dbf.h"
 #include "zfcp_qdio.h"
 #include "zfcp_reqlist.h"
+#include "zfcp_diag.h"

 /* timeout for FSF requests sent during scsi_eh: abort or FCP TMF */
 #define ZFCP_FSF_SCSI_ER_TIMEOUT (10*HZ)
@@ -554,6 +556,8 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
 static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
 {
 	struct zfcp_adapter *adapter = req->adapter;
+	struct zfcp_diag_header *const diag_hdr =
+		&adapter->diagnostics->config_data.header;
 	struct fsf_qtcb *qtcb = req->qtcb;
 	struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config;
 	struct Scsi_Host *shost = adapter->scsi_host;
@@ -570,6 +574,12 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
 	switch (qtcb->header.fsf_status) {
 	case FSF_GOOD:
+		/*
+		 * usually we wait with an update till the cache is too old,
+		 * but because we have the data available, update it anyway
+		 */
+		zfcp_diag_update_xdata(diag_hdr, bottom, false);
+
 		if (zfcp_fsf_exchange_config_evaluate(req))
 			return;
@@ -585,6 +595,9 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
 				&adapter->status);
 		break;
 	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
+		zfcp_diag_update_xdata(diag_hdr, bottom, true);
+		req->status |= ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE;
+
 		fc_host_node_name(shost) = 0;
 		fc_host_port_name(shost) = 0;
 		fc_host_port_id(shost) = 0;
@@ -653,16 +666,28 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
 static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
 {
+	struct zfcp_diag_header *const diag_hdr =
+		&req->adapter->diagnostics->port_data.header;
 	struct fsf_qtcb *qtcb = req->qtcb;
+	struct fsf_qtcb_bottom_port *bottom = &qtcb->bottom.port;

 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
 		return;

 	switch (qtcb->header.fsf_status) {
 	case FSF_GOOD:
+		/*
+		 * usually we wait with an update till the cache is too old,
+		 * but because we have the data available, update it anyway
+		 */
+		zfcp_diag_update_xdata(diag_hdr, bottom, false);
+
 		zfcp_fsf_exchange_port_evaluate(req);
 		break;
 	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
+		zfcp_diag_update_xdata(diag_hdr, bottom, true);
+		req->status |= ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE;
+
 		zfcp_fsf_exchange_port_evaluate(req);
 		zfcp_fsf_link_down_info_eval(req,
 			&qtcb->header.fsf_status_qual.link_down_info);
@@ -1261,7 +1286,8 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
 	req->qtcb->bottom.config.feature_selection =
 		FSF_FEATURE_NOTIFICATION_LOST |
-		FSF_FEATURE_UPDATE_ALERT;
+		FSF_FEATURE_UPDATE_ALERT |
+		FSF_FEATURE_REQUEST_SFP_DATA;
 	req->erp_action = erp_action;
 	req->handler = zfcp_fsf_exchange_config_data_handler;
 	erp_action->fsf_req_id = req->req_id;
@@ -1278,6 +1304,19 @@ out:
 	return retval;
 }

+/**
+ * zfcp_fsf_exchange_config_data_sync() - Request information about FCP channel.
+ * @qdio: pointer to the QDIO-Queue to use for sending the command.
+ * @data: pointer to the QTCB-Bottom for storing the result of the command,
+ *	  might be %NULL.
+ *
+ * Returns:
+ * * 0       - Exchange Config Data was successful, @data is complete
+ * * -EIO    - Exchange Config Data was not successful, @data is invalid
+ * * -EAGAIN - @data contains incomplete data
+ * * -ENOMEM - Some memory allocation failed along the way
+ */
 int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
 				       struct fsf_qtcb_bottom_config *data)
 {
@@ -1301,7 +1340,8 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
 	req->qtcb->bottom.config.feature_selection =
 		FSF_FEATURE_NOTIFICATION_LOST |
-		FSF_FEATURE_UPDATE_ALERT;
+		FSF_FEATURE_UPDATE_ALERT |
+		FSF_FEATURE_REQUEST_SFP_DATA;

 	if (data)
 		req->data = data;
@@ -1309,9 +1349,16 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
 	retval = zfcp_fsf_req_send(req);
 	spin_unlock_irq(&qdio->req_q_lock);
+
 	if (!retval) {
 		/* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */
 		wait_for_completion(&req->completion);
+
+		if (req->status &
+		    (ZFCP_STATUS_FSFREQ_ERROR | ZFCP_STATUS_FSFREQ_DISMISSED))
+			retval = -EIO;
+		else if (req->status & ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE)
+			retval = -EAGAIN;
 	}

 	zfcp_fsf_req_free(req);
@@ -1369,10 +1416,17 @@ out:
 }

 /**
- * zfcp_fsf_exchange_port_data_sync - request information about local port
- * @qdio: pointer to struct zfcp_qdio
- * @data: pointer to struct fsf_qtcb_bottom_port
- * Returns: 0 on success, error otherwise
+ * zfcp_fsf_exchange_port_data_sync() - Request information about local port.
+ * @qdio: pointer to the QDIO-Queue to use for sending the command.
+ * @data: pointer to the QTCB-Bottom for storing the result of the command,
+ *	  might be %NULL.
+ *
+ * Returns:
+ * * 0          - Exchange Port Data was successful, @data is complete
+ * * -EIO       - Exchange Port Data was not successful, @data is invalid
+ * * -EAGAIN    - @data contains incomplete data
+ * * -ENOMEM    - Some memory allocation failed along the way
+ * * -EOPNOTSUPP - This operation is not supported
  */
 int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
 				     struct fsf_qtcb_bottom_port *data)
@@ -1408,10 +1462,15 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
 	if (!retval) {
 		/* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */
 		wait_for_completion(&req->completion);
+
+		if (req->status &
+		    (ZFCP_STATUS_FSFREQ_ERROR | ZFCP_STATUS_FSFREQ_DISMISSED))
+			retval = -EIO;
+		else if (req->status & ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE)
+			retval = -EAGAIN;
 	}

 	zfcp_fsf_req_free(req);
 	return retval;

 out_unlock:

drivers/s390/scsi/zfcp_fsf.h

@@ -163,6 +163,8 @@
 #define FSF_FEATURE_ELS_CT_CHAINED_SBALS 0x00000020
 #define FSF_FEATURE_UPDATE_ALERT	0x00000100
 #define FSF_FEATURE_MEASUREMENT_DATA	0x00000200
+#define FSF_FEATURE_REQUEST_SFP_DATA	0x00000200
+#define FSF_FEATURE_REPORT_SFP_DATA	0x00000800
 #define FSF_FEATURE_DIF_PROT_TYPE1	0x00010000
 #define FSF_FEATURE_DIX_PROT_TCPIP	0x00020000

@@ -407,7 +409,24 @@ struct fsf_qtcb_bottom_port {
 	u8 cp_util;
 	u8 cb_util;
 	u8 a_util;
-	u8 res2[253];
+	u8 res2;
+	u16 temperature;
+	u16 vcc;
+	u16 tx_bias;
+	u16 tx_power;
+	u16 rx_power;
+	union {
+		u16 raw;
+		struct {
+			u16 fec_active		:1;
+			u16:7;
+			u16 connector_type	:2;
+			u16 sfp_invalid		:1;
+			u16 optical_port	:1;
+			u16 port_tx_type	:4;
+		};
+	} sfp_flags;
+	u8 res3[240];
 } __attribute__ ((packed));

 union fsf_qtcb_bottom {

drivers/s390/scsi/zfcp_scsi.c

@@ -605,7 +605,7 @@ zfcp_scsi_get_fc_host_stats(struct Scsi_Host *host)
 		return NULL;

 	ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data);
-	if (ret) {
+	if (ret != 0 && ret != -EAGAIN) {
 		kfree(data);
 		return NULL;
 	}
@@ -634,7 +634,7 @@ static void zfcp_scsi_reset_fc_host_stats(struct Scsi_Host *shost)
 		return;

 	ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data);
-	if (ret)
+	if (ret != 0 && ret != -EAGAIN)
 		kfree(data);
 	else {
 		adapter->stats_reset = jiffies/HZ;

drivers/s390/scsi/zfcp_sysfs.c

@@ -11,6 +11,7 @@
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

 #include <linux/slab.h>
+#include "zfcp_diag.h"
 #include "zfcp_ext.h"

 #define ZFCP_DEV_ATTR(_feat, _name, _mode, _show, _store) \
@@ -325,6 +326,50 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
 static ZFCP_DEV_ATTR(adapter, port_remove, S_IWUSR, NULL,
 		     zfcp_sysfs_port_remove_store);

+static ssize_t
+zfcp_sysfs_adapter_diag_max_age_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(to_ccwdev(dev));
+	ssize_t rc;
+
+	if (!adapter)
+		return -ENODEV;
+
+	/* ceil(log(2^64 - 1) / log(10)) = 20 */
+	rc = scnprintf(buf, 20 + 2, "%lu\n", adapter->diagnostics->max_age);
+
+	zfcp_ccw_adapter_put(adapter);
+	return rc;
+}
+
+static ssize_t
+zfcp_sysfs_adapter_diag_max_age_store(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t count)
+{
+	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(to_ccwdev(dev));
+	unsigned long max_age;
+	ssize_t rc;
+
+	if (!adapter)
+		return -ENODEV;
+
+	rc = kstrtoul(buf, 10, &max_age);
+	if (rc != 0)
+		goto out;
+
+	adapter->diagnostics->max_age = max_age;
+
+	rc = count;
+out:
+	zfcp_ccw_adapter_put(adapter);
+	return rc;
+}
+static ZFCP_DEV_ATTR(adapter, diag_max_age, 0644,
+		     zfcp_sysfs_adapter_diag_max_age_show,
+		     zfcp_sysfs_adapter_diag_max_age_store);
+
 static struct attribute *zfcp_adapter_attrs[] = {
 	&dev_attr_adapter_failed.attr,
 	&dev_attr_adapter_in_recovery.attr,
@@ -337,6 +382,7 @@ static struct attribute *zfcp_adapter_attrs[] = {
 	&dev_attr_adapter_lic_version.attr,
 	&dev_attr_adapter_status.attr,
 	&dev_attr_adapter_hardware_version.attr,
+	&dev_attr_adapter_diag_max_age.attr,
 	NULL
 };

@@ -577,7 +623,7 @@ static ssize_t zfcp_sysfs_adapter_util_show(struct device *dev,
 		return -ENOMEM;

 	retval = zfcp_fsf_exchange_port_data_sync(adapter->qdio, qtcb_port);
-	if (!retval)
+	if (retval == 0 || retval == -EAGAIN)
 		retval = sprintf(buf, "%u %u %u\n", qtcb_port->cp_util,
 				 qtcb_port->cb_util, qtcb_port->a_util);
 	kfree(qtcb_port);
@@ -603,7 +649,7 @@ static int zfcp_sysfs_adapter_ex_config(struct device *dev,
 		return -ENOMEM;

 	retval = zfcp_fsf_exchange_config_data_sync(adapter->qdio, qtcb_config);
-	if (!retval)
+	if (retval == 0 || retval == -EAGAIN)
 		*stat_inf = qtcb_config->stat_info;

 	kfree(qtcb_config);
@@ -664,3 +710,123 @@ struct device_attribute *zfcp_sysfs_shost_attrs[] = {
 	&dev_attr_queue_full,
 	NULL
 };
+
+static ssize_t zfcp_sysfs_adapter_diag_b2b_credit_show(
+	struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(to_ccwdev(dev));
+	struct zfcp_diag_header *diag_hdr;
+	struct fc_els_flogi *nsp;
+	ssize_t rc = -ENOLINK;
+	unsigned long flags;
+	unsigned int status;
+
+	if (!adapter)
+		return -ENODEV;
+
+	status = atomic_read(&adapter->status);
+	if (0 == (status & ZFCP_STATUS_COMMON_OPEN) ||
+	    0 == (status & ZFCP_STATUS_COMMON_UNBLOCKED) ||
+	    0 != (status & ZFCP_STATUS_COMMON_ERP_FAILED))
+		goto out;
+
+	diag_hdr = &adapter->diagnostics->config_data.header;
+
+	rc = zfcp_diag_update_buffer_limited(
+		adapter, diag_hdr, zfcp_diag_update_config_data_buffer);
+	if (rc != 0)
+		goto out;
+
+	spin_lock_irqsave(&diag_hdr->access_lock, flags);
+	/* nport_serv_param doesn't contain the ELS_Command code */
+	nsp = (struct fc_els_flogi *)((unsigned long)
+					      adapter->diagnostics->config_data
+						      .data.nport_serv_param -
+				      sizeof(u32));
+	rc = scnprintf(buf, 5 + 2, "%hu\n",
+		       be16_to_cpu(nsp->fl_csp.sp_bb_cred));
+	spin_unlock_irqrestore(&diag_hdr->access_lock, flags);
+
+out:
+	zfcp_ccw_adapter_put(adapter);
+	return rc;
+}
+static ZFCP_DEV_ATTR(adapter_diag, b2b_credit, 0400,
+		     zfcp_sysfs_adapter_diag_b2b_credit_show, NULL);
+
+#define ZFCP_DEFINE_DIAG_SFP_ATTR(_name, _qtcb_member, _prtsize, _prtfmt)      \
+	static ssize_t zfcp_sysfs_adapter_diag_sfp_##_name##_show(	       \
+		struct device *dev, struct device_attribute *attr, char *buf)  \
+	{								       \
+		struct zfcp_adapter *const adapter =			       \
+			zfcp_ccw_adapter_by_cdev(to_ccwdev(dev));	       \
+		struct zfcp_diag_header *diag_hdr;			       \
+		ssize_t rc = -ENOLINK;					       \
+		unsigned long flags;					       \
+		unsigned int status;					       \
+									       \
+		if (!adapter)						       \
+			return -ENODEV;					       \
+									       \
+		status = atomic_read(&adapter->status);			       \
+		if (0 == (status & ZFCP_STATUS_COMMON_OPEN) ||		       \
+		    0 == (status & ZFCP_STATUS_COMMON_UNBLOCKED) ||	       \
+		    0 != (status & ZFCP_STATUS_COMMON_ERP_FAILED))	       \
+			goto out;					       \
+									       \
+		if (!zfcp_diag_support_sfp(adapter)) {			       \
+			rc = -EOPNOTSUPP;				       \
+			goto out;					       \
+		}							       \
+									       \
+		diag_hdr = &adapter->diagnostics->port_data.header;	       \
+									       \
+		rc = zfcp_diag_update_buffer_limited(			       \
+			adapter, diag_hdr, zfcp_diag_update_port_data_buffer); \
+		if (rc != 0)						       \
+			goto out;					       \
+									       \
+		spin_lock_irqsave(&diag_hdr->access_lock, flags);	       \
+		rc = scnprintf(						       \
+			buf, (_prtsize) + 2, _prtfmt "\n",		       \
+			adapter->diagnostics->port_data.data._qtcb_member);    \
+		spin_unlock_irqrestore(&diag_hdr->access_lock, flags);	       \
+									       \
+	out:								       \
+		zfcp_ccw_adapter_put(adapter);				       \
+		return rc;						       \
+	}								       \
+	static ZFCP_DEV_ATTR(adapter_diag_sfp, _name, 0400,		       \
+			     zfcp_sysfs_adapter_diag_sfp_##_name##_show, NULL)
+
+ZFCP_DEFINE_DIAG_SFP_ATTR(temperature, temperature, 5, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(vcc, vcc, 5, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(tx_bias, tx_bias, 5, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(tx_power, tx_power, 5, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(rx_power, rx_power, 5, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(port_tx_type, sfp_flags.port_tx_type, 2, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(optical_port, sfp_flags.optical_port, 1, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(sfp_invalid, sfp_flags.sfp_invalid, 1, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(connector_type, sfp_flags.connector_type, 1, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(fec_active, sfp_flags.fec_active, 1, "%hu");
+
+static struct attribute *zfcp_sysfs_diag_attrs[] = {
+	&dev_attr_adapter_diag_sfp_temperature.attr,
+	&dev_attr_adapter_diag_sfp_vcc.attr,
+	&dev_attr_adapter_diag_sfp_tx_bias.attr,
+	&dev_attr_adapter_diag_sfp_tx_power.attr,
+	&dev_attr_adapter_diag_sfp_rx_power.attr,
+	&dev_attr_adapter_diag_sfp_port_tx_type.attr,
+	&dev_attr_adapter_diag_sfp_optical_port.attr,
+	&dev_attr_adapter_diag_sfp_sfp_invalid.attr,
+	&dev_attr_adapter_diag_sfp_connector_type.attr,
+	&dev_attr_adapter_diag_sfp_fec_active.attr,
+	&dev_attr_adapter_diag_b2b_credit.attr,
+	NULL,
+};
+
+const struct attribute_group zfcp_sysfs_diag_attr_group = {
+	.name = "diagnostics",
+	.attrs = zfcp_sysfs_diag_attrs,
+};
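Once the group is registered, the attributes appear under a "diagnostics" directory of the adapter's ccw device. A hypothetical userspace reader (the bus ID 0.0.1900 and the exact attribute file names are assumptions here; the visible names come from the ZFCP_DEV_ATTR macro, whose body is not part of this hunk):

    #include <stdio.h>

    int main(void)
    {
            /* hypothetical path; substitute the real ccw bus ID */
            const char *path =
                    "/sys/bus/ccw/drivers/zfcp/0.0.1900/diagnostics/b2b_credit";
            char line[32];
            FILE *f = fopen(path, "r");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            if (fgets(line, sizeof(line), f))
                    printf("buffer-to-buffer credit: %s", line);
            fclose(f);
            return 0;
    }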

drivers/scsi/NCR5380.c

@@ -129,6 +129,9 @@
 #define NCR5380_release_dma_irq(x)
 #endif

+static unsigned int disconnect_mask = ~0;
+module_param(disconnect_mask, int, 0444);
+
 static int do_abort(struct Scsi_Host *);
 static void do_reset(struct Scsi_Host *);
 static void bus_reset_cleanup(struct Scsi_Host *);
@@ -172,6 +175,19 @@ static inline void advance_sg_buffer(struct scsi_cmnd *cmd)
 	}
 }

+static inline void set_resid_from_SCp(struct scsi_cmnd *cmd)
+{
+	int resid = cmd->SCp.this_residual;
+	struct scatterlist *s = cmd->SCp.buffer;
+
+	if (s)
+		while (!sg_is_last(s)) {
+			s = sg_next(s);
+			resid += s->length;
+		}
+	scsi_set_resid(cmd, resid);
+}
+
 /**
  * NCR5380_poll_politely2 - wait for two chip register values
  * @hostdata: host private data
@@ -954,7 +970,8 @@ static bool NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
 	int err;
 	bool ret = true;
 	bool can_disconnect = instance->irq != NO_IRQ &&
-			      cmd->cmnd[0] != REQUEST_SENSE;
+			      cmd->cmnd[0] != REQUEST_SENSE &&
+			      (disconnect_mask & BIT(scmd_id(cmd)));

 	NCR5380_dprint(NDEBUG_ARBITRATION, instance);
 	dsprintk(NDEBUG_ARBITRATION, instance, "starting arbitration, id = %d\n",
@@ -1379,7 +1396,7 @@ static void do_reset(struct Scsi_Host *instance)
  * MESSAGE OUT phase and sending an ABORT message.
  * @instance: relevant scsi host instance
  *
- * Returns 0 on success, -1 on failure.
+ * Returns 0 on success, negative error code on failure.
  */

 static int do_abort(struct Scsi_Host *instance)
@@ -1404,7 +1421,7 @@ static int do_abort(struct Scsi_Host *instance)
 	rc = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, 10 * HZ);
 	if (rc < 0)
-		goto timeout;
+		goto out;

 	tmp = NCR5380_read(STATUS_REG) & PHASE_MASK;
@@ -1415,7 +1432,7 @@ static int do_abort(struct Scsi_Host *instance)
 			      ICR_BASE | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
 		rc = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, 0, 3 * HZ);
 		if (rc < 0)
-			goto timeout;
+			goto out;
 		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
 	}
@@ -1424,17 +1441,17 @@ static int do_abort(struct Scsi_Host *instance)
 	len = 1;
 	phase = PHASE_MSGOUT;
 	NCR5380_transfer_pio(instance, &phase, &len, &msgptr);
+	if (len)
+		rc = -ENXIO;

 	/*
 	 * If we got here, and the command completed successfully,
 	 * we're about to go into bus free state.
 	 */
-	return len ? -1 : 0;
-
-timeout:
+out:
 	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-	return -1;
+	return rc;
 }

 /*
@@ -1803,6 +1820,8 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
 					cmd->result |= cmd->SCp.Status;
 					cmd->result |= cmd->SCp.Message << 8;

+					set_resid_from_SCp(cmd);
+
 					if (cmd->cmnd[0] == REQUEST_SENSE)
 						complete_cmd(instance, cmd);
 					else {
@@ -2264,7 +2283,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
 		dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
 		hostdata->connected = NULL;
 		hostdata->dma_len = 0;
-		if (do_abort(instance)) {
+		if (do_abort(instance) < 0) {
 			set_host_byte(cmd, DID_ERROR);
 			complete_cmd(instance, cmd);
 			result = FAILED;
drivers/scsi/aacraid/aachba.c

@@ -1477,6 +1477,7 @@ static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd
 	struct aac_srb * srbcmd;
 	u32 flag;
 	u32 timeout;
+	struct aac_dev *dev = fib->dev;

 	aac_fib_init(fib);
 	switch(cmd->sc_data_direction){
@@ -1503,7 +1504,7 @@ static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd
 	srbcmd->flags = cpu_to_le32(flag);
 	timeout = cmd->request->timeout/HZ;
 	if (timeout == 0)
-		timeout = 1;
+		timeout = (dev->sa_firmware ? AAC_SA_TIMEOUT : AAC_ARC_TIMEOUT);
 	srbcmd->timeout = cpu_to_le32(timeout);  // timeout in seconds
 	srbcmd->retry_limit = 0; /* Obsolete parameter */
 	srbcmd->cdb_size = cpu_to_le32(cmd->cmd_len);
@@ -2467,13 +2468,13 @@ static int aac_read(struct scsi_cmnd * scsicmd)
 		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
 			SAM_STAT_CHECK_CONDITION;
 		set_sense(&dev->fsa_dev[cid].sense_data,
-			  HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
+			  ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE,
 			  ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
 		memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
 		       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
 			     SCSI_SENSE_BUFFERSIZE));
 		scsicmd->scsi_done(scsicmd);
-		return 1;
+		return 0;
 	}

 	dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n",
@@ -2559,13 +2560,13 @@ static int aac_write(struct scsi_cmnd * scsicmd)
 		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
 			SAM_STAT_CHECK_CONDITION;
 		set_sense(&dev->fsa_dev[cid].sense_data,
-			  HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
+			  ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE,
 			  ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
 		memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
 		       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
 			     SCSI_SENSE_BUFFERSIZE));
 		scsicmd->scsi_done(scsicmd);
-		return 1;
+		return 0;
 	}

 	dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n",

drivers/scsi/aacraid/aacraid.h

@@ -85,7 +85,7 @@ enum {
 #define PMC_GLOBAL_INT_BIT0	0x00000001

 #ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 50877
+# define AAC_DRIVER_BUILD 50983
 # define AAC_DRIVER_BRANCH "-custom"
 #endif
 #define MAXIMUM_NUM_CONTAINERS	32
@@ -108,6 +108,8 @@ enum {
 #define AAC_BUS_TARGET_LOOP	(AAC_MAX_BUSES * AAC_MAX_TARGETS)
 #define AAC_MAX_NATIVE_SIZE	2048
 #define FW_ERROR_BUFFER_SIZE	512
+#define AAC_SA_TIMEOUT		180
+#define AAC_ARC_TIMEOUT		60

 #define get_bus_number(x)	(x/AAC_MAX_TARGETS)
 #define get_target_number(x)	(x%AAC_MAX_TARGETS)
@@ -1328,7 +1330,7 @@ struct fib {
 #define AAC_DEVTYPE_ARC_RAW	2
 #define AAC_DEVTYPE_NATIVE_RAW	3

-#define AAC_SAFW_RESCAN_DELAY	(10 * HZ)
+#define AAC_RESCAN_DELAY	(10 * HZ)

 struct aac_hba_map_info {
 	__le32	rmw_nexus;	/* nexus for native HBA devices */
@@ -1601,6 +1603,7 @@ struct aac_dev
 	struct fsa_dev_info	*fsa_dev;
 	struct task_struct	*thread;
 	struct delayed_work	safw_rescan_work;
+	struct delayed_work	src_reinit_aif_worker;
 	int	cardtype;
 	/*
 	 *This lock will protect the two 32-bit
@@ -1673,6 +1676,7 @@ struct aac_dev
 	u8	adapter_shutdown;
 	u32	handle_pci_error;
 	bool	init_reset;
+	u8	soft_reset_support;
 };

 #define aac_adapter_interrupt(dev) \
@@ -2644,7 +2648,12 @@ int aac_scan_host(struct aac_dev *dev);

 static inline void aac_schedule_safw_scan_worker(struct aac_dev *dev)
 {
-	schedule_delayed_work(&dev->safw_rescan_work, AAC_SAFW_RESCAN_DELAY);
+	schedule_delayed_work(&dev->safw_rescan_work, AAC_RESCAN_DELAY);
+}
+
+static inline void aac_schedule_src_reinit_aif_worker(struct aac_dev *dev)
+{
+	schedule_delayed_work(&dev->src_reinit_aif_worker, AAC_RESCAN_DELAY);
 }

 static inline void aac_safw_rescan_worker(struct work_struct *work)
@@ -2658,10 +2667,10 @@ static inline void aac_safw_rescan_worker(struct work_struct *work)
 	aac_scan_host(dev);
 }

-static inline void aac_cancel_safw_rescan_worker(struct aac_dev *dev)
+static inline void aac_cancel_rescan_worker(struct aac_dev *dev)
 {
-	if (dev->sa_firmware)
-		cancel_delayed_work_sync(&dev->safw_rescan_work);
+	cancel_delayed_work_sync(&dev->safw_rescan_work);
+	cancel_delayed_work_sync(&dev->src_reinit_aif_worker);
 }

 /* SCp.phase values */
@@ -2671,6 +2680,7 @@ static inline void aac_cancel_safw_rescan_worker(struct aac_dev *dev)
 #define AAC_OWNER_FIRMWARE	0x106

 void aac_safw_rescan_worker(struct work_struct *work);
+void aac_src_reinit_aif_worker(struct work_struct *work);
 int aac_acquire_irq(struct aac_dev *dev);
 void aac_free_irq(struct aac_dev *dev);
 int aac_setup_safw_adapter(struct aac_dev *dev);
@@ -2728,6 +2738,7 @@ int aac_probe_container(struct aac_dev *dev, int cid);
 int _aac_rx_init(struct aac_dev *dev);
 int aac_rx_select_comm(struct aac_dev *dev, int comm);
 int aac_rx_deliver_producer(struct fib * fib);
+void aac_reinit_aif(struct aac_dev *aac, unsigned int index);

 static inline int aac_is_src(struct aac_dev *dev)
 {

drivers/scsi/aacraid/comminit.c

@@ -571,6 +571,11 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
 		else
 			dev->sa_firmware = 0;

+		if (status[4] & le32_to_cpu(AAC_EXTOPT_SOFT_RESET))
+			dev->soft_reset_support = 1;
+		else
+			dev->soft_reset_support = 0;
+
 		if ((dev->comm_interface == AAC_COMM_MESSAGE) &&
 		    (status[2] > dev->base_size)) {
 			aac_adapter_ioremap(dev, 0);

drivers/scsi/aacraid/commsup.c

@@ -232,6 +232,7 @@ struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd)
 	fibptr->type = FSAFS_NTC_FIB_CONTEXT;
 	fibptr->callback_data = NULL;
 	fibptr->callback = NULL;
+	fibptr->flags = 0;

 	return fibptr;
 }
@@ -1463,6 +1464,14 @@ retry_next:
 	}
 }

+static void aac_schedule_bus_scan(struct aac_dev *aac)
+{
+	if (aac->sa_firmware)
+		aac_schedule_safw_scan_worker(aac);
+	else
+		aac_schedule_src_reinit_aif_worker(aac);
+}
+
 static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
 {
 	int index, quirks;
@@ -1638,7 +1647,7 @@ out:
 	 */
 	if (!retval && !is_kdump_kernel()) {
 		dev_info(&aac->pdev->dev, "Scheduling bus rescan\n");
-		aac_schedule_safw_scan_worker(aac);
+		aac_schedule_bus_scan(aac);
 	}

 	if (jafo) {
@@ -1959,6 +1968,16 @@ int aac_scan_host(struct aac_dev *dev)
 	return rcode;
 }

+void aac_src_reinit_aif_worker(struct work_struct *work)
+{
+	struct aac_dev *dev = container_of(to_delayed_work(work),
+				struct aac_dev, src_reinit_aif_worker);
+
+	wait_event(dev->scsi_host_ptr->host_wait,
+			!scsi_host_in_recovery(dev->scsi_host_ptr));
+	aac_reinit_aif(dev, dev->cardtype);
+}
+
 /**
  *	aac_handle_sa_aif	Handle a message from the firmware
  *	@dev: Which adapter this fib is from

View File

@@ -391,6 +391,7 @@ static int aac_slave_configure(struct scsi_device *sdev)
 	int chn, tid;
 	unsigned int depth = 0;
 	unsigned int set_timeout = 0;
+	int timeout = 0;
 	bool set_qd_dev_type = false;
 	u8 devtype = 0;
@@ -483,10 +484,13 @@ common_config:
 	/*
 	 * Firmware has an individual device recovery time typically
-	 * of 35 seconds, give us a margin.
+	 * of 35 seconds, give us a margin. Thor devices can take longer in
+	 * error recovery, hence different value.
 	 */
-	if (set_timeout && sdev->request_queue->rq_timeout < (45 * HZ))
-		blk_queue_rq_timeout(sdev->request_queue, 45*HZ);
+	if (set_timeout) {
+		timeout = aac->sa_firmware ? AAC_SA_TIMEOUT : AAC_ARC_TIMEOUT;
+		blk_queue_rq_timeout(sdev->request_queue, timeout * HZ);
+	}
 	if (depth > 256)
 		depth = 256;
@@ -608,9 +612,13 @@ static struct device_attribute *aac_dev_attrs[] = {
 static int aac_ioctl(struct scsi_device *sdev, unsigned int cmd,
		     void __user *arg)
 {
+	int retval;
 	struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
+
 	if (!capable(CAP_SYS_RAWIO))
 		return -EPERM;
+
+	retval = aac_adapter_check_health(dev);
+	if (retval)
+		return -EBUSY;
+
 	return aac_do_ioctl(dev, cmd, arg);
 }
@@ -1585,6 +1593,19 @@ static void aac_init_char(void)
 	}
 }
+void aac_reinit_aif(struct aac_dev *aac, unsigned int index)
+{
+	/*
+	 * Firmware may send a AIF messages very early and the Driver may have
+	 * ignored as it is not fully ready to process the messages. Send
+	 * AIF to firmware so that if there are any unprocessed events they
+	 * can be processed now.
+	 */
+	if (aac_drivers[index].quirks & AAC_QUIRK_SRC)
+		aac_intr_normal(aac, 0, 2, 0, NULL);
+}
+
 static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	unsigned index = id->driver_data;
@@ -1682,6 +1703,8 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	mutex_init(&aac->scan_mutex);
 	INIT_DELAYED_WORK(&aac->safw_rescan_work, aac_safw_rescan_worker);
+	INIT_DELAYED_WORK(&aac->src_reinit_aif_worker,
+			  aac_src_reinit_aif_worker);
 	/*
	 * Map in the registers from the adapter.
	 */
@@ -1872,7 +1895,7 @@ static int aac_suspend(struct pci_dev *pdev, pm_message_t state)
 	struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
 	scsi_block_requests(shost);
-	aac_cancel_safw_rescan_worker(aac);
+	aac_cancel_rescan_worker(aac);
 	aac_send_shutdown(aac);
 	aac_release_resources(aac);
@@ -1931,7 +1954,7 @@ static void aac_remove_one(struct pci_dev *pdev)
 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
 	struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
-	aac_cancel_safw_rescan_worker(aac);
+	aac_cancel_rescan_worker(aac);
 	scsi_remove_host(shost);
 	__aac_shutdown(aac);
@@ -1989,7 +2012,7 @@ static pci_ers_result_t aac_pci_error_detected(struct pci_dev *pdev,
 	aac->handle_pci_error = 1;
 	scsi_block_requests(aac->scsi_host_ptr);
-	aac_cancel_safw_rescan_worker(aac);
+	aac_cancel_rescan_worker(aac);
 	aac_flush_ios(aac);
 	aac_release_resources(aac);
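The slave_configure hunk above replaces a hard-coded 45-second cap with a per-family value; blk_queue_rq_timeout() expects jiffies, which is why the chosen value is scaled by HZ. A condensed sketch of the idea, assuming AAC_SA_TIMEOUT and AAC_ARC_TIMEOUT are timeouts in seconds (their definitions are not part of this hunk, so the values below are placeholders):

#include <linux/blkdev.h>

#define AAC_ARC_TIMEOUT	45	/* assumed: legacy ARC adapters, seconds */
#define AAC_SA_TIMEOUT	180	/* assumed: SmartIOC ("Thor") adapters, seconds */

static void set_rq_timeout(struct request_queue *q, bool sa_firmware)
{
	int timeout = sa_firmware ? AAC_SA_TIMEOUT : AAC_ARC_TIMEOUT;

	/* blk_queue_rq_timeout() takes jiffies, hence the "* HZ" */
	blk_queue_rq_timeout(q, timeout * HZ);
}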

============================================================

@@ -733,10 +733,20 @@ static bool aac_is_ctrl_up_and_running(struct aac_dev *dev)
 	return ctrl_up;
 }
+static void aac_src_drop_io(struct aac_dev *dev)
+{
+	if (!dev->soft_reset_support)
+		return;
+
+	aac_adapter_sync_cmd(dev, DROP_IO,
+			     0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
+}
+
 static void aac_notify_fw_of_iop_reset(struct aac_dev *dev)
 {
 	aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS, 0, 0, 0, 0, 0, 0, NULL,
			     NULL, NULL, NULL, NULL);
+	aac_src_drop_io(dev);
 }
 static void aac_send_iop_reset(struct aac_dev *dev)

============================================================

@@ -1400,7 +1400,7 @@ static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct Comma
 			, pCCB->acb
 			, pCCB->startdone
 			, atomic_read(&acb->ccboutstandingcount));
-			return;
+		return;
 	}
 	arcmsr_report_ccb_state(acb, pCCB, error);
 }
@@ -3476,8 +3476,8 @@ polling_hbc_ccb_retry:
 			, pCCB->pcmd->device->id
 			, (u32)pCCB->pcmd->device->lun
 			, pCCB);
-			pCCB->pcmd->result = DID_ABORT << 16;
-			arcmsr_ccb_complete(pCCB);
+		pCCB->pcmd->result = DID_ABORT << 16;
+		arcmsr_ccb_complete(pCCB);
 		continue;
 	}
 	printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"

============================================================

@@ -1067,7 +1067,7 @@ void acornscsi_dma_setup(AS_Host *host, dmadir_t direction)
  * Purpose : ensure that all DMA transfers are up-to-date & host->scsi.SCp is correct
  * Params  : host - host to finish
  * Notes   : This is called when a command is:
- *		terminating, RESTORE_POINTERS, SAVE_POINTERS, DISCONECT
+ *		terminating, RESTORE_POINTERS, SAVE_POINTERS, DISCONNECT
  *	    : This must not return until all transfers are completed.
  */
 static
@@ -1816,7 +1816,7 @@ int acornscsi_reconnect(AS_Host *host)
 }
 /*
- * Function: int acornscsi_reconect_finish(AS_Host *host)
+ * Function: int acornscsi_reconnect_finish(AS_Host *host)
  * Purpose : finish reconnecting a command
  * Params  : host - host to complete
  * Returns : 0 if failed

============================================================

@@ -742,7 +742,7 @@ static int __init atari_scsi_probe(struct platform_device *pdev)
 		atari_scsi_template.sg_tablesize = SG_ALL;
 	} else {
 		atari_scsi_template.can_queue = 1;
-		atari_scsi_template.sg_tablesize = SG_NONE;
+		atari_scsi_template.sg_tablesize = 1;
 	}
 	if (setup_can_queue > 0)
@@ -751,8 +751,8 @@ static int __init atari_scsi_probe(struct platform_device *pdev)
 	if (setup_cmd_per_lun > 0)
 		atari_scsi_template.cmd_per_lun = setup_cmd_per_lun;
-	/* Leave sg_tablesize at 0 on a Falcon! */
-	if (ATARIHW_PRESENT(TT_SCSI) && setup_sg_tablesize >= 0)
+	/* Don't increase sg_tablesize on Falcon! */
+	if (ATARIHW_PRESENT(TT_SCSI) && setup_sg_tablesize > 0)
 		atari_scsi_template.sg_tablesize = setup_sg_tablesize;
 	if (setup_hostid >= 0) {

============================================================

@@ -1680,7 +1680,7 @@ static struct scsi_host_template atp870u_template = {
	.bios_param		= atp870u_biosparam /* biosparm */,
	.can_queue		= qcnt /* can_queue */,
	.this_id		= 7 /* SCSI ID */,
-	.sg_tablesize		= ATP870U_SCATTER /*SG_ALL*/ /*SG_NONE*/,
+	.sg_tablesize		= ATP870U_SCATTER /*SG_ALL*/,
	.max_sectors		= ATP870U_MAX_SECTORS,
 };

============================================================

@@ -1487,8 +1487,7 @@ bfad_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
 	return ret;
 }
-int
-restart_bfa(struct bfad_s *bfad)
+static int restart_bfa(struct bfad_s *bfad)
 {
 	unsigned long flags;
 	struct pci_dev *pdev = bfad->pcidev;

============================================================

@@ -275,8 +275,10 @@ bfad_im_get_stats(struct Scsi_Host *shost)
 	rc = bfa_port_get_stats(BFA_FCPORT(&bfad->bfa),
				fcstats, bfad_hcb_comp, &fcomp);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
-	if (rc != BFA_STATUS_OK)
+	if (rc != BFA_STATUS_OK) {
+		kfree(fcstats);
 		return NULL;
+	}
 	wait_for_completion(&fcomp.comp);

============================================================

@@ -813,7 +813,7 @@ struct fcoe_confqe {
 /*
- * FCoE conection data base
+ * FCoE connection data base
  */
 struct fcoe_conn_db {
 #if defined(__BIG_ENDIAN)
View File

@@ -1242,7 +1242,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
 	/* Wait 2 * RA_TOV + 1 to be sure timeout function hasn't fired */
 	time_left = wait_for_completion_timeout(&io_req->abts_done,
-					(2 * rp->r_a_tov + 1) * HZ);
+					msecs_to_jiffies(2 * rp->r_a_tov + 1));
 	if (time_left)
 		BNX2FC_IO_DBG(io_req,
			      "Timed out in eh_abort waiting for abts_done");

============================================================

@@ -915,12 +915,12 @@ void bnx2i_free_hba(struct bnx2i_hba *hba)
 	INIT_LIST_HEAD(&hba->ep_ofld_list);
 	INIT_LIST_HEAD(&hba->ep_active_list);
 	INIT_LIST_HEAD(&hba->ep_destroy_list);
-	pci_dev_put(hba->pcidev);
 	if (hba->regview) {
		pci_iounmap(hba->pcidev, hba->regview);
		hba->regview = NULL;
 	}
+	pci_dev_put(hba->pcidev);
 	bnx2i_free_mp_bdt(hba);
 	bnx2i_release_free_cid_que(hba);
 	iscsi_host_free(shost);
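The use-after-free fixed above comes from dropping the last reference to the PCI device before pci_iounmap(), which still dereferences it. The general rule, sketched below with a reduced, hypothetical structure: release a reference only after its final user.

#include <linux/pci.h>

struct my_hba {			/* hypothetical, reduced */
	struct pci_dev *pcidev;
	void __iomem *regview;
};

static void my_hba_teardown(struct my_hba *hba)
{
	if (hba->regview) {
		pci_iounmap(hba->pcidev, hba->regview);	/* still uses pcidev */
		hba->regview = NULL;
	}
	pci_dev_put(hba->pcidev);	/* drop the reference last */
}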

============================================================

@@ -793,10 +793,10 @@ csio_hw_get_flash_params(struct csio_hw *hw)
 		goto found;
 	}
-	/* Decode Flash part size. The code below looks repetative with
+	/* Decode Flash part size. The code below looks repetitive with
	 * common encodings, but that's not guaranteed in the JEDEC
-	 * specification for the Read JADEC ID command. The only thing that
-	 * we're guaranteed by the JADEC specification is where the
+	 * specification for the Read JEDEC ID command. The only thing that
+	 * we're guaranteed by the JEDEC specification is where the
	 * Manufacturer ID is in the returned result. After that each
	 * Manufacturer ~could~ encode things completely differently.
	 * Note, all Flash parts must have 64KB sectors.
@@ -983,8 +983,8 @@ retry:
 		waiting -= 50;
 		/*
-		 * If neither Error nor Initialialized are indicated
-		 * by the firmware keep waiting till we exaust our
+		 * If neither Error nor Initialized are indicated
+		 * by the firmware keep waiting till we exhaust our
		 * timeout ... and then retry if we haven't exhausted
		 * our retries ...
		 */
@@ -1738,7 +1738,7 @@ static void csio_link_l1cfg(struct link_config *lc, uint16_t fw_caps,
	 * Convert Common Code Forward Error Control settings into the
	 * Firmware's API. If the current Requested FEC has "Automatic"
	 * (IEEE 802.3) specified, then we use whatever the Firmware
-	 * sent us as part of it's IEEE 802.3-based interpratation of
+	 * sent us as part of it's IEEE 802.3-based interpretation of
	 * the Transceiver Module EPROM FEC parameters. Otherwise we
	 * use whatever is in the current Requested FEC settings.
	 */
@@ -2834,7 +2834,7 @@ csio_hws_configuring(struct csio_hw *hw, enum csio_hw_ev evt)
 }
 /*
- * csio_hws_initializing - Initialiazing state
+ * csio_hws_initializing - Initializing state
  * @hw - HW module
  * @evt - Event
  *
@@ -3049,7 +3049,7 @@ csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt)
 		if (!csio_is_hw_master(hw))
			break;
 		/*
-		 * The BYE should have alerady been issued, so we cant
+		 * The BYE should have already been issued, so we can't
		 * use the mailbox interface. Hence we use the PL_RST
		 * register directly.
		 */
@@ -3104,7 +3104,7 @@ csio_hws_pcierr(struct csio_hw *hw, enum csio_hw_ev evt)
 *
 * A table driven interrupt handler that applies a set of masks to an
 * interrupt status word and performs the corresponding actions if the
- * interrupts described by the mask have occured. The actions include
+ * interrupts described by the mask have occurred. The actions include
 * optionally emitting a warning or alert message. The table is terminated
 * by an entry specifying mask 0. Returns the number of fatal interrupt
 * conditions.
@@ -4219,7 +4219,7 @@ csio_mgmtm_exit(struct csio_mgmtm *mgmtm)
 * @hw: Pointer to HW module.
 *
 * It is assumed that the initialization is a synchronous operation.
- * So when we return afer posting the event, the HW SM should be in
+ * So when we return after posting the event, the HW SM should be in
 * the ready state, if there were no errors during init.
 */
 int

============================================================

@@ -154,13 +154,10 @@ csio_dfs_create(struct csio_hw *hw)
 /*
 * csio_dfs_destroy - Destroys per-hw debugfs.
 */
-static int
+static void
 csio_dfs_destroy(struct csio_hw *hw)
 {
-	if (hw->debugfs_root)
-		debugfs_remove_recursive(hw->debugfs_root);
-
-	return 0;
+	debugfs_remove_recursive(hw->debugfs_root);
 }
 /*
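The simplification above leans on the fact that debugfs_remove_recursive() already tolerates a NULL (or error) dentry, so the caller-side check was dead weight and the now-constant return value could go too. A minimal sketch:

#include <linux/debugfs.h>

/* debugfs_remove_recursive() ignores NULL and IS_ERR() dentries, so the
 * cleanup can be unconditional. */
static void dfs_destroy(struct dentry *root)
{
	debugfs_remove_recursive(root);	/* safe even if root is NULL */
}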

============================================================

@@ -301,6 +301,7 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
 	struct fc_fdmi_port_name *port_name;
 	uint8_t buf[64];
 	uint8_t *fc4_type;
+	unsigned long flags;
 	if (fdmi_req->wr_status != FW_SUCCESS) {
 		csio_ln_dbg(ln, "WR error:%x in processing fdmi rhba cmd\n",
@@ -385,13 +386,13 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
 	len = (uint32_t)(pld - (uint8_t *)cmd);
 	/* Submit FDMI RPA request */
-	spin_lock_irq(&hw->lock);
+	spin_lock_irqsave(&hw->lock, flags);
 	if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_done,
				FCOE_CT, &fdmi_req->dma_buf, len)) {
 		CSIO_INC_STATS(ln, n_fdmi_err);
 		csio_ln_dbg(ln, "Failed to issue fdmi rpa req\n");
 	}
-	spin_unlock_irq(&hw->lock);
+	spin_unlock_irqrestore(&hw->lock, flags);
 }
 /*
@@ -412,6 +413,7 @@ csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
 	struct fc_fdmi_rpl *reg_pl;
 	struct fs_fdmi_attrs *attrib_blk;
 	uint8_t buf[64];
+	unsigned long flags;
 	if (fdmi_req->wr_status != FW_SUCCESS) {
 		csio_ln_dbg(ln, "WR error:%x in processing fdmi dprt cmd\n",
@@ -491,13 +493,13 @@ csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
 	attrib_blk->numattrs = htonl(numattrs);
 	/* Submit FDMI RHBA request */
-	spin_lock_irq(&hw->lock);
+	spin_lock_irqsave(&hw->lock, flags);
 	if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_rhba_cbfn,
				FCOE_CT, &fdmi_req->dma_buf, len)) {
 		CSIO_INC_STATS(ln, n_fdmi_err);
 		csio_ln_dbg(ln, "Failed to issue fdmi rhba req\n");
 	}
-	spin_unlock_irq(&hw->lock);
+	spin_unlock_irqrestore(&hw->lock, flags);
 }
 /*
@@ -512,6 +514,7 @@ csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
 	void *cmd;
 	struct fc_fdmi_port_name *port_name;
 	uint32_t len;
+	unsigned long flags;
 	if (fdmi_req->wr_status != FW_SUCCESS) {
 		csio_ln_dbg(ln, "WR error:%x in processing fdmi dhba cmd\n",
@@ -542,13 +545,13 @@ csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
 	len += sizeof(*port_name);
 	/* Submit FDMI request */
-	spin_lock_irq(&hw->lock);
+	spin_lock_irqsave(&hw->lock, flags);
 	if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dprt_cbfn,
				FCOE_CT, &fdmi_req->dma_buf, len)) {
 		CSIO_INC_STATS(ln, n_fdmi_err);
 		csio_ln_dbg(ln, "Failed to issue fdmi dprt req\n");
 	}
-	spin_unlock_irq(&hw->lock);
+	spin_unlock_irqrestore(&hw->lock, flags);
 }
 /**
@@ -1989,7 +1992,7 @@ static int
 csio_ln_init(struct csio_lnode *ln)
 {
 	int rv = -EINVAL;
-	struct csio_lnode *rln, *pln;
+	struct csio_lnode *pln;
 	struct csio_hw *hw = csio_lnode_to_hw(ln);
 	csio_init_state(&ln->sm, csio_lns_uninit);
@@ -2019,7 +2022,6 @@ csio_ln_init(struct csio_lnode *ln)
	 * THe rest is common for non-root physical and NPIV lnodes.
	 * Just get references to all other modules
	 */
-	rln = csio_root_lnode(ln);
 	if (csio_is_npiv_ln(ln)) {
 		/* NPIV */
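The irqsave conversions above matter because these FDMI completion callbacks can run in contexts where interrupts are already disabled; spin_unlock_irq() would re-enable them unconditionally, while the save/restore form preserves whatever state the caller had. A generic sketch with a hypothetical lock:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(hw_lock);	/* stand-in for the driver's hw->lock */

static void submit_request(void)
{
	unsigned long flags;

	spin_lock_irqsave(&hw_lock, flags);	/* records current IRQ state */
	/* ... queue the management request under the lock ... */
	spin_unlock_irqrestore(&hw_lock, flags);	/* restores that state */
}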

============================================================

@@ -1210,7 +1210,7 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
			!csio_is_hw_intr_enabled(hw)) {
 		csio_err(hw, "Cannot issue mailbox in interrupt mode 0x%x\n",
			 *((uint8_t *)mbp->mb));
-			goto error_out;
+		goto error_out;
 	}
 	if (mbm->mcurrent != NULL) {

============================================================

@@ -2073,7 +2073,6 @@ static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
 	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
 	struct net_device *ndev = cdev->ports[0];
 	struct cxgbi_tag_format tformat;
-	unsigned int ppmax;
 	int i, err;
 	if (!lldi->vr->iscsi.size) {
@@ -2082,7 +2081,6 @@ static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
 	}
 	cdev->flags |= CXGBI_FLAG_USE_PPOD_OFLDQ;
-	ppmax = lldi->vr->iscsi.size >> PPOD_SIZE_SHIFT;
 	memset(&tformat, 0, sizeof(struct cxgbi_tag_format));
 	for (i = 0; i < 4; i++)

============================================================

@@ -2284,34 +2284,6 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
 }
 EXPORT_SYMBOL_GPL(cxgbi_set_conn_param);
-static inline int csk_print_port(struct cxgbi_sock *csk, char *buf)
-{
-	int len;
-
-	cxgbi_sock_get(csk);
-	len = sprintf(buf, "%hu\n", ntohs(csk->daddr.sin_port));
-	cxgbi_sock_put(csk);
-
-	return len;
-}
-
-static inline int csk_print_ip(struct cxgbi_sock *csk, char *buf)
-{
-	int len;
-
-	cxgbi_sock_get(csk);
-	if (csk->csk_family == AF_INET)
-		len = sprintf(buf, "%pI4",
-			      &csk->daddr.sin_addr.s_addr);
-	else
-		len = sprintf(buf, "%pI6",
-			      &csk->daddr6.sin6_addr);
-	cxgbi_sock_put(csk);
-
-	return len;
-}
-
 int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param,
		       char *buf)
 {

============================================================

@@ -44,14 +44,12 @@ static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
 	struct afu *afu = cmd->parent;
 	struct cxlflash_cfg *cfg = afu->parent;
 	struct device *dev = &cfg->dev->dev;
-	struct sisl_ioarcb *ioarcb;
 	struct sisl_ioasa *ioasa;
 	u32 resid;
 	if (unlikely(!cmd))
 		return;
-	ioarcb = &(cmd->rcb);
 	ioasa = &(cmd->sa);
 	if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {

============================================================

@@ -1197,6 +1197,7 @@ bool esas2r_nvram_read_direct(struct esas2r_adapter *a)
 	if (!esas2r_read_flash_block(a, a->nvram, FLS_OFFSET_NVR,
				     sizeof(struct esas2r_sas_nvram))) {
 		esas2r_hdebug("NVRAM read failed, using defaults");
+		up(&a->nvram_semaphore);
 		return false;
 	}
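The one-line esas2r fix is an instance of the usual rule that every early return must undo whatever the function already acquired, here nvram_semaphore. A reduced sketch of the shape (read_flash_block() is a hypothetical stand-in):

#include <linux/errno.h>
#include <linux/semaphore.h>
#include <linux/types.h>

static bool read_flash_block(void)	/* stand-in for the real read */
{
	return true;
}

static int read_nvram(struct semaphore *sem)
{
	int ret = 0;

	if (down_interruptible(sem))
		return -EINTR;		/* nothing acquired yet, plain return */

	if (!read_flash_block())
		ret = -EIO;		/* fall through to the single unwind */

	up(sem);			/* released on success and on failure */
	return ret;
}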

============================================================

@@ -1024,7 +1024,8 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
 	atomic64_inc(&fnic_stats->io_stats.io_completions);
-	io_duration_time = jiffies_to_msecs(jiffies) - jiffies_to_msecs(io_req->start_time);
+	io_duration_time = jiffies_to_msecs(jiffies) -
+						jiffies_to_msecs(start_time);
 	if(io_duration_time <= 10)
 		atomic64_inc(&fnic_stats->io_stats.io_btw_0_to_10_msec);

============================================================

@@ -259,7 +259,7 @@ int vnic_dev_cmd1(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait)
 	struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
 	int delay;
 	u32 status;
-	int dev_cmd_err[] = {
+	static const int dev_cmd_err[] = {
		/* convert from fw's version of error.h to host's version */
		0,	/* ERR_SUCCESS */
		EINVAL,	/* ERR_EINVAL */
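Marking the translation table static const, as above, keeps it in read-only data instead of rebuilding it on the stack on every call, and lets the compiler share a single copy. A standalone example with made-up errno values:

#include <stdio.h>

static int fw_to_errno(unsigned int fw_err)
{
	/* built once into .rodata, not re-initialized per call */
	static const int dev_cmd_err[] = {
		0,	/* ERR_SUCCESS */
		22,	/* ERR_EINVAL, illustrative value of EINVAL */
		14,	/* ERR_EFAULT, illustrative value of EFAULT */
	};

	if (fw_err >= sizeof(dev_cmd_err) / sizeof(dev_cmd_err[0]))
		return -1;	/* unknown firmware error */
	return dev_cmd_err[fw_err];
}

int main(void)
{
	printf("%d\n", fw_to_errno(1));	/* prints 22 */
	return 0;
}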

============================================================

@@ -21,6 +21,7 @@
 #include <linux/platform_device.h>
 #include <linux/property.h>
 #include <linux/regmap.h>
+#include <linux/timer.h>
 #include <scsi/sas_ata.h>
 #include <scsi/libsas.h>
@@ -84,6 +85,7 @@
 #define HISI_SAS_PROT_MASK (HISI_SAS_DIF_PROT_MASK | HISI_SAS_DIX_PROT_MASK)
 #define HISI_SAS_WAIT_PHYUP_TIMEOUT 20
+#define CLEAR_ITCT_TIMEOUT 20
 struct hisi_hba;
@@ -167,6 +169,7 @@ struct hisi_sas_phy {
	enum sas_linkrate	minimum_linkrate;
	enum sas_linkrate	maximum_linkrate;
	int enable;
+	atomic_t down_cnt;
 };
 struct hisi_sas_port {
@@ -296,8 +299,8 @@ struct hisi_sas_hw {
	void (*phy_set_linkrate)(struct hisi_hba *hisi_hba, int phy_no,
			struct sas_phy_linkrates *linkrates);
	enum sas_linkrate (*phy_get_max_linkrate)(void);
-	void (*clear_itct)(struct hisi_hba *hisi_hba,
-			   struct hisi_sas_device *dev);
+	int (*clear_itct)(struct hisi_hba *hisi_hba,
+			  struct hisi_sas_device *dev);
	void (*free_device)(struct hisi_sas_device *sas_dev);
	int (*get_wideport_bitmap)(struct hisi_hba *hisi_hba, int port_id);
	void (*dereg_device)(struct hisi_hba *hisi_hba,
@@ -321,6 +324,44 @@ struct hisi_sas_hw {
	const struct hisi_sas_debugfs_reg *debugfs_reg_port;
 };
+#define HISI_SAS_MAX_DEBUGFS_DUMP (50)
+
+struct hisi_sas_debugfs_cq {
+	struct hisi_sas_cq *cq;
+	void *complete_hdr;
+};
+
+struct hisi_sas_debugfs_dq {
+	struct hisi_sas_dq *dq;
+	struct hisi_sas_cmd_hdr *hdr;
+};
+
+struct hisi_sas_debugfs_regs {
+	struct hisi_hba *hisi_hba;
+	u32 *data;
+};
+
+struct hisi_sas_debugfs_port {
+	struct hisi_sas_phy *phy;
+	u32 *data;
+};
+
+struct hisi_sas_debugfs_iost {
+	struct hisi_sas_iost *iost;
+};
+
+struct hisi_sas_debugfs_itct {
+	struct hisi_sas_itct *itct;
+};
+
+struct hisi_sas_debugfs_iost_cache {
+	struct hisi_sas_iost_itct_cache *cache;
+};
+
+struct hisi_sas_debugfs_itct_cache {
+	struct hisi_sas_iost_itct_cache *cache;
+};
+
 struct hisi_hba {
	/* This must be the first element, used by SHOST_TO_SAS_HA */
	struct sas_ha_struct *p;
@@ -402,19 +443,20 @@ struct hisi_hba {
	/* debugfs memories */
	/* Put Global AXI and RAS Register into register array */
-	u32 *debugfs_regs[DEBUGFS_REGS_NUM];
-	u32 *debugfs_port_reg[HISI_SAS_MAX_PHYS];
-	void *debugfs_complete_hdr[HISI_SAS_MAX_QUEUES];
-	struct hisi_sas_cmd_hdr *debugfs_cmd_hdr[HISI_SAS_MAX_QUEUES];
-	struct hisi_sas_iost *debugfs_iost;
-	struct hisi_sas_itct *debugfs_itct;
-	u64 *debugfs_iost_cache;
-	u64 *debugfs_itct_cache;
+	struct hisi_sas_debugfs_regs debugfs_regs[HISI_SAS_MAX_DEBUGFS_DUMP][DEBUGFS_REGS_NUM];
+	struct hisi_sas_debugfs_port debugfs_port_reg[HISI_SAS_MAX_DEBUGFS_DUMP][HISI_SAS_MAX_PHYS];
+	struct hisi_sas_debugfs_cq debugfs_cq[HISI_SAS_MAX_DEBUGFS_DUMP][HISI_SAS_MAX_QUEUES];
+	struct hisi_sas_debugfs_dq debugfs_dq[HISI_SAS_MAX_DEBUGFS_DUMP][HISI_SAS_MAX_QUEUES];
+	struct hisi_sas_debugfs_iost debugfs_iost[HISI_SAS_MAX_DEBUGFS_DUMP];
+	struct hisi_sas_debugfs_itct debugfs_itct[HISI_SAS_MAX_DEBUGFS_DUMP];
+	struct hisi_sas_debugfs_iost_cache debugfs_iost_cache[HISI_SAS_MAX_DEBUGFS_DUMP];
+	struct hisi_sas_debugfs_itct_cache debugfs_itct_cache[HISI_SAS_MAX_DEBUGFS_DUMP];
+	u64 debugfs_timestamp[HISI_SAS_MAX_DEBUGFS_DUMP];
+	int debugfs_dump_index;
	struct dentry *debugfs_dir;
	struct dentry *debugfs_dump_dentry;
	struct dentry *debugfs_bist_dentry;
-	bool debugfs_snapshot;
 };
 /* Generic HW DMA host memory structures */
@@ -556,6 +598,7 @@ struct hisi_sas_slot_dif_buf_table {
 extern struct scsi_transport_template *hisi_sas_stt;
 extern bool hisi_sas_debugfs_enable;
+extern u32 hisi_sas_debugfs_dump_count;
 extern struct dentry *hisi_sas_debugfs_dir;
 extern void hisi_sas_stop_phys(struct hisi_hba *hisi_hba);
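The reason each dump entry gets its own small struct above: a debugfs file carries exactly one private pointer into its show() callback, so once up to HISI_SAS_MAX_DEBUGFS_DUMP historical snapshots exist, that pointer must identify both the owning HBA and the specific snapshot. A reduced, hypothetical version of the pattern:

#include <linux/seq_file.h>
#include <linux/types.h>

struct hisi_hba;			/* opaque here, as in the header */

struct dump_regs {			/* hypothetical reduced wrapper */
	struct hisi_hba *hisi_hba;	/* back-pointer to the owner */
	u32 *data;			/* this snapshot's register copy */
};

static int regs_show(struct seq_file *s, void *p)
{
	/* s->private names one snapshot, not the whole hba */
	struct dump_regs *regs = s->private;

	seq_printf(s, "first reg: 0x%x\n", regs->data[0]);
	return 0;
}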

============================================================

@@ -587,7 +587,13 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
 	dev = hisi_hba->dev;
 	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
-		if (in_softirq())
+		/*
+		 * For IOs from upper layer, it may already disable preempt
+		 * in the IO path, if disable preempt again in down(),
+		 * function schedule() will report schedule_bug(), so check
+		 * preemptible() before goto down().
+		 */
+		if (!preemptible())
 			return -EINVAL;
 		down(&hisi_hba->sem);
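The guard above in isolation: down() may sleep, and sleeping is only legal from a preemptible task context, which the old in_softirq() test alone did not guarantee (the submission path can run with preemption disabled). A condensed sketch:

#include <linux/errno.h>
#include <linux/preempt.h>
#include <linux/semaphore.h>

static int try_enter(struct semaphore *sem)
{
	if (!preemptible())	/* rejects any atomic context */
		return -EINVAL;

	down(sem);		/* may sleep; safe only past the check */
	return 0;
}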
@@ -968,12 +974,13 @@ static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
 	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
 	struct hisi_sas_phy *phy = sas_phy->lldd_phy;
 	struct asd_sas_port *sas_port = sas_phy->port;
-	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
+	struct hisi_sas_port *port;
 	unsigned long flags;
 	if (!sas_port)
 		return;
+	port = to_hisi_sas_port(sas_port);
 	spin_lock_irqsave(&hisi_hba->lock, flags);
 	port->port_attached = 1;
 	port->id = phy->port_id;
@@ -1045,6 +1052,7 @@ static void hisi_sas_dev_gone(struct domain_device *device)
 	struct hisi_sas_device *sas_dev = device->lldd_dev;
 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
 	struct device *dev = hisi_hba->dev;
+	int ret = 0;
 	dev_info(dev, "dev[%d:%x] is gone\n",
		 sas_dev->device_id, sas_dev->dev_type);
@@ -1056,13 +1064,16 @@ static void hisi_sas_dev_gone(struct domain_device *device)
 		hisi_sas_dereg_device(hisi_hba, device);
-		hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
+		ret = hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
 		device->lldd_dev = NULL;
 	}
 	if (hisi_hba->hw->free_device)
 		hisi_hba->hw->free_device(sas_dev);
-	sas_dev->dev_type = SAS_PHY_UNUSED;
+
+	/* Don't mark it as SAS_PHY_UNUSED if failed to clear ITCT */
+	if (!ret)
+		sas_dev->dev_type = SAS_PHY_UNUSED;
 	sas_dev->sas_device = NULL;
 	up(&hisi_hba->sem);
 }
@@ -1402,7 +1413,7 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state)
 		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
 		struct asd_sas_phy *sas_phy = &phy->sas_phy;
 		struct asd_sas_port *sas_port = sas_phy->port;
-		bool do_port_check = !!(_sas_port != sas_port);
+		bool do_port_check = _sas_port != sas_port;
 		if (!sas_phy->phy->enabled)
			continue;
@@ -1563,7 +1574,7 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
 	struct Scsi_Host *shost = hisi_hba->shost;
 	int rc;
-	if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct)
+	if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
 		queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);
 	if (!hisi_hba->hw->soft_reset)
@@ -2055,7 +2066,7 @@ _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
 	/* Internal abort timed out */
 	if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
-		if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct)
+		if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
			queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);
 		if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
@@ -2676,6 +2687,7 @@ int hisi_sas_probe(struct platform_device *pdev,
 err_out_register_ha:
 	scsi_remove_host(shost);
 err_out_ha:
+	hisi_sas_debugfs_exit(hisi_hba);
 	hisi_sas_free(hisi_hba);
 	scsi_host_put(shost);
 	return rc;
@@ -2687,10 +2699,11 @@ struct dentry *hisi_sas_debugfs_dir;
 static void hisi_sas_debugfs_snapshot_cq_reg(struct hisi_hba *hisi_hba)
 {
 	int queue_entry_size = hisi_hba->hw->complete_hdr_size;
+	int dump_index = hisi_hba->debugfs_dump_index;
 	int i;
 	for (i = 0; i < hisi_hba->queue_count; i++)
-		memcpy(hisi_hba->debugfs_complete_hdr[i],
+		memcpy(hisi_hba->debugfs_cq[dump_index][i].complete_hdr,
		       hisi_hba->complete_hdr[i],
		       HISI_SAS_QUEUE_SLOTS * queue_entry_size);
 }
@@ -2698,13 +2711,14 @@ static void hisi_sas_debugfs_snapshot_cq_reg(struct hisi_hba *hisi_hba)
 static void hisi_sas_debugfs_snapshot_dq_reg(struct hisi_hba *hisi_hba)
 {
 	int queue_entry_size = sizeof(struct hisi_sas_cmd_hdr);
+	int dump_index = hisi_hba->debugfs_dump_index;
 	int i;
 	for (i = 0; i < hisi_hba->queue_count; i++) {
 		struct hisi_sas_cmd_hdr *debugfs_cmd_hdr, *cmd_hdr;
 		int j;
-		debugfs_cmd_hdr = hisi_hba->debugfs_cmd_hdr[i];
+		debugfs_cmd_hdr = hisi_hba->debugfs_dq[dump_index][i].hdr;
 		cmd_hdr = hisi_hba->cmd_hdr[i];
 		for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++)
@@ -2715,6 +2729,7 @@ static void hisi_sas_debugfs_snapshot_dq_reg(struct hisi_hba *hisi_hba)
 static void hisi_sas_debugfs_snapshot_port_reg(struct hisi_hba *hisi_hba)
 {
+	int dump_index = hisi_hba->debugfs_dump_index;
 	const struct hisi_sas_debugfs_reg *port =
		hisi_hba->hw->debugfs_reg_port;
 	int i, phy_cnt;
@@ -2722,7 +2737,7 @@ static void hisi_sas_debugfs_snapshot_port_reg(struct hisi_hba *hisi_hba)
 	u32 *databuf;
 	for (phy_cnt = 0; phy_cnt < hisi_hba->n_phy; phy_cnt++) {
-		databuf = (u32 *)hisi_hba->debugfs_port_reg[phy_cnt];
+		databuf = hisi_hba->debugfs_port_reg[dump_index][phy_cnt].data;
 		for (i = 0; i < port->count; i++, databuf++) {
 			offset = port->base_off + 4 * i;
 			*databuf = port->read_port_reg(hisi_hba, phy_cnt,
@@ -2733,7 +2748,8 @@ static void hisi_sas_debugfs_snapshot_port_reg(struct hisi_hba *hisi_hba)
 static void hisi_sas_debugfs_snapshot_global_reg(struct hisi_hba *hisi_hba)
 {
-	u32 *databuf = hisi_hba->debugfs_regs[DEBUGFS_GLOBAL];
+	int dump_index = hisi_hba->debugfs_dump_index;
+	u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL].data;
 	const struct hisi_sas_hw *hw = hisi_hba->hw;
 	const struct hisi_sas_debugfs_reg *global =
		hw->debugfs_reg_array[DEBUGFS_GLOBAL];
@@ -2745,7 +2761,8 @@ static void hisi_sas_debugfs_snapshot_global_reg(struct hisi_hba *hisi_hba)
 static void hisi_sas_debugfs_snapshot_axi_reg(struct hisi_hba *hisi_hba)
 {
-	u32 *databuf = hisi_hba->debugfs_regs[DEBUGFS_AXI];
+	int dump_index = hisi_hba->debugfs_dump_index;
+	u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_AXI].data;
 	const struct hisi_sas_hw *hw = hisi_hba->hw;
 	const struct hisi_sas_debugfs_reg *axi =
		hw->debugfs_reg_array[DEBUGFS_AXI];
@@ -2758,7 +2775,8 @@ static void hisi_sas_debugfs_snapshot_axi_reg(struct hisi_hba *hisi_hba)
 static void hisi_sas_debugfs_snapshot_ras_reg(struct hisi_hba *hisi_hba)
 {
-	u32 *databuf = hisi_hba->debugfs_regs[DEBUGFS_RAS];
+	int dump_index = hisi_hba->debugfs_dump_index;
+	u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_RAS].data;
 	const struct hisi_sas_hw *hw = hisi_hba->hw;
 	const struct hisi_sas_debugfs_reg *ras =
		hw->debugfs_reg_array[DEBUGFS_RAS];
@@ -2771,8 +2789,9 @@ static void hisi_sas_debugfs_snapshot_ras_reg(struct hisi_hba *hisi_hba)
 static void hisi_sas_debugfs_snapshot_itct_reg(struct hisi_hba *hisi_hba)
 {
-	void *cachebuf = hisi_hba->debugfs_itct_cache;
-	void *databuf = hisi_hba->debugfs_itct;
+	int dump_index = hisi_hba->debugfs_dump_index;
+	void *cachebuf = hisi_hba->debugfs_itct_cache[dump_index].cache;
+	void *databuf = hisi_hba->debugfs_itct[dump_index].itct;
 	struct hisi_sas_itct *itct;
 	int i;
@@ -2789,9 +2808,10 @@ static void hisi_sas_debugfs_snapshot_itct_reg(struct hisi_hba *hisi_hba)
 static void hisi_sas_debugfs_snapshot_iost_reg(struct hisi_hba *hisi_hba)
 {
+	int dump_index = hisi_hba->debugfs_dump_index;
 	int max_command_entries = HISI_SAS_MAX_COMMANDS;
-	void *cachebuf = hisi_hba->debugfs_iost_cache;
-	void *databuf = hisi_hba->debugfs_iost;
+	void *cachebuf = hisi_hba->debugfs_iost_cache[dump_index].cache;
+	void *databuf = hisi_hba->debugfs_iost[dump_index].iost;
 	struct hisi_sas_iost *iost;
 	int i;
@@ -2842,11 +2862,12 @@ static void hisi_sas_debugfs_print_reg(u32 *regs_val, const void *ptr,
 static int hisi_sas_debugfs_global_show(struct seq_file *s, void *p)
 {
-	struct hisi_hba *hisi_hba = s->private;
+	struct hisi_sas_debugfs_regs *global = s->private;
+	struct hisi_hba *hisi_hba = global->hisi_hba;
 	const struct hisi_sas_hw *hw = hisi_hba->hw;
 	const void *reg_global = hw->debugfs_reg_array[DEBUGFS_GLOBAL];
-	hisi_sas_debugfs_print_reg(hisi_hba->debugfs_regs[DEBUGFS_GLOBAL],
+	hisi_sas_debugfs_print_reg(global->data,
				   reg_global, s);
 	return 0;
@@ -2868,11 +2889,12 @@ static const struct file_operations hisi_sas_debugfs_global_fops = {
 static int hisi_sas_debugfs_axi_show(struct seq_file *s, void *p)
 {
-	struct hisi_hba *hisi_hba = s->private;
+	struct hisi_sas_debugfs_regs *axi = s->private;
+	struct hisi_hba *hisi_hba = axi->hisi_hba;
 	const struct hisi_sas_hw *hw = hisi_hba->hw;
 	const void *reg_axi = hw->debugfs_reg_array[DEBUGFS_AXI];
-	hisi_sas_debugfs_print_reg(hisi_hba->debugfs_regs[DEBUGFS_AXI],
+	hisi_sas_debugfs_print_reg(axi->data,
				   reg_axi, s);
 	return 0;
@@ -2894,11 +2916,12 @@ static const struct file_operations hisi_sas_debugfs_axi_fops = {
 static int hisi_sas_debugfs_ras_show(struct seq_file *s, void *p)
 {
-	struct hisi_hba *hisi_hba = s->private;
+	struct hisi_sas_debugfs_regs *ras = s->private;
+	struct hisi_hba *hisi_hba = ras->hisi_hba;
 	const struct hisi_sas_hw *hw = hisi_hba->hw;
 	const void *reg_ras = hw->debugfs_reg_array[DEBUGFS_RAS];
-	hisi_sas_debugfs_print_reg(hisi_hba->debugfs_regs[DEBUGFS_RAS],
+	hisi_sas_debugfs_print_reg(ras->data,
				   reg_ras, s);
 	return 0;
@@ -2920,13 +2943,13 @@ static const struct file_operations hisi_sas_debugfs_ras_fops = {
 static int hisi_sas_debugfs_port_show(struct seq_file *s, void *p)
 {
-	struct hisi_sas_phy *phy = s->private;
+	struct hisi_sas_debugfs_port *port = s->private;
+	struct hisi_sas_phy *phy = port->phy;
 	struct hisi_hba *hisi_hba = phy->hisi_hba;
 	const struct hisi_sas_hw *hw = hisi_hba->hw;
 	const struct hisi_sas_debugfs_reg *reg_port = hw->debugfs_reg_port;
-	u32 *databuf = hisi_hba->debugfs_port_reg[phy->sas_phy.id];
-	hisi_sas_debugfs_print_reg(databuf, reg_port, s);
+	hisi_sas_debugfs_print_reg(port->data, reg_port, s);
 	return 0;
 }
@@ -2975,13 +2998,13 @@ static void hisi_sas_show_row_32(struct seq_file *s, int index,
 	seq_puts(s, "\n");
 }
-static void hisi_sas_cq_show_slot(struct seq_file *s, int slot, void *cq_ptr)
+static void hisi_sas_cq_show_slot(struct seq_file *s, int slot,
+				  struct hisi_sas_debugfs_cq *debugfs_cq)
 {
-	struct hisi_sas_cq *cq = cq_ptr;
+	struct hisi_sas_cq *cq = debugfs_cq->cq;
 	struct hisi_hba *hisi_hba = cq->hisi_hba;
-	void *complete_queue = hisi_hba->debugfs_complete_hdr[cq->id];
-	__le32 *complete_hdr = complete_queue +
-			(hisi_hba->hw->complete_hdr_size * slot);
+	__le32 *complete_hdr = debugfs_cq->complete_hdr +
			       (hisi_hba->hw->complete_hdr_size * slot);
 	hisi_sas_show_row_32(s, slot,
			     hisi_hba->hw->complete_hdr_size,
@@ -2990,11 +3013,11 @@ static void hisi_sas_cq_show_slot(struct seq_file *s, int slot, void *cq_ptr)
 static int hisi_sas_debugfs_cq_show(struct seq_file *s, void *p)
 {
-	struct hisi_sas_cq *cq = s->private;
+	struct hisi_sas_debugfs_cq *debugfs_cq = s->private;
 	int slot;
 	for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) {
-		hisi_sas_cq_show_slot(s, slot, cq);
+		hisi_sas_cq_show_slot(s, slot, debugfs_cq);
 	}
 	return 0;
 }
@@ -3014,9 +3037,8 @@ static const struct file_operations hisi_sas_debugfs_cq_fops = {
 static void hisi_sas_dq_show_slot(struct seq_file *s, int slot, void *dq_ptr)
 {
-	struct hisi_sas_dq *dq = dq_ptr;
-	struct hisi_hba *hisi_hba = dq->hisi_hba;
-	void *cmd_queue = hisi_hba->debugfs_cmd_hdr[dq->id];
+	struct hisi_sas_debugfs_dq *debugfs_dq = dq_ptr;
+	void *cmd_queue = debugfs_dq->hdr;
 	__le32 *cmd_hdr = cmd_queue +
		sizeof(struct hisi_sas_cmd_hdr) * slot;
@@ -3048,14 +3070,14 @@ static const struct file_operations hisi_sas_debugfs_dq_fops = {
 static int hisi_sas_debugfs_iost_show(struct seq_file *s, void *p)
 {
-	struct hisi_hba *hisi_hba = s->private;
-	struct hisi_sas_iost *debugfs_iost = hisi_hba->debugfs_iost;
+	struct hisi_sas_debugfs_iost *debugfs_iost = s->private;
+	struct hisi_sas_iost *iost = debugfs_iost->iost;
 	int i, max_command_entries = HISI_SAS_MAX_COMMANDS;
-	for (i = 0; i < max_command_entries; i++, debugfs_iost++) {
-		__le64 *iost = &debugfs_iost->qw0;
+	for (i = 0; i < max_command_entries; i++, iost++) {
+		__le64 *data = &iost->qw0;
-		hisi_sas_show_row_64(s, i, sizeof(*debugfs_iost), iost);
+		hisi_sas_show_row_64(s, i, sizeof(*iost), data);
 	}
 	return 0;
@@ -3076,9 +3098,8 @@ static const struct file_operations hisi_sas_debugfs_iost_fops = {
 static int hisi_sas_debugfs_iost_cache_show(struct seq_file *s, void *p)
 {
-	struct hisi_hba *hisi_hba = s->private;
-	struct hisi_sas_iost_itct_cache *iost_cache =
-		(struct hisi_sas_iost_itct_cache *)hisi_hba->debugfs_iost_cache;
+	struct hisi_sas_debugfs_iost_cache *debugfs_iost_cache = s->private;
+	struct hisi_sas_iost_itct_cache *iost_cache = debugfs_iost_cache->cache;
 	u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4;
 	int i, tab_idx;
 	__le64 *iost;
@@ -3117,13 +3138,13 @@ static const struct file_operations hisi_sas_debugfs_iost_cache_fops = {
 static int hisi_sas_debugfs_itct_show(struct seq_file *s, void *p)
 {
 	int i;
-	struct hisi_hba *hisi_hba = s->private;
-	struct hisi_sas_itct *debugfs_itct = hisi_hba->debugfs_itct;
+	struct hisi_sas_debugfs_itct *debugfs_itct = s->private;
+	struct hisi_sas_itct *itct = debugfs_itct->itct;
-	for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, debugfs_itct++) {
-		__le64 *itct = &debugfs_itct->qw0;
+	for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) {
+		__le64 *data = &itct->qw0;
-		hisi_sas_show_row_64(s, i, sizeof(*debugfs_itct), itct);
+		hisi_sas_show_row_64(s, i, sizeof(*itct), data);
 	}
 	return 0;
@@ -3144,9 +3165,8 @@ static const struct file_operations hisi_sas_debugfs_itct_fops = {
 static int hisi_sas_debugfs_itct_cache_show(struct seq_file *s, void *p)
 {
-	struct hisi_hba *hisi_hba = s->private;
-	struct hisi_sas_iost_itct_cache *itct_cache =
-		(struct hisi_sas_iost_itct_cache *)hisi_hba->debugfs_itct_cache;
+	struct hisi_sas_debugfs_itct_cache *debugfs_itct_cache = s->private;
+	struct hisi_sas_iost_itct_cache *itct_cache = debugfs_itct_cache->cache;
 	u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4;
 	int i, tab_idx;
 	__le64 *itct;
@@ -3184,6 +3204,8 @@ static const struct file_operations hisi_sas_debugfs_itct_cache_fops = {
 static void hisi_sas_debugfs_create_files(struct hisi_hba *hisi_hba)
 {
+	u64 *debugfs_timestamp;
+	int dump_index = hisi_hba->debugfs_dump_index;
 	struct dentry *dump_dentry;
 	struct dentry *dentry;
 	char name[256];
@@ -3191,19 +3213,26 @@ static void hisi_sas_debugfs_create_files(struct hisi_hba *hisi_hba)
 	int c;
 	int d;
-	/* Create dump dir inside device dir */
-	dump_dentry = debugfs_create_dir("dump", hisi_hba->debugfs_dir);
-	hisi_hba->debugfs_dump_dentry = dump_dentry;
-
-	debugfs_create_file("global", 0400, dump_dentry, hisi_hba,
-			    &hisi_sas_debugfs_global_fops);
+	snprintf(name, 256, "%d", dump_index);
+
+	dump_dentry = debugfs_create_dir(name, hisi_hba->debugfs_dump_dentry);
+
+	debugfs_timestamp = &hisi_hba->debugfs_timestamp[dump_index];
+
+	debugfs_create_u64("timestamp", 0400, dump_dentry,
+			   debugfs_timestamp);
+
+	debugfs_create_file("global", 0400, dump_dentry,
+			    &hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL],
+			    &hisi_sas_debugfs_global_fops);
 	/* Create port dir and files */
 	dentry = debugfs_create_dir("port", dump_dentry);
 	for (p = 0; p < hisi_hba->n_phy; p++) {
 		snprintf(name, 256, "%d", p);
-		debugfs_create_file(name, 0400, dentry, &hisi_hba->phy[p],
+		debugfs_create_file(name, 0400, dentry,
+				    &hisi_hba->debugfs_port_reg[dump_index][p],
				    &hisi_sas_debugfs_port_fops);
 	}
@@ -3212,7 +3241,8 @@ static void hisi_sas_debugfs_create_files(struct hisi_hba *hisi_hba)
 	for (c = 0; c < hisi_hba->queue_count; c++) {
 		snprintf(name, 256, "%d", c);
-		debugfs_create_file(name, 0400, dentry, &hisi_hba->cq[c],
+		debugfs_create_file(name, 0400, dentry,
+				    &hisi_hba->debugfs_cq[dump_index][c],
				    &hisi_sas_debugfs_cq_fops);
 	}
@@ -3221,26 +3251,33 @@ static void hisi_sas_debugfs_create_files(struct hisi_hba *hisi_hba)
 	for (d = 0; d < hisi_hba->queue_count; d++) {
 		snprintf(name, 256, "%d", d);
-		debugfs_create_file(name, 0400, dentry, &hisi_hba->dq[d],
+		debugfs_create_file(name, 0400, dentry,
+				    &hisi_hba->debugfs_dq[dump_index][d],
				    &hisi_sas_debugfs_dq_fops);
 	}
-	debugfs_create_file("iost", 0400, dump_dentry, hisi_hba,
+	debugfs_create_file("iost", 0400, dump_dentry,
+			    &hisi_hba->debugfs_iost[dump_index],
			    &hisi_sas_debugfs_iost_fops);
-	debugfs_create_file("iost_cache", 0400, dump_dentry, hisi_hba,
+	debugfs_create_file("iost_cache", 0400, dump_dentry,
+			    &hisi_hba->debugfs_iost_cache[dump_index],
			    &hisi_sas_debugfs_iost_cache_fops);
-	debugfs_create_file("itct", 0400, dump_dentry, hisi_hba,
+	debugfs_create_file("itct", 0400, dump_dentry,
+			    &hisi_hba->debugfs_itct[dump_index],
			    &hisi_sas_debugfs_itct_fops);
-	debugfs_create_file("itct_cache", 0400, dump_dentry, hisi_hba,
+	debugfs_create_file("itct_cache", 0400, dump_dentry,
+			    &hisi_hba->debugfs_itct_cache[dump_index],
			    &hisi_sas_debugfs_itct_cache_fops);
-	debugfs_create_file("axi", 0400, dump_dentry, hisi_hba,
+	debugfs_create_file("axi", 0400, dump_dentry,
+			    &hisi_hba->debugfs_regs[dump_index][DEBUGFS_AXI],
			    &hisi_sas_debugfs_axi_fops);
-	debugfs_create_file("ras", 0400, dump_dentry, hisi_hba,
+	debugfs_create_file("ras", 0400, dump_dentry,
+			    &hisi_hba->debugfs_regs[dump_index][DEBUGFS_RAS],
			    &hisi_sas_debugfs_ras_fops);
 	return;
@@ -3271,8 +3308,7 @@ static ssize_t hisi_sas_debugfs_trigger_dump_write(struct file *file,
 	struct hisi_hba *hisi_hba = file->f_inode->i_private;
 	char buf[8];
-	/* A bit racy, but don't care too much since it's only debugfs */
-	if (hisi_hba->debugfs_snapshot)
+	if (hisi_hba->debugfs_dump_index >= hisi_sas_debugfs_dump_count)
 		return -EFAULT;
 	if (count > 8)
@@ -3539,7 +3575,7 @@ static const struct {
	int		value;
	char		*name;
 } hisi_sas_debugfs_loop_modes[] = {
-	{ HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL, "digial" },
+	{ HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL, "digital" },
	{ HISI_SAS_BIST_LOOPBACK_MODE_SERDES, "serdes" },
	{ HISI_SAS_BIST_LOOPBACK_MODE_REMOTE, "remote" },
 };
@@ -3670,132 +3706,201 @@ static const struct file_operations hisi_sas_debugfs_bist_enable_ops = {
	.owner = THIS_MODULE,
 };
+static ssize_t hisi_sas_debugfs_phy_down_cnt_write(struct file *filp,
+						   const char __user *buf,
+						   size_t count, loff_t *ppos)
+{
+	struct seq_file *s = filp->private_data;
+	struct hisi_sas_phy *phy = s->private;
+	unsigned int set_val;
+	int res;
+
+	res = kstrtouint_from_user(buf, count, 0, &set_val);
+	if (res)
+		return res;
+
+	if (set_val > 0)
+		return -EINVAL;
+
+	atomic_set(&phy->down_cnt, 0);
+
+	return count;
+}
+
+static int hisi_sas_debugfs_phy_down_cnt_show(struct seq_file *s, void *p)
+{
+	struct hisi_sas_phy *phy = s->private;
+
+	seq_printf(s, "%d\n", atomic_read(&phy->down_cnt));
+
+	return 0;
+}
+
+static int hisi_sas_debugfs_phy_down_cnt_open(struct inode *inode,
+					      struct file *filp)
+{
+	return single_open(filp, hisi_sas_debugfs_phy_down_cnt_show,
+			   inode->i_private);
+}
+
+static const struct file_operations hisi_sas_debugfs_phy_down_cnt_ops = {
+	.open = hisi_sas_debugfs_phy_down_cnt_open,
+	.read = seq_read,
+	.write = hisi_sas_debugfs_phy_down_cnt_write,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.owner = THIS_MODULE,
+};
+
 void hisi_sas_debugfs_work_handler(struct work_struct *work)
 {
 	struct hisi_hba *hisi_hba =
		container_of(work, struct hisi_hba, debugfs_work);
+	int debugfs_dump_index = hisi_hba->debugfs_dump_index;
+	struct device *dev = hisi_hba->dev;
+	u64 timestamp = local_clock();
-	if (hisi_hba->debugfs_snapshot)
+	if (debugfs_dump_index >= hisi_sas_debugfs_dump_count) {
+		dev_warn(dev, "dump count exceeded!\n");
 		return;
-	hisi_hba->debugfs_snapshot = true;
+	}
+
+	do_div(timestamp, NSEC_PER_MSEC);
+	hisi_hba->debugfs_timestamp[debugfs_dump_index] = timestamp;
 	hisi_sas_debugfs_snapshot_regs(hisi_hba);
+	hisi_hba->debugfs_dump_index++;
 }
 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_work_handler);
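Two small points in the handler above: the dump slot is range-checked against the module parameter before anything is copied, and the timestamp comes from local_clock(), which returns nanoseconds in a u64 and is reduced to milliseconds with do_div() so the division also works on 32-bit builds. The conversion on its own:

#include <linux/sched/clock.h>
#include <linux/time64.h>
#include <asm/div64.h>

static u64 now_msecs(void)
{
	u64 timestamp = local_clock();	/* nanoseconds since boot */

	/* do_div() performs 64-by-32 division in place, portably */
	do_div(timestamp, NSEC_PER_MSEC);
	return timestamp;
}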
-static void hisi_sas_debugfs_release(struct hisi_hba *hisi_hba)
+static void hisi_sas_debugfs_release(struct hisi_hba *hisi_hba, int dump_index)
 {
 	struct device *dev = hisi_hba->dev;
 	int i;
-	devm_kfree(dev, hisi_hba->debugfs_iost_cache);
-	devm_kfree(dev, hisi_hba->debugfs_itct_cache);
-	devm_kfree(dev, hisi_hba->debugfs_iost);
+	devm_kfree(dev, hisi_hba->debugfs_iost_cache[dump_index].cache);
+	devm_kfree(dev, hisi_hba->debugfs_itct_cache[dump_index].cache);
+	devm_kfree(dev, hisi_hba->debugfs_iost[dump_index].iost);
+	devm_kfree(dev, hisi_hba->debugfs_itct[dump_index].itct);
 	for (i = 0; i < hisi_hba->queue_count; i++)
-		devm_kfree(dev, hisi_hba->debugfs_cmd_hdr[i]);
+		devm_kfree(dev, hisi_hba->debugfs_dq[dump_index][i].hdr);
 	for (i = 0; i < hisi_hba->queue_count; i++)
-		devm_kfree(dev, hisi_hba->debugfs_complete_hdr[i]);
+		devm_kfree(dev,
+			   hisi_hba->debugfs_cq[dump_index][i].complete_hdr);
 	for (i = 0; i < DEBUGFS_REGS_NUM; i++)
-		devm_kfree(dev, hisi_hba->debugfs_regs[i]);
+		devm_kfree(dev, hisi_hba->debugfs_regs[dump_index][i].data);
 	for (i = 0; i < hisi_hba->n_phy; i++)
-		devm_kfree(dev, hisi_hba->debugfs_port_reg[i]);
+		devm_kfree(dev, hisi_hba->debugfs_port_reg[dump_index][i].data);
 }
-static int hisi_sas_debugfs_alloc(struct hisi_hba *hisi_hba)
+static int hisi_sas_debugfs_alloc(struct hisi_hba *hisi_hba, int dump_index)
 {
 	const struct hisi_sas_hw *hw = hisi_hba->hw;
 	struct device *dev = hisi_hba->dev;
-	int p, c, d;
+	int p, c, d, r, i;
 	size_t sz;
-	hisi_hba->debugfs_dump_dentry =
-		debugfs_create_dir("dump", hisi_hba->debugfs_dir);
-
-	sz = hw->debugfs_reg_array[DEBUGFS_GLOBAL]->count * 4;
-	hisi_hba->debugfs_regs[DEBUGFS_GLOBAL] =
-		devm_kmalloc(dev, sz, GFP_KERNEL);
-	if (!hisi_hba->debugfs_regs[DEBUGFS_GLOBAL])
-		goto fail;
+	for (r = 0; r < DEBUGFS_REGS_NUM; r++) {
+		struct hisi_sas_debugfs_regs *regs =
+				&hisi_hba->debugfs_regs[dump_index][r];
+
+		sz = hw->debugfs_reg_array[r]->count * 4;
+		regs->data = devm_kmalloc(dev, sz, GFP_KERNEL);
+		if (!regs->data)
+			goto fail;
+		regs->hisi_hba = hisi_hba;
+	}
 	sz = hw->debugfs_reg_port->count * 4;
 	for (p = 0; p < hisi_hba->n_phy; p++) {
-		hisi_hba->debugfs_port_reg[p] =
-			devm_kmalloc(dev, sz, GFP_KERNEL);
-		if (!hisi_hba->debugfs_port_reg[p])
+		struct hisi_sas_debugfs_port *port =
+				&hisi_hba->debugfs_port_reg[dump_index][p];
+
+		port->data = devm_kmalloc(dev, sz, GFP_KERNEL);
+		if (!port->data)
 			goto fail;
+		port->phy = &hisi_hba->phy[p];
 	}
-	sz = hw->debugfs_reg_array[DEBUGFS_AXI]->count * 4;
-	hisi_hba->debugfs_regs[DEBUGFS_AXI] =
-		devm_kmalloc(dev, sz, GFP_KERNEL);
-	if (!hisi_hba->debugfs_regs[DEBUGFS_AXI])
-		goto fail;
-
-	sz = hw->debugfs_reg_array[DEBUGFS_RAS]->count * 4;
-	hisi_hba->debugfs_regs[DEBUGFS_RAS] =
-		devm_kmalloc(dev, sz, GFP_KERNEL);
-	if (!hisi_hba->debugfs_regs[DEBUGFS_RAS])
-		goto fail;
 	sz = hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
 	for (c = 0; c < hisi_hba->queue_count; c++) {
-		hisi_hba->debugfs_complete_hdr[c] =
-			devm_kmalloc(dev, sz, GFP_KERNEL);
-		if (!hisi_hba->debugfs_complete_hdr[c])
+		struct hisi_sas_debugfs_cq *cq =
+				&hisi_hba->debugfs_cq[dump_index][c];
+
+		cq->complete_hdr = devm_kmalloc(dev, sz, GFP_KERNEL);
+		if (!cq->complete_hdr)
 			goto fail;
+		cq->cq = &hisi_hba->cq[c];
 	}
 	sz = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
 	for (d = 0; d < hisi_hba->queue_count; d++) {
-		hisi_hba->debugfs_cmd_hdr[d] =
-			devm_kmalloc(dev, sz, GFP_KERNEL);
-		if (!hisi_hba->debugfs_cmd_hdr[d])
+		struct hisi_sas_debugfs_dq *dq =
+				&hisi_hba->debugfs_dq[dump_index][d];
+
+		dq->hdr = devm_kmalloc(dev, sz, GFP_KERNEL);
+		if (!dq->hdr)
 			goto fail;
+		dq->dq = &hisi_hba->dq[d];
 	}
 	sz = HISI_SAS_MAX_COMMANDS * sizeof(struct hisi_sas_iost);
-	hisi_hba->debugfs_iost = devm_kmalloc(dev, sz, GFP_KERNEL);
-	if (!hisi_hba->debugfs_iost)
+	hisi_hba->debugfs_iost[dump_index].iost =
+		devm_kmalloc(dev, sz, GFP_KERNEL);
+	if (!hisi_hba->debugfs_iost[dump_index].iost)
 		goto fail;
 	sz = HISI_SAS_IOST_ITCT_CACHE_NUM *
	     sizeof(struct hisi_sas_iost_itct_cache);
-	hisi_hba->debugfs_iost_cache = devm_kmalloc(dev, sz, GFP_KERNEL);
-	if (!hisi_hba->debugfs_iost_cache)
+	hisi_hba->debugfs_iost_cache[dump_index].cache =
+		devm_kmalloc(dev, sz, GFP_KERNEL);
+	if (!hisi_hba->debugfs_iost_cache[dump_index].cache)
 		goto fail;
 	sz = HISI_SAS_IOST_ITCT_CACHE_NUM *
	     sizeof(struct hisi_sas_iost_itct_cache);
-	hisi_hba->debugfs_itct_cache = devm_kmalloc(dev, sz, GFP_KERNEL);
-	if (!hisi_hba->debugfs_itct_cache)
+	hisi_hba->debugfs_itct_cache[dump_index].cache =
+		devm_kmalloc(dev, sz, GFP_KERNEL);
+	if (!hisi_hba->debugfs_itct_cache[dump_index].cache)
 		goto fail;
 	/* New memory allocation must be locate before itct */
 	sz = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
-	hisi_hba->debugfs_itct = devm_kmalloc(dev, sz, GFP_KERNEL);
-	if (!hisi_hba->debugfs_itct)
+	hisi_hba->debugfs_itct[dump_index].itct =
+		devm_kmalloc(dev, sz, GFP_KERNEL);
+	if (!hisi_hba->debugfs_itct[dump_index].itct)
 		goto fail;
 	return 0;
 fail:
-	hisi_sas_debugfs_release(hisi_hba);
+	for (i = 0; i < hisi_sas_debugfs_dump_count; i++)
+		hisi_sas_debugfs_release(hisi_hba, i);
 	return -ENOMEM;
 }
+static void hisi_sas_debugfs_phy_down_cnt_init(struct hisi_hba *hisi_hba)
+{
+	struct dentry *dir = debugfs_create_dir("phy_down_cnt",
+						hisi_hba->debugfs_dir);
+	char name[16];
+	int phy_no;
+
+	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
+		snprintf(name, 16, "%d", phy_no);
+		debugfs_create_file(name, 0600, dir,
+				    &hisi_hba->phy[phy_no],
+				    &hisi_sas_debugfs_phy_down_cnt_ops);
+	}
+}
+
 static void hisi_sas_debugfs_bist_init(struct hisi_hba *hisi_hba)
 {
 	hisi_hba->debugfs_bist_dentry =
@ -3827,6 +3932,7 @@ static void hisi_sas_debugfs_bist_init(struct hisi_hba *hisi_hba)
void hisi_sas_debugfs_init(struct hisi_hba *hisi_hba) void hisi_sas_debugfs_init(struct hisi_hba *hisi_hba)
{ {
struct device *dev = hisi_hba->dev; struct device *dev = hisi_hba->dev;
int i;
hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev), hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev),
hisi_sas_debugfs_dir); hisi_sas_debugfs_dir);
@ -3838,9 +3944,17 @@ void hisi_sas_debugfs_init(struct hisi_hba *hisi_hba)
/* create bist structures */ /* create bist structures */
hisi_sas_debugfs_bist_init(hisi_hba); hisi_sas_debugfs_bist_init(hisi_hba);
if (hisi_sas_debugfs_alloc(hisi_hba)) { hisi_hba->debugfs_dump_dentry =
debugfs_remove_recursive(hisi_hba->debugfs_dir); debugfs_create_dir("dump", hisi_hba->debugfs_dir);
dev_dbg(dev, "failed to init debugfs!\n");
hisi_sas_debugfs_phy_down_cnt_init(hisi_hba);
for (i = 0; i < hisi_sas_debugfs_dump_count; i++) {
if (hisi_sas_debugfs_alloc(hisi_hba, i)) {
debugfs_remove_recursive(hisi_hba->debugfs_dir);
dev_dbg(dev, "failed to init debugfs!\n");
break;
}
} }
} }
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_init); EXPORT_SYMBOL_GPL(hisi_sas_debugfs_init);
@ -3874,14 +3988,24 @@ EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable);
module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444); module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444);
MODULE_PARM_DESC(hisi_sas_debugfs_enable, "Enable driver debugfs (default disabled)"); MODULE_PARM_DESC(hisi_sas_debugfs_enable, "Enable driver debugfs (default disabled)");
u32 hisi_sas_debugfs_dump_count = 1;
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dump_count);
module_param_named(debugfs_dump_count, hisi_sas_debugfs_dump_count, uint, 0444);
MODULE_PARM_DESC(hisi_sas_debugfs_dump_count, "Number of debugfs dumps to allow");
static __init int hisi_sas_init(void) static __init int hisi_sas_init(void)
{ {
hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops); hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
if (!hisi_sas_stt) if (!hisi_sas_stt)
return -ENOMEM; return -ENOMEM;
if (hisi_sas_debugfs_enable) if (hisi_sas_debugfs_enable) {
hisi_sas_debugfs_dir = debugfs_create_dir("hisi_sas", NULL); hisi_sas_debugfs_dir = debugfs_create_dir("hisi_sas", NULL);
if (hisi_sas_debugfs_dump_count > HISI_SAS_MAX_DEBUGFS_DUMP) {
pr_info("hisi_sas: Limiting debugfs dump count\n");
hisi_sas_debugfs_dump_count = HISI_SAS_MAX_DEBUGFS_DUMP;
}
}
return 0; return 0;
} }
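The debugfs_dump_count parameter added above is read-only (0444) and is clamped once at module init rather than rejected, so the module still loads with a sane value. A minimal standalone sketch of that clamp-at-init pattern; MAX_DUMPS and the names here are illustrative, not the driver's constants:

#include <linux/module.h>

#define MAX_DUMPS 50	/* illustrative bound, not HISI_SAS_MAX_DEBUGFS_DUMP */

static unsigned int dump_count = 1;
module_param(dump_count, uint, 0444);
MODULE_PARM_DESC(dump_count, "Number of dump slots to preallocate");

static int __init demo_init(void)
{
	/* Clamp rather than fail: keep loading with a corrected value. */
	if (dump_count > MAX_DUMPS) {
		pr_info("demo: limiting dump_count to %d\n", MAX_DUMPS);
		dump_count = MAX_DUMPS;
	}
	return 0;
}
module_init(demo_init);

static void __exit demo_exit(void) { }
module_exit(demo_exit);
MODULE_LICENSE("GPL");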

View File

@@ -531,8 +531,8 @@ static void setup_itct_v1_hw(struct hisi_hba *hisi_hba,
 			(0xff00ULL << ITCT_HDR_REJ_OPEN_TL_OFF));
 }
 
-static void clear_itct_v1_hw(struct hisi_hba *hisi_hba,
-			     struct hisi_sas_device *sas_dev)
+static int clear_itct_v1_hw(struct hisi_hba *hisi_hba,
+			    struct hisi_sas_device *sas_dev)
 {
 	u64 dev_id = sas_dev->device_id;
 	struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
@@ -551,6 +551,8 @@ static void clear_itct_v1_hw(struct hisi_hba *hisi_hba,
 	qw0 = le64_to_cpu(itct->qw0);
 	qw0 &= ~ITCT_HDR_VALID_MSK;
 	itct->qw0 = cpu_to_le64(qw0);
+
+	return 0;
 }
 
 static int reset_hw_v1_hw(struct hisi_hba *hisi_hba)

View File

@@ -974,13 +974,14 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
 			(0x1ULL << ITCT_HDR_RTOLT_OFF));
 }
 
-static void clear_itct_v2_hw(struct hisi_hba *hisi_hba,
-			     struct hisi_sas_device *sas_dev)
+static int clear_itct_v2_hw(struct hisi_hba *hisi_hba,
+			    struct hisi_sas_device *sas_dev)
 {
 	DECLARE_COMPLETION_ONSTACK(completion);
 	u64 dev_id = sas_dev->device_id;
 	struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
 	u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
+	struct device *dev = hisi_hba->dev;
 	int i;
 
 	sas_dev->completion = &completion;
@@ -990,13 +991,19 @@ static void clear_itct_v2_hw(struct hisi_hba *hisi_hba,
 		hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
 				 ENT_INT_SRC3_ITC_INT_MSK);
 
+	/* need to set register twice to clear ITCT for v2 hw */
 	for (i = 0; i < 2; i++) {
 		reg_val = ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
 		hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);
-		wait_for_completion(sas_dev->completion);
+		if (!wait_for_completion_timeout(sas_dev->completion,
+						 CLEAR_ITCT_TIMEOUT * HZ)) {
+			dev_warn(dev, "failed to clear ITCT\n");
+			return -ETIMEDOUT;
+		}
 
 		memset(itct, 0, sizeof(struct hisi_sas_itct));
 	}
+	return 0;
 }
 
 static void free_device_v2_hw(struct hisi_sas_device *sas_dev)
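Both the v2 path above and the v3 path in the next file make clear_itct fallible by swapping the unbounded wait_for_completion() for a timed wait, so a dead interrupt can no longer hang the caller forever. A tiny sketch of just that pattern; the helper name and the seconds parameter are assumptions for illustration:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Bound a hardware-acknowledgement wait and surface a timeout as
 * -ETIMEDOUT instead of blocking forever if the IRQ never fires.
 */
static int wait_for_hw_ack(struct completion *done, unsigned long secs)
{
	/* wait_for_completion_timeout() returns 0 on timeout */
	if (!wait_for_completion_timeout(done, secs * HZ))
		return -ETIMEDOUT;	/* caller decides how to recover */
	return 0;
}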

View File

@@ -795,13 +795,14 @@ static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
 			(0x1ULL << ITCT_HDR_RTOLT_OFF));
 }
 
-static void clear_itct_v3_hw(struct hisi_hba *hisi_hba,
-			     struct hisi_sas_device *sas_dev)
+static int clear_itct_v3_hw(struct hisi_hba *hisi_hba,
+			    struct hisi_sas_device *sas_dev)
 {
 	DECLARE_COMPLETION_ONSTACK(completion);
 	u64 dev_id = sas_dev->device_id;
 	struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
 	u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
+	struct device *dev = hisi_hba->dev;
 
 	sas_dev->completion = &completion;
@@ -814,8 +815,14 @@ static void clear_itct_v3_hw(struct hisi_hba *hisi_hba,
 	reg_val = ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
 	hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);
-	wait_for_completion(sas_dev->completion);
+	if (!wait_for_completion_timeout(sas_dev->completion,
+					 CLEAR_ITCT_TIMEOUT * HZ)) {
+		dev_warn(dev, "failed to clear ITCT\n");
+		return -ETIMEDOUT;
+	}
 
 	memset(itct, 0, sizeof(struct hisi_sas_itct));
+
+	return 0;
 }
 
 static void dereg_device_v3_hw(struct hisi_hba *hisi_hba,
@@ -1542,6 +1549,8 @@ static irqreturn_t phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
 	u32 phy_state, sl_ctrl, txid_auto;
 	struct device *dev = hisi_hba->dev;
 
+	atomic_inc(&phy->down_cnt);
+
 	del_timer(&phy->timer);
 	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);
@@ -3022,11 +3031,6 @@ static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable)
 		hisi_sas_phy_write32(hisi_hba, phy_id,
 				     SAS_PHY_BIST_CTRL, reg_val);
 
-		mdelay(100);
-		reg_val |= (CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK);
-		hisi_sas_phy_write32(hisi_hba, phy_id,
-				     SAS_PHY_BIST_CTRL, reg_val);
-
 		/* set the bist init value */
 		hisi_sas_phy_write32(hisi_hba, phy_id,
 				     SAS_PHY_BIST_CODE,
@@ -3035,6 +3039,11 @@ static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable)
 				     SAS_PHY_BIST_CODE1,
 				     SAS_PHY_BIST_CODE1_INIT);
 
+		mdelay(100);
+		reg_val |= (CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK);
+		hisi_sas_phy_write32(hisi_hba, phy_id,
+				     SAS_PHY_BIST_CTRL, reg_val);
+
 		/* clear error bit */
 		mdelay(100);
 		hisi_sas_phy_read32(hisi_hba, phy_id, SAS_BIST_ERR_CNT);
@@ -3259,6 +3268,7 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 err_out_register_ha:
 	scsi_remove_host(shost);
 err_out_ha:
+	hisi_sas_debugfs_exit(hisi_hba);
 	scsi_host_put(shost);
 err_out_regions:
 	pci_release_regions(pdev);
@@ -3292,8 +3302,6 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
 	struct hisi_hba *hisi_hba = sha->lldd_ha;
 	struct Scsi_Host *shost = sha->core.shost;
 
-	hisi_sas_debugfs_exit(hisi_hba);
-
 	if (timer_pending(&hisi_hba->timer))
 		del_timer(&hisi_hba->timer);
@@ -3305,6 +3313,7 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
 	hisi_sas_free(hisi_hba);
+	hisi_sas_debugfs_exit(hisi_hba);
 	scsi_host_put(shost);
 }
 
@@ -3422,6 +3431,7 @@ static int hisi_sas_v3_resume(struct pci_dev *pdev)
 	if (rc) {
 		scsi_remove_host(shost);
 		pci_disable_device(pdev);
+		return rc;
 	}
 	hisi_hba->hw->phys_init(hisi_hba);
 	sas_resume_ha(sha);

View File

@@ -38,6 +38,7 @@
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport.h>
+#include <scsi/scsi_cmnd.h>
 
 #include "scsi_priv.h"
 #include "scsi_logging.h"
@@ -554,13 +555,29 @@ struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost)
 }
 EXPORT_SYMBOL(scsi_host_get);
 
+static bool scsi_host_check_in_flight(struct request *rq, void *data,
+				      bool reserved)
+{
+	int *count = data;
+	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+
+	if (test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
+		(*count)++;
+
+	return true;
+}
+
 /**
  * scsi_host_busy - Return the host busy counter
  * @shost:	Pointer to Scsi_Host to inc.
  **/
 int scsi_host_busy(struct Scsi_Host *shost)
 {
-	return atomic_read(&shost->host_busy);
+	int cnt = 0;
+
+	blk_mq_tagset_busy_iter(&shost->tag_set,
+				scsi_host_check_in_flight, &cnt);
+	return cnt;
+}
 EXPORT_SYMBOL(scsi_host_busy);
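The hunk above replaces the per-host atomic busy counter with an on-demand walk of the host's blk-mq tag set, so no shared cacheline has to be written on every command submission and completion. A rough standalone sketch of the same iterate-don't-count idea, under the assumption of a driver that owns a struct blk_mq_tag_set; all names here are illustrative:

#include <linux/blk-mq.h>

/* Count started requests in a tag set only when someone asks. */
static bool my_count_started(struct request *rq, void *data, bool reserved)
{
	int *busy = data;

	if (blk_mq_request_started(rq))
		(*busy)++;
	return true;		/* keep iterating over all tags */
}

static int my_busy(struct blk_mq_tag_set *my_tag_set)
{
	int busy = 0;

	blk_mq_tagset_busy_iter(my_tag_set, my_count_started, &busy);
	return busy;
}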

View File

@@ -498,7 +498,7 @@ ips_setup(char *ips_str)
 	int i;
 	char *key;
 	char *value;
-	IPS_OPTION options[] = {
+	static const IPS_OPTION options[] = {
 		{"noi2o", &ips_force_i2o, 0},
 		{"nommap", &ips_force_memio, 0},
 		{"ioctlsize", &ips_ioctlsize, IPS_IOCTL_SIZE},

View File

@@ -147,7 +147,7 @@ static struct isci_port *sci_port_configuration_agent_find_port(
 /**
  *
  * @controller: This is the controller object that contains the port agent
- * @port_agent: This is the port configruation agent for the controller.
+ * @port_agent: This is the port configuration agent for the controller.
  *
  * This routine will validate the port configuration is correct for the SCU
  * hardware. The SCU hardware allows for port configurations as follows. LP0

View File

@@ -1504,7 +1504,7 @@ static enum sci_status isci_remote_device_construct(struct isci_port *iport,
  * This function builds the isci_remote_device when a libsas dev_found message
  * is received.
  * @isci_host: This parameter specifies the isci host object.
- * @port: This parameter specifies the isci_port conected to this device.
+ * @port: This parameter specifies the isci_port connected to this device.
  *
  * pointer to new isci_remote_device.
  */

View File

@@ -369,8 +369,16 @@ static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task)
 {
 	struct iscsi_conn *conn = task->conn;
 	unsigned int noreclaim_flag;
+	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
 	int rc = 0;
 
+	if (!tcp_sw_conn->sock) {
+		iscsi_conn_printk(KERN_ERR, conn,
+				  "Transport not bound to socket!\n");
+		return -EINVAL;
+	}
+
 	noreclaim_flag = memalloc_noreclaim_save();
 
 	while (iscsi_sw_tcp_xmit_qlen(conn)) {

View File

@@ -605,6 +605,12 @@ struct lpfc_epd_pool {
 	spinlock_t lock;	/* lock for expedite pool */
 };
 
+enum ras_state {
+	INACTIVE,
+	REG_INPROGRESS,
+	ACTIVE
+};
+
 struct lpfc_ras_fwlog {
 	uint8_t *fwlog_buff;
 	uint32_t fw_buffcount; /* Buffer size posted to FW */
@@ -621,7 +627,7 @@ struct lpfc_ras_fwlog {
 	bool ras_enabled;   /* Ras Enabled for the function */
 #define LPFC_RAS_DISABLE_LOGGING 0x00
 #define LPFC_RAS_ENABLE_LOGGING 0x01
-	bool ras_active;    /* RAS logging running state */
+	enum ras_state state;    /* RAS logging running state */
 };
 
 struct lpfc_hba {
@@ -725,6 +731,7 @@ struct lpfc_hba {
 #define HBA_FCOE_MODE		0x4 /* HBA function in FCoE Mode */
 #define HBA_SP_QUEUE_EVT	0x8 /* Slow-path qevt posted to worker thread*/
 #define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */
+#define HBA_PERSISTENT_TOPO	0x20 /* Persistent topology support in hba */
 #define ELS_XRI_ABORT_EVENT	0x40
 #define ASYNC_EVENT		0x80
 #define LINK_DISABLED		0x100 /* Link disabled by user */
@@ -830,6 +837,7 @@ struct lpfc_hba {
 	uint32_t cfg_fcp_mq_threshold;
 	uint32_t cfg_hdw_queue;
 	uint32_t cfg_irq_chann;
+	uint32_t cfg_irq_numa;
 	uint32_t cfg_suppress_rsp;
 	uint32_t cfg_nvme_oas;
 	uint32_t cfg_nvme_embed_cmd;
@@ -872,7 +880,6 @@ struct lpfc_hba {
 	uint32_t cfg_aer_support;
 	uint32_t cfg_sriov_nr_virtfn;
 	uint32_t cfg_request_firmware_upgrade;
-	uint32_t cfg_iocb_cnt;
 	uint32_t cfg_suppress_link_up;
 	uint32_t cfg_rrq_xri_bitmap_sz;
 	uint32_t cfg_delay_discovery;
@@ -990,7 +997,6 @@ struct lpfc_hba {
 	struct dma_pool *lpfc_drb_pool; /* data receive buffer pool */
 	struct dma_pool *lpfc_nvmet_drb_pool; /* data receive buffer pool */
 	struct dma_pool *lpfc_hbq_pool;	/* SLI3 hbq buffer pool */
-	struct dma_pool *txrdy_payload_pool;
 	struct dma_pool *lpfc_cmd_rsp_buf_pool;
 	struct lpfc_dma_pool lpfc_mbuf_safety_pool;
@@ -1055,6 +1061,7 @@ struct lpfc_hba {
 #ifdef LPFC_HDWQ_LOCK_STAT
 	struct dentry *debug_lockstat;
 #endif
+	struct dentry *debug_ras_log;
 	atomic_t nvmeio_trc_cnt;
 	uint32_t nvmeio_trc_size;
 	uint32_t nvmeio_trc_output_idx;
@@ -1209,6 +1216,13 @@ struct lpfc_hba {
 	uint64_t ktime_seg10_min;
 	uint64_t ktime_seg10_max;
 #endif
+
+	struct hlist_node cpuhp;	/* used for cpuhp per hba callback */
+	struct timer_list cpuhp_poll_timer;
+	struct list_head poll_list;	/* slowpath eq polling list */
+#define LPFC_POLL_HB		1	/* slowpath heartbeat */
+#define LPFC_POLL_FASTPATH	0	/* called from fastpath */
+#define LPFC_POLL_SLOWPATH	1	/* called from slowpath */
 };
 
 static inline struct Scsi_Host *
@@ -1298,6 +1312,26 @@ lpfc_phba_elsring(struct lpfc_hba *phba)
 	return &phba->sli.sli3_ring[LPFC_ELS_RING];
 }
 
+/**
+ * lpfc_next_online_numa_cpu - Finds next online CPU on NUMA node
+ * @numa_mask: Pointer to phba's numa_mask member.
+ * @start: starting cpu index
+ *
+ * Note: If no valid cpu found, then nr_cpu_ids is returned.
+ *
+ **/
+static inline unsigned int
+lpfc_next_online_numa_cpu(const struct cpumask *numa_mask, unsigned int start)
+{
+	unsigned int cpu_it;
+
+	for_each_cpu_wrap(cpu_it, numa_mask, start) {
+		if (cpu_online(cpu_it))
+			break;
+	}
+
+	return cpu_it;
+}
+
 /**
  * lpfc_sli4_mod_hba_eq_delay - update EQ delay
  * @phba: Pointer to HBA context object.
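The wrap-around search added above returns nr_cpu_ids when the mask holds no online CPU, so callers can use that as a sentinel. A hypothative caller spreading interrupt vectors round-robin over one NUMA node's online CPUs might look like the sketch below; assign_vectors, node_mask, and nvec are assumptions for illustration:

#include <linux/cpumask.h>
#include <linux/kernel.h>

static void assign_vectors(const struct cpumask *node_mask, int nvec)
{
	unsigned int cpu = 0;
	int i;

	for (i = 0; i < nvec; i++) {
		cpu = lpfc_next_online_numa_cpu(node_mask, cpu);
		if (cpu >= nr_cpu_ids)	/* no online CPU in this mask */
			break;
		pr_info("vector %d -> cpu %u\n", i, cpu);
		cpu = (cpu + 1) % nr_cpu_ids;	/* resume after the last hit */
	}
}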

View File

@@ -176,7 +176,6 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 	int i;
 	int len = 0;
 	char tmp[LPFC_MAX_NVME_INFO_TMP_LEN] = {0};
-	unsigned long iflags = 0;
 
 	if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
 		len = scnprintf(buf, PAGE_SIZE, "NVME Disabled\n");
@@ -347,7 +346,6 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 	if (strlcat(buf, "\nNVME Initiator Enabled\n", PAGE_SIZE) >= PAGE_SIZE)
 		goto buffer_done;
 
-	rcu_read_lock();
 	scnprintf(tmp, sizeof(tmp),
 		  "XRI Dist lpfc%d Total %d IO %d ELS %d\n",
 		  phba->brd_no,
@@ -355,7 +353,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 		  phba->sli4_hba.io_xri_max,
 		  lpfc_sli4_get_els_iocb_cnt(phba));
 	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
-		goto rcu_unlock_buf_done;
+		goto buffer_done;
 
 	/* Port state is only one of two values for now. */
 	if (localport->port_id)
@@ -371,15 +369,17 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 		  wwn_to_u64(vport->fc_nodename.u.wwn),
 		  localport->port_id, statep);
 	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
-		goto rcu_unlock_buf_done;
+		goto buffer_done;
+
+	spin_lock_irq(shost->host_lock);
 
 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
 		nrport = NULL;
-		spin_lock_irqsave(&vport->phba->hbalock, iflags);
+		spin_lock(&vport->phba->hbalock);
 		rport = lpfc_ndlp_get_nrport(ndlp);
 		if (rport)
 			nrport = rport->remoteport;
-		spin_unlock_irqrestore(&vport->phba->hbalock, iflags);
+		spin_unlock(&vport->phba->hbalock);
 		if (!nrport)
 			continue;
@@ -398,39 +398,39 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 		/* Tab in to show lport ownership. */
 		if (strlcat(buf, "NVME RPORT       ", PAGE_SIZE) >= PAGE_SIZE)
-			goto rcu_unlock_buf_done;
+			goto unlock_buf_done;
 		if (phba->brd_no >= 10) {
 			if (strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
-				goto rcu_unlock_buf_done;
+				goto unlock_buf_done;
 		}
 
 		scnprintf(tmp, sizeof(tmp), "WWPN x%llx ",
 			  nrport->port_name);
 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
-			goto rcu_unlock_buf_done;
+			goto unlock_buf_done;
 
 		scnprintf(tmp, sizeof(tmp), "WWNN x%llx ",
 			  nrport->node_name);
 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
-			goto rcu_unlock_buf_done;
+			goto unlock_buf_done;
 
 		scnprintf(tmp, sizeof(tmp), "DID x%06x ",
 			  nrport->port_id);
 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
-			goto rcu_unlock_buf_done;
+			goto unlock_buf_done;
 
 		/* An NVME rport can have multiple roles. */
 		if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) {
 			if (strlcat(buf, "INITIATOR ", PAGE_SIZE) >= PAGE_SIZE)
-				goto rcu_unlock_buf_done;
+				goto unlock_buf_done;
 		}
 		if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) {
 			if (strlcat(buf, "TARGET ", PAGE_SIZE) >= PAGE_SIZE)
-				goto rcu_unlock_buf_done;
+				goto unlock_buf_done;
 		}
 		if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) {
 			if (strlcat(buf, "DISCSRVC ", PAGE_SIZE) >= PAGE_SIZE)
-				goto rcu_unlock_buf_done;
+				goto unlock_buf_done;
 		}
 		if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR |
 					  FC_PORT_ROLE_NVME_TARGET |
@@ -438,14 +438,14 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 			scnprintf(tmp, sizeof(tmp), "UNKNOWN ROLE x%x",
 				  nrport->port_role);
 			if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
-				goto rcu_unlock_buf_done;
+				goto unlock_buf_done;
 		}
 
 		scnprintf(tmp, sizeof(tmp), "%s\n", statep);
 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
-			goto rcu_unlock_buf_done;
+			goto unlock_buf_done;
 	}
-	rcu_read_unlock();
+	spin_unlock_irq(shost->host_lock);
 
 	if (!lport)
 		goto buffer_done;
@@ -505,11 +505,11 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 		  atomic_read(&lport->cmpl_fcp_err));
 	strlcat(buf, tmp, PAGE_SIZE);
 
-	/* RCU is already unlocked. */
+	/* host_lock is already unlocked. */
 	goto buffer_done;
 
- rcu_unlock_buf_done:
-	rcu_read_unlock();
+ unlock_buf_done:
+	spin_unlock_irq(shost->host_lock);
 
  buffer_done:
 	len = strnlen(buf, PAGE_SIZE);
@@ -1475,8 +1475,9 @@ lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
 	int i;
 
 	msleep(100);
-	lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
-		   &portstat_reg.word0);
+	if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
+		       &portstat_reg.word0))
+		return -EIO;
 
 	/* verify if privileged for the request operation */
 	if (!bf_get(lpfc_sliport_status_rn, &portstat_reg) &&
@@ -1486,8 +1487,9 @@ lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
 	/* wait for the SLI port firmware ready after firmware reset */
 	for (i = 0; i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT; i++) {
 		msleep(10);
-		lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
-			   &portstat_reg.word0);
+		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
+			       &portstat_reg.word0))
+			continue;
 		if (!bf_get(lpfc_sliport_status_err, &portstat_reg))
 			continue;
 		if (!bf_get(lpfc_sliport_status_rn, &portstat_reg))
@@ -1642,7 +1644,7 @@ lpfc_set_trunking(struct lpfc_hba *phba, char *buff_out)
 {
 	LPFC_MBOXQ_t *mbox = NULL;
 	unsigned long val = 0;
-	char *pval = 0;
+	char *pval = NULL;
 	int rc = 0;
 
 	if (!strncmp("enable", buff_out,
@@ -3533,6 +3535,31 @@ LPFC_ATTR_R(enable_rrq, 2, 0, 2,
 LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK,
 	    LPFC_DELAY_INIT_LINK_INDEFINITELY,
 	    "Suppress Link Up at initialization");
+
+static ssize_t
+lpfc_pls_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n",
+			 phba->sli4_hba.pc_sli4_params.pls);
+}
+static DEVICE_ATTR(pls, 0444,
+		   lpfc_pls_show, NULL);
+
+static ssize_t
+lpfc_pt_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n",
+			 (phba->hba_flag & HBA_PERSISTENT_TOPO) ? 1 : 0);
+}
+static DEVICE_ATTR(pt, 0444,
+		   lpfc_pt_show, NULL);
+
 /*
 # lpfc_cnt: Number of IOCBs allocated for ELS, CT, and ABTS
 # 1 - (1024)
@@ -3580,9 +3607,6 @@ lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr,
 static DEVICE_ATTR(txcmplq_hw, S_IRUGO,
 		   lpfc_txcmplq_hw_show, NULL);
 
-LPFC_ATTR_R(iocb_cnt, 2, 1, 5,
-	    "Number of IOCBs alloc for ELS, CT, and ABTS: 1k to 5k IOCBs");
-
 /*
 # lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
 # until the timer expires. Value range is [0,255]. Default value is 30.
@@ -4096,7 +4120,16 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
 				val);
 			return -EINVAL;
 		}
-		if ((phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
+		/*
+		 * The 'topology' is not a configurable parameter if :
+		 *   - persistent topology enabled
+		 *   - G7 adapters
+		 *   - G6 with no private loop support
+		 */
+		if (((phba->hba_flag & HBA_PERSISTENT_TOPO) ||
+		     (!phba->sli4_hba.pc_sli4_params.pls &&
+		      phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC) ||
 		     phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC) &&
 		    val == 4) {
 			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
@@ -5298,7 +5331,7 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
 			len += scnprintf(buf + len, PAGE_SIZE - len,
 					"CPU %02d not present\n",
 					phba->sli4_hba.curr_disp_cpu);
-		else if (cpup->irq == LPFC_VECTOR_MAP_EMPTY) {
+		else if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
 			if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY)
 				len += scnprintf(
 					buf + len, PAGE_SIZE - len,
@@ -5311,10 +5344,10 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
 			else
 				len += scnprintf(
 					buf + len, PAGE_SIZE - len,
-					"CPU %02d EQ %04d hdwq %04d "
+					"CPU %02d EQ None hdwq %04d "
 					"physid %d coreid %d ht %d ua %d\n",
 					phba->sli4_hba.curr_disp_cpu,
-					cpup->eq, cpup->hdwq, cpup->phys_id,
+					cpup->hdwq, cpup->phys_id,
 					cpup->core_id,
 					(cpup->flag & LPFC_CPU_MAP_HYPER),
 					(cpup->flag & LPFC_CPU_MAP_UNASSIGN));
@@ -5329,7 +5362,7 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
 					cpup->core_id,
 					(cpup->flag & LPFC_CPU_MAP_HYPER),
 					(cpup->flag & LPFC_CPU_MAP_UNASSIGN),
-					cpup->irq);
+					lpfc_get_irq(cpup->eq));
 			else
 				len += scnprintf(
 					buf + len, PAGE_SIZE - len,
@@ -5340,7 +5373,7 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
 					cpup->core_id,
 					(cpup->flag & LPFC_CPU_MAP_HYPER),
 					(cpup->flag & LPFC_CPU_MAP_UNASSIGN),
-					cpup->irq);
+					lpfc_get_irq(cpup->eq));
 		}
 
 		phba->sli4_hba.curr_disp_cpu++;
@@ -5711,7 +5744,7 @@ LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2,
  * the driver will advertise it supports to the SCSI layer.
  *
  *      0    = Set nr_hw_queues by the number of CPUs or HW queues.
- *      1,128 = Manually specify the maximum nr_hw_queue value to be set,
+ *      1,256 = Manually specify nr_hw_queue value to be advertised,
  *
  * Value range is [0,256]. Default value is 8.
  */
@@ -5729,30 +5762,130 @@ LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF,
  * A hardware IO queue maps (qidx) to a specific driver CQ/WQ.
  *
  *      0 = Configure the number of hdw queues to the number of active CPUs.
- *      1,128 = Manually specify how many hdw queues to use.
+ *      1,256 = Manually specify how many hdw queues to use.
  *
- * Value range is [0,128]. Default value is 0.
+ * Value range is [0,256]. Default value is 0.
  */
 LPFC_ATTR_R(hdw_queue,
 	    LPFC_HBA_HDWQ_DEF,
 	    LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX,
 	    "Set the number of I/O Hardware Queues");
 
+static inline void
+lpfc_assign_default_irq_numa(struct lpfc_hba *phba)
+{
+#if IS_ENABLED(CONFIG_X86)
+	/* If AMD architecture, then default is LPFC_IRQ_CHANN_NUMA */
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+		phba->cfg_irq_numa = 1;
+	else
+		phba->cfg_irq_numa = 0;
+#else
+	phba->cfg_irq_numa = 0;
+#endif
+}
+
 /*
  * lpfc_irq_chann: Set the number of IRQ vectors that are available
  * for Hardware Queues to utilize.  This also will map to the number
  * of EQ / MSI-X vectors the driver will create. This should never be
  * more than the number of Hardware Queues
  *
- *      0     = Configure number of IRQ Channels to the number of active CPUs.
- *      1,128 = Manually specify how many IRQ Channels to use.
+ *	0	= Configure number of IRQ Channels to:
+ *		  if AMD architecture, number of CPUs on HBA's NUMA node
+ *		  otherwise, number of active CPUs.
+ *	[1,256]	= Manually specify how many IRQ Channels to use.
 *
- * Value range is [0,128]. Default value is 0.
+ * Value range is [0,256]. Default value is [0].
 */
-LPFC_ATTR_R(irq_chann,
-	    LPFC_HBA_HDWQ_DEF,
-	    LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX,
-	    "Set the number of I/O IRQ Channels");
+static uint lpfc_irq_chann = LPFC_IRQ_CHANN_DEF;
+module_param(lpfc_irq_chann, uint, 0444);
+MODULE_PARM_DESC(lpfc_irq_chann, "Set number of interrupt vectors to allocate");
+
+/* lpfc_irq_chann_init - Set the hba irq_chann initial value
+ * @phba: lpfc_hba pointer.
+ * @val: contains the initial value
+ *
+ * Description:
+ * Validates the initial value is within range and assigns it to the
+ * adapter. If not in range, an error message is posted and the
+ * default value is assigned.
+ *
+ * Returns:
+ * zero if value is in range and is set
+ * -EINVAL if value was out of range
+ **/
+static int
+lpfc_irq_chann_init(struct lpfc_hba *phba, uint32_t val)
+{
+	const struct cpumask *numa_mask;
+
+	if (phba->cfg_use_msi != 2) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"8532 use_msi = %u ignoring cfg_irq_numa\n",
+				phba->cfg_use_msi);
+		phba->cfg_irq_numa = 0;
+		phba->cfg_irq_chann = LPFC_IRQ_CHANN_MIN;
+		return 0;
+	}
+
+	/* Check if default setting was passed */
+	if (val == LPFC_IRQ_CHANN_DEF)
+		lpfc_assign_default_irq_numa(phba);
+
+	if (phba->cfg_irq_numa) {
+		numa_mask = &phba->sli4_hba.numa_mask;
+
+		if (cpumask_empty(numa_mask)) {
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"8533 Could not identify NUMA node, "
+					"ignoring cfg_irq_numa\n");
+			phba->cfg_irq_numa = 0;
+			phba->cfg_irq_chann = LPFC_IRQ_CHANN_MIN;
+		} else {
+			phba->cfg_irq_chann = cpumask_weight(numa_mask);
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"8543 lpfc_irq_chann set to %u "
+					"(numa)\n", phba->cfg_irq_chann);
+		}
+	} else {
+		if (val > LPFC_IRQ_CHANN_MAX) {
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"8545 lpfc_irq_chann attribute cannot "
+					"be set to %u, allowed range is "
+					"[%u,%u]\n",
+					val,
+					LPFC_IRQ_CHANN_MIN,
+					LPFC_IRQ_CHANN_MAX);
+			phba->cfg_irq_chann = LPFC_IRQ_CHANN_MIN;
+			return -EINVAL;
+		}
+		phba->cfg_irq_chann = val;
+	}
+
+	return 0;
+}
+
+/**
+ * lpfc_irq_chann_show - Display value of irq_chann
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains a string with the list sizes
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_irq_chann_show(struct device *dev, struct device_attribute *attr,
+		    char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n", phba->cfg_irq_chann);
+}
+
+static DEVICE_ATTR_RO(lpfc_irq_chann);
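lpfc_irq_chann now pairs a plain module_param with a hand-written _init validator and a DEVICE_ATTR_RO sysfs view, instead of letting the LPFC_ATTR_R macro generate all three. A bare-bones sketch of the DEVICE_ATTR_RO convention the macro relies on (it expects a matching <name>_show handler; the names below are made up for illustration):

#include <linux/atomic.h>
#include <linux/device.h>

static atomic_t demo_busy_count = ATOMIC_INIT(0);

/* DEVICE_ATTR_RO(demo_busy) wires the attribute to demo_busy_show(). */
static ssize_t demo_busy_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n",
			 atomic_read(&demo_busy_count));
}
static DEVICE_ATTR_RO(demo_busy);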
 /*
 # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
@@ -5933,7 +6066,53 @@ LPFC_ATTR_RW(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics");
  *       [1-4] = Multiple of 1/4th Mb of host memory for FW logging
  * Value range [0..4]. Default value is 0
  */
-LPFC_ATTR_RW(ras_fwlog_buffsize, 0, 0, 4, "Host memory for FW logging");
+LPFC_ATTR(ras_fwlog_buffsize, 0, 0, 4, "Host memory for FW logging");
+lpfc_param_show(ras_fwlog_buffsize);
+
+static ssize_t
+lpfc_ras_fwlog_buffsize_set(struct lpfc_hba *phba, uint val)
+{
+	int ret = 0;
+	enum ras_state state;
+
+	if (!lpfc_rangecheck(val, 0, 4))
+		return -EINVAL;
+
+	if (phba->cfg_ras_fwlog_buffsize == val)
+		return 0;
+
+	if (phba->cfg_ras_fwlog_func != PCI_FUNC(phba->pcidev->devfn))
+		return -EINVAL;
+
+	spin_lock_irq(&phba->hbalock);
+	state = phba->ras_fwlog.state;
+	spin_unlock_irq(&phba->hbalock);
+
+	if (state == REG_INPROGRESS) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "6147 RAS Logging "
+				"registration is in progress\n");
+		return -EBUSY;
+	}
+
+	/* For disable logging: stop the logs and free the DMA.
+	 * For ras_fwlog_buffsize size change we still need to free and
+	 * reallocate the DMA in lpfc_sli4_ras_fwlog_init.
+	 */
+	phba->cfg_ras_fwlog_buffsize = val;
+	if (state == ACTIVE) {
+		lpfc_ras_stop_fwlog(phba);
+		lpfc_sli4_ras_dma_free(phba);
+	}
+
+	lpfc_sli4_ras_init(phba);
+	if (phba->ras_fwlog.ras_enabled)
+		ret = lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
+					       LPFC_RAS_ENABLE_LOGGING);
+	return ret;
+}
+
+lpfc_param_store(ras_fwlog_buffsize);
+static DEVICE_ATTR_RW(lpfc_ras_fwlog_buffsize);
 
 /*
  * lpfc_ras_fwlog_level: Firmware logging verbosity level
@@ -6071,8 +6250,9 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_lpfc_sriov_nr_virtfn,
 	&dev_attr_lpfc_req_fw_upgrade,
 	&dev_attr_lpfc_suppress_link_up,
-	&dev_attr_lpfc_iocb_cnt,
 	&dev_attr_iocb_hw,
+	&dev_attr_pls,
+	&dev_attr_pt,
 	&dev_attr_txq_hw,
 	&dev_attr_txcmplq_hw,
 	&dev_attr_lpfc_fips_level,
@@ -7085,11 +7265,22 @@ struct fc_function_template lpfc_vport_transport_functions = {
 static void
 lpfc_get_hba_function_mode(struct lpfc_hba *phba)
 {
-	/* If it's a SkyHawk FCoE adapter */
-	if (phba->pcidev->device == PCI_DEVICE_ID_SKYHAWK)
+	/* If the adapter supports FCoE mode */
+	switch (phba->pcidev->device) {
+	case PCI_DEVICE_ID_SKYHAWK:
+	case PCI_DEVICE_ID_SKYHAWK_VF:
+	case PCI_DEVICE_ID_LANCER_FCOE:
+	case PCI_DEVICE_ID_LANCER_FCOE_VF:
+	case PCI_DEVICE_ID_ZEPHYR_DCSP:
+	case PCI_DEVICE_ID_HORNET:
+	case PCI_DEVICE_ID_TIGERSHARK:
+	case PCI_DEVICE_ID_TOMCAT:
 		phba->hba_flag |= HBA_FCOE_MODE;
-	else
+		break;
+	default:
+	/* for others, clear the flag */
 		phba->hba_flag &= ~HBA_FCOE_MODE;
+	}
 }
 
 /**
@@ -7099,6 +7290,7 @@ lpfc_get_hba_function_mode(struct lpfc_hba *phba)
 void
 lpfc_get_cfgparam(struct lpfc_hba *phba)
 {
+	lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
 	lpfc_fcp_io_sched_init(phba, lpfc_fcp_io_sched);
 	lpfc_ns_query_init(phba, lpfc_ns_query);
 	lpfc_fcp2_no_tgt_reset_init(phba, lpfc_fcp2_no_tgt_reset);
@@ -7205,12 +7397,10 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	phba->cfg_soft_wwpn = 0L;
 	lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
 	lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
-	lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
 	lpfc_aer_support_init(phba, lpfc_aer_support);
 	lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn);
 	lpfc_request_firmware_upgrade_init(phba, lpfc_req_fw_upgrade);
 	lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
-	lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt);
 	lpfc_delay_discovery_init(phba, lpfc_delay_discovery);
 	lpfc_sli_mode_init(phba, lpfc_sli_mode);
 	phba->cfg_enable_dss = 1;
@@ -7256,11 +7446,11 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
 	}
 
 	if (!phba->cfg_nvmet_mrq)
-		phba->cfg_nvmet_mrq = phba->cfg_irq_chann;
+		phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
 
 	/* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */
-	if (phba->cfg_nvmet_mrq > phba->cfg_irq_chann) {
-		phba->cfg_nvmet_mrq = phba->cfg_irq_chann;
+	if (phba->cfg_nvmet_mrq > phba->cfg_hdw_queue) {
+		phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
 				"6018 Adjust lpfc_nvmet_mrq to %d\n",
 				phba->cfg_nvmet_mrq);

View File

@@ -5435,10 +5435,12 @@ lpfc_bsg_get_ras_config(struct bsg_job *job)
 		bsg_reply->reply_data.vendor_reply.vendor_rsp;
 
 	/* Current logging state */
-	if (ras_fwlog->ras_active == true)
+	spin_lock_irq(&phba->hbalock);
+	if (ras_fwlog->state == ACTIVE)
 		ras_reply->state = LPFC_RASLOG_STATE_RUNNING;
 	else
 		ras_reply->state = LPFC_RASLOG_STATE_STOPPED;
+	spin_unlock_irq(&phba->hbalock);
 
 	ras_reply->log_level = phba->ras_fwlog.fw_loglevel;
 	ras_reply->log_buff_sz = phba->cfg_ras_fwlog_buffsize;
@@ -5495,10 +5497,13 @@ lpfc_bsg_set_ras_config(struct bsg_job *job)
 
 	if (action == LPFC_RASACTION_STOP_LOGGING) {
 		/* Check if already disabled */
-		if (ras_fwlog->ras_active == false) {
+		spin_lock_irq(&phba->hbalock);
+		if (ras_fwlog->state != ACTIVE) {
+			spin_unlock_irq(&phba->hbalock);
 			rc = -ESRCH;
 			goto ras_job_error;
 		}
+		spin_unlock_irq(&phba->hbalock);
 
 		/* Disable logging */
 		lpfc_ras_stop_fwlog(phba);
@@ -5509,8 +5514,10 @@ lpfc_bsg_set_ras_config(struct bsg_job *job)
 		 * FW-logging with new log-level. Return status
 		 * "Logging already Running" to caller.
 		 **/
-		if (ras_fwlog->ras_active)
+		spin_lock_irq(&phba->hbalock);
+		if (ras_fwlog->state != INACTIVE)
 			action_status = -EINPROGRESS;
+		spin_unlock_irq(&phba->hbalock);
 
 		/* Enable logging */
 		rc = lpfc_sli4_ras_fwlog_init(phba, log_level,
@@ -5626,10 +5633,13 @@ lpfc_bsg_get_ras_fwlog(struct bsg_job *job)
 		goto ras_job_error;
 
 	/* Logging to be stopped before reading */
-	if (ras_fwlog->ras_active == true) {
+	spin_lock_irq(&phba->hbalock);
+	if (ras_fwlog->state == ACTIVE) {
+		spin_unlock_irq(&phba->hbalock);
 		rc = -EINPROGRESS;
 		goto ras_job_error;
 	}
+	spin_unlock_irq(&phba->hbalock);
 
 	if (job->request_len <
 	    sizeof(struct fc_bsg_request) +
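All three bsg paths above now sample ras_fwlog.state under hbalock instead of testing a bare bool, since the state machine gained a third value (REG_INPROGRESS) and has multiple writers. A generic sketch of the snapshot-then-act pattern, with placeholder names:

#include <linux/errno.h>
#include <linux/spinlock.h>

enum demo_state { DEMO_INACTIVE, DEMO_REG_INPROGRESS, DEMO_ACTIVE };

struct demo {
	spinlock_t lock;
	enum demo_state state;
};

static int demo_stop(struct demo *d)
{
	enum demo_state state;

	/* Sample the shared state under the lock... */
	spin_lock_irq(&d->lock);
	state = d->state;
	spin_unlock_irq(&d->lock);

	/* ...then act on the local snapshot outside it. */
	if (state != DEMO_ACTIVE)
		return -ESRCH;	/* nothing to stop */
	/* stop logging here */
	return 0;
}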

View File

@@ -215,6 +215,12 @@ irqreturn_t lpfc_sli_fp_intr_handler(int, void *);
 irqreturn_t lpfc_sli4_intr_handler(int, void *);
 irqreturn_t lpfc_sli4_hba_intr_handler(int, void *);
 
+void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba);
+int lpfc_sli4_poll_eq(struct lpfc_queue *q, uint8_t path);
+void lpfc_sli4_poll_hbtimer(struct timer_list *t);
+void lpfc_sli4_start_polling(struct lpfc_queue *q);
+void lpfc_sli4_stop_polling(struct lpfc_queue *q);
+
 void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_sli4_swap_str(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *);
@@ -586,6 +592,7 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *ncmd,
 void lpfc_nvme_cmd_template(void);
 void lpfc_nvmet_cmd_template(void);
 void lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn);
+void lpfc_nvme_prep_abort_wqe(struct lpfc_iocbq *pwqeq, u16 xritag, u8 opt);
 extern int lpfc_enable_nvmet_cnt;
 extern unsigned long long lpfc_enable_nvmet[];
 extern int lpfc_no_hba_reset_cnt;

View File

@@ -763,9 +763,11 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		    cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) {
 			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
 					 "0208 NameServer Rsp Data: x%x x%x "
-					 "sz x%x\n",
+					 "x%x x%x sz x%x\n",
 					 vport->fc_flag,
 					 CTreq->un.gid.Fc4Type,
+					 vport->num_disc_nodes,
+					 vport->gidft_inp,
 					 irsp->un.genreq64.bdl.bdeSize);
 
 			lpfc_ns_rsp(vport,
@@ -961,9 +963,13 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		if (CTrsp->CommandResponse.bits.CmdRsp ==
 		    cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) {
 			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
-					 "4105 NameServer Rsp Data: x%x x%x\n",
+					 "4105 NameServer Rsp Data: x%x x%x "
+					 "x%x x%x sz x%x\n",
 					 vport->fc_flag,
-					 CTreq->un.gid.Fc4Type);
+					 CTreq->un.gid.Fc4Type,
+					 vport->num_disc_nodes,
+					 vport->gidft_inp,
+					 irsp->un.genreq64.bdl.bdeSize);
 
 			lpfc_ns_rsp(vport,
 				    outp,
@@ -1025,6 +1031,11 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		}
 		vport->gidft_inp--;
 	}
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+			 "6450 GID_PT cmpl inp %d disc %d\n",
+			 vport->gidft_inp, vport->num_disc_nodes);
+
 	/* Link up / RSCN discovery */
 	if ((vport->num_disc_nodes == 0) &&
 	    (vport->gidft_inp == 0)) {
@@ -1159,6 +1170,11 @@ out:
 	/* Link up / RSCN discovery */
 	if (vport->num_disc_nodes)
 		vport->num_disc_nodes--;
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+			 "6451 GFF_ID cmpl inp %d disc %d\n",
+			 vport->gidft_inp, vport->num_disc_nodes);
+
 	if (vport->num_disc_nodes == 0) {
 		/*
 		 * The driver has cycled through all Nports in the RSCN payload.
@@ -1868,6 +1884,12 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
 		switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) {
 		case IOERR_SLI_ABORTED:
+		case IOERR_SLI_DOWN:
+			/* Driver aborted this IO.  No retry as error
+			 * is likely Offline->Online or some adapter
+			 * error.  Recovery will try again.
+			 */
+			break;
 		case IOERR_ABORT_IN_PROGRESS:
 		case IOERR_SEQUENCE_TIMEOUT:
 		case IOERR_ILLEGAL_FRAME:

View File

@@ -31,6 +31,7 @@
 #include <linux/pci.h>
 #include <linux/spinlock.h>
 #include <linux/ctype.h>
+#include <linux/vmalloc.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
@@ -2078,6 +2079,96 @@ lpfc_debugfs_lockstat_write(struct file *file, const char __user *buf,
 }
 #endif
 
+static int lpfc_debugfs_ras_log_data(struct lpfc_hba *phba,
+				     char *buffer, int size)
+{
+	int copied = 0;
+	struct lpfc_dmabuf *dmabuf, *next;
+
+	spin_lock_irq(&phba->hbalock);
+	if (phba->ras_fwlog.state != ACTIVE) {
+		spin_unlock_irq(&phba->hbalock);
+		return -EINVAL;
+	}
+	spin_unlock_irq(&phba->hbalock);
+
+	list_for_each_entry_safe(dmabuf, next,
+				 &phba->ras_fwlog.fwlog_buff_list, list) {
+		memcpy(buffer + copied, dmabuf->virt, LPFC_RAS_MAX_ENTRY_SIZE);
+		copied += LPFC_RAS_MAX_ENTRY_SIZE;
+		if (size > copied)
+			break;
+	}
+	return copied;
+}
+
+static int
+lpfc_debugfs_ras_log_release(struct inode *inode, struct file *file)
+{
+	struct lpfc_debug *debug = file->private_data;
+
+	vfree(debug->buffer);
+	kfree(debug);
+
+	return 0;
+}
+
+/**
+ * lpfc_debugfs_ras_log_open - Open the RAS log debugfs buffer
+ * @inode: The inode pointer that contains a vport pointer.
+ * @file: The file pointer to attach the log output.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It gets
+ * the vport from the i_private field in @inode, allocates the necessary buffer
+ * for the log, fills the buffer from the in-memory log for this vport, and then
+ * returns a pointer to that log in the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return a negative
+ * error value.
+ **/
+static int
+lpfc_debugfs_ras_log_open(struct inode *inode, struct file *file)
+{
+	struct lpfc_hba *phba = inode->i_private;
+	struct lpfc_debug *debug;
+	int size;
+	int rc = -ENOMEM;
+
+	spin_lock_irq(&phba->hbalock);
+	if (phba->ras_fwlog.state != ACTIVE) {
+		spin_unlock_irq(&phba->hbalock);
+		rc = -EINVAL;
+		goto out;
+	}
+	spin_unlock_irq(&phba->hbalock);
+	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+	if (!debug)
+		goto out;
+
+	size = LPFC_RAS_MIN_BUFF_POST_SIZE * phba->cfg_ras_fwlog_buffsize;
+	debug->buffer = vmalloc(size);
+	if (!debug->buffer)
+		goto free_debug;
+
+	debug->len = lpfc_debugfs_ras_log_data(phba, debug->buffer, size);
+	if (debug->len < 0) {
+		rc = -EINVAL;
+		goto free_buffer;
+	}
+	file->private_data = debug;
+
+	return 0;
+
+free_buffer:
+	vfree(debug->buffer);
+free_debug:
+	kfree(debug);
+out:
+	return rc;
+}
+
 /**
  * lpfc_debugfs_dumpHBASlim_open - Open the Dump HBA SLIM debugfs buffer
  * @inode: The inode pointer that contains a vport pointer.
@@ -5286,6 +5377,16 @@ static const struct file_operations lpfc_debugfs_op_lockstat = {
 };
 #endif
 
+#undef lpfc_debugfs_ras_log
+static const struct file_operations lpfc_debugfs_ras_log = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_debugfs_ras_log_open,
+	.llseek =       lpfc_debugfs_lseek,
+	.read =         lpfc_debugfs_read,
+	.release =      lpfc_debugfs_ras_log_release,
+};
+#endif
+
 #undef lpfc_debugfs_op_dumpHBASlim
 static const struct file_operations lpfc_debugfs_op_dumpHBASlim = {
 	.owner =	THIS_MODULE,
@@ -5457,7 +5558,6 @@ static const struct file_operations lpfc_idiag_op_extAcc = {
 	.release =      lpfc_idiag_cmd_release,
 };
-#endif
 
 /* lpfc_idiag_mbxacc_dump_bsg_mbox - idiag debugfs dump bsg mailbox command
  * @phba: Pointer to HBA context object.
@@ -5707,6 +5807,19 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
 			goto debug_failed;
 		}
 
+		/* RAS log */
+		snprintf(name, sizeof(name), "ras_log");
+		phba->debug_ras_log =
+			debugfs_create_file(name, 0644,
+					    phba->hba_debugfs_root,
+					    phba, &lpfc_debugfs_ras_log);
+		if (!phba->debug_ras_log) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+					 "6148 Cannot create debugfs"
+					 " ras_log\n");
+			goto debug_failed;
+		}
+
 		/* Setup hbqinfo */
 		snprintf(name, sizeof(name), "hbqinfo");
 		phba->debug_hbqinfo =
@@ -6117,6 +6230,9 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
 		debugfs_remove(phba->debug_hbqinfo); /* hbqinfo */
 		phba->debug_hbqinfo = NULL;
 
+		debugfs_remove(phba->debug_ras_log);
+		phba->debug_ras_log = NULL;
+
 #ifdef LPFC_HDWQ_LOCK_STAT
 		debugfs_remove(phba->debug_lockstat); /* lockstat */
 		phba->debug_lockstat = NULL;
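The new ras_log file snapshots the firmware log into a vmalloc'd buffer at open time and serves all reads from that copy, so the live DMA buffer list is only walked once per open. A generic debugfs sketch of the same snapshot-on-open shape; everything here is illustrative and not part of the driver:

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>

struct snap {
	char *buf;
	size_t len;
};

static int snap_open(struct inode *inode, struct file *file)
{
	struct snap *s = kzalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		return -ENOMEM;
	/* Capture the data once; later reads see a stable copy. */
	s->buf = kstrdup("snapshot taken at open time\n", GFP_KERNEL);
	if (!s->buf) {
		kfree(s);
		return -ENOMEM;
	}
	s->len = strlen(s->buf);
	file->private_data = s;
	return 0;
}

static ssize_t snap_read(struct file *file, char __user *ubuf,
			 size_t count, loff_t *ppos)
{
	struct snap *s = file->private_data;

	return simple_read_from_buffer(ubuf, count, ppos, s->buf, s->len);
}

static int snap_release(struct inode *inode, struct file *file)
{
	struct snap *s = file->private_data;

	kfree(s->buf);
	kfree(s);
	return 0;
}

static const struct file_operations snap_fops = {
	.owner = THIS_MODULE,
	.open = snap_open,
	.read = snap_read,
	.release = snap_release,
};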

View File

@@ -2236,6 +2236,7 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 	IOCB_t *irsp;
 	struct lpfc_nodelist *ndlp;
+	char *mode;
 
 	/* we pass cmdiocb to state machine which needs rspiocb as well */
 	cmdiocb->context_un.rsp_iocb = rspiocb;
@@ -2273,8 +2274,17 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 			goto out;
 		}
 
+		/* If we don't send GFT_ID to Fabric, a PRLI error
+		 * could be expected.
+		 */
+		if ((vport->fc_flag & FC_FABRIC) ||
+		    (vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH))
+			mode = KERN_ERR;
+		else
+			mode = KERN_INFO;
+
 		/* PRLI failed */
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+		lpfc_printf_vlog(vport, mode, LOG_ELS,
 				 "2754 PRLI failure DID:%06X Status:x%x/x%x, "
 				 "data: x%x\n",
 				 ndlp->nlp_DID, irsp->ulpStatus,
@@ -4291,6 +4301,11 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	irsp = &rspiocb->iocb;
 
+	if (!vport) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+				"3177 ELS response failed\n");
+		goto out;
+	}
 	if (cmdiocb->context_un.mbox)
 		mbox = cmdiocb->context_un.mbox;
 
@@ -4430,7 +4445,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		mempool_free(mbox, phba->mbox_mem_pool);
 	}
 out:
-	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+	if (ndlp && NLP_CHK_NODE_ACT(ndlp) && shost) {
 		spin_lock_irq(shost->host_lock);
 		ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
 		spin_unlock_irq(shost->host_lock);
@@ -5260,6 +5275,11 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport)
 			}
 		}
 	}
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+			 "6452 Discover PLOGI %d flag x%x\n",
+			 sentplogi, vport->fc_flag);
+
 	if (sentplogi) {
 		lpfc_set_disctmo(vport);
 	}
@@ -6455,7 +6475,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 	uint32_t payload_len, length, nportid, *cmd;
 	int rscn_cnt;
 	int rscn_id = 0, hba_id = 0;
-	int i;
+	int i, tmo;
 
 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
 	lp = (uint32_t *) pcmd->virt;
@@ -6561,6 +6581,13 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 		spin_lock_irq(shost->host_lock);
 		vport->fc_flag |= FC_RSCN_DEFERRED;
+
+		/* Restart disctmo if its already running */
+		if (vport->fc_flag & FC_DISC_TMO) {
+			tmo = ((phba->fc_ratov * 3) + 3);
+			mod_timer(&vport->fc_disctmo,
+				  jiffies + msecs_to_jiffies(1000 * tmo));
+		}
 		if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
 		    !(vport->fc_flag & FC_RSCN_DISCOVERY)) {
 			vport->fc_flag |= FC_RSCN_MODE;
@@ -6663,9 +6690,10 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport)
 	/* RSCN processed */
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
-			 "0215 RSCN processed Data: x%x x%x x%x x%x\n",
+			 "0215 RSCN processed Data: x%x x%x x%x x%x x%x x%x\n",
 			 vport->fc_flag, 0, vport->fc_rscn_id_cnt,
-			 vport->port_state);
+			 vport->port_state, vport->num_disc_nodes,
+			 vport->gidft_inp);
 
 	/* To process RSCN, first compare RSCN data with NameServer */
 	vport->fc_ns_retry = 0;
@@ -7986,20 +8014,22 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
 	struct lpfc_sli_ring *pring;
 	struct lpfc_iocbq *tmp_iocb, *piocb;
 	IOCB_t *cmd = NULL;
+	unsigned long iflags = 0;
 
 	lpfc_fabric_abort_vport(vport);
 
 	/*
 	 * For SLI3, only the hbalock is required.  But SLI4 needs to coordinate
 	 * with the ring insert operation.  Because lpfc_sli_issue_abort_iotag
 	 * ultimately grabs the ring_lock, the driver must splice the list into
 	 * a working list and release the locks before calling the abort.
 	 */
-	spin_lock_irq(&phba->hbalock);
+	spin_lock_irqsave(&phba->hbalock, iflags);
 	pring = lpfc_phba_elsring(phba);
 
 	/* Bail out if we've no ELS wq, like in PCI error recovery case. */
 	if (unlikely(!pring)) {
-		spin_unlock_irq(&phba->hbalock);
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
 		return;
 	}
@@ -8014,6 +8044,9 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
 		if (piocb->vport != vport)
 			continue;
 
+		if (piocb->iocb_flag & LPFC_DRIVER_ABORTED)
+			continue;
+
 		/* On the ELS ring we can have ELS_REQUESTs or
 		 * GEN_REQUESTs waiting for a response.
 		 */
@@ -8037,21 +8070,21 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
 	if (phba->sli_rev == LPFC_SLI_REV4)
 		spin_unlock(&pring->ring_lock);
-	spin_unlock_irq(&phba->hbalock);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
 
 	/* Abort each txcmpl iocb on aborted list and remove the dlist links. */
 	list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
-		spin_lock_irq(&phba->hbalock);
+		spin_lock_irqsave(&phba->hbalock, iflags);
 		list_del_init(&piocb->dlist);
 		lpfc_sli_issue_abort_iotag(phba, pring, piocb);
-		spin_unlock_irq(&phba->hbalock);
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
 	}
 	if (!list_empty(&abort_list))
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
 				 "3387 abort list for txq not empty\n");
 	INIT_LIST_HEAD(&abort_list);
 
-	spin_lock_irq(&phba->hbalock);
+	spin_lock_irqsave(&phba->hbalock, iflags);
 	if (phba->sli_rev == LPFC_SLI_REV4)
 		spin_lock(&pring->ring_lock);
@@ -8091,7 +8124,7 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
 	if (phba->sli_rev == LPFC_SLI_REV4)
 		spin_unlock(&pring->ring_lock);
spin_unlock_irq(&phba->hbalock); spin_unlock_irqrestore(&phba->hbalock, iflags);
/* Cancel all the IOCBs from the completions list */ /* Cancel all the IOCBs from the completions list */
lpfc_sli_cancel_iocbs(phba, &abort_list, lpfc_sli_cancel_iocbs(phba, &abort_list,
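The conversion above from spin_lock_irq() to spin_lock_irqsave() makes lpfc_els_flush_cmd() safe to call from contexts where interrupts may already be disabled, because the saved flags restore exactly the state the caller had. A minimal standalone sketch of the pattern (demo_lock and demo_flush are invented names for illustration, not lpfc code):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);	/* hypothetical lock for illustration */

static void demo_flush(void)
{
	unsigned long iflags;

	/* Save the caller's interrupt state rather than assuming
	 * interrupts were enabled, then restore it exactly on unlock.
	 */
	spin_lock_irqsave(&demo_lock, iflags);
	/* ... splice entries off a shared list into a working list ... */
	spin_unlock_irqrestore(&demo_lock, iflags);
}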


@@ -700,7 +700,10 @@ lpfc_work_done(struct lpfc_hba *phba)
 			if (!(phba->hba_flag & HBA_SP_QUEUE_EVT))
 				set_bit(LPFC_DATA_READY, &phba->data_flags);
 		} else {
-			if (phba->link_state >= LPFC_LINK_UP ||
+			/* Driver could have abort request completed in queue
+			 * when link goes down.  Allow for this transition.
+			 */
+			if (phba->link_state >= LPFC_LINK_DOWN ||
 			    phba->link_flag & LS_MDS_LOOPBACK) {
 				pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
 				lpfc_sli_handle_slow_ring_event(phba, pring,
@@ -1135,7 +1138,6 @@ void
 lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
 	struct lpfc_vport *vport = pmb->vport;
-	uint8_t bbscn = 0;

 	if (pmb->u.mb.mbxStatus)
 		goto out;
@@ -1162,17 +1164,11 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	/* Start discovery by sending a FLOGI. port_state is identically
 	 * LPFC_FLOGI while waiting for FLOGI cmpl
 	 */
-	if (vport->port_state != LPFC_FLOGI) {
-		if (phba->bbcredit_support && phba->cfg_enable_bbcr) {
-			bbscn = bf_get(lpfc_bbscn_def,
-				       &phba->sli4_hba.bbscn_params);
-			vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf;
-			vport->fc_sparam.cmn.bbRcvSizeMsb |= (bbscn << 4);
-		}
+	if (vport->port_state != LPFC_FLOGI)
 		lpfc_initial_flogi(vport);
-	} else if (vport->fc_flag & FC_PT2PT) {
+	else if (vport->fc_flag & FC_PT2PT)
 		lpfc_disc_start(vport);
-	}
 	return;

 out:
@@ -3456,8 +3452,8 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 				phba->pport->port_state, vport->fc_flag);
 		else if (attn_type == LPFC_ATT_UNEXP_WWPN)
 			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
-					"1313 Link Down UNEXP WWPN Event x%x received "
-					"Data: x%x x%x x%x x%x x%x\n",
+					"1313 Link Down Unexpected FA WWPN Event x%x "
+					"received Data: x%x x%x x%x x%x x%x\n",
 					la->eventTag, phba->fc_eventTag,
 					phba->pport->port_state, vport->fc_flag,
 					bf_get(lpfc_mbx_read_top_mm, la),
@@ -4046,7 +4042,7 @@ out:
 		ndlp->nlp_flag |= NLP_RPI_REGISTERED;
 		ndlp->nlp_type |= NLP_FABRIC;
 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
-		lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
 				 "0003 rpi:%x DID:%x flg:%x %d map%x x%px\n",
 				 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
 				 kref_read(&ndlp->kref),
@@ -4575,8 +4571,10 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	return ndlp;

 free_rpi:
-	if (phba->sli_rev == LPFC_SLI_REV4)
+	if (phba->sli_rev == LPFC_SLI_REV4) {
 		lpfc_sli4_free_rpi(vport->phba, rpi);
+		ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
+	}
 	return NULL;
 }

@@ -4835,11 +4833,50 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 		if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
 			lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
 			ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
+			ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
 		}
 		ndlp->nlp_flag &= ~NLP_UNREG_INP;
 	}
 }

+/*
+ * Sets the mailbox completion handler to be used for the
+ * unreg_rpi command. The handler varies based on the state of
+ * the port and what will be happening to the rpi next.
+ */
+static void
+lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport,
+	struct lpfc_nodelist *ndlp, LPFC_MBOXQ_t *mbox)
+{
+	unsigned long iflags;
+
+	if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
+		mbox->ctx_ndlp = ndlp;
+		mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
+
+	} else if (phba->sli_rev == LPFC_SLI_REV4 &&
+		   (!(vport->load_flag & FC_UNLOADING)) &&
+		   (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
+				      LPFC_SLI_INTF_IF_TYPE_2) &&
+		   (kref_read(&ndlp->kref) > 0)) {
+		mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
+		mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr;
+	} else {
+		if (vport->load_flag & FC_UNLOADING) {
+			if (phba->sli_rev == LPFC_SLI_REV4) {
+				spin_lock_irqsave(&vport->phba->ndlp_lock,
+						  iflags);
+				ndlp->nlp_flag |= NLP_RELEASE_RPI;
+				spin_unlock_irqrestore(&vport->phba->ndlp_lock,
+						       iflags);
+			}
+			lpfc_nlp_get(ndlp);
+		}
+		mbox->ctx_ndlp = ndlp;
+		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+	}
+}
+
 /*
  * Free rpi associated with LPFC_NODELIST entry.
  * This routine is called from lpfc_freenode(), when we are removing
@@ -4860,7 +4897,8 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	if (ndlp->nlp_flag & NLP_RPI_REGISTERED ||
 	    ndlp->nlp_flag & NLP_REG_LOGIN_SEND) {
 		if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
-			lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+			lpfc_printf_vlog(vport, KERN_INFO,
+					 LOG_NODE | LOG_DISCOVERY,
 					 "3366 RPI x%x needs to be "
 					 "unregistered nlp_flag x%x "
 					 "did x%x\n",
@@ -4871,7 +4909,8 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 		 * no need to queue up another one.
 		 */
 		if (ndlp->nlp_flag & NLP_UNREG_INP) {
-			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+			lpfc_printf_vlog(vport, KERN_INFO,
+					 LOG_NODE | LOG_DISCOVERY,
 					 "1436 unreg_rpi SKIP UNREG x%x on "
 					 "NPort x%x deferred x%x  flg x%x "
 					 "Data: x%px\n",
@@ -4890,39 +4929,19 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 			lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
 			mbox->vport = vport;
-			if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
-				mbox->ctx_ndlp = ndlp;
-				mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
-			} else {
-				if (phba->sli_rev == LPFC_SLI_REV4 &&
-				    (!(vport->load_flag & FC_UNLOADING)) &&
-				    (bf_get(lpfc_sli_intf_if_type,
-				     &phba->sli4_hba.sli_intf) >=
-				     LPFC_SLI_INTF_IF_TYPE_2) &&
-				    (kref_read(&ndlp->kref) > 0)) {
-					mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
-					mbox->mbox_cmpl =
-						lpfc_sli4_unreg_rpi_cmpl_clr;
-					/*
-					 * accept PLOGIs after unreg_rpi_cmpl
-					 */
-					acc_plogi = 0;
-				} else if (vport->load_flag & FC_UNLOADING) {
-					mbox->ctx_ndlp = NULL;
-					mbox->mbox_cmpl =
-						lpfc_sli_def_mbox_cmpl;
-				} else {
-					mbox->ctx_ndlp = ndlp;
-					mbox->mbox_cmpl =
-						lpfc_sli_def_mbox_cmpl;
-				}
-			}
+			lpfc_set_unreg_login_mbx_cmpl(phba, vport, ndlp, mbox);
+			if (mbox->mbox_cmpl == lpfc_sli4_unreg_rpi_cmpl_clr)
+				/*
+				 * accept PLOGIs after unreg_rpi_cmpl
+				 */
+				acc_plogi = 0;
 			if (((ndlp->nlp_DID & Fabric_DID_MASK) !=
 			    Fabric_DID_MASK) &&
 			    (!(vport->fc_flag & FC_OFFLINE_MODE)))
 				ndlp->nlp_flag |= NLP_UNREG_INP;

-			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+			lpfc_printf_vlog(vport, KERN_INFO,
+					 LOG_NODE | LOG_DISCOVERY,
 					 "1433 unreg_rpi UNREG x%x on "
 					 "NPort x%x deferred flg x%x "
 					 "Data:x%px\n",
@@ -5057,6 +5076,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	struct lpfc_hba  *phba = vport->phba;
 	LPFC_MBOXQ_t *mb, *nextmb;
 	struct lpfc_dmabuf *mp;
+	unsigned long iflags;

 	/* Cleanup node for NPort <nlp_DID> */
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
@@ -5138,8 +5158,20 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	lpfc_cleanup_vports_rrqs(vport, ndlp);
 	if (phba->sli_rev == LPFC_SLI_REV4)
 		ndlp->nlp_flag |= NLP_RELEASE_RPI;
-	lpfc_unreg_rpi(vport, ndlp);
-
+	if (!lpfc_unreg_rpi(vport, ndlp)) {
+		/* Clean up unregistered and non freed rpis */
+		if ((ndlp->nlp_flag & NLP_RELEASE_RPI) &&
+		    !(ndlp->nlp_rpi == LPFC_RPI_ALLOC_ERROR)) {
+			lpfc_sli4_free_rpi(vport->phba,
+					   ndlp->nlp_rpi);
+			spin_lock_irqsave(&vport->phba->ndlp_lock,
+					  iflags);
+			ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
+			ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
+			spin_unlock_irqrestore(&vport->phba->ndlp_lock,
+					       iflags);
+		}
+	}
 	return 0;
 }

@@ -5165,8 +5197,10 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 		/* For this case we need to cleanup the default rpi
 		 * allocated by the firmware.
 		 */
-		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
-				 "0005 rpi:%x DID:%x flg:%x %d map:%x x%px\n",
+		lpfc_printf_vlog(vport, KERN_INFO,
+				 LOG_NODE | LOG_DISCOVERY,
+				 "0005 Cleanup Default rpi:x%x DID:x%x flg:x%x "
+				 "ref %d map:x%x ndlp x%px\n",
 				 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
 				 kref_read(&ndlp->kref),
 				 ndlp->nlp_usg_map, ndlp);
@@ -5203,8 +5237,9 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 		 */
 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
 				 "0940 removed node x%px DID x%x "
-				 " rport not null x%px\n",
-				 ndlp, ndlp->nlp_DID, ndlp->rport);
+				 "rpi %d rport not null x%px\n",
+				 ndlp, ndlp->nlp_DID, ndlp->nlp_rpi,
+				 ndlp->rport);
 		rport = ndlp->rport;
 		rdata = rport->dd_data;
 		rdata->pnode = NULL;
@@ -5362,6 +5397,13 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
 		if (!ndlp)
 			return NULL;
 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+				 "6453 Setup New Node 2B_DISC x%x "
+				 "Data:x%x x%x x%x\n",
+				 ndlp->nlp_DID, ndlp->nlp_flag,
+				 ndlp->nlp_state, vport->fc_flag);
+
 		spin_lock_irq(shost->host_lock);
 		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
 		spin_unlock_irq(shost->host_lock);
@@ -5375,6 +5417,12 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
 					 "0014 Could not enable ndlp\n");
 			return NULL;
 		}
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+				 "6454 Setup Enabled Node 2B_DISC x%x "
+				 "Data:x%x x%x x%x\n",
+				 ndlp->nlp_DID, ndlp->nlp_flag,
+				 ndlp->nlp_state, vport->fc_flag);
+
 		spin_lock_irq(shost->host_lock);
 		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
 		spin_unlock_irq(shost->host_lock);
@@ -5394,6 +5442,12 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
 			 */
 			lpfc_cancel_retry_delay_tmo(vport, ndlp);

+			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+					 "6455 Setup RSCN Node 2B_DISC x%x "
+					 "Data:x%x x%x x%x\n",
+					 ndlp->nlp_DID, ndlp->nlp_flag,
+					 ndlp->nlp_state, vport->fc_flag);
+
 			/* NVME Target mode waits until rport is known to be
 			 * impacted by the RSCN before it transitions.  No
 			 * active management - just go to NPR provided the
@@ -5405,15 +5459,32 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
 			/* If we've already received a PLOGI from this NPort
 			 * we don't need to try to discover it again.
 			 */
-			if (ndlp->nlp_flag & NLP_RCV_PLOGI)
+			if (ndlp->nlp_flag & NLP_RCV_PLOGI &&
+			    !(ndlp->nlp_type &
+			     (NLP_FCP_TARGET | NLP_NVME_TARGET)))
 				return NULL;

+			ndlp->nlp_prev_state = ndlp->nlp_state;
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+
 			spin_lock_irq(shost->host_lock);
 			ndlp->nlp_flag |= NLP_NPR_2B_DISC;
 			spin_unlock_irq(shost->host_lock);
-		} else
+		} else {
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+					 "6456 Skip Setup RSCN Node x%x "
+					 "Data:x%x x%x x%x\n",
+					 ndlp->nlp_DID, ndlp->nlp_flag,
+					 ndlp->nlp_state, vport->fc_flag);
 			ndlp = NULL;
+		}
 	} else {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+				 "6457 Setup Active Node 2B_DISC x%x "
+				 "Data:x%x x%x x%x\n",
+				 ndlp->nlp_DID, ndlp->nlp_flag,
+				 ndlp->nlp_state, vport->fc_flag);
+
 		/* If the initiator received a PLOGI from this NPort or if the
 		 * initiator is already in the process of discovery on it,
 		 * there's no need to try to discover it again.
@@ -5565,10 +5636,10 @@ lpfc_disc_start(struct lpfc_vport *vport)

 	/* Start Discovery state <hba_state> */
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
-			 "0202 Start Discovery hba state x%x "
-			 "Data: x%x x%x x%x\n",
+			 "0202 Start Discovery port state x%x "
+			 "flg x%x Data: x%x x%x x%x\n",
 			 vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
-			 vport->fc_adisc_cnt);
+			 vport->fc_adisc_cnt, vport->fc_npr_cnt);

 	/* First do ADISCs - if any */
 	num_sent = lpfc_els_disc_adisc(vport);
@@ -5996,7 +6067,7 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
 	ndlp->nlp_type |= NLP_FABRIC;
 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
-	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
			 "0004 rpi:%x DID:%x flg:%x %d map:%x x%px\n",
			 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
			 kref_read(&ndlp->kref),
@@ -6185,12 +6256,12 @@ lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did)
 	INIT_LIST_HEAD(&ndlp->nlp_listp);
 	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
 		ndlp->nlp_rpi = rpi;
-		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
-				 "0007 rpi:%x DID:%x flg:%x refcnt:%d "
-				 "map:%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID,
-				 ndlp->nlp_flag,
-				 kref_read(&ndlp->kref),
-				 ndlp->nlp_usg_map, ndlp);
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
+				 "0007 Init New ndlp x%px, rpi:x%x DID:%x "
+				 "flg:x%x refcnt:%d map:x%x\n",
+				 ndlp, ndlp->nlp_rpi, ndlp->nlp_DID,
+				 ndlp->nlp_flag, kref_read(&ndlp->kref),
+				 ndlp->nlp_usg_map);

 		ndlp->active_rrqs_xri_bitmap =
 				mempool_alloc(vport->phba->active_rrq_pool,
@@ -6419,7 +6490,8 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
 				goto out;
 			} else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
 				ret = 1;
-				lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+				lpfc_printf_log(phba, KERN_INFO,
+						LOG_NODE | LOG_DISCOVERY,
 						"2624 RPI %x DID %x flag %x "
 						"still logged in\n",
 						ndlp->nlp_rpi, ndlp->nlp_DID,
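The new lpfc_set_unreg_login_mbx_cmpl() collapses the nested handler selection that lpfc_unreg_rpi() used to open-code, and the caller then keys follow-up behavior off which completion routine was installed. A reduced sketch of that choose-then-inspect dispatch pattern (all demo_* names are invented for illustration, not lpfc code):

struct demo_mbox {
	void (*cmpl)(struct demo_mbox *mbox);	/* completion callback */
};

static void demo_cmpl_default(struct demo_mbox *mbox) { /* ... */ }
static void demo_cmpl_clear(struct demo_mbox *mbox) { /* ... */ }

/* Choose the completion handler in exactly one place. */
static void demo_set_cmpl(struct demo_mbox *mbox, int clearing)
{
	mbox->cmpl = clearing ? demo_cmpl_clear : demo_cmpl_default;
}

static void demo_submit(struct demo_mbox *mbox, int clearing)
{
	demo_set_cmpl(mbox, clearing);
	if (mbox->cmpl == demo_cmpl_clear) {
		/* caller-side follow-up keyed off the installed pointer,
		 * like clearing acc_plogi in the hunk above
		 */
	}
}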


@@ -210,7 +210,6 @@ struct lpfc_sli_intf {
 #define LPFC_MAX_IMAX		5000000
 #define LPFC_DEF_IMAX		0

-#define LPFC_IMAX_THRESHOLD	1000
 #define LPFC_MAX_AUTO_EQ_DELAY	120
 #define LPFC_EQ_DELAY_STEP	15
 #define LPFC_EQD_ISR_TRIGGER	20000
@@ -2320,6 +2319,7 @@ struct lpfc_mbx_redisc_fcf_tbl {
 #define ADD_STATUS_OPERATION_ALREADY_ACTIVE	0x67
 #define ADD_STATUS_FW_NOT_SUPPORTED		0xEB
 #define ADD_STATUS_INVALID_REQUEST		0x4B
+#define ADD_STATUS_FW_DOWNLOAD_HW_DISABLED	0x58

 struct lpfc_mbx_sli4_config {
 	struct mbox_header header;
@@ -2809,6 +2809,15 @@ struct lpfc_mbx_read_config {
 #define lpfc_mbx_rd_conf_trunk_SHIFT		12
 #define lpfc_mbx_rd_conf_trunk_MASK		0x0000000F
 #define lpfc_mbx_rd_conf_trunk_WORD		word2
+#define lpfc_mbx_rd_conf_pt_SHIFT		20
+#define lpfc_mbx_rd_conf_pt_MASK		0x00000003
+#define lpfc_mbx_rd_conf_pt_WORD		word2
+#define lpfc_mbx_rd_conf_tf_SHIFT		22
+#define lpfc_mbx_rd_conf_tf_MASK		0x00000001
+#define lpfc_mbx_rd_conf_tf_WORD		word2
+#define lpfc_mbx_rd_conf_ptv_SHIFT		23
+#define lpfc_mbx_rd_conf_ptv_MASK		0x00000001
+#define lpfc_mbx_rd_conf_ptv_WORD		word2
 #define lpfc_mbx_rd_conf_topology_SHIFT		24
 #define lpfc_mbx_rd_conf_topology_MASK		0x000000FF
 #define lpfc_mbx_rd_conf_topology_WORD		word2
@@ -3479,6 +3488,9 @@ struct lpfc_sli4_parameters {
 #define cfg_bv1s_SHIFT				10
 #define cfg_bv1s_MASK				0x00000001
 #define cfg_bv1s_WORD				word19
+#define cfg_pvl_SHIFT				13
+#define cfg_pvl_MASK				0x00000001
+#define cfg_pvl_WORD				word19

 #define cfg_nsler_SHIFT				12
 #define cfg_nsler_MASK				0x00000001
@@ -3518,6 +3530,7 @@ struct lpfc_sli4_parameters {
 #define LPFC_SET_UE_RECOVERY		0x10
 #define LPFC_SET_MDS_DIAGS		0x11
+#define LPFC_SET_DUAL_DUMP		0x1e

 struct lpfc_mbx_set_feature {
 	struct mbox_header header;
 	uint32_t feature;
@@ -3532,6 +3545,15 @@ struct lpfc_mbx_set_feature {
 #define lpfc_mbx_set_feature_mds_deep_loopbk_SHIFT  1
 #define lpfc_mbx_set_feature_mds_deep_loopbk_MASK   0x00000001
 #define lpfc_mbx_set_feature_mds_deep_loopbk_WORD   word6
+#define lpfc_mbx_set_feature_dd_SHIFT		0
+#define lpfc_mbx_set_feature_dd_MASK		0x00000001
+#define lpfc_mbx_set_feature_dd_WORD		word6
+#define lpfc_mbx_set_feature_ddquery_SHIFT	1
+#define lpfc_mbx_set_feature_ddquery_MASK	0x00000001
+#define lpfc_mbx_set_feature_ddquery_WORD	word6
+#define LPFC_DISABLE_DUAL_DUMP		0
+#define LPFC_ENABLE_DUAL_DUMP		1
+#define LPFC_QUERY_OP_DUAL_DUMP		2
 	uint32_t word7;
 #define lpfc_mbx_set_feature_UERP_SHIFT 0
 #define lpfc_mbx_set_feature_UERP_MASK  0x0000ffff
@@ -4261,6 +4283,8 @@ struct lpfc_acqe_sli {
 #define LPFC_SLI_EVENT_TYPE_DIAG_DUMP		0x5
 #define LPFC_SLI_EVENT_TYPE_MISCONFIGURED	0x9
 #define LPFC_SLI_EVENT_TYPE_REMOTE_DPORT	0xA
+#define LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN	0xF
+#define LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE	0x10
 };

 /*
@@ -4659,6 +4683,7 @@ struct create_xri_wqe {
 	uint32_t rsvd_12_15[4];         /* word 12-15 */
 };

+#define INHIBIT_ABORT 1
 #define T_REQUEST_TAG 3
 #define T_XRI_TAG 1

@@ -4807,8 +4832,8 @@ union lpfc_wqe128 {
 	struct send_frame_wqe send_frame;
 };

-#define MAGIC_NUMER_G6 0xFEAA0003
-#define MAGIC_NUMER_G7 0xFEAA0005
+#define MAGIC_NUMBER_G6 0xFEAA0003
+#define MAGIC_NUMBER_G7 0xFEAA0005

 struct lpfc_grp_hdr {
 	uint32_t size;
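The added *_SHIFT/_MASK/_WORD triplets (pt, tf, ptv, pvl, dd, ddquery) follow this header's existing convention: each named field lives in one word of a mailbox structure and is read and written by shift-and-mask helpers. A simplified sketch of how such triplets are typically consumed (the demo_* names are illustrative, not the driver's actual bf_set()/bf_get() definitions):

#include <stdint.h>

#define DEMO_FIELD_SHIFT	20
#define DEMO_FIELD_MASK		0x00000003

/* Extract the 2-bit field occupying bits 21:20 of a word. */
static inline uint32_t demo_get(uint32_t word)
{
	return (word >> DEMO_FIELD_SHIFT) & DEMO_FIELD_MASK;
}

/* Write the field without disturbing the rest of the word. */
static inline uint32_t demo_set(uint32_t word, uint32_t val)
{
	word &= ~(DEMO_FIELD_MASK << DEMO_FIELD_SHIFT);
	word |= (val & DEMO_FIELD_MASK) << DEMO_FIELD_SHIFT;
	return word;
}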

[file diff suppressed because it is too large]


@@ -46,6 +46,23 @@
 #define LOG_NVME_IOERR	0x00800000	/* NVME IO Error events. */
 #define LOG_ALL_MSG	0xffffffff	/* LOG all messages */

+/* generate message by verbose log setting or severity */
+#define lpfc_vlog_msg(vport, level, mask, fmt, arg...) \
+{ if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '4')) \
+	dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \
+		   fmt, (vport)->phba->brd_no, vport->vpi, ##arg); }
+
+#define lpfc_log_msg(phba, level, mask, fmt, arg...) \
+do { \
+	{ uint32_t log_verbose = (phba)->pport ? \
+				 (phba)->pport->cfg_log_verbose : \
+				 (phba)->cfg_log_verbose; \
+	  if (((mask) & log_verbose) || (level[1] <= '4')) \
+		dev_printk(level, &((phba)->pcidev)->dev, "%d:" \
+			   fmt, phba->brd_no, ##arg); \
+	} \
+} while (0)
+
 #define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
 do { \
 	{ if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '3')) \
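The `level[1] <= '4'` test in these macros works because kernel loglevel strings such as KERN_ERR expand to an SOH byte followed by the severity digit, so index 1 is the digit itself; anything at warning severity ('4') or stronger bypasses the verbose-mask filter. A standalone userspace sketch of the same gate (demo_* names are invented for illustration):

#include <stdio.h>

#define DEMO_SOH	"\001"
#define DEMO_ERR	DEMO_SOH "3"	/* severity digit sits at level[1] */
#define DEMO_INFO	DEMO_SOH "6"

/* Emit when the subsystem mask is enabled, or always at warn or worse. */
#define demo_log(level, mask, verbose, fmt, ...)		\
do {								\
	if (((mask) & (verbose)) || (level)[1] <= '4')		\
		printf(fmt, ##__VA_ARGS__);			\
} while (0)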


@@ -515,6 +515,7 @@ lpfc_init_link(struct lpfc_hba * phba,
 	if ((phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
 	     phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC) &&
+	    !(phba->sli4_hba.pc_sli4_params.pls) &&
 	    mb->un.varInitLnk.link_flags & FLAGS_TOPOLOGY_MODE_LOOP) {
 		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
 		phba->cfg_topology = FLAGS_TOPOLOGY_MODE_PT_PT;


@@ -230,9 +230,6 @@ lpfc_mem_free(struct lpfc_hba *phba)
 	dma_pool_destroy(phba->lpfc_hrb_pool);
 	phba->lpfc_hrb_pool = NULL;

-	dma_pool_destroy(phba->txrdy_payload_pool);
-	phba->txrdy_payload_pool = NULL;
-
 	dma_pool_destroy(phba->lpfc_hbq_pool);
 	phba->lpfc_hbq_pool = NULL;


@@ -279,6 +279,55 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 	lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
 }

+/* lpfc_defer_pt2pt_acc - Complete SLI3 pt2pt processing on link up
+ * @phba: pointer to lpfc hba data structure.
+ * @link_mbox: pointer to CONFIG_LINK mailbox object
+ *
+ * This routine is only called if we are SLI3, direct connect pt2pt
+ * mode and the remote NPort issues the PLOGI after link up.
+ */
+static void
+lpfc_defer_pt2pt_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *link_mbox)
+{
+	LPFC_MBOXQ_t *login_mbox;
+	MAILBOX_t *mb = &link_mbox->u.mb;
+	struct lpfc_iocbq *save_iocb;
+	struct lpfc_nodelist *ndlp;
+	int rc;
+
+	ndlp = link_mbox->ctx_ndlp;
+	login_mbox = link_mbox->context3;
+	save_iocb = login_mbox->context3;
+	link_mbox->context3 = NULL;
+	login_mbox->context3 = NULL;
+
+	/* Check for CONFIG_LINK error */
+	if (mb->mbxStatus) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+				"4575 CONFIG_LINK fails pt2pt discovery: %x\n",
+				mb->mbxStatus);
+		mempool_free(login_mbox, phba->mbox_mem_pool);
+		mempool_free(link_mbox, phba->mbox_mem_pool);
+		lpfc_sli_release_iocbq(phba, save_iocb);
+		return;
+	}
+
+	/* Now that CONFIG_LINK completed, and our SID is configured,
+	 * we can now proceed with sending the PLOGI ACC.
+	 */
+	rc = lpfc_els_rsp_acc(link_mbox->vport, ELS_CMD_PLOGI,
+			      save_iocb, ndlp, login_mbox);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+				"4576 PLOGI ACC fails pt2pt discovery: %x\n",
+				rc);
+		mempool_free(login_mbox, phba->mbox_mem_pool);
+	}
+
+	mempool_free(link_mbox, phba->mbox_mem_pool);
+	lpfc_sli_release_iocbq(phba, save_iocb);
+}
+
 static int
 lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	       struct lpfc_iocbq *cmdiocb)
@@ -291,10 +340,12 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	IOCB_t *icmd;
 	struct serv_parm *sp;
 	uint32_t ed_tov;
-	LPFC_MBOXQ_t *mbox;
+	LPFC_MBOXQ_t *link_mbox;
+	LPFC_MBOXQ_t *login_mbox;
+	struct lpfc_iocbq *save_iocb;
 	struct ls_rjt stat;
 	uint32_t vid, flag;
-	int rc;
+	int rc, defer_acc;

 	memset(&stat, 0, sizeof (struct ls_rjt));
 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
@@ -343,6 +394,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	else
 		ndlp->nlp_fcp_info |= CLASS3;

+	defer_acc = 0;
 	ndlp->nlp_class_sup = 0;
 	if (sp->cls1.classValid)
 		ndlp->nlp_class_sup |= FC_COS_CLASS1;
@@ -354,7 +406,6 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		ndlp->nlp_class_sup |= FC_COS_CLASS4;
 	ndlp->nlp_maxframe =
 		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
-
 	/* if already logged in, do implicit logout */
 	switch (ndlp->nlp_state) {
 	case NLP_STE_NPR_NODE:
@@ -396,6 +447,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
 	ndlp->nlp_flag &= ~NLP_FIRSTBURST;

+	login_mbox = NULL;
+	link_mbox = NULL;
+	save_iocb = NULL;
+
 	/* Check for Nport to NPort pt2pt protocol */
 	if ((vport->fc_flag & FC_PT2PT) &&
 	    !(vport->fc_flag & FC_PT2PT_PLOGI)) {
@@ -423,17 +478,22 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		if (phba->sli_rev == LPFC_SLI_REV4)
 			lpfc_issue_reg_vfi(vport);
 		else {
-			mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-			if (mbox == NULL)
+			defer_acc = 1;
+			link_mbox = mempool_alloc(phba->mbox_mem_pool,
+						  GFP_KERNEL);
+			if (!link_mbox)
 				goto out;
-			lpfc_config_link(phba, mbox);
-			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-			mbox->vport = vport;
-			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
-			if (rc == MBX_NOT_FINISHED) {
-				mempool_free(mbox, phba->mbox_mem_pool);
+			lpfc_config_link(phba, link_mbox);
+			link_mbox->mbox_cmpl = lpfc_defer_pt2pt_acc;
+			link_mbox->vport = vport;
+			link_mbox->ctx_ndlp = ndlp;
+
+			save_iocb = lpfc_sli_get_iocbq(phba);
+			if (!save_iocb)
 				goto out;
-			}
+			/* Save info from cmd IOCB used in rsp */
+			memcpy((uint8_t *)save_iocb, (uint8_t *)cmdiocb,
+			       sizeof(struct lpfc_iocbq));
 		}

 		lpfc_can_disctmo(vport);
@@ -448,8 +508,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 			ndlp->nlp_flag |= NLP_SUPPRESS_RSP;
 	}

-	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-	if (!mbox)
+	login_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!login_mbox)
 		goto out;

 	/* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
@@ -457,21 +517,19 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		lpfc_unreg_rpi(vport, ndlp);

 	rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
-			  (uint8_t *) sp, mbox, ndlp->nlp_rpi);
-	if (rc) {
-		mempool_free(mbox, phba->mbox_mem_pool);
+			  (uint8_t *)sp, login_mbox, ndlp->nlp_rpi);
+	if (rc)
 		goto out;
-	}

 	/* ACC PLOGI rsp command needs to execute first,
-	 * queue this mbox command to be processed later.
+	 * queue this login_mbox command to be processed later.
 	 */
-	mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
+	login_mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
 	/*
-	 * mbox->ctx_ndlp = lpfc_nlp_get(ndlp) deferred until mailbox
+	 * login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp) deferred until mailbox
 	 * command issued in lpfc_cmpl_els_acc().
 	 */
-	mbox->vport = vport;
+	login_mbox->vport = vport;
 	spin_lock_irq(shost->host_lock);
 	ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
 	spin_unlock_irq(shost->host_lock);
@@ -484,8 +542,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	 * single discovery thread, this will cause a huge delay in
 	 * discovery. Also this will cause multiple state machines
 	 * running in parallel for this node.
+	 * This only applies to a fabric environment.
 	 */
-	if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
+	if ((ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) &&
+	    (vport->fc_flag & FC_FABRIC)) {
 		/* software abort outstanding PLOGI */
 		lpfc_els_abort(phba, ndlp);
 	}
@@ -504,16 +564,47 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
 		stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
 		rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
-					 ndlp, mbox);
+					 ndlp, login_mbox);
 		if (rc)
-			mempool_free(mbox, phba->mbox_mem_pool);
+			mempool_free(login_mbox, phba->mbox_mem_pool);
 		return 1;
 	}
-	rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
+	if (defer_acc) {
+		/* So the order here should be:
+		 * Issue CONFIG_LINK mbox
+		 * CONFIG_LINK cmpl
+		 * Issue PLOGI ACC
+		 * PLOGI ACC cmpl
+		 * Issue REG_LOGIN mbox
+		 */
+
+		/* Save the REG_LOGIN mbox for and rcv IOCB copy later */
+		link_mbox->context3 = login_mbox;
+		login_mbox->context3 = save_iocb;
+
+		/* Start the ball rolling by issuing CONFIG_LINK here */
+		rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT);
+		if (rc == MBX_NOT_FINISHED)
+			goto out;
+		return 1;
+	}
+
+	rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, login_mbox);
 	if (rc)
-		mempool_free(mbox, phba->mbox_mem_pool);
+		mempool_free(login_mbox, phba->mbox_mem_pool);
 	return 1;
 out:
+	if (defer_acc)
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+				"4577 pt2pt discovery failure: %p %p %p\n",
+				save_iocb, link_mbox, login_mbox);
+	if (save_iocb)
+		lpfc_sli_release_iocbq(phba, save_iocb);
+	if (link_mbox)
+		mempool_free(link_mbox, phba->mbox_mem_pool);
+	if (login_mbox)
+		mempool_free(login_mbox, phba->mbox_mem_pool);
+
 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
 	stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
@@ -2030,7 +2121,9 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		if (bf_get_be32(prli_init, nvpr))
 			ndlp->nlp_type |= NLP_NVME_INITIATOR;

-		if (phba->nsler && bf_get_be32(prli_nsler, nvpr))
+		if (phba->nsler && bf_get_be32(prli_nsler, nvpr) &&
+		    bf_get_be32(prli_conf, nvpr))
 			ndlp->nlp_nvme_info |= NLP_NVME_NSLER;
 		else
 			ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER;
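The deferred pt2pt path above threads state through the mailbox context3 pointers so that each completion launches the next step: CONFIG_LINK completes, its handler (lpfc_defer_pt2pt_acc) sends the PLOGI ACC, and the ACC in turn carries the REG_LOGIN mailbox. Condensed from the hunks above, the hand-off looks roughly like:

	/* Stash the follow-on work where the completion can find it */
	link_mbox->context3 = login_mbox;	/* REG_LOGIN to run later */
	login_mbox->context3 = save_iocb;	/* copy of the received PLOGI */

	/* Kick off step one; lpfc_defer_pt2pt_acc() runs on completion */
	rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT);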


@@ -195,6 +195,46 @@ lpfc_nvme_cmd_template(void)
 	/* Word 12, 13, 14, 15 - is zero */
 }

+/**
+ * lpfc_nvme_prep_abort_wqe - set up 'abort' work queue entry.
+ * @pwqeq: Pointer to command iocb.
+ * @xritag: Tag that uniqely identifies the local exchange resource.
+ * @opt: Option bits -
+ *		bit 0 = inhibit sending abts on the link
+ *
+ * This function is called with hbalock held.
+ **/
+void
+lpfc_nvme_prep_abort_wqe(struct lpfc_iocbq *pwqeq, u16 xritag, u8 opt)
+{
+	union lpfc_wqe128 *wqe = &pwqeq->wqe;
+
+	/* WQEs are reused.  Clear stale data and set key fields to
+	 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
+	 */
+	memset(wqe, 0, sizeof(*wqe));
+
+	if (opt & INHIBIT_ABORT)
+		bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
+	/* Abort specified xri tag, with the mask deliberately zeroed */
+	bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
+
+	bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
+
+	/* Abort the IO associated with this outstanding exchange ID. */
+	wqe->abort_cmd.wqe_com.abort_tag = xritag;
+
+	/* iotag for the wqe completion. */
+	bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, pwqeq->iotag);
+
+	bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
+	bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
+
+	bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
+	bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);
+	bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+}
+
 /**
  * lpfc_nvme_create_queue -
  * @lpfc_pnvme: Pointer to the driver's nvme instance data
@@ -1791,7 +1831,6 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
 	struct lpfc_iocbq *abts_buf;
 	struct lpfc_iocbq *nvmereq_wqe;
 	struct lpfc_nvme_fcpreq_priv *freqpriv;
-	union lpfc_wqe128 *abts_wqe;
 	unsigned long flags;
 	int ret_val;

@@ -1912,37 +1951,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
 	/* Ready - mark outstanding as aborted by driver. */
 	nvmereq_wqe->iocb_flag |= LPFC_DRIVER_ABORTED;

-	/* Complete prepping the abort wqe and issue to the FW. */
-	abts_wqe = &abts_buf->wqe;
-
-	/* WQEs are reused. Clear stale data and set key fields to
-	 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
-	 */
-	memset(abts_wqe, 0, sizeof(*abts_wqe));
-	bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
-
-	/* word 7 */
-	bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
-	bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
-	       nvmereq_wqe->iocb.ulpClass);
-
-	/* word 8 - tell the FW to abort the IO associated with this
-	 * outstanding exchange ID.
-	 */
-	abts_wqe->abort_cmd.wqe_com.abort_tag = nvmereq_wqe->sli4_xritag;
-
-	/* word 9 - this is the iotag for the abts_wqe completion. */
-	bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
-	       abts_buf->iotag);
-
-	/* word 10 */
-	bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
-	bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
-
-	/* word 11 */
-	bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
-	bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
-	bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+	lpfc_nvme_prep_abort_wqe(abts_buf, nvmereq_wqe->sli4_xritag, 0);

 	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
 	abts_buf->iocb_flag |= LPFC_IO_NVME;
@@ -2084,7 +2093,7 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd)
 	lpfc_ncmd->flags &= ~LPFC_SBUF_BUMP_QDEPTH;

 	qp = lpfc_ncmd->hdwq;
-	if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) {
+	if (unlikely(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
 				"6310 XB release deferred for "
 				"ox_id x%x on reqtag x%x\n",
@@ -2139,12 +2148,10 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
 	 */
 	lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;

-	/* Advertise how many hw queues we support based on fcp_io_sched */
-	if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ)
-		lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue;
-	else
-		lpfc_nvme_template.max_hw_queues =
-			phba->sli4_hba.num_present_cpu;
+	/* Advertise how many hw queues we support based on cfg_hdw_queue,
+	 * which will not exceed cpu count.
+	 */
+	lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue;

 	if (!IS_ENABLED(CONFIG_NVME_FC))
 		return ret;
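With the WQE construction centralized in lpfc_nvme_prep_abort_wqe(), the initiator abort path shown above reduces to marking the request, building the ABTS WQE, and queueing it on the WQ of the command being aborted; condensed from the hunk:

	/* Mark outstanding as aborted by the driver */
	nvmereq_wqe->iocb_flag |= LPFC_DRIVER_ABORTED;

	/* Build the abort WQE; bit 0 of opt would inhibit the on-wire ABTS */
	lpfc_nvme_prep_abort_wqe(abts_buf, nvmereq_wqe->sli4_xritag, 0);

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abts_buf->iocb_flag |= LPFC_IO_NVME;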


@@ -378,13 +378,6 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
 	int cpu;
 	unsigned long iflag;

-	if (ctxp->txrdy) {
-		dma_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
-			      ctxp->txrdy_phys);
-		ctxp->txrdy = NULL;
-		ctxp->txrdy_phys = 0;
-	}
-
 	if (ctxp->state == LPFC_NVMET_STE_FREE) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
 				"6411 NVMET free, already free IO x%x: %d %d\n",
@@ -430,7 +423,6 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)

 		ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
 		ctxp->wqeq = NULL;
-		ctxp->txrdy = NULL;
 		ctxp->offset = 0;
 		ctxp->phba = phba;
 		ctxp->size = size;
@@ -1958,12 +1950,10 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	uint32_t *payload;
 	uint32_t size, oxid, sid, rc;

-	fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
-	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
-
-	if (!phba->targetport) {
+	if (!nvmebuf || !phba->targetport) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
-				"6154 LS Drop IO x%x\n", oxid);
+				"6154 LS Drop IO\n");
 		oxid = 0;
 		size = 0;
 		sid = 0;
@@ -1971,6 +1961,9 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 		goto dropit;
 	}

+	fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
+	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
+
 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
 	payload = (uint32_t *)(nvmebuf->dbuf.virt);
 	size = bf_get(lpfc_rcqe_length,  &nvmebuf->cq_event.cqe.rcqe_cmpl);
@@ -2326,7 +2319,6 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
 			ctxp->state, ctxp->entry_cnt, ctxp->oxid);
 	}
 	ctxp->wqeq = NULL;
-	ctxp->txrdy = NULL;
 	ctxp->offset = 0;
 	ctxp->phba = phba;
 	ctxp->size = size;
@@ -2401,6 +2393,11 @@ lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	d_buf = piocb->context2;
 	nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);

+	if (!nvmebuf) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+				"3015 LS Drop IO\n");
+		return;
+	}
 	if (phba->nvmet_support == 0) {
 		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
 		return;
@@ -2429,6 +2426,11 @@ lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
 			   uint64_t isr_timestamp,
 			   uint8_t cqflag)
 {
+	if (!nvmebuf) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+				"3167 NVMET FCP Drop IO\n");
+		return;
+	}
 	if (phba->nvmet_support == 0) {
 		lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
 		return;
@@ -2595,7 +2597,6 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
 	struct scatterlist *sgel;
 	union lpfc_wqe128 *wqe;
 	struct ulp_bde64 *bde;
-	uint32_t *txrdy;
 	dma_addr_t physaddr;
 	int i, cnt;
 	int do_pbde;
@@ -2757,23 +2758,11 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
 		       &lpfc_treceive_cmd_template.words[3],
 		       sizeof(uint32_t) * 9);

-		/* Words 0 - 2 : The first sg segment */
-		txrdy = dma_pool_alloc(phba->txrdy_payload_pool,
-				       GFP_KERNEL, &physaddr);
-		if (!txrdy) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
-					"6041 Bad txrdy buffer: oxid x%x\n",
-					ctxp->oxid);
-			return NULL;
-		}
-		ctxp->txrdy = txrdy;
-		ctxp->txrdy_phys = physaddr;
-		wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
-		wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN;
-		wqe->fcp_treceive.bde.addrLow =
-			cpu_to_le32(putPaddrLow(physaddr));
-		wqe->fcp_treceive.bde.addrHigh =
-			cpu_to_le32(putPaddrHigh(physaddr));
+		/* Words 0 - 2 : First SGE is skipped, set invalid BDE type */
+		wqe->fcp_treceive.bde.tus.f.bdeFlags = LPFC_SGE_TYPE_SKIP;
+		wqe->fcp_treceive.bde.tus.f.bdeSize = 0;
+		wqe->fcp_treceive.bde.addrLow = 0;
+		wqe->fcp_treceive.bde.addrHigh = 0;

 		/* Word 4 */
 		wqe->fcp_treceive.relative_offset = ctxp->offset;
@@ -2808,17 +2797,13 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
 		/* Word 12 */
 		wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;

-		/* Setup 1 TXRDY and 1 SKIP SGE */
-		txrdy[0] = 0;
-		txrdy[1] = cpu_to_be32(rsp->transfer_length);
-		txrdy[2] = 0;
-
-		sgl->addr_hi = putPaddrHigh(physaddr);
-		sgl->addr_lo = putPaddrLow(physaddr);
+		/* Setup 2 SKIP SGEs */
+		sgl->addr_hi = 0;
+		sgl->addr_lo = 0;
 		sgl->word2 = 0;
-		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
+		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
 		sgl->word2 = cpu_to_le32(sgl->word2);
-		sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN);
+		sgl->sge_len = 0;
 		sgl++;
 		sgl->addr_hi = 0;
 		sgl->addr_lo = 0;
@@ -3239,9 +3224,9 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
 {
 	struct lpfc_nvmet_tgtport *tgtp;
 	struct lpfc_iocbq *abts_wqeq;
-	union lpfc_wqe128 *abts_wqe;
 	struct lpfc_nodelist *ndlp;
 	unsigned long flags;
+	u8 opt;
 	int rc;

 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
@@ -3280,8 +3265,8 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
 		return 0;
 	}
 	abts_wqeq = ctxp->abort_wqeq;
-	abts_wqe = &abts_wqeq->wqe;
 	ctxp->state = LPFC_NVMET_STE_ABORT;
+	opt = (ctxp->flag & LPFC_NVMET_ABTS_RCV) ? INHIBIT_ABORT : 0;
 	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

 	/* Announce entry to new IO submit field. */
@@ -3327,40 +3312,12 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
 	/* Ready - mark outstanding as aborted by driver. */
 	abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;

-	/* WQEs are reused.  Clear stale data and set key fields to
-	 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
-	 */
-	memset(abts_wqe, 0, sizeof(*abts_wqe));
-
-	/* word 3 */
-	bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
-
-	/* word 7 */
-	bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
-	bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
-
-	/* word 8 - tell the FW to abort the IO associated with this
-	 * outstanding exchange ID.
-	 */
-	abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag;
-
-	/* word 9 - this is the iotag for the abts_wqe completion. */
-	bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
-	       abts_wqeq->iotag);
-
-	/* word 10 */
-	bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
-	bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
-
-	/* word 11 */
-	bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
-	bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
-	bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+	lpfc_nvme_prep_abort_wqe(abts_wqeq, ctxp->wqeq->sli4_xritag, opt);

 	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
 	abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
 	abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
-	abts_wqeq->iocb_cmpl = 0;
+	abts_wqeq->iocb_cmpl = NULL;
 	abts_wqeq->iocb_flag |= LPFC_IO_NVME;
 	abts_wqeq->context2 = ctxp;
 	abts_wqeq->vport = phba->pport;
@@ -3495,7 +3452,7 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
 	spin_lock_irqsave(&phba->hbalock, flags);
 	abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
-	abts_wqeq->iocb_cmpl = 0;
+	abts_wqeq->iocb_cmpl = NULL;
 	abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
 	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
 	spin_unlock_irqrestore(&phba->hbalock, flags);


@@ -112,9 +112,7 @@ struct lpfc_nvmet_rcv_ctx {
 	struct lpfc_hba *phba;
 	struct lpfc_iocbq *wqeq;
 	struct lpfc_iocbq *abort_wqeq;
-	dma_addr_t txrdy_phys;
 	spinlock_t ctxlock; /* protect flag access */
-	uint32_t *txrdy;
 	uint32_t sid;
 	uint32_t offset;
 	uint16_t oxid;


@ -134,21 +134,21 @@ lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
/** /**
* lpfc_update_stats - Update statistical data for the command completion * lpfc_update_stats - Update statistical data for the command completion
* @phba: Pointer to HBA object. * @vport: The virtual port on which this call is executing.
* @lpfc_cmd: lpfc scsi command object pointer. * @lpfc_cmd: lpfc scsi command object pointer.
* *
* This function is called when there is a command completion and this * This function is called when there is a command completion and this
* function updates the statistical data for the command completion. * function updates the statistical data for the command completion.
**/ **/
static void static void
lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) lpfc_update_stats(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
{ {
struct lpfc_hba *phba = vport->phba;
struct lpfc_rport_data *rdata; struct lpfc_rport_data *rdata;
struct lpfc_nodelist *pnode; struct lpfc_nodelist *pnode;
struct scsi_cmnd *cmd = lpfc_cmd->pCmd; struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
unsigned long flags; unsigned long flags;
struct Scsi_Host *shost = cmd->device->host; struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
unsigned long latency; unsigned long latency;
int i; int i;
@ -526,7 +526,7 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
&qp->lpfc_abts_io_buf_list, list) { &qp->lpfc_abts_io_buf_list, list) {
if (psb->cur_iocbq.sli4_xritag == xri) { if (psb->cur_iocbq.sli4_xritag == xri) {
list_del_init(&psb->list); list_del_init(&psb->list);
psb->exch_busy = 0; psb->flags &= ~LPFC_SBUF_XBUSY;
psb->status = IOSTAT_SUCCESS; psb->status = IOSTAT_SUCCESS;
if (psb->cur_iocbq.iocb_flag == LPFC_IO_NVME) { if (psb->cur_iocbq.iocb_flag == LPFC_IO_NVME) {
qp->abts_nvme_io_bufs--; qp->abts_nvme_io_bufs--;
@ -566,7 +566,7 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
if (iocbq->sli4_xritag != xri) if (iocbq->sli4_xritag != xri)
continue; continue;
 psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
-psb->exch_busy = 0;
+psb->flags &= ~LPFC_SBUF_XBUSY;
 spin_unlock_irqrestore(&phba->hbalock, iflag);
 if (!list_empty(&pring->txq))
     lpfc_worker_wake_up(phba);
@@ -786,7 +786,7 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
 psb->prot_seg_cnt = 0;
 qp = psb->hdwq;
-if (psb->exch_busy) {
+if (psb->flags & LPFC_SBUF_XBUSY) {
     spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
     psb->pCmd = NULL;
     list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list);
@@ -3812,7 +3812,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 /* Sanity check on return of outstanding command */
 cmd = lpfc_cmd->pCmd;
-if (!cmd) {
+if (!cmd || !phba) {
     lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
                      "2621 IO completion: Not an active IO\n");
     spin_unlock(&lpfc_cmd->buf_lock);
@@ -3824,7 +3824,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) {
+if (unlikely(phba->cpucheck_on & LPFC_CHECK_SCSI_IO)) {
     cpu = raw_smp_processor_id();
     if (cpu < LPFC_CHECK_CPU_CNT && phba->sli4_hba.hdwq)
         phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++;
@@ -3835,7 +3835,10 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
 lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
 /* pick up SLI4 exhange busy status from HBA */
-lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;
+if (pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY)
+    lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
+else
+    lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 if (lpfc_cmd->prot_data_type) {
@@ -3869,7 +3872,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 }
 #endif
-if (lpfc_cmd->status) {
+if (unlikely(lpfc_cmd->status)) {
     if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
         (lpfc_cmd->result & IOERR_DRVR_MASK))
         lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
@@ -4002,7 +4005,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
         scsi_get_resid(cmd));
 }
-lpfc_update_stats(phba, lpfc_cmd);
+lpfc_update_stats(vport, lpfc_cmd);
 if (vport->cfg_max_scsicmpl_time &&
     time_after(jiffies, lpfc_cmd->start_time +
                msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
@@ -4610,17 +4613,18 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
     err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
 }
-if (err == 2) {
-    cmnd->result = DID_ERROR << 16;
-    goto out_fail_command_release_buf;
-} else if (err) {
+if (unlikely(err)) {
+    if (err == 2) {
+        cmnd->result = DID_ERROR << 16;
+        goto out_fail_command_release_buf;
+    }
     goto out_host_busy_free_buf;
 }
 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) {
+if (unlikely(phba->cpucheck_on & LPFC_CHECK_SCSI_IO)) {
     cpu = raw_smp_processor_id();
     if (cpu < LPFC_CHECK_CPU_CNT) {
         struct lpfc_sli4_hdw_queue *hdwq =
@@ -4843,20 +4847,21 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
     ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
                                     abtsiocb, 0);
 }
-/* no longer need the lock after this point */
-spin_unlock_irqrestore(&phba->hbalock, flags);
 if (ret_val == IOCB_ERROR) {
     /* Indicate the IO is not being aborted by the driver. */
     iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
     lpfc_cmd->waitq = NULL;
     spin_unlock(&lpfc_cmd->buf_lock);
+    spin_unlock_irqrestore(&phba->hbalock, flags);
     lpfc_sli_release_iocbq(phba, abtsiocb);
     ret = FAILED;
     goto out;
 }
+/* no longer need the lock after this point */
 spin_unlock(&lpfc_cmd->buf_lock);
+spin_unlock_irqrestore(&phba->hbalock, flags);
 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
     lpfc_sli_handle_fast_ring_event(phba,
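The lpfc_scsi.c hunks above retire the dedicated exch_busy field and track the SLI4 exchange-busy state as the LPFC_SBUF_XBUSY bit inside the existing flags word. A minimal, self-contained sketch of that set/clear-a-flag-bit pattern (hypothetical names, not the driver's own API):

    #include <stdio.h>
    #include <stdint.h>

    #define SBUF_XBUSY  0x1  /* exchange still busy in hardware */
    #define SBUF_BUMPED 0x2  /* queue depth counter was bumped */

    struct io_buf {
        uint16_t flags;  /* several booleans packed into one word */
    };

    /* Mirror a hardware-reported condition into one bit of flags,
     * leaving the other bits untouched. */
    static void set_xbusy(struct io_buf *buf, int hw_reports_busy)
    {
        if (hw_reports_busy)
            buf->flags |= SBUF_XBUSY;
        else
            buf->flags &= ~SBUF_XBUSY;
    }

    int main(void)
    {
        struct io_buf buf = { .flags = SBUF_BUMPED };

        set_xbusy(&buf, 1);
        printf("xbusy=%d bumped=%d\n",
               !!(buf.flags & SBUF_XBUSY), !!(buf.flags & SBUF_BUMPED));
        set_xbusy(&buf, 0);
        printf("xbusy=%d bumped=%d\n",
               !!(buf.flags & SBUF_XBUSY), !!(buf.flags & SBUF_BUMPED));
        return 0;
    }

Folding the boolean into flags drops two bytes from every lpfc_io_buf and keeps all per-buffer state behind one mask-test idiom.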


@@ -87,6 +87,10 @@ static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
                                      struct lpfc_eqe *eqe);
 static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
 static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
+static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
+static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
+                                    struct lpfc_queue *cq,
+                                    struct lpfc_cqe *cqe);
 static IOCB_t *
 lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
@@ -467,25 +471,52 @@ __lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
 }
 static void
-lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
+lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
 {
-    struct lpfc_eqe *eqe;
-    uint32_t count = 0;
+    struct lpfc_eqe *eqe = NULL;
+    u32 eq_count = 0, cq_count = 0;
+    struct lpfc_cqe *cqe = NULL;
+    struct lpfc_queue *cq = NULL, *childq = NULL;
+    int cqid = 0;
     /* walk all the EQ entries and drop on the floor */
     eqe = lpfc_sli4_eq_get(eq);
     while (eqe) {
+        /* Get the reference to the corresponding CQ */
+        cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
+        cq = NULL;
+        list_for_each_entry(childq, &eq->child_list, list) {
+            if (childq->queue_id == cqid) {
+                cq = childq;
+                break;
+            }
+        }
+        /* If CQ is valid, iterate through it and drop all the CQEs */
+        if (cq) {
+            cqe = lpfc_sli4_cq_get(cq);
+            while (cqe) {
+                __lpfc_sli4_consume_cqe(phba, cq, cqe);
+                cq_count++;
+                cqe = lpfc_sli4_cq_get(cq);
+            }
+            /* Clear and re-arm the CQ */
+            phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
+                LPFC_QUEUE_REARM);
+            cq_count = 0;
+        }
         __lpfc_sli4_consume_eqe(phba, eq, eqe);
-        count++;
+        eq_count++;
         eqe = lpfc_sli4_eq_get(eq);
     }
     /* Clear and re-arm the EQ */
-    phba->sli4_hba.sli4_write_eq_db(phba, eq, count, LPFC_QUEUE_REARM);
+    phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
 }
 static int
-lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq)
+lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
+                     uint8_t rearm)
 {
     struct lpfc_eqe *eqe;
     int count = 0, consumed = 0;
@@ -519,8 +550,8 @@ lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq)
     eq->queue_claimed = 0;
 rearm_and_exit:
-    /* Always clear and re-arm the EQ */
-    phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, LPFC_QUEUE_REARM);
+    /* Always clear the EQ. */
+    phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);
     return count;
 }
@@ -2526,6 +2557,8 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
         } else {
             __lpfc_sli_rpi_release(vport, ndlp);
         }
+        if (vport->load_flag & FC_UNLOADING)
+            lpfc_nlp_put(ndlp);
         pmb->ctx_ndlp = NULL;
     }
 }
@@ -2672,7 +2705,8 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
     lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
                     "(%d):0323 Unknown Mailbox command "
                     "x%x (x%x/x%x) Cmpl\n",
-                    pmb->vport ? pmb->vport->vpi : 0,
+                    pmb->vport ? pmb->vport->vpi :
+                    LPFC_VPORT_UNKNOWN,
                     pmbox->mbxCommand,
                     lpfc_sli_config_mbox_subsys_get(phba,
                                                     pmb),
@@ -2693,7 +2727,8 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
                     "(%d):0305 Mbox cmd cmpl "
                     "error - RETRYing Data: x%x "
                     "(x%x/x%x) x%x x%x x%x\n",
-                    pmb->vport ? pmb->vport->vpi : 0,
+                    pmb->vport ? pmb->vport->vpi :
+                    LPFC_VPORT_UNKNOWN,
                     pmbox->mbxCommand,
                     lpfc_sli_config_mbox_subsys_get(phba,
                                                     pmb),
@@ -2701,7 +2736,8 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
                                                     pmb),
                     pmbox->mbxStatus,
                     pmbox->un.varWords[0],
-                    pmb->vport->port_state);
+                    pmb->vport ? pmb->vport->port_state :
+                    LPFC_VPORT_UNKNOWN);
     pmbox->mbxStatus = 0;
     pmbox->mbxOwner = OWN_HOST;
     rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
@@ -6167,6 +6203,14 @@ lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
         mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
         mbox->u.mqe.un.set_feature.param_len = 8;
         break;
+    case LPFC_SET_DUAL_DUMP:
+        bf_set(lpfc_mbx_set_feature_dd,
+               &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP);
+        bf_set(lpfc_mbx_set_feature_ddquery,
+               &mbox->u.mqe.un.set_feature, 0);
+        mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP;
+        mbox->u.mqe.un.set_feature.param_len = 4;
+        break;
     }
     return;
@@ -6184,11 +6228,16 @@ lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
 {
     struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
-    ras_fwlog->ras_active = false;
+    spin_lock_irq(&phba->hbalock);
+    ras_fwlog->state = INACTIVE;
+    spin_unlock_irq(&phba->hbalock);
     /* Disable FW logging to host memory */
     writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
            phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
+    /* Wait 10ms for firmware to stop using DMA buffer */
+    usleep_range(10 * 1000, 20 * 1000);
 }
 /**
@@ -6224,7 +6273,9 @@ lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
         ras_fwlog->lwpd.virt = NULL;
     }
-    ras_fwlog->ras_active = false;
+    spin_lock_irq(&phba->hbalock);
+    ras_fwlog->state = INACTIVE;
+    spin_unlock_irq(&phba->hbalock);
 }
 /**
@@ -6326,7 +6377,9 @@ lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
         goto disable_ras;
     }
-    ras_fwlog->ras_active = true;
+    spin_lock_irq(&phba->hbalock);
+    ras_fwlog->state = ACTIVE;
+    spin_unlock_irq(&phba->hbalock);
     mempool_free(pmb, phba->mbox_mem_pool);
     return;
@@ -6358,6 +6411,10 @@ lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
     uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
     int rc = 0;
+    spin_lock_irq(&phba->hbalock);
+    ras_fwlog->state = INACTIVE;
+    spin_unlock_irq(&phba->hbalock);
     fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
                       phba->cfg_ras_fwlog_buffsize);
     fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
@@ -6417,6 +6474,9 @@ lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
     mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
     mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
+    spin_lock_irq(&phba->hbalock);
+    ras_fwlog->state = REG_INPROGRESS;
+    spin_unlock_irq(&phba->hbalock);
     mbox->vport = phba->pport;
     mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
@@ -7148,7 +7208,7 @@ lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
 int
 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 {
-    int rc, i, cnt, len;
+    int rc, i, cnt, len, dd;
     LPFC_MBOXQ_t *mboxq;
     struct lpfc_mqe *mqe;
     uint8_t *vpd;
@@ -7399,6 +7459,23 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
     phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
     spin_unlock_irq(&phba->hbalock);
+    /* Always try to enable dual dump feature if we can */
+    lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP);
+    rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+    dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature);
+    if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP))
+        lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_INIT,
+                        "6448 Dual Dump is enabled\n");
+    else
+        lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT,
+                        "6447 Dual Dump Mailbox x%x (x%x/x%x) failed, "
+                        "rc:x%x dd:x%x\n",
+                        bf_get(lpfc_mqe_command, &mboxq->u.mqe),
+                        lpfc_sli_config_mbox_subsys_get(
+                            phba, mboxq),
+                        lpfc_sli_config_mbox_opcode_get(
+                            phba, mboxq),
+                        rc, dd);
     /*
      * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent
      * calls depends on these resources to complete port setup.
@@ -7523,9 +7600,11 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
         }
         phba->sli4_hba.nvmet_xri_cnt = rc;
-        cnt = phba->cfg_iocb_cnt * 1024;
-        /* We need 1 iocbq for every SGL, for IO processing */
-        cnt += phba->sli4_hba.nvmet_xri_cnt;
+        /* We allocate an iocbq for every receive context SGL.
+         * The additional allocation is for abort and ls handling.
+         */
+        cnt = phba->sli4_hba.nvmet_xri_cnt +
+            phba->sli4_hba.max_cfg_param.max_xri;
     } else {
         /* update host common xri-sgl sizes and mappings */
         rc = lpfc_sli4_io_sgl_update(phba);
@@ -7547,14 +7626,17 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
             rc = -ENODEV;
             goto out_destroy_queue;
         }
-        cnt = phba->cfg_iocb_cnt * 1024;
+        /* Each lpfc_io_buf job structure has an iocbq element.
+         * This cnt provides for abort, els, ct and ls requests.
+         */
+        cnt = phba->sli4_hba.max_cfg_param.max_xri;
     }
     if (!phba->sli.iocbq_lookup) {
         /* Initialize and populate the iocb list per host */
         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                        "2821 initialize iocb list %d total %d\n",
-                        phba->cfg_iocb_cnt, cnt);
+                        "2821 initialize iocb list with %d entries\n",
+                        cnt);
         rc = lpfc_init_iocb_list(phba, cnt);
         if (rc) {
             lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -7892,7 +7974,7 @@ lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
     if (mbox_pending)
         /* process and rearm the EQ */
-        lpfc_sli4_process_eq(phba, fpeq);
+        lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
     else
         /* Always clear and re-arm the EQ */
         sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
@@ -8964,7 +9046,8 @@ lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
  * @pring: Pointer to driver SLI ring object.
  * @piocb: Pointer to address of newly added command iocb.
  *
- * This function is called with hbalock held to add a command
+ * This function is called with hbalock held for SLI3 ports or
+ * the ring lock held for SLI4 ports to add a command
  * iocb to the txq when SLI layer cannot submit the command iocb
  * to the ring.
  **/
@@ -8972,7 +9055,10 @@ void
 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                       struct lpfc_iocbq *piocb)
 {
-    lockdep_assert_held(&phba->hbalock);
+    if (phba->sli_rev == LPFC_SLI_REV4)
+        lockdep_assert_held(&pring->ring_lock);
+    else
+        lockdep_assert_held(&phba->hbalock);
     /* Insert the caller's iocb in the txq tail for later processing. */
     list_add_tail(&piocb->list, &pring->txq);
 }
@@ -9863,7 +9949,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
  * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
  * an iocb command to an HBA with SLI-4 interface spec.
  *
- * This function is called with hbalock held. The function will return success
+ * This function is called with ringlock held. The function will return success
  * after it successfully submit the iocb to firmware or after adding to the
  * txq.
  **/
@@ -10053,10 +10139,13 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
                     struct lpfc_iocbq *piocb, uint32_t flag)
 {
     struct lpfc_sli_ring *pring;
+    struct lpfc_queue *eq;
     unsigned long iflags;
     int rc;
     if (phba->sli_rev == LPFC_SLI_REV4) {
+        eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
         pring = lpfc_sli4_calc_ring(phba, piocb);
         if (unlikely(pring == NULL))
             return IOCB_ERROR;
@@ -10064,6 +10153,8 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
         spin_lock_irqsave(&pring->ring_lock, iflags);
         rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
         spin_unlock_irqrestore(&pring->ring_lock, iflags);
+        lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH);
     } else {
         /* For now, SLI2/3 will still use hbalock */
         spin_lock_irqsave(&phba->hbalock, iflags);
@@ -10678,14 +10769,14 @@ lpfc_sli_host_down(struct lpfc_vport *vport)
             set_bit(LPFC_DATA_READY, &phba->data_flags);
         }
         prev_pring_flag = pring->flag;
-        spin_lock_irq(&pring->ring_lock);
+        spin_lock(&pring->ring_lock);
         list_for_each_entry_safe(iocb, next_iocb,
                                  &pring->txq, list) {
             if (iocb->vport != vport)
                 continue;
             list_move_tail(&iocb->list, &completions);
         }
-        spin_unlock_irq(&pring->ring_lock);
+        spin_unlock(&pring->ring_lock);
         list_for_each_entry_safe(iocb, next_iocb,
                                  &pring->txcmplq, list) {
             if (iocb->vport != vport)
@@ -11050,9 +11141,6 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
             irsp->ulpStatus, irsp->un.ulpWord[4]);
         spin_unlock_irq(&phba->hbalock);
-        if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
-            irsp->un.ulpWord[4] == IOERR_SLI_ABORTED)
-            lpfc_sli_release_iocbq(phba, abort_iocb);
     }
 release_iocb:
     lpfc_sli_release_iocbq(phba, cmdiocb);
@@ -11736,7 +11824,10 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
     !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
         lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
                                 cur_iocbq);
-        lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
+        if (rspiocbq && (rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY))
+            lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
+        else
+            lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
     }
     pdone_q = cmdiocbq->context_un.wait_queue;
@@ -13158,13 +13249,19 @@ send_current_mbox:
     phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
     /* Setting active mailbox pointer need to be in sync to flag clear */
     phba->sli.mbox_active = NULL;
+    if (bf_get(lpfc_trailer_consumed, mcqe))
+        lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
     spin_unlock_irqrestore(&phba->hbalock, iflags);
     /* Wake up worker thread to post the next pending mailbox command */
     lpfc_worker_wake_up(phba);
+    return workposted;
 out_no_mqe_complete:
+    spin_lock_irqsave(&phba->hbalock, iflags);
     if (bf_get(lpfc_trailer_consumed, mcqe))
         lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
-    return workposted;
+    spin_unlock_irqrestore(&phba->hbalock, iflags);
+    return false;
 }
 /**
@@ -13217,7 +13314,6 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
     struct lpfc_sli_ring *pring = cq->pring;
     int txq_cnt = 0;
     int txcmplq_cnt = 0;
-    int fcp_txcmplq_cnt = 0;
     /* Check for response status */
     if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
@@ -13239,9 +13335,8 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
             txcmplq_cnt++;
         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                         "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
-                        "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
+                        "els_txcmplq_cnt=%d\n",
                         txq_cnt, phba->iocb_cnt,
-                        fcp_txcmplq_cnt,
                         txcmplq_cnt);
         return false;
     }
@@ -13592,6 +13687,7 @@ __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
             phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
                                             LPFC_QUEUE_NOARM);
             consumed = 0;
+            cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK;
         }
         if (count == LPFC_NVMET_CQ_NOTIFY)
@@ -14220,7 +14316,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
         spin_lock_irqsave(&phba->hbalock, iflag);
         if (phba->link_state < LPFC_LINK_DOWN)
             /* Flush, clear interrupt, and rearm the EQ */
-            lpfc_sli4_eq_flush(phba, fpeq);
+            lpfc_sli4_eqcq_flush(phba, fpeq);
         spin_unlock_irqrestore(&phba->hbalock, iflag);
         return IRQ_NONE;
     }
@@ -14230,14 +14326,14 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
     fpeq->last_cpu = raw_smp_processor_id();
     if (icnt > LPFC_EQD_ISR_TRIGGER &&
-        phba->cfg_irq_chann == 1 &&
+        fpeq->q_flag & HBA_EQ_DELAY_CHK &&
         phba->cfg_auto_imax &&
         fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
         phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
         lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
     /* process and rearm the EQ */
-    ecount = lpfc_sli4_process_eq(phba, fpeq);
+    ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
     if (unlikely(ecount == 0)) {
         fpeq->EQ_no_entry++;
@@ -14297,6 +14393,147 @@ lpfc_sli4_intr_handler(int irq, void *dev_id)
     return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
 } /* lpfc_sli4_intr_handler */
+void lpfc_sli4_poll_hbtimer(struct timer_list *t)
+{
+    struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
+    struct lpfc_queue *eq;
+    int i = 0;
+    rcu_read_lock();
+    list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
+        i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH);
+    if (!list_empty(&phba->poll_list))
+        mod_timer(&phba->cpuhp_poll_timer,
+                  jiffies + msecs_to_jiffies(LPFC_POLL_HB));
+    rcu_read_unlock();
+}
+inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path)
+{
+    struct lpfc_hba *phba = eq->phba;
+    int i = 0;
+    /*
+     * Unlocking an irq is one of the entry point to check
+     * for re-schedule, but we are good for io submission
+     * path as midlayer does a get_cpu to glue us in. Flush
+     * out the invalidate queue so we can see the updated
+     * value for flag.
+     */
+    smp_rmb();
+    if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
+        /* We will not likely get the completion for the caller
+         * during this iteration but i guess that's fine.
+         * Future io's coming on this eq should be able to
+         * pick it up. As for the case of single io's, they
+         * will be handled through a sched from polling timer
+         * function which is currently triggered every 1msec.
+         */
+        i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
+    return i;
+}
+static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
+{
+    struct lpfc_hba *phba = eq->phba;
+    if (list_empty(&phba->poll_list)) {
+        timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
+        /* kickstart slowpath processing for this eq */
+        mod_timer(&phba->cpuhp_poll_timer,
+                  jiffies + msecs_to_jiffies(LPFC_POLL_HB));
+    }
+    list_add_rcu(&eq->_poll_list, &phba->poll_list);
+    synchronize_rcu();
+}
+static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
+{
+    struct lpfc_hba *phba = eq->phba;
+    /* Disable slowpath processing for this eq. Kick start the eq
+     * by RE-ARMING the eq's ASAP
+     */
+    list_del_rcu(&eq->_poll_list);
+    synchronize_rcu();
+    if (list_empty(&phba->poll_list))
+        del_timer_sync(&phba->cpuhp_poll_timer);
+}
+void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
+{
+    struct lpfc_queue *eq, *next;
+    list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
+        list_del(&eq->_poll_list);
+    INIT_LIST_HEAD(&phba->poll_list);
+    synchronize_rcu();
+}
+static inline void
+__lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
+{
+    if (mode == eq->mode)
+        return;
+    /*
+     * currently this function is only called during a hotplug
+     * event and the cpu on which this function is executing
+     * is going offline. By now the hotplug has instructed
+     * the scheduler to remove this cpu from cpu active mask.
+     * So we don't need to work about being put aside by the
+     * scheduler for a high priority process. Yes, the inte-
+     * rrupts could come but they are known to retire ASAP.
+     */
+    /* Disable polling in the fastpath */
+    WRITE_ONCE(eq->mode, mode);
+    /* flush out the store buffer */
+    smp_wmb();
+    /*
+     * Add this eq to the polling list and start polling. For
+     * a grace period both interrupt handler and poller will
+     * try to process the eq _but_ that's fine. We have a
+     * synchronization mechanism in place (queue_claimed) to
+     * deal with it. This is just a draining phase for int-
+     * errupt handler (not eq's) as we have guranteed through
+     * barrier that all the CPUs have seen the new CQ_POLLED
+     * state. which will effectively disable the REARMING of
+     * the EQ. The whole idea is eq's die off eventually as
+     * we are not rearming EQ's anymore.
+     */
+    mode ? lpfc_sli4_add_to_poll_list(eq) :
+           lpfc_sli4_remove_from_poll_list(eq);
+}
+void lpfc_sli4_start_polling(struct lpfc_queue *eq)
+{
+    __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
+}
+void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
+{
+    struct lpfc_hba *phba = eq->phba;
+    __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);
+    /* Kick start for the pending io's in h/w.
+     * Once we switch back to interrupt processing on a eq
+     * the io path completion will only arm eq's when it
+     * receives a completion. But since eq's are in disa-
+     * rmed state it doesn't receive a completion. This
+     * creates a deadlock scenaro.
+     */
+    phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
+}
 /**
  * lpfc_sli4_queue_free - free a queue structure and associated memory
  * @queue: The queue structure to free.
@@ -14371,6 +14608,7 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
         return NULL;
     INIT_LIST_HEAD(&queue->list);
+    INIT_LIST_HEAD(&queue->_poll_list);
     INIT_LIST_HEAD(&queue->wq_list);
     INIT_LIST_HEAD(&queue->wqfull_list);
     INIT_LIST_HEAD(&queue->page_list);
@@ -18124,8 +18362,9 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
         phba->sli4_hba.max_cfg_param.rpi_used++;
         phba->sli4_hba.rpi_count++;
     }
-    lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
-                    "0001 rpi:%x max:%x lim:%x\n",
+    lpfc_printf_log(phba, KERN_INFO,
+                    LOG_NODE | LOG_DISCOVERY,
+                    "0001 Allocated rpi:x%x max:x%x lim:x%x\n",
                     (int) rpi, max_rpi, rpi_limit);
     /*
@@ -18181,11 +18420,19 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
 static void
 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
 {
+    /*
+     * if the rpi value indicates a prior unreg has already
+     * been done, skip the unreg.
+     */
+    if (rpi == LPFC_RPI_ALLOC_ERROR)
+        return;
     if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
         phba->sli4_hba.rpi_count--;
         phba->sli4_hba.max_cfg_param.rpi_used--;
     } else {
-        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+        lpfc_printf_log(phba, KERN_INFO,
+                        LOG_NODE | LOG_DISCOVERY,
                         "2016 rpi %x not inuse\n",
                         rpi);
     }
@@ -19683,6 +19930,8 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
         lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
         spin_unlock_irqrestore(&pring->ring_lock, iflags);
+        lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
         return 0;
     }
@@ -19703,6 +19952,8 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
         }
         lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
         spin_unlock_irqrestore(&pring->ring_lock, iflags);
+        lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
         return 0;
     }
@@ -19731,6 +19982,8 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
         }
         lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
         spin_unlock_irqrestore(&pring->ring_lock, iflags);
+        lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
         return 0;
     }
     return WQE_ERROR;
@@ -20093,6 +20346,13 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
     lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
     lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;
+    if (phba->cfg_xpsgl && !phba->nvmet_support &&
+        !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
+        lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
+    if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
+        lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
     if (phba->cfg_xri_rebalancing) {
         if (lpfc_ncmd->expedite) {
             /* Return to expedite pool */
@@ -20157,13 +20417,6 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
         spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
                                iflag);
     }
-    if (phba->cfg_xpsgl && !phba->nvmet_support &&
-        !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
-        lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
-    if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
-        lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
 }
@@ -20399,8 +20652,9 @@ lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
     struct sli4_hybrid_sgl *allocated_sgl = NULL;
     struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
     struct list_head *buf_list = &hdwq->sgl_list;
+    unsigned long iflags;
-    spin_lock_irq(&hdwq->hdwq_lock);
+    spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
     if (likely(!list_empty(buf_list))) {
         /* break off 1 chunk from the sgl_list */
@@ -20412,9 +20666,9 @@ lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
         }
     } else {
         /* allocate more */
-        spin_unlock_irq(&hdwq->hdwq_lock);
+        spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
         tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
-                           cpu_to_node(smp_processor_id()));
+                           cpu_to_node(hdwq->io_wq->chann));
         if (!tmp) {
             lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                             "8353 error kmalloc memory for HDWQ "
@@ -20434,7 +20688,7 @@ lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
             return NULL;
         }
-        spin_lock_irq(&hdwq->hdwq_lock);
+        spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
         list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
     }
@@ -20442,7 +20696,7 @@ lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
                                      struct sli4_hybrid_sgl,
                                      list_node);
-    spin_unlock_irq(&hdwq->hdwq_lock);
+    spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
     return allocated_sgl;
 }
@@ -20466,8 +20720,9 @@ lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
     struct sli4_hybrid_sgl *tmp = NULL;
     struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
     struct list_head *buf_list = &hdwq->sgl_list;
+    unsigned long iflags;
-    spin_lock_irq(&hdwq->hdwq_lock);
+    spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
     if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
         list_for_each_entry_safe(list_entry, tmp,
@@ -20480,7 +20735,7 @@ lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
         rc = -EINVAL;
     }
-    spin_unlock_irq(&hdwq->hdwq_lock);
+    spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
     return rc;
 }
@@ -20501,8 +20756,9 @@ lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
     struct list_head *buf_list = &hdwq->sgl_list;
     struct sli4_hybrid_sgl *list_entry = NULL;
     struct sli4_hybrid_sgl *tmp = NULL;
+    unsigned long iflags;
-    spin_lock_irq(&hdwq->hdwq_lock);
+    spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
     /* Free sgl pool */
     list_for_each_entry_safe(list_entry, tmp,
@@ -20514,7 +20770,7 @@ lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
         kfree(list_entry);
     }
-    spin_unlock_irq(&hdwq->hdwq_lock);
+    spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
 }
 /**
@@ -20538,8 +20794,9 @@ lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
     struct fcp_cmd_rsp_buf *allocated_buf = NULL;
     struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
     struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
+    unsigned long iflags;
-    spin_lock_irq(&hdwq->hdwq_lock);
+    spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
     if (likely(!list_empty(buf_list))) {
         /* break off 1 chunk from the list */
@@ -20552,9 +20809,9 @@ lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
         }
     } else {
         /* allocate more */
-        spin_unlock_irq(&hdwq->hdwq_lock);
+        spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
         tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
-                           cpu_to_node(smp_processor_id()));
+                           cpu_to_node(hdwq->io_wq->chann));
         if (!tmp) {
             lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                             "8355 error kmalloc memory for HDWQ "
@@ -20579,7 +20836,7 @@ lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
         tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
                        sizeof(struct fcp_cmnd));
-        spin_lock_irq(&hdwq->hdwq_lock);
+        spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
         list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
     }
@@ -20587,7 +20844,7 @@ lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
                                      struct fcp_cmd_rsp_buf,
                                      list_node);
-    spin_unlock_irq(&hdwq->hdwq_lock);
+    spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
     return allocated_buf;
 }
@@ -20612,8 +20869,9 @@ lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
     struct fcp_cmd_rsp_buf *tmp = NULL;
     struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
     struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
+    unsigned long iflags;
-    spin_lock_irq(&hdwq->hdwq_lock);
+    spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
     if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
         list_for_each_entry_safe(list_entry, tmp,
@@ -20626,7 +20884,7 @@ lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
         rc = -EINVAL;
     }
-    spin_unlock_irq(&hdwq->hdwq_lock);
+    spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
     return rc;
 }
@@ -20647,8 +20905,9 @@ lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
     struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
     struct fcp_cmd_rsp_buf *list_entry = NULL;
     struct fcp_cmd_rsp_buf *tmp = NULL;
+    unsigned long iflags;
-    spin_lock_irq(&hdwq->hdwq_lock);
+    spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
     /* Free cmd_rsp buf pool */
     list_for_each_entry_safe(list_entry, tmp,
@@ -20661,5 +20920,5 @@ lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
         kfree(list_entry);
     }
-    spin_unlock_irq(&hdwq->hdwq_lock);
+    spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
 }
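A recurring pattern in the hunks above is the conversion of spin_lock_irq()/spin_unlock_irq() on the hdwq lock to spin_lock_irqsave()/spin_unlock_irqrestore(). The save/restore variant records the interrupt state at lock time and puts back exactly that state at unlock, so the lock is safe to take from callers whose interrupt state is unknown or already disabled. A toy userspace model of why the unconditional variant is the risky one (illustrative only, nothing here is kernel code):

    #include <stdio.h>

    static int irq_enabled = 1;  /* stand-in for the CPU interrupt flag */

    /* irqsave: remember the current state, then "disable interrupts" */
    static unsigned long toy_lock_irqsave(void)
    {
        unsigned long flags = (unsigned long)irq_enabled;
        irq_enabled = 0;
        return flags;
    }

    /* irqrestore: put back whatever state the caller had */
    static void toy_unlock_irqrestore(unsigned long flags)
    {
        irq_enabled = (int)flags;
    }

    int main(void)
    {
        unsigned long outer = toy_lock_irqsave();  /* caller disabled irqs */
        unsigned long inner = toy_lock_irqsave();  /* nested critical section */

        toy_unlock_irqrestore(inner);
        printf("after inner unlock: irq_enabled=%d (still off, correct)\n",
               irq_enabled);

        toy_unlock_irqrestore(outer);
        printf("after outer unlock: irq_enabled=%d (restored)\n", irq_enabled);
        return 0;
    }

An unconditional unlock_irq would have re-enabled interrupts at the inner unlock, silently breaking the outer critical section; restoring the saved state avoids that.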


@@ -384,14 +384,13 @@ struct lpfc_io_buf {
     struct lpfc_nodelist *ndlp;
     uint32_t timeout;
-    uint16_t flags; /* TBD convert exch_busy to flags */
+    uint16_t flags;
 #define LPFC_SBUF_XBUSY         0x1   /* SLI4 hba reported XB on WCQE cmpl */
 #define LPFC_SBUF_BUMP_QDEPTH   0x2   /* bumped queue depth counter */
                                       /* External DIF device IO conversions */
 #define LPFC_SBUF_NORMAL_DIF    0x4   /* normal mode to insert/strip */
 #define LPFC_SBUF_PASS_DIF      0x8   /* insert/strip mode to passthru */
 #define LPFC_SBUF_NOT_POSTED    0x10  /* SGL failed post to FW. */
-    uint16_t exch_busy;   /* SLI4 hba reported XB on complete WCQE */
     uint16_t status;      /* From IOCB Word 7- ulpStatus */
     uint32_t result;      /* From IOCB Word 4. */


@@ -41,8 +41,13 @@
 /* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */
 #define LPFC_HBA_HDWQ_MIN       0
-#define LPFC_HBA_HDWQ_MAX       128
-#define LPFC_HBA_HDWQ_DEF       0
+#define LPFC_HBA_HDWQ_MAX       256
+#define LPFC_HBA_HDWQ_DEF       LPFC_HBA_HDWQ_MIN
+/* irq_chann range, values */
+#define LPFC_IRQ_CHANN_MIN      0
+#define LPFC_IRQ_CHANN_MAX      256
+#define LPFC_IRQ_CHANN_DEF      LPFC_IRQ_CHANN_MIN
 /* FCP MQ queue count limiting */
 #define LPFC_FCP_MQ_THRESHOLD_MIN       0
@@ -133,6 +138,23 @@ struct lpfc_rqb {
 struct lpfc_queue {
     struct list_head list;
     struct list_head wq_list;
+    /*
+     * If interrupts are in effect on _all_ the eq's the footprint
+     * of polling code is zero (except mode). This memory is chec-
+     * ked for every io to see if the io needs to be polled and
+     * while completion to check if the eq's needs to be rearmed.
+     * Keep in same cacheline as the queue ptr to avoid cpu fetch
+     * stalls. Using 1B memory will leave us with 7B hole. Fill
+     * it with other frequently used members.
+     */
+    uint16_t last_cpu;  /* most recent cpu */
+    uint16_t hdwq;
+    uint8_t  qe_valid;
+    uint8_t  mode;  /* interrupt or polling */
+#define LPFC_EQ_INTERRUPT       0
+#define LPFC_EQ_POLL            1
     struct list_head wqfull_list;
     enum lpfc_sli4_queue_type type;
     enum lpfc_sli4_queue_subtype subtype;
@@ -199,6 +221,7 @@ struct lpfc_queue {
     uint8_t q_flag;
 #define HBA_NVMET_WQFULL        0x1 /* We hit WQ Full condition for NVMET */
 #define HBA_NVMET_CQ_NOTIFY     0x1 /* LPFC_NVMET_CQ_NOTIFY CQEs this EQE */
+#define HBA_EQ_DELAY_CHK        0x2 /* EQ is a candidate for coalescing */
 #define LPFC_NVMET_CQ_NOTIFY    4
     void __iomem *db_regaddr;
     uint16_t dpp_enable;
@@ -239,10 +262,8 @@ struct lpfc_queue {
     struct delayed_work sched_spwork;
     uint64_t isr_timestamp;
-    uint16_t hdwq;
-    uint16_t last_cpu;  /* most recent cpu */
-    uint8_t qe_valid;
     struct lpfc_queue *assoc_qp;
+    struct list_head _poll_list;
     void **q_pgs;  /* array to index entries per page */
 };
@@ -451,11 +472,17 @@ struct lpfc_hba;
 #define LPFC_SLI4_HANDLER_NAME_SZ       16
 struct lpfc_hba_eq_hdl {
     uint32_t idx;
+    uint16_t irq;
     char handler_name[LPFC_SLI4_HANDLER_NAME_SZ];
     struct lpfc_hba *phba;
     struct lpfc_queue *eq;
+    struct cpumask aff_mask;
 };
+#define lpfc_get_eq_hdl(eqidx) (&phba->sli4_hba.hba_eq_hdl[eqidx])
+#define lpfc_get_aff_mask(eqidx) (&phba->sli4_hba.hba_eq_hdl[eqidx].aff_mask)
+#define lpfc_get_irq(eqidx) (phba->sli4_hba.hba_eq_hdl[eqidx].irq)
 /*BB Credit recovery value*/
 struct lpfc_bbscn_params {
     uint32_t word0;
@@ -513,6 +540,7 @@ struct lpfc_pc_sli4_params {
     uint8_t cqav;
     uint8_t wqsize;
     uint8_t bv1s;
+    uint8_t pls;
 #define LPFC_WQ_SZ64_SUPPORT    1
 #define LPFC_WQ_SZ128_SUPPORT   2
     uint8_t wqpcnt;
@@ -544,11 +572,10 @@ struct lpfc_sli4_lnk_info {
 #define LPFC_SLI4_HANDLER_CNT           (LPFC_HBA_IO_CHAN_MAX+ \
                                          LPFC_FOF_IO_CHAN_NUM)
-/* Used for IRQ vector to CPU mapping */
+/* Used for tracking CPU mapping attributes */
 struct lpfc_vector_map_info {
     uint16_t    phys_id;
     uint16_t    core_id;
-    uint16_t    irq;
     uint16_t    eq;
     uint16_t    hdwq;
     uint16_t    flag;
@@ -891,6 +918,7 @@ struct lpfc_sli4_hba {
     struct lpfc_vector_map_info *cpu_map;
     uint16_t num_possible_cpu;
     uint16_t num_present_cpu;
+    struct cpumask numa_mask;
     uint16_t curr_disp_cpu;
     struct lpfc_eq_intr_info __percpu *eq_info;
     uint32_t conf_trunk;
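The struct lpfc_queue reshuffle above moves last_cpu, hdwq, qe_valid and the new one-byte mode field up next to the list heads, per the added comment, so the per-I/O poll check touches the same cache line as the queue pointer rather than a cold one. A rough way to sanity-check that kind of layout decision with offsetof (abbreviated stand-in fields, not the real structure):

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Hot members grouped at the front so they share the first
     * 64-byte cache line with the embedded list pointers. */
    struct queue {
        void *list_next, *list_prev;   /* embedded list head */
        uint16_t last_cpu;
        uint16_t hdwq;
        uint8_t  qe_valid;
        uint8_t  mode;                 /* interrupt or polling */
    };

    int main(void)
    {
        size_t off = offsetof(struct queue, mode);
        printf("mode at offset %zu, first cache line: %s\n",
               off, off < 64 ? "yes" : "no");
        return 0;
    }

The same motivation explains filling the 7-byte hole left by the single mode byte with other frequently read members instead of padding.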


@@ -20,7 +20,7 @@
  * included with this package. *
  *******************************************************************/
-#define LPFC_DRIVER_VERSION "12.4.0.0"
+#define LPFC_DRIVER_VERSION "12.6.0.2"
 #define LPFC_DRIVER_NAME    "lpfc"
 /* Used for SLI 2/3 */


@@ -464,7 +464,7 @@ static int __init mac_scsi_probe(struct platform_device *pdev)
         mac_scsi_template.can_queue = setup_can_queue;
     if (setup_cmd_per_lun > 0)
         mac_scsi_template.cmd_per_lun = setup_cmd_per_lun;
-    if (setup_sg_tablesize >= 0)
+    if (setup_sg_tablesize > 0)
         mac_scsi_template.sg_tablesize = setup_sg_tablesize;
     if (setup_hostid >= 0)
         mac_scsi_template.this_id = setup_hostid & 7;


@@ -24,6 +24,8 @@
 #define MEGASAS_VERSION         "07.710.50.00-rc1"
 #define MEGASAS_RELDATE         "June 28, 2019"
+#define MEGASAS_MSIX_NAME_LEN   32
 /*
  * Device IDs
  */
@@ -2203,6 +2205,7 @@ struct megasas_aen_event {
 };
 struct megasas_irq_context {
+    char name[MEGASAS_MSIX_NAME_LEN];
     struct megasas_instance *instance;
     u32 MSIxIndex;
     u32 os_irq;


@@ -5546,9 +5546,11 @@ megasas_setup_irqs_ioapic(struct megasas_instance *instance)
     pdev = instance->pdev;
     instance->irq_context[0].instance = instance;
     instance->irq_context[0].MSIxIndex = 0;
+    snprintf(instance->irq_context->name, MEGASAS_MSIX_NAME_LEN, "%s%u",
+             "megasas", instance->host->host_no);
     if (request_irq(pci_irq_vector(pdev, 0),
                     instance->instancet->service_isr, IRQF_SHARED,
-                    "megasas", &instance->irq_context[0])) {
+                    instance->irq_context->name, &instance->irq_context[0])) {
         dev_err(&instance->pdev->dev,
                 "Failed to register IRQ from %s %d\n",
                 __func__, __LINE__);
@@ -5580,8 +5582,10 @@ megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
     for (i = 0; i < instance->msix_vectors; i++) {
         instance->irq_context[i].instance = instance;
         instance->irq_context[i].MSIxIndex = i;
+        snprintf(instance->irq_context[i].name, MEGASAS_MSIX_NAME_LEN, "%s%u-msix%u",
+                 "megasas", instance->host->host_no, i);
         if (request_irq(pci_irq_vector(pdev, i),
-                        instance->instancet->service_isr, 0, "megasas",
+                        instance->instancet->service_isr, 0, instance->irq_context[i].name,
                         &instance->irq_context[i])) {
             dev_err(&instance->pdev->dev,
                     "Failed to register IRQ for vector %d.\n", i);


@@ -386,9 +386,8 @@ u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
         le64_to_cpu(quad->logEnd) && (mega_mod64(row - le64_to_cpu(quad->logStart),
         le32_to_cpu(quad->diff))) == 0) {
             if (span_blk != NULL) {
-                u64 blk, debugBlk;
+                u64 blk;
                 blk = mega_div64_32((row-le64_to_cpu(quad->logStart)), le32_to_cpu(quad->diff));
-                debugBlk = blk;
                 blk = (blk + le64_to_cpu(quad->offsetInSpan)) << raid->stripeShift;
                 *span_blk = blk;
@@ -699,9 +698,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
     __le16 *pDevHandle = &io_info->devHandle;
     u8 *pPdInterface = &io_info->pd_interface;
     u32 logArm, rowMod, armQ, arm;
-    struct fusion_context *fusion;
-    fusion = instance->ctrl_context;
     *pDevHandle = cpu_to_le16(MR_DEVHANDLE_INVALID);
     /*Get row and span from io_info for Uneven Span IO.*/
@@ -801,9 +798,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
     u64 *pdBlock = &io_info->pdBlock;
     __le16 *pDevHandle = &io_info->devHandle;
     u8 *pPdInterface = &io_info->pd_interface;
-    struct fusion_context *fusion;
-    fusion = instance->ctrl_context;
     *pDevHandle = cpu_to_le16(MR_DEVHANDLE_INVALID);
     row = mega_div64_32(stripRow, raid->rowDataSize);


@@ -3044,11 +3044,11 @@ _base_alloc_irq_vectors(struct MPT3SAS_ADAPTER *ioc)
         descp = NULL;
     ioc_info(ioc, " %d %d\n", ioc->high_iops_queues,
-             ioc->msix_vector_count);
+             ioc->reply_queue_count);
     i = pci_alloc_irq_vectors_affinity(ioc->pdev,
         ioc->high_iops_queues,
-        ioc->msix_vector_count, irq_flags, descp);
+        ioc->reply_queue_count, irq_flags, descp);
     return i;
 }
@@ -4242,10 +4242,12 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
 static int
 _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
 {
-    Mpi2FWImageHeader_t *FWImgHdr;
+    Mpi2FWImageHeader_t *fw_img_hdr;
+    Mpi26ComponentImageHeader_t *cmp_img_hdr;
     Mpi25FWUploadRequest_t *mpi_request;
     Mpi2FWUploadReply_t mpi_reply;
     int r = 0;
+    u32 package_version = 0;
     void *fwpkg_data = NULL;
     dma_addr_t fwpkg_data_dma;
     u16 smid, ioc_status;
@@ -4302,14 +4304,26 @@ _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
         ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
                      MPI2_IOCSTATUS_MASK;
         if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
-            FWImgHdr = (Mpi2FWImageHeader_t *)fwpkg_data;
-            if (FWImgHdr->PackageVersion.Word) {
-                ioc_info(ioc, "FW Package Version (%02d.%02d.%02d.%02d)\n",
-                         FWImgHdr->PackageVersion.Struct.Major,
-                         FWImgHdr->PackageVersion.Struct.Minor,
-                         FWImgHdr->PackageVersion.Struct.Unit,
-                         FWImgHdr->PackageVersion.Struct.Dev);
-            }
+            fw_img_hdr = (Mpi2FWImageHeader_t *)fwpkg_data;
+            if (le32_to_cpu(fw_img_hdr->Signature) ==
+                MPI26_IMAGE_HEADER_SIGNATURE0_MPI26) {
+                cmp_img_hdr =
+                    (Mpi26ComponentImageHeader_t *)
+                    (fwpkg_data);
+                package_version =
+                    le32_to_cpu(
+                        cmp_img_hdr->ApplicationSpecific);
+            } else
+                package_version =
+                    le32_to_cpu(
+                        fw_img_hdr->PackageVersion.Word);
+            if (package_version)
+                ioc_info(ioc,
+                    "FW Package Ver(%02d.%02d.%02d.%02d)\n",
+                    ((package_version) & 0xFF000000) >> 24,
+                    ((package_version) & 0x00FF0000) >> 16,
+                    ((package_version) & 0x0000FF00) >> 8,
+                    (package_version) & 0x000000FF);
         } else {
             _debug_dump_mf(&mpi_reply,
                            sizeof(Mpi2FWUploadReply_t)/4);
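The reworked _base_display_fwpkg_version() above prints the firmware package version by mask-and-shift on a single packed 32-bit word, one byte per component, rather than reading the old per-field struct members. The decode in isolation (version value invented for the demonstration):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* e.g. major=32, minor=100, unit=0, dev=1, packed high-to-low */
        uint32_t package_version =
            (32u << 24) | (100u << 16) | (0u << 8) | 1u;

        printf("FW Package Ver(%02d.%02d.%02d.%02d)\n",
               (int)((package_version & 0xFF000000u) >> 24),
               (int)((package_version & 0x00FF0000u) >> 16),
               (int)((package_version & 0x0000FF00u) >> 8),
               (int)(package_version & 0x000000FFu));
        return 0;
    }

The same word is taken either from the legacy PackageVersion field or, for MPI 2.6 component-image headers, from ApplicationSpecific, which is why the decode was centralized on one u32.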


@@ -76,8 +76,8 @@
 #define MPT3SAS_DRIVER_NAME     "mpt3sas"
 #define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
 #define MPT3SAS_DESCRIPTION     "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION  "31.100.00.00"
-#define MPT3SAS_MAJOR_VERSION   31
+#define MPT3SAS_DRIVER_VERSION  "32.100.00.00"
+#define MPT3SAS_MAJOR_VERSION   32
 #define MPT3SAS_MINOR_VERSION   100
 #define MPT3SAS_BUILD_VERSION   0
 #define MPT3SAS_RELEASE_VERSION 00
@@ -303,6 +303,8 @@ struct mpt3sas_nvme_cmd {
 #define MPT3_DIAG_BUFFER_IS_REGISTERED  (0x01)
 #define MPT3_DIAG_BUFFER_IS_RELEASED    (0x02)
 #define MPT3_DIAG_BUFFER_IS_DIAG_RESET  (0x04)
+#define MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED (0x08)
+#define MPT3_DIAG_BUFFER_IS_APP_OWNED (0x10)
 /*
  * HP HBA branding
@@ -391,9 +393,12 @@ struct Mpi2ManufacturingPage11_t {
     u8     Reserved6;          /* 2Fh */
     __le32 Reserved7[7];       /* 30h - 4Bh */
     u8     NVMeAbortTO;        /* 4Ch */
-    u8     Reserved8;          /* 4Dh */
-    u16    Reserved9;          /* 4Eh */
-    __le32 Reserved10[4];      /* 50h - 60h */
+    u8     NumPerDevEvents;    /* 4Dh */
+    u8     HostTraceBufferDecrementSizeKB;  /* 4Eh */
+    u8     HostTraceBufferFlags;            /* 4Fh */
+    u16    HostTraceBufferMaxSizeKB;        /* 50h */
+    u16    HostTraceBufferMinSizeKB;        /* 52h */
+    __le32 Reserved10[2];      /* 54h - 5Bh */
 };
 /**


@ -466,6 +466,13 @@ void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
if ((ioc->diag_buffer_status[i] & if ((ioc->diag_buffer_status[i] &
MPT3_DIAG_BUFFER_IS_RELEASED)) MPT3_DIAG_BUFFER_IS_RELEASED))
continue; continue;
/*
* add a log message to indicate the release
*/
ioc_info(ioc,
"%s: Releasing the trace buffer due to adapter reset.",
__func__);
mpt3sas_send_diag_release(ioc, i, &issue_reset); mpt3sas_send_diag_release(ioc, i, &issue_reset);
} }
} }
@ -778,6 +785,18 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
case MPI2_FUNCTION_NVME_ENCAPSULATED: case MPI2_FUNCTION_NVME_ENCAPSULATED:
{ {
nvme_encap_request = (Mpi26NVMeEncapsulatedRequest_t *)request; nvme_encap_request = (Mpi26NVMeEncapsulatedRequest_t *)request;
if (!ioc->pcie_sg_lookup) {
dtmprintk(ioc, ioc_info(ioc,
"HBA doesn't support NVMe. Rejecting NVMe Encapsulated request.\n"
));
if (ioc->logging_level & MPT_DEBUG_TM)
_debug_dump_mf(nvme_encap_request,
ioc->request_sz/4);
mpt3sas_base_free_smid(ioc, smid);
ret = -EINVAL;
goto out;
}
/* /*
* Get the Physical Address of the sense buffer. * Get the Physical Address of the sense buffer.
* Use Error Response buffer address field to hold the sense * Use Error Response buffer address field to hold the sense
@ -1484,6 +1503,26 @@ _ctl_diag_capability(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type)
return rc; return rc;
} }
/**
* _ctl_diag_get_bufftype - return diag buffer type
* either TRACE, SNAPSHOT, or EXTENDED
* @ioc: per adapter object
* @unique_id: specifies the unique_id for the buffer
*
* returns MPT3_DIAG_UID_NOT_FOUND if the id not found
*/
static u8
_ctl_diag_get_bufftype(struct MPT3SAS_ADAPTER *ioc, u32 unique_id)
{
u8 index;
for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) {
if (ioc->unique_id[index] == unique_id)
return index;
}
return MPT3_DIAG_UID_NOT_FOUND;
}
/** /**
* _ctl_diag_register_2 - wrapper for registering diag buffer support * _ctl_diag_register_2 - wrapper for registering diag buffer support
@@ -1531,11 +1570,88 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
         return -EPERM;
     }

+    if (diag_register->unique_id == 0) {
+        ioc_err(ioc,
+            "%s: Invalid UID(0x%08x), buffer_type(0x%02x)\n", __func__,
+            diag_register->unique_id, buffer_type);
+        return -EINVAL;
+    }
+
+    if ((ioc->diag_buffer_status[buffer_type] &
+        MPT3_DIAG_BUFFER_IS_APP_OWNED) &&
+        !(ioc->diag_buffer_status[buffer_type] &
+        MPT3_DIAG_BUFFER_IS_RELEASED)) {
+        ioc_err(ioc,
+            "%s: buffer_type(0x%02x) is already registered by application with UID(0x%08x)\n",
+            __func__, buffer_type, ioc->unique_id[buffer_type]);
+        return -EINVAL;
+    }
+
     if (ioc->diag_buffer_status[buffer_type] &
         MPT3_DIAG_BUFFER_IS_REGISTERED) {
-        ioc_err(ioc, "%s: already has a registered buffer for buffer_type(0x%02x)\n",
-            __func__, buffer_type);
-        return -EINVAL;
+        /*
+         * If driver posts buffer initially, then an application wants
+         * to Register that buffer (own it) without Releasing first,
+         * the application Register command MUST have the same buffer
+         * type and size in the Register command (obtained from the
+         * Query command). Otherwise that Register command will be
+         * failed. If the application has released the buffer but wants
+         * to re-register it, it should be allowed as long as the
+         * Unique-Id/Size match.
+         */
+        if (ioc->unique_id[buffer_type] == MPT3DIAGBUFFUNIQUEID &&
+            ioc->diag_buffer_sz[buffer_type] ==
+            diag_register->requested_buffer_size) {
+
+            if (!(ioc->diag_buffer_status[buffer_type] &
+                 MPT3_DIAG_BUFFER_IS_RELEASED)) {
+                dctlprintk(ioc, ioc_info(ioc,
+                    "%s: diag_buffer (%d) ownership changed. old-ID(0x%08x), new-ID(0x%08x)\n",
+                    __func__, buffer_type,
+                    ioc->unique_id[buffer_type],
+                    diag_register->unique_id));
+
+                /*
+                 * Application wants to own the buffer with
+                 * the same size.
+                 */
+                ioc->unique_id[buffer_type] =
+                    diag_register->unique_id;
+                rc = 0; /* success */
+                goto out;
+            }
+        } else if (ioc->unique_id[buffer_type] !=
+            MPT3DIAGBUFFUNIQUEID) {
+            if (ioc->unique_id[buffer_type] !=
+                diag_register->unique_id ||
+                ioc->diag_buffer_sz[buffer_type] !=
+                diag_register->requested_buffer_size ||
+                !(ioc->diag_buffer_status[buffer_type] &
+                MPT3_DIAG_BUFFER_IS_RELEASED)) {
+                ioc_err(ioc,
+                    "%s: already has a registered buffer for buffer_type(0x%02x)\n",
+                    __func__, buffer_type);
+                return -EINVAL;
+            }
+        } else {
+            ioc_err(ioc, "%s: already has a registered buffer for buffer_type(0x%02x)\n",
+                __func__, buffer_type);
+            return -EINVAL;
+        }
+    } else if (ioc->diag_buffer_status[buffer_type] &
+        MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED) {
+
+        if (ioc->unique_id[buffer_type] != MPT3DIAGBUFFUNIQUEID ||
+            ioc->diag_buffer_sz[buffer_type] !=
+            diag_register->requested_buffer_size) {
+            ioc_err(ioc,
+                "%s: already a buffer is allocated for buffer_type(0x%02x) of size %d bytes, so please try registering again with same size\n",
+                __func__, buffer_type,
+                ioc->diag_buffer_sz[buffer_type]);
+            return -EINVAL;
+        }
     }

     if (diag_register->requested_buffer_size % 4) {
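The in-code comment above spells out the re-registration policy: an application may take over a driver-posted buffer (driver-owned UID plus matching size), or re-register its own buffer after a release with matching UID and size; everything else is rejected. A simplified, self-contained model of that decision table, under the assumption that "driver-owned" means the slot still holds the default `MPT3DIAGBUFFUNIQUEID`:

```c
#include <stdbool.h>
#include <stdint.h>

#define MPT3DIAGBUFFUNIQUEID 0x4252434D /* driver-owned default UID */

/* Simplified model of the rules in _ctl_diag_register_2(): true means
 * the application Register may proceed (taking over, or re-posting a
 * released buffer), false means it gets -EINVAL. */
static bool may_register(uint32_t cur_uid, uint32_t cur_sz, bool released,
                         uint32_t new_uid, uint32_t new_sz)
{
    if (cur_uid == MPT3DIAGBUFFUNIQUEID && cur_sz == new_sz)
        return true;    /* ownership transfer, or re-post after release */
    if (cur_uid != MPT3DIAGBUFFUNIQUEID)
        return cur_uid == new_uid && cur_sz == new_sz && released;
    return false;       /* driver-owned but the size doesn't match */
}

int main(void)
{
    /* take over a driver-posted 2 MiB buffer with a matching size */
    return may_register(MPT3DIAGBUFFUNIQUEID, 2u << 20, false,
                        0x12345678, 2u << 20) ? 0 : 1;
}
```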
@@ -1560,7 +1676,8 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
         request_data = ioc->diag_buffer[buffer_type];
         request_data_sz = diag_register->requested_buffer_size;
         ioc->unique_id[buffer_type] = diag_register->unique_id;
-        ioc->diag_buffer_status[buffer_type] = 0;
+        ioc->diag_buffer_status[buffer_type] &=
+            MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
         memcpy(ioc->product_specific[buffer_type],
             diag_register->product_specific, MPT3_PRODUCT_SPECIFIC_DWORDS);
         ioc->diagnostic_flags[buffer_type] = diag_register->diagnostic_flags;
@@ -1584,7 +1701,8 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
             ioc_err(ioc, "%s: failed allocating memory for diag buffers, requested size(%d)\n",
                 __func__, request_data_sz);
             mpt3sas_base_free_smid(ioc, smid);
-            return -ENOMEM;
+            rc = -ENOMEM;
+            goto out;
         }
         ioc->diag_buffer[buffer_type] = request_data;
         ioc->diag_buffer_sz[buffer_type] = request_data_sz;
@@ -1649,9 +1767,12 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
  out:
-    if (rc && request_data)
+    if (rc && request_data) {
         dma_free_coherent(&ioc->pdev->dev, request_data_sz,
             request_data, request_data_dma);
+        ioc->diag_buffer_status[buffer_type] &=
+            ~MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
+    }

     ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
     return rc;
@@ -1669,6 +1790,10 @@ void
 mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
 {
     struct mpt3_diag_register diag_register;
+    u32 ret_val;
+    u32 trace_buff_size = ioc->manu_pg11.HostTraceBufferMaxSizeKB<<10;
+    u32 min_trace_buff_size = 0;
+    u32 decr_trace_buff_size = 0;

     memset(&diag_register, 0, sizeof(struct mpt3_diag_register));

@@ -1677,10 +1802,68 @@ mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
         ioc->diag_trigger_master.MasterData =
             (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
         diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
-        /* register for 2MB buffers */
-        diag_register.requested_buffer_size = 2 * (1024 * 1024);
-        diag_register.unique_id = 0x7075900;
-        _ctl_diag_register_2(ioc, &diag_register);
+        diag_register.unique_id =
+            (ioc->hba_mpi_version_belonged == MPI2_VERSION) ?
+            (MPT2DIAGBUFFUNIQUEID):(MPT3DIAGBUFFUNIQUEID);
+
+        if (trace_buff_size != 0) {
+            diag_register.requested_buffer_size = trace_buff_size;
+            min_trace_buff_size =
+                ioc->manu_pg11.HostTraceBufferMinSizeKB<<10;
+            decr_trace_buff_size =
+                ioc->manu_pg11.HostTraceBufferDecrementSizeKB<<10;
+
+            if (min_trace_buff_size > trace_buff_size) {
+                /* The buff size is not set correctly */
+                ioc_err(ioc,
+                    "Min Trace Buff size (%d KB) greater than Max Trace Buff size (%d KB)\n",
+                    min_trace_buff_size>>10,
+                    trace_buff_size>>10);
+                ioc_err(ioc,
+                    "Using zero Min Trace Buff Size\n");
+                min_trace_buff_size = 0;
+            }
+
+            if (decr_trace_buff_size == 0) {
+                /*
+                 * retry the min size if decrement
+                 * is not available.
+                 */
+                decr_trace_buff_size =
+                    trace_buff_size - min_trace_buff_size;
+            }
+        } else {
+            /* register for 2MB buffers */
+            diag_register.requested_buffer_size = 2 * (1024 * 1024);
+        }
+
+        do {
+            ret_val = _ctl_diag_register_2(ioc, &diag_register);
+
+            if (ret_val == -ENOMEM && min_trace_buff_size &&
+                (trace_buff_size - decr_trace_buff_size) >=
+                min_trace_buff_size) {
+                /* adjust the buffer size */
+                trace_buff_size -= decr_trace_buff_size;
+                diag_register.requested_buffer_size =
+                    trace_buff_size;
+            } else
+                break;
+        } while (true);
+
+        if (ret_val == -ENOMEM)
+            ioc_err(ioc,
+                "Cannot allocate trace buffer memory. Last memory tried = %d KB\n",
+                diag_register.requested_buffer_size>>10);
+        else if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE]
+            & MPT3_DIAG_BUFFER_IS_REGISTERED) {
+            ioc_info(ioc, "Trace buffer memory %d KB allocated\n",
+                diag_register.requested_buffer_size>>10);
+
+            if (ioc->hba_mpi_version_belonged != MPI2_VERSION)
+                ioc->diag_buffer_status[
+                    MPI2_DIAG_BUF_TYPE_TRACE] |=
+                    MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
+        }
     }

     if (bits_to_register & 2) {
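The sizing loop above starts at the maximum trace-buffer size from Manufacturing Page 11 and, on -ENOMEM, steps down by the page's decrement until the minimum is reached (or once to the minimum if no decrement is given). A compact standalone model of that retry policy:

```c
#include <stdio.h>

/* Model of the trace-buffer sizing loop: try max_kb, step down by
 * decr_kb on allocation failure, give up below min_kb. */
static int alloc_with_retry(unsigned int max_kb, unsigned int min_kb,
                            unsigned int decr_kb,
                            int (*try_alloc)(unsigned int kb))
{
    unsigned int kb = max_kb;

    if (decr_kb == 0)           /* no decrement: retry the min size once */
        decr_kb = max_kb - min_kb;
    for (;;) {
        if (try_alloc(kb) == 0)
            return (int)kb;     /* success: KB actually allocated */
        if (!min_kb || kb < decr_kb || kb - decr_kb < min_kb)
            return -1;          /* give up: mirrors the -ENOMEM exit */
        kb -= decr_kb;
    }
}

static int try_alloc(unsigned int kb) { return kb > 2048 ? -1 : 0; }

int main(void)
{
    printf("%d\n", alloc_with_retry(4096, 1024, 1024, try_alloc)); /* 2048 */
    return 0;
}
```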
@@ -1723,6 +1906,12 @@ _ctl_diag_register(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
     }

     rc = _ctl_diag_register_2(ioc, &karg);
+
+    if (!rc && (ioc->diag_buffer_status[karg.buffer_type] &
+        MPT3_DIAG_BUFFER_IS_REGISTERED))
+        ioc->diag_buffer_status[karg.buffer_type] |=
+            MPT3_DIAG_BUFFER_IS_APP_OWNED;
+
     return rc;
 }

@@ -1752,7 +1941,13 @@ _ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
     dctlprintk(ioc, ioc_info(ioc, "%s\n",
                  __func__));

-    buffer_type = karg.unique_id & 0x000000ff;
+    buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id);
+    if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) {
+        ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n",
+            __func__, karg.unique_id);
+        return -EINVAL;
+    }
+
     if (!_ctl_diag_capability(ioc, buffer_type)) {
         ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
             __func__, buffer_type);
@@ -1785,12 +1980,21 @@ _ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
         return -ENOMEM;
     }

-    request_data_sz = ioc->diag_buffer_sz[buffer_type];
-    request_data_dma = ioc->diag_buffer_dma[buffer_type];
-    dma_free_coherent(&ioc->pdev->dev, request_data_sz,
-        request_data, request_data_dma);
-    ioc->diag_buffer[buffer_type] = NULL;
-    ioc->diag_buffer_status[buffer_type] = 0;
+    if (ioc->diag_buffer_status[buffer_type] &
+        MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED) {
+        ioc->unique_id[buffer_type] = MPT3DIAGBUFFUNIQUEID;
+        ioc->diag_buffer_status[buffer_type] &=
+            ~MPT3_DIAG_BUFFER_IS_APP_OWNED;
+        ioc->diag_buffer_status[buffer_type] &=
+            ~MPT3_DIAG_BUFFER_IS_REGISTERED;
+    } else {
+        request_data_sz = ioc->diag_buffer_sz[buffer_type];
+        request_data_dma = ioc->diag_buffer_dma[buffer_type];
+        dma_free_coherent(&ioc->pdev->dev, request_data_sz,
+            request_data, request_data_dma);
+        ioc->diag_buffer[buffer_type] = NULL;
+        ioc->diag_buffer_status[buffer_type] = 0;
+    }
     return 0;
 }
@@ -1829,14 +2033,17 @@ _ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
         return -EPERM;
     }

-    if ((ioc->diag_buffer_status[buffer_type] &
-        MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
-        ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
-            __func__, buffer_type);
-        return -EINVAL;
+    if (!(ioc->diag_buffer_status[buffer_type] &
+        MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED)) {
+        if ((ioc->diag_buffer_status[buffer_type] &
+            MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+            ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
+                __func__, buffer_type);
+            return -EINVAL;
+        }
     }

-    if (karg.unique_id & 0xffffff00) {
+    if (karg.unique_id) {
         if (karg.unique_id != ioc->unique_id[buffer_type]) {
             ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
                 __func__, karg.unique_id);
@@ -1851,13 +2058,21 @@ _ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
         return -ENOMEM;
     }

-    if (ioc->diag_buffer_status[buffer_type] & MPT3_DIAG_BUFFER_IS_RELEASED)
-        karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED |
-            MPT3_APP_FLAGS_BUFFER_VALID);
-    else
-        karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED |
-            MPT3_APP_FLAGS_BUFFER_VALID |
-            MPT3_APP_FLAGS_FW_BUFFER_ACCESS);
+    if ((ioc->diag_buffer_status[buffer_type] &
+        MPT3_DIAG_BUFFER_IS_REGISTERED))
+        karg.application_flags |= MPT3_APP_FLAGS_BUFFER_VALID;
+
+    if (!(ioc->diag_buffer_status[buffer_type] &
+        MPT3_DIAG_BUFFER_IS_RELEASED))
+        karg.application_flags |= MPT3_APP_FLAGS_FW_BUFFER_ACCESS;
+
+    if (!(ioc->diag_buffer_status[buffer_type] &
+        MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED))
+        karg.application_flags |= MPT3_APP_FLAGS_DYNAMIC_BUFFER_ALLOC;
+
+    if ((ioc->diag_buffer_status[buffer_type] &
+        MPT3_DIAG_BUFFER_IS_APP_OWNED))
+        karg.application_flags |= MPT3_APP_FLAGS_APP_OWNED;

     for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
         karg.product_specific[i] =
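The query path now derives each `application_flags` bit independently from the buffer's status word, instead of choosing between two fixed flag combinations. A standalone model of the new derivation, using the flag values from the mpt3sas_ctl.h hunk below:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MPT3_APP_FLAGS_APP_OWNED            0x0001
#define MPT3_APP_FLAGS_BUFFER_VALID         0x0002
#define MPT3_APP_FLAGS_FW_BUFFER_ACCESS     0x0004
#define MPT3_APP_FLAGS_DYNAMIC_BUFFER_ALLOC 0x0008

/* Model of _ctl_diag_query()'s per-bit flag derivation. */
static uint32_t query_flags(bool registered, bool released,
                            bool driver_allocated, bool app_owned)
{
    uint32_t flags = 0;

    if (registered)
        flags |= MPT3_APP_FLAGS_BUFFER_VALID;
    if (!released)
        flags |= MPT3_APP_FLAGS_FW_BUFFER_ACCESS;
    if (!driver_allocated)
        flags |= MPT3_APP_FLAGS_DYNAMIC_BUFFER_ALLOC;
    if (app_owned)
        flags |= MPT3_APP_FLAGS_APP_OWNED;
    return flags;
}

int main(void)
{
    /* driver-posted TRACE buffer, registered, not yet released */
    printf("0x%x\n", query_flags(true, false, true, false)); /* 0x6 */
    return 0;
}
```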
@@ -2002,7 +2217,13 @@ _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
     dctlprintk(ioc, ioc_info(ioc, "%s\n",
                  __func__));

-    buffer_type = karg.unique_id & 0x000000ff;
+    buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id);
+    if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) {
+        ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n",
+            __func__, karg.unique_id);
+        return -EINVAL;
+    }
+
     if (!_ctl_diag_capability(ioc, buffer_type)) {
         ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
             __func__, buffer_type);
@@ -2026,7 +2247,7 @@ _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
         MPT3_DIAG_BUFFER_IS_RELEASED) {
         ioc_err(ioc, "%s: buffer_type(0x%02x) is already released\n",
             __func__, buffer_type);
-        return 0;
+        return -EINVAL;
     }

     request_data = ioc->diag_buffer[buffer_type];
@@ -2086,7 +2307,13 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
     dctlprintk(ioc, ioc_info(ioc, "%s\n",
                  __func__));

-    buffer_type = karg.unique_id & 0x000000ff;
+    buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id);
+    if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) {
+        ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n",
+            __func__, karg.unique_id);
+        return -EINVAL;
+    }
+
     if (!_ctl_diag_capability(ioc, buffer_type)) {
         ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
             __func__, buffer_type);
@@ -2210,6 +2437,8 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
     if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
         ioc->diag_buffer_status[buffer_type] |=
             MPT3_DIAG_BUFFER_IS_REGISTERED;
+        ioc->diag_buffer_status[buffer_type] &=
+            ~MPT3_DIAG_BUFFER_IS_RELEASED;
         dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__));
     } else {
         ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
@@ -3130,10 +3359,49 @@ host_trace_buffer_enable_store(struct device *cdev,
         memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
         ioc_info(ioc, "posting host trace buffers\n");
         diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
-        diag_register.requested_buffer_size = (1024 * 1024);
-        diag_register.unique_id = 0x7075900;
+
+        if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0 &&
+            ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE] != 0) {
+            /* post the same buffer allocated previously */
+            diag_register.requested_buffer_size =
+                ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE];
+        } else {
+            /*
+             * Free the diag buffer memory which was previously
+             * allocated by an application.
+             */
+            if ((ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE] != 0)
+                &&
+                (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+                MPT3_DIAG_BUFFER_IS_APP_OWNED)) {
+                pci_free_consistent(ioc->pdev,
+                    ioc->diag_buffer_sz[
+                    MPI2_DIAG_BUF_TYPE_TRACE],
+                    ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE],
+                    ioc->diag_buffer_dma[
+                    MPI2_DIAG_BUF_TYPE_TRACE]);
+                ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE] =
+                    NULL;
+            }
+
+            diag_register.requested_buffer_size = (1024 * 1024);
+        }
+
+        diag_register.unique_id =
+            (ioc->hba_mpi_version_belonged == MPI2_VERSION) ?
+            (MPT2DIAGBUFFUNIQUEID):(MPT3DIAGBUFFUNIQUEID);
         ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] = 0;
         _ctl_diag_register_2(ioc, &diag_register);
+
+        if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+            MPT3_DIAG_BUFFER_IS_REGISTERED) {
+            ioc_info(ioc,
+                "Trace buffer %d KB allocated through sysfs\n",
+                diag_register.requested_buffer_size>>10);
+
+            if (ioc->hba_mpi_version_belonged != MPI2_VERSION)
+                ioc->diag_buffer_status[
+                    MPI2_DIAG_BUF_TYPE_TRACE] |=
+                    MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
+        }
     } else if (!strcmp(str, "release")) {
         /* exit out if host buffers are already released */
         if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE])
@@ -3702,12 +3970,6 @@ mpt3sas_ctl_exit(ushort hbas_to_enumerate)
         for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
             if (!ioc->diag_buffer[i])
                 continue;
-            if (!(ioc->diag_buffer_status[i] &
-                MPT3_DIAG_BUFFER_IS_REGISTERED))
-                continue;
-            if ((ioc->diag_buffer_status[i] &
-                MPT3_DIAG_BUFFER_IS_RELEASED))
-                continue;
             dma_free_coherent(&ioc->pdev->dev,
                 ioc->diag_buffer_sz[i],
                 ioc->diag_buffer[i],

View File

@@ -95,6 +95,14 @@
 #define MPT3DIAGREADBUFFER _IOWR(MPT3_MAGIC_NUMBER, 30, \
     struct mpt3_diag_read_buffer)

+/* Trace Buffer default UniqueId */
+#define MPT2DIAGBUFFUNIQUEID (0x07075900)
+#define MPT3DIAGBUFFUNIQUEID (0x4252434D)
+
+/* UID not found */
+#define MPT3_DIAG_UID_NOT_FOUND (0xFF)
+
 /**
  * struct mpt3_ioctl_header - main header structure
  * @ioc_number - IOC unit number
@@ -310,6 +318,7 @@ struct mpt3_ioctl_btdh_mapping {
 #define MPT3_APP_FLAGS_APP_OWNED        (0x0001)
 #define MPT3_APP_FLAGS_BUFFER_VALID     (0x0002)
 #define MPT3_APP_FLAGS_FW_BUFFER_ACCESS (0x0004)
+#define MPT3_APP_FLAGS_DYNAMIC_BUFFER_ALLOC (0x0008)

 /* flags for mpt3_diag_read_buffer */
 #define MPT3_FLAGS_REREGISTER (0x0001)

View File

@@ -5161,7 +5161,7 @@ _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
         /* insert into event log */
         sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
             sizeof(Mpi2EventDataSasDeviceStatusChange_t);
-        event_reply = kzalloc(sz, GFP_KERNEL);
+        event_reply = kzalloc(sz, GFP_ATOMIC);
         if (!event_reply) {
             ioc_err(ioc, "failure at %s:%d/%s()!\n",
                 __FILE__, __LINE__, __func__);
@@ -10193,6 +10193,8 @@ scsih_scan_start(struct Scsi_Host *shost)
     int rc;
     if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
         mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);
+    else if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0)
+        mpt3sas_enable_diag_buffer(ioc, 1);

     if (disable_discovery > 0)
         return;

View File

@@ -113,15 +113,21 @@ mpt3sas_process_trigger_data(struct MPT3SAS_ADAPTER *ioc,
     struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
 {
     u8 issue_reset = 0;
+    u32 *trig_data = (u32 *)&event_data->u.master;

     dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));

     /* release the diag buffer trace */
     if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
         MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
-        dTriggerDiagPrintk(ioc,
-            ioc_info(ioc, "%s: release trace diag buffer\n",
-                 __func__));
+        /*
+         * add a log message so that user knows which event caused
+         * the release
+         */
+        ioc_info(ioc,
+            "%s: Releasing the trace buffer. Trigger_Type 0x%08x, Data[0] 0x%08x, Data[1] 0x%08x\n",
+            __func__, event_data->trigger_type,
+            trig_data[0], trig_data[1]);
         mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE,
             &issue_reset);
     }

View File

@@ -1541,7 +1541,7 @@ out:

 int mvs_abort_task_set(struct domain_device *dev, u8 *lun)
 {
-    int rc = TMF_RESP_FUNC_FAILED;
+    int rc;
     struct mvs_tmf_task tmf_task;

     tmf_task.tmf = TMF_ABORT_TASK_SET;

View File

@@ -1722,7 +1722,7 @@ struct ncb {
     **  Miscellaneous configuration and status parameters.
     **----------------------------------------------------------------
     */
-    u_char      disc;       /* Diconnection allowed     */
+    u_char      disc;       /* Disconnection allowed    */
     u_char      scsi_mode;  /* Current SCSI BUS mode    */
     u_char      order;      /* Tag order to use         */
     u_char      verbose;    /* Verbosity for this controller*/

View File

@@ -1542,7 +1542,7 @@ static void nsp32_scsi_done(struct scsi_cmnd *SCpnt)
  * with ACK reply when below condition is matched:
  * MsgIn 00: Command Complete.
  * MsgIn 02: Save Data Pointer.
- * MsgIn 04: Diconnect.
+ * MsgIn 04: Disconnect.
  * In other case, unexpected BUSFREE is detected.
  */
 static int nsp32_busfree_occur(struct scsi_cmnd *SCpnt, unsigned short execph)

View File

@@ -32,7 +32,7 @@ config PCMCIA_FDOMAIN

 config PCMCIA_NINJA_SCSI
     tristate "NinjaSCSI-3 / NinjaSCSI-32Bi (16bit) PCMCIA support"
-    depends on !64BIT
+    depends on !64BIT || COMPILE_TEST
     help
       If you intend to attach this type of PCMCIA SCSI host adapter to
       your computer, say Y here and read

View File

@@ -56,9 +56,7 @@
 MODULE_AUTHOR("YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp>");
 MODULE_DESCRIPTION("WorkBit NinjaSCSI-3 / NinjaSCSI-32Bi(16bit) PCMCIA SCSI host adapter module");
 MODULE_SUPPORTED_DEVICE("sd,sr,sg,st");
-#ifdef MODULE_LICENSE
 MODULE_LICENSE("GPL");
-#endif

 #include "nsp_io.h"

View File

@@ -69,6 +69,25 @@ static ssize_t pm8001_ctl_mpi_interface_rev_show(struct device *cdev,
 static
 DEVICE_ATTR(interface_rev, S_IRUGO, pm8001_ctl_mpi_interface_rev_show, NULL);

+/**
+ * controller_fatal_error_show - check controller is under fatal err
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read only' shost attribute.
+ */
+static ssize_t controller_fatal_error_show(struct device *cdev,
+        struct device_attribute *attr, char *buf)
+{
+    struct Scsi_Host *shost = class_to_shost(cdev);
+    struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+    struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+
+    return snprintf(buf, PAGE_SIZE, "%d\n",
+            pm8001_ha->controller_fatal_error);
+}
+static DEVICE_ATTR_RO(controller_fatal_error);
+
 /**
  * pm8001_ctl_fw_version_show - firmware version
  * @cdev: pointer to embedded class device
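`DEVICE_ATTR_RO(controller_fatal_error)` is the stock kernel shorthand: it pairs a `<name>_show` callback with a read-only attribute object named `dev_attr_<name>`, which is why the next hunk can add `&dev_attr_controller_fatal_error` to the attribute table without a separate declaration. A userspace model of that expansion (the struct layout here is a simplified stand-in, not the real kernel definition):

```c
#include <stdio.h>

struct device;
struct device_attribute {
    const char *name;
    unsigned int mode;
    long (*show)(struct device *dev, char *buf);
};

static long controller_fatal_error_show(struct device *dev, char *buf)
{
    (void)dev; (void)buf;
    return 0; /* the real handler snprintf()s the flag into buf */
}

/* Model of DEVICE_ATTR_RO(): stitch "<name>_show" into a 0444
 * attribute object named dev_attr_<name>. */
#define DEVICE_ATTR_RO(_name) \
    struct device_attribute dev_attr_##_name = \
        { #_name, 0444, _name##_show }

static DEVICE_ATTR_RO(controller_fatal_error);

int main(void)
{
    printf("%s\n", dev_attr_controller_fatal_error.name);
    return 0;
}
```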
@@ -804,6 +823,7 @@ static DEVICE_ATTR(update_fw, S_IRUGO|S_IWUSR|S_IWGRP,
     pm8001_show_update_fw, pm8001_store_update_fw);
 struct device_attribute *pm8001_host_attrs[] = {
     &dev_attr_interface_rev,
+    &dev_attr_controller_fatal_error,
     &dev_attr_fw_version,
     &dev_attr_update_fw,
     &dev_attr_aap_log,

View File

@@ -1336,10 +1336,13 @@ int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ,
  * @circularQ: the inbound queue we want to transfer to HBA.
  * @opCode: the operation code represents commands which LLDD and fw recognized.
  * @payload: the command payload of each operation command.
+ * @nb: size in bytes of the command payload
+ * @responseQueue: queue to interrupt on w/ command response (if any)
  */
 int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
              struct inbound_queue_table *circularQ,
-             u32 opCode, void *payload, u32 responseQueue)
+             u32 opCode, void *payload, size_t nb,
+             u32 responseQueue)
 {
     u32 Header = 0, hpriority = 0, bc = 1, category = 0x02;
     void *pMessage;
@@ -1350,10 +1353,13 @@ int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
             pm8001_printk("No free mpi buffer\n"));
         return -ENOMEM;
     }
-    BUG_ON(!payload);
-    /*Copy to the payload*/
-    memcpy(pMessage, payload, (pm8001_ha->iomb_size -
-                sizeof(struct mpi_msg_hdr)));
+
+    if (nb > (pm8001_ha->iomb_size - sizeof(struct mpi_msg_hdr)))
+        nb = pm8001_ha->iomb_size - sizeof(struct mpi_msg_hdr);
+    memcpy(pMessage, payload, nb);
+    if (nb + sizeof(struct mpi_msg_hdr) < pm8001_ha->iomb_size)
+        memset(pMessage + nb, 0, pm8001_ha->iomb_size -
+                (nb + sizeof(struct mpi_msg_hdr)));

     /*Build the header*/
     Header = ((1 << 31) | (hpriority << 30) | ((bc & 0x1f) << 24)
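The new `nb` parameter fixes the old behaviour of always copying a full IOMB's worth of bytes from the caller's (often smaller) payload, which read past the end of on-stack command structures. The copy is now clamped to the slot, and the slack after the payload is zeroed so stale bytes never reach the firmware. A standalone model of the pattern, with `IOMB_SIZE`/`HDR_SIZE` as illustrative stand-ins for `pm8001_ha->iomb_size` and `sizeof(struct mpi_msg_hdr)`:

```c
#include <stdint.h>
#include <string.h>

#define IOMB_SIZE 64                 /* stand-in slot size */
#define HDR_SIZE  sizeof(uint32_t)   /* stand-in for the MPI header */

/* Model of the clamped copy + zero-pad in pm8001_mpi_build_cmd(). */
static void build_cmd(uint8_t *slot, const void *payload, size_t nb)
{
    if (nb > IOMB_SIZE - HDR_SIZE)
        nb = IOMB_SIZE - HDR_SIZE;
    memcpy(slot, payload, nb);
    if (nb + HDR_SIZE < IOMB_SIZE)
        memset(slot + nb, 0, IOMB_SIZE - (nb + HDR_SIZE));
}

int main(void)
{
    uint8_t slot[IOMB_SIZE];
    const char payload[] = "short";

    memset(slot, 0xFF, sizeof(slot)); /* pretend stale contents */
    build_cmd(slot, payload, sizeof(payload));
    return 0;
}
```

This is why every call site below gains a `sizeof(...)` argument: the callee can no longer guess the payload length.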
@@ -1364,7 +1370,7 @@ int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
     /*Update the PI to the firmware*/
     pm8001_cw32(pm8001_ha, circularQ->pi_pci_bar,
         circularQ->pi_offset, circularQ->producer_idx);
-    PM8001_IO_DBG(pm8001_ha,
+    PM8001_DEVIO_DBG(pm8001_ha,
         pm8001_printk("INB Q %x OPCODE:%x , UPDATED PI=%d CI=%d\n",
             responseQueue, opCode, circularQ->producer_idx,
             circularQ->consumer_index));
@@ -1436,6 +1442,10 @@ u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
             /* read header */
             header_tmp = pm8001_read_32(msgHeader);
             msgHeader_tmp = cpu_to_le32(header_tmp);
+            PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
+                "outbound opcode msgheader:%x ci=%d pi=%d\n",
+                msgHeader_tmp, circularQ->consumer_idx,
+                circularQ->producer_index));
             if (0 != (le32_to_cpu(msgHeader_tmp) & 0x80000000)) {
                 if (OPC_OUB_SKIP_ENTRY !=
                     (le32_to_cpu(msgHeader_tmp) & 0xfff)) {
@@ -1604,7 +1614,8 @@ void pm8001_work_fn(struct work_struct *work)
             break;
         default:
-            pm8001_printk("...query task failed!!!\n");
+            PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
+                "...query task failed!!!\n"));
             break;
         });
@@ -1758,7 +1769,8 @@ static void pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha,
     task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
     task_abort.tag = cpu_to_le32(ccb_tag);

-    ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0);
+    ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort,
+        sizeof(task_abort), 0);
     if (ret)
         pm8001_tag_free(pm8001_ha, ccb_tag);
@@ -1831,7 +1843,8 @@ static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha,
     sata_cmd.ncqtag_atap_dir_m |= ((0x1 << 7) | (0x5 << 9));
     memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis));

-    res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0);
+    res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd,
+        sizeof(sata_cmd), 0);
     if (res) {
         sas_free_task(task);
         pm8001_tag_free(pm8001_ha, ccb_tag);
@@ -1890,6 +1903,11 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
             pm8001_printk("SAS Address of IO Failure Drive:"
                 "%016llx", SAS_ADDR(t->dev->sas_addr)));

+    if (status)
+        PM8001_IOERR_DBG(pm8001_ha, pm8001_printk(
+            "status:0x%x, tag:0x%x, task:0x%p\n",
+            status, tag, t));
+
     switch (status) {
     case IO_SUCCESS:
         PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS"
@@ -2072,7 +2090,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
         ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
         break;
     default:
-        PM8001_IO_DBG(pm8001_ha,
+        PM8001_DEVIO_DBG(pm8001_ha,
             pm8001_printk("Unknown status 0x%x\n", status));
         /* not allowed case. Therefore, return failed status */
         ts->resp = SAS_TASK_COMPLETE;
@@ -2125,7 +2143,7 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
     if (unlikely(!t || !t->lldd_task || !t->dev))
         return;
     ts = &t->task_status;
-    PM8001_IO_DBG(pm8001_ha,
+    PM8001_DEVIO_DBG(pm8001_ha,
         pm8001_printk("port_id = %x,device_id = %x\n",
             port_id, dev_id));
     switch (event) {
@@ -2263,7 +2281,7 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
             pm8001_printk(" IO_XFER_CMD_FRAME_ISSUED\n"));
         return;
     default:
-        PM8001_IO_DBG(pm8001_ha,
+        PM8001_DEVIO_DBG(pm8001_ha,
             pm8001_printk("Unknown status 0x%x\n", event));
         /* not allowed case. Therefore, return failed status */
         ts->resp = SAS_TASK_COMPLETE;
@@ -2352,6 +2370,12 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
             pm8001_printk("ts null\n"));
         return;
     }
+
+    if (status)
+        PM8001_IOERR_DBG(pm8001_ha, pm8001_printk(
+            "status:0x%x, tag:0x%x, task::0x%p\n",
+            status, tag, t));
+
     /* Print sas address of IO failed device */
     if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
         (status != IO_UNDERFLOW)) {
@@ -2652,7 +2676,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
         ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
         break;
     default:
-        PM8001_IO_DBG(pm8001_ha,
+        PM8001_DEVIO_DBG(pm8001_ha,
             pm8001_printk("Unknown status 0x%x\n", status));
         /* not allowed case. Therefore, return failed status */
         ts->resp = SAS_TASK_COMPLETE;
@@ -2723,7 +2747,7 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
     if (unlikely(!t || !t->lldd_task || !t->dev))
         return;
     ts = &t->task_status;
-    PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+    PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
         "port_id:0x%x, device_id:0x%x, tag:0x%x, event:0x%x\n",
         port_id, dev_id, tag, event));
     switch (event) {
@@ -2872,7 +2896,7 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
         ts->stat = SAS_OPEN_TO;
         break;
     default:
-        PM8001_IO_DBG(pm8001_ha,
+        PM8001_DEVIO_DBG(pm8001_ha,
             pm8001_printk("Unknown status 0x%x\n", event));
         /* not allowed case. Therefore, return failed status */
         ts->resp = SAS_TASK_COMPLETE;
@@ -2917,9 +2941,13 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
     t = ccb->task;
     ts = &t->task_status;
     pm8001_dev = ccb->device;
-    if (status)
+    if (status) {
         PM8001_FAIL_DBG(pm8001_ha,
             pm8001_printk("smp IO status 0x%x\n", status));
+        PM8001_IOERR_DBG(pm8001_ha,
+            pm8001_printk("status:0x%x, tag:0x%x, task:0x%p\n",
+                status, tag, t));
+    }

     if (unlikely(!t || !t->lldd_task || !t->dev))
         return;
@@ -3070,7 +3098,7 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
         ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
         break;
     default:
-        PM8001_IO_DBG(pm8001_ha,
+        PM8001_DEVIO_DBG(pm8001_ha,
             pm8001_printk("Unknown status 0x%x\n", status));
         ts->resp = SAS_TASK_COMPLETE;
         ts->stat = SAS_DEV_NO_RESPONSE;
@@ -3355,7 +3383,8 @@ static void pm8001_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha,
         ((phyId & 0x0F) << 4) | (port_id & 0x0F));
     payload.param0 = cpu_to_le32(param0);
     payload.param1 = cpu_to_le32(param1);
-    pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+    pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+        sizeof(payload), 0);
 }

 static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
@@ -3416,7 +3445,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
         pm8001_get_lrate_mode(phy, link_rate);
         break;
     default:
-        PM8001_MSG_DBG(pm8001_ha,
+        PM8001_DEVIO_DBG(pm8001_ha,
             pm8001_printk("unknown device type(%x)\n", deviceType));
         break;
     }
@@ -3463,7 +3492,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
     struct sas_ha_struct *sas_ha = pm8001_ha->sas;
     struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
     unsigned long flags;
-    PM8001_MSG_DBG(pm8001_ha,
+    PM8001_DEVIO_DBG(pm8001_ha,
         pm8001_printk("HW_EVENT_SATA_PHY_UP port id = %d,"
             " phy id = %d\n", port_id, phy_id));
     port->port_state = portstate;
@@ -3541,7 +3570,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
         break;
     default:
         port->port_attached = 0;
-        PM8001_MSG_DBG(pm8001_ha,
+        PM8001_DEVIO_DBG(pm8001_ha,
             pm8001_printk(" phy Down and(default) = %x\n",
                 portstate));
         break;
@@ -3689,7 +3718,7 @@ int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha,
             pm8001_printk(": FLASH_UPDATE_DISABLED\n"));
         break;
     default:
-        PM8001_MSG_DBG(pm8001_ha,
+        PM8001_DEVIO_DBG(pm8001_ha,
             pm8001_printk("No matched status = %d\n", status));
         break;
     }
@@ -3805,8 +3834,9 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
     struct sas_ha_struct *sas_ha = pm8001_ha->sas;
     struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
     struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
-    PM8001_MSG_DBG(pm8001_ha,
-        pm8001_printk("outbound queue HW event & event type : "));
+    PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
+        "SPC HW event for portid:%d, phyid:%d, event:%x, status:%x\n",
+        port_id, phy_id, eventType, status));
     switch (eventType) {
     case HW_EVENT_PHY_START_STATUS:
         PM8001_MSG_DBG(pm8001_ha,
@@ -3990,7 +4020,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
             pm8001_printk("EVENT_BROADCAST_ASYNCH_EVENT\n"));
         break;
     default:
-        PM8001_MSG_DBG(pm8001_ha,
+        PM8001_DEVIO_DBG(pm8001_ha,
             pm8001_printk("Unknown event type = %x\n", eventType));
         break;
     }
@@ -4161,7 +4191,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
             pm8001_printk("OPC_OUB_SAS_RE_INITIALIZE\n"));
         break;
     default:
-        PM8001_MSG_DBG(pm8001_ha,
+        PM8001_DEVIO_DBG(pm8001_ha,
             pm8001_printk("Unknown outbound Queue IOMB OPC = %x\n",
                 opc));
         break;
@@ -4284,7 +4314,7 @@ static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
         cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
     build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, &smp_cmd);
     rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
-        (u32 *)&smp_cmd, 0);
+        &smp_cmd, sizeof(smp_cmd), 0);
     if (rc)
         goto err_out_2;

@@ -4352,7 +4382,8 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
         ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
         ssp_cmd.esgl = 0;
     }
-    ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd, 0);
+    ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd,
+        sizeof(ssp_cmd), 0);
     return ret;
 }

@@ -4461,7 +4492,8 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
         }
     }
-    ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0);
+    ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd,
+        sizeof(sata_cmd), 0);
     return ret;
 }
@@ -4496,7 +4528,8 @@ pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
     memcpy(payload.sas_identify.sas_addr,
         pm8001_ha->sas_addr, SAS_ADDR_SIZE);
     payload.sas_identify.phy_id = phy_id;
-    ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
+    ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload,
+        sizeof(payload), 0);
     return ret;
 }

@@ -4518,7 +4551,8 @@ static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
     memset(&payload, 0, sizeof(payload));
     payload.tag = cpu_to_le32(tag);
     payload.phy_id = cpu_to_le32(phy_id);
-    ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
+    ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload,
+        sizeof(payload), 0);
     return ret;
 }

@@ -4577,7 +4611,8 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
         cpu_to_le32(ITNT | (firstBurstSize * 0x10000));
     memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
         SAS_ADDR_SIZE);
-    rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+    rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+        sizeof(payload), 0);
     return rc;
 }

@@ -4598,7 +4633,8 @@ int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha,
     payload.device_id = cpu_to_le32(device_id);
     PM8001_MSG_DBG(pm8001_ha,
         pm8001_printk("unregister device device_id = %d\n", device_id));
-    ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+    ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+        sizeof(payload), 0);
     return ret;
 }

@@ -4621,7 +4657,8 @@ static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
     payload.tag = cpu_to_le32(1);
     payload.phyop_phyid =
         cpu_to_le32(((phy_op & 0xff) << 8) | (phyId & 0x0F));
-    ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+    ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+        sizeof(payload), 0);
     return ret;
 }

@@ -4649,6 +4686,9 @@ static irqreturn_t
 pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec)
 {
     pm8001_chip_interrupt_disable(pm8001_ha, vec);
+    PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
+        "irq vec %d, ODMR:0x%x\n",
+        vec, pm8001_cr32(pm8001_ha, 0, 0x30)));
     process_oq(pm8001_ha, vec);
     pm8001_chip_interrupt_enable(pm8001_ha, vec);
     return IRQ_HANDLED;
@@ -4672,7 +4712,8 @@ static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc,
         task_abort.device_id = cpu_to_le32(dev_id);
         task_abort.tag = cpu_to_le32(cmd_tag);
     }
-    ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0);
+    ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort,
+        sizeof(task_abort), 0);
     return ret;
 }

@@ -4729,7 +4770,8 @@ int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
     if (pm8001_ha->chip_id != chip_8001)
         sspTMCmd.ds_ads_m = 0x08;
     circularQ = &pm8001_ha->inbnd_q_tbl[0];
-    ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd, 0);
+    ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd,
+        sizeof(sspTMCmd), 0);
     return ret;
 }
@@ -4819,7 +4861,8 @@ int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
     default:
         break;
     }
-    rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0);
+    rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req,
+        sizeof(nvmd_req), 0);
     if (rc) {
         kfree(fw_control_context);
         pm8001_tag_free(pm8001_ha, tag);
@@ -4903,7 +4946,8 @@ int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
     default:
         break;
     }
-    rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0);
+    rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req,
+        sizeof(nvmd_req), 0);
     if (rc) {
         kfree(fw_control_context);
         pm8001_tag_free(pm8001_ha, tag);
@@ -4938,7 +4982,8 @@ pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
         cpu_to_le32(lower_32_bits(le64_to_cpu(info->sgl.addr)));
     payload.sgl_addr_hi =
         cpu_to_le32(upper_32_bits(le64_to_cpu(info->sgl.addr)));
-    ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+    ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+        sizeof(payload), 0);
     return ret;
 }

@@ -4960,6 +5005,8 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
     if (!fw_control_context)
         return -ENOMEM;
     fw_control = (struct fw_control_info *)&ioctl_payload->func_specific;
+    PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
+        "dma fw_control context input length :%x\n", fw_control->len));
     memcpy(buffer, fw_control->buffer, fw_control->len);
     flash_update_info.sgl.addr = cpu_to_le64(phys_addr);
     flash_update_info.sgl.im_len.len = cpu_to_le32(fw_control->len);
@@ -5083,7 +5130,8 @@ pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
     payload.tag = cpu_to_le32(tag);
     payload.device_id = cpu_to_le32(pm8001_dev->device_id);
     payload.nds = cpu_to_le32(state);
-    rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+    rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+        sizeof(payload), 0);
     return rc;
 }

@@ -5108,7 +5156,8 @@ pm8001_chip_sas_re_initialization(struct pm8001_hba_info *pm8001_ha)
     payload.SSAHOLT = cpu_to_le32(0xd << 25);
     payload.sata_hol_tmo = cpu_to_le32(80);
     payload.open_reject_cmdretries_data_retries = cpu_to_le32(0xff00ff);
-    rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+    rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+        sizeof(payload), 0);
     if (rc)
         pm8001_tag_free(pm8001_ha, tag);
     return rc;

View File

@@ -41,6 +41,19 @@
 #include <linux/slab.h>
 #include "pm8001_sas.h"
 #include "pm8001_chips.h"
+#include "pm80xx_hwi.h"
+
+static ulong logging_level = PM8001_FAIL_LOGGING | PM8001_IOERR_LOGGING;
+module_param(logging_level, ulong, 0644);
+MODULE_PARM_DESC(logging_level, " bits for enabling logging info.");
+
+static ulong link_rate = LINKRATE_15 | LINKRATE_30 | LINKRATE_60 | LINKRATE_120;
+module_param(link_rate, ulong, 0644);
+MODULE_PARM_DESC(link_rate, "Enable link rate.\n"
+        " 1: Link rate 1.5G\n"
+        " 2: Link rate 3.0G\n"
+        " 4: Link rate 6.0G\n"
+        " 8: Link rate 12.0G\n");

 static struct scsi_transport_template *pm8001_stt;

@@ -432,7 +445,7 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
         } else {
             pm8001_ha->io_mem[logicalBar].membase = 0;
             pm8001_ha->io_mem[logicalBar].memsize = 0;
-            pm8001_ha->io_mem[logicalBar].memvirtaddr = 0;
+            pm8001_ha->io_mem[logicalBar].memvirtaddr = NULL;
         }
         logicalBar++;
     }
@@ -466,7 +479,15 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
     pm8001_ha->sas = sha;
     pm8001_ha->shost = shost;
     pm8001_ha->id = pm8001_id++;
-    pm8001_ha->logging_level = 0x01;
+    pm8001_ha->logging_level = logging_level;
+    if (link_rate >= 1 && link_rate <= 15)
+        pm8001_ha->link_rate = (link_rate << 8);
+    else {
+        pm8001_ha->link_rate = LINKRATE_15 | LINKRATE_30 |
+            LINKRATE_60 | LINKRATE_120;
+        PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+            "Setting link rate to default value\n"));
+    }
     sprintf(pm8001_ha->name, "%s%d", DRV_NAME, pm8001_ha->id);
     /* IOMB size is 128 for 8088/89 controllers */
     if (pm8001_ha->chip_id != chip_8001)
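Both parameters use mode 0644, so besides `modprobe` options they are writable at runtime under `/sys/module/<module>/parameters/` (the module name is presumably `pm80xx`; the per-HBA copies are only sampled at probe time). A standalone model of the `link_rate` clamping, where the `LINKRATE_*` values are assumptions chosen to be consistent with the `(link_rate << 8)` shift in the hunk:

```c
#include <stdio.h>

#define LINKRATE_15  (0x01 << 8)   /* assumed values, matching (x << 8) */
#define LINKRATE_30  (0x02 << 8)
#define LINKRATE_60  (0x04 << 8)
#define LINKRATE_120 (0x08 << 8)

/* Model of the link_rate validation in pm8001_pci_alloc(): a 1..15
 * bitmask is shifted into the hardware field; anything else falls back
 * to "all rates enabled". */
static unsigned int clamp_link_rate(unsigned long link_rate)
{
    if (link_rate >= 1 && link_rate <= 15)
        return (unsigned int)(link_rate << 8);
    return LINKRATE_15 | LINKRATE_30 | LINKRATE_60 | LINKRATE_120;
}

int main(void)
{
    printf("0x%x\n", clamp_link_rate(4));  /* 0x400: 6.0G only */
    printf("0x%x\n", clamp_link_rate(0));  /* 0xf00: default, all rates */
    return 0;
}
```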
@@ -873,7 +894,6 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
     u32 number_of_intr;
     int flag = 0;
     int rc;
-    static char intr_drvname[PM8001_MAX_MSIX_VEC][sizeof(DRV_NAME)+3];

     /* SPCv controllers supports 64 msi-x */
     if (pm8001_ha->chip_id == chip_8001) {
@@ -894,14 +914,16 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
         rc, pm8001_ha->number_of_intr));

     for (i = 0; i < number_of_intr; i++) {
-        snprintf(intr_drvname[i], sizeof(intr_drvname[0]),
-            DRV_NAME"%d", i);
+        snprintf(pm8001_ha->intr_drvname[i],
+            sizeof(pm8001_ha->intr_drvname[0]),
+            "%s-%d", pm8001_ha->name, i);
         pm8001_ha->irq_vector[i].irq_id = i;
         pm8001_ha->irq_vector[i].drv_inst = pm8001_ha;

         rc = request_irq(pci_irq_vector(pm8001_ha->pdev, i),
             pm8001_interrupt_handler_msix, flag,
-            intr_drvname[i], &(pm8001_ha->irq_vector[i]));
+            pm8001_ha->intr_drvname[i],
+            &(pm8001_ha->irq_vector[i]));
         if (rc) {
             for (j = 0; j < i; j++) {
                 free_irq(pci_irq_vector(pm8001_ha->pdev, i),
@@ -942,7 +964,7 @@ intx:
     pm8001_ha->irq_vector[0].irq_id = 0;
     pm8001_ha->irq_vector[0].drv_inst = pm8001_ha;
     rc = request_irq(pdev->irq, pm8001_interrupt_handler_intx, IRQF_SHARED,
-        DRV_NAME, SHOST_TO_SAS_HA(pm8001_ha->shost));
+        pm8001_ha->name, SHOST_TO_SAS_HA(pm8001_ha->shost));
     return rc;
 }

View File

@@ -119,7 +119,7 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
     mem_virt_alloc = dma_alloc_coherent(&pdev->dev, mem_size + align,
             &mem_dma_handle, GFP_KERNEL);
     if (!mem_virt_alloc) {
-        pm8001_printk("memory allocation error\n");
+        pr_err("pm80xx: memory allocation error\n");
         return -1;
     }
     *pphys_addr = mem_dma_handle;
@@ -249,6 +249,8 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
         spin_unlock_irqrestore(&pm8001_ha->lock, flags);
         return 0;
     default:
+        PM8001_DEVIO_DBG(pm8001_ha,
+            pm8001_printk("func 0x%x\n", func));
         rc = -EOPNOTSUPP;
     }
     msleep(300);
@@ -384,8 +386,9 @@ static int pm8001_task_exec(struct sas_task *task,
     struct pm8001_port *port = NULL;
     struct sas_task *t = task;
     struct pm8001_ccb_info *ccb;
-    u32 tag = 0xdeadbeef, rc, n_elem = 0;
+    u32 tag = 0xdeadbeef, rc = 0, n_elem = 0;
     unsigned long flags = 0;
+    enum sas_protocol task_proto = t->task_proto;

     if (!dev->port) {
         struct task_status_struct *tsm = &t->task_status;
@@ -410,7 +413,7 @@ static int pm8001_task_exec(struct sas_task *task,
         pm8001_dev = dev->lldd_dev;
         port = &pm8001_ha->port[sas_find_local_port_id(dev)];
         if (DEV_IS_GONE(pm8001_dev) || !port->port_attached) {
-            if (sas_protocol_ata(t->task_proto)) {
+            if (sas_protocol_ata(task_proto)) {
                 struct task_status_struct *ts = &t->task_status;
                 ts->resp = SAS_TASK_UNDELIVERED;
                 ts->stat = SAS_PHY_DOWN;
@@ -432,7 +435,7 @@ static int pm8001_task_exec(struct sas_task *task,
             goto err_out;
         ccb = &pm8001_ha->ccb_info[tag];

-        if (!sas_protocol_ata(t->task_proto)) {
+        if (!sas_protocol_ata(task_proto)) {
             if (t->num_scatter) {
                 n_elem = dma_map_sg(pm8001_ha->dev,
                     t->scatter,
@@ -452,7 +455,7 @@ static int pm8001_task_exec(struct sas_task *task,
         ccb->ccb_tag = tag;
         ccb->task = t;
         ccb->device = pm8001_dev;
-        switch (t->task_proto) {
+        switch (task_proto) {
         case SAS_PROTOCOL_SMP:
             rc = pm8001_task_prep_smp(pm8001_ha, ccb);
             break;
@@ -469,8 +472,7 @@ static int pm8001_task_exec(struct sas_task *task,
             break;
         default:
             dev_printk(KERN_ERR, pm8001_ha->dev,
-                "unknown sas_task proto: 0x%x\n",
-                t->task_proto);
+                "unknown sas_task proto: 0x%x\n", task_proto);
             rc = -EINVAL;
             break;
         }
@@ -493,7 +495,7 @@ err_out_tag:
     pm8001_tag_free(pm8001_ha, tag);
 err_out:
     dev_printk(KERN_ERR, pm8001_ha->dev, "pm8001 exec failed[%d]!\n", rc);
-    if (!sas_protocol_ata(t->task_proto))
+    if (!sas_protocol_ata(task_proto))
         if (n_elem)
             dma_unmap_sg(pm8001_ha->dev, t->scatter, t->num_scatter,
                 t->data_dir);
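Caching `t->task_proto` in a local at entry looks cosmetic, but the diff consistently swaps every later read for the local, including the `err_out` unwind path that runs after the command may already have been handed to hardware; the plausible rationale (an assumption here, not stated in the diff) is that a racing completion can free or recycle the task, making a late `t->task_proto` read unsafe. A sketch of that snapshot-before-queue pattern:

```c
#include <stdio.h>

enum proto { PROTO_SSP, PROTO_SATA, PROTO_SMP };

struct req { enum proto proto; };

/* Sketch: snapshot fields needed for cleanup *before* queueing, so the
 * unwind path never re-reads a request a racing completion may free. */
static int exec(struct req *r, int (*queue)(struct req *))
{
    enum proto proto = r->proto;    /* like task_proto in the diff */
    int rc = queue(r);              /* r may complete/free after this */

    if (rc && proto != PROTO_SATA)  /* unwind using the local copy */
        fprintf(stderr, "unwinding non-ATA request\n");
    return rc;
}

static int queue_fail(struct req *r) { (void)r; return -1; }

int main(void)
{
    struct req r = { PROTO_SSP };
    return exec(&r, queue_fail) ? 1 : 0;
}
```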
@@ -1179,7 +1181,7 @@ int pm8001_query_task(struct sas_task *task)
             break;
         }
     }
-    pm8001_printk(":rc= %d\n", rc);
+    pr_err("pm80xx: rc= %d\n", rc);
     return rc;
 }

@@ -1202,8 +1204,8 @@ int pm8001_abort_task(struct sas_task *task)
     pm8001_dev = dev->lldd_dev;
     pm8001_ha = pm8001_find_ha_by_dev(dev);
     phy_id = pm8001_dev->attached_phy;
-    rc = pm8001_find_tag(task, &tag);
-    if (rc == 0) {
+    ret = pm8001_find_tag(task, &tag);
+    if (ret == 0) {
         pm8001_printk("no tag for task:%p\n", task);
         return TMF_RESP_FUNC_FAILED;
     }
@@ -1241,26 +1243,50 @@ int pm8001_abort_task(struct sas_task *task)

             /* 2. Send Phy Control Hard Reset */
             reinit_completion(&completion);
+            phy->port_reset_status = PORT_RESET_TMO;
             phy->reset_success = false;
             phy->enable_completion = &completion;
             phy->reset_completion = &completion_reset;
             ret = PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
                 PHY_HARD_RESET);
-            if (ret)
+            if (ret) {
+                phy->enable_completion = NULL;
+                phy->reset_completion = NULL;
                 goto out;
+            }
+
+            /* In the case of the reset timeout/fail we still
+             * abort the command at the firmware. The assumption
+             * here is that the drive is off doing something so
+             * that it's not processing requests, and we want to
+             * avoid getting a completion for this and either
+             * leaking the task in libsas or losing the race and
+             * getting a double free.
+             */
             PM8001_MSG_DBG(pm8001_ha,
                 pm8001_printk("Waiting for local phy ctl\n"));
-            wait_for_completion(&completion);
-            if (!phy->reset_success)
-                goto out;
-
-            /* 3. Wait for Port Reset complete / Port reset TMO */
-            PM8001_MSG_DBG(pm8001_ha,
+            ret = wait_for_completion_timeout(&completion,
+                PM8001_TASK_TIMEOUT * HZ);
+            if (!ret || !phy->reset_success) {
+                phy->enable_completion = NULL;
+                phy->reset_completion = NULL;
+            } else {
+                /* 3. Wait for Port Reset complete or
+                 * Port reset TMO
+                 */
+                PM8001_MSG_DBG(pm8001_ha,
                 pm8001_printk("Waiting for Port reset\n"));
-            wait_for_completion(&completion_reset);
-            if (phy->port_reset_status) {
-                pm8001_dev_gone_notify(dev);
-                goto out;
+                ret = wait_for_completion_timeout(
+                    &completion_reset,
+                    PM8001_TASK_TIMEOUT * HZ);
+                if (!ret)
+                    phy->reset_completion = NULL;
+                WARN_ON(phy->port_reset_status ==
+                    PORT_RESET_TMO);
+                if (phy->port_reset_status == PORT_RESET_TMO) {
+                    pm8001_dev_gone_notify(dev);
+                    goto out;
+                }
             }

             /*
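The rework above replaces unbounded `wait_for_completion()` calls with `wait_for_completion_timeout()`, and the in-code comment states the policy: even on reset timeout or failure, still abort the command at the firmware. Just as important, `enable_completion`/`reset_completion` are NULLed on every early exit, so a late interrupt cannot signal a stack-allocated completion that has gone out of scope. A compact userspace model of that arm/disarm discipline (the kernel types are replaced with trivial stand-ins):

```c
#include <stdbool.h>
#include <stddef.h>

struct completion { int dummy; };   /* stand-in for the kernel type */
struct phy_ctx {
    struct completion *enable_completion;
    struct completion *reset_completion;
};

/* Model: disarm the completion pointers on *every* path that stops
 * waiting, so a late IRQ can't complete a dead stack object. */
static bool hard_reset_and_wait(struct phy_ctx *phy, int (*fire)(void),
                                bool (*wait_tmo)(struct completion *))
{
    if (fire()) {                             /* request failed */
        phy->enable_completion = NULL;
        phy->reset_completion = NULL;
        return false;
    }
    if (!wait_tmo(phy->enable_completion)) {  /* phy-ctl timeout */
        phy->enable_completion = NULL;
        phy->reset_completion = NULL;
        return false;
    }
    if (!wait_tmo(phy->reset_completion))     /* port-reset timeout */
        phy->reset_completion = NULL;
    return true;
}

static int fire_ok(void) { return 0; }
static bool wait_ok(struct completion *c) { (void)c; return true; }

int main(void)
{
    struct completion en, rst;
    struct phy_ctx phy = { &en, &rst };
    return hard_reset_and_wait(&phy, fire_ok, wait_ok) ? 0 : 1;
}
```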
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h

@@ -66,8 +66,11 @@
 #define PM8001_EH_LOGGING	0x10 /* libsas EH function logging*/
 #define PM8001_IOCTL_LOGGING	0x20 /* IOCTL message logging */
 #define PM8001_MSG_LOGGING	0x40 /* misc message logging */
-#define pm8001_printk(format, arg...)	printk(KERN_INFO "pm80xx %s %d:" \
-	format, __func__, __LINE__, ## arg)
+#define PM8001_DEV_LOGGING	0x80 /* development message logging */
+#define PM8001_DEVIO_LOGGING	0x100 /* development io message logging */
+#define PM8001_IOERR_LOGGING	0x200 /* development io err message logging */
+#define pm8001_printk(format, arg...)	pr_info("%s:: %s %d:" \
+	format, pm8001_ha->name, __func__, __LINE__, ## arg)
 #define PM8001_CHECK_LOGGING(HBA, LEVEL, CMD)	\
 do {						\
 	if (unlikely(HBA->logging_level & LEVEL))	\

@@ -97,6 +100,14 @@ do { \
 #define PM8001_MSG_DBG(HBA, CMD)		\
 	PM8001_CHECK_LOGGING(HBA, PM8001_MSG_LOGGING, CMD)
+#define PM8001_DEV_DBG(HBA, CMD)		\
+	PM8001_CHECK_LOGGING(HBA, PM8001_DEV_LOGGING, CMD)
+#define PM8001_DEVIO_DBG(HBA, CMD)		\
+	PM8001_CHECK_LOGGING(HBA, PM8001_DEVIO_LOGGING, CMD)
+#define PM8001_IOERR_DBG(HBA, CMD)		\
+	PM8001_CHECK_LOGGING(HBA, PM8001_IOERR_LOGGING, CMD)
 
 #define PM8001_USE_TASKLET
 #define PM8001_USE_MSIX
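The three new levels slot into the driver's existing scheme: every PM8001_*_DBG site expands through PM8001_CHECK_LOGGING, which evaluates the print statement only when the matching bit is set in the HBA's logging_level mask, so the new call sites cost almost nothing unless explicitly enabled. A standalone sketch of that gating pattern (names prefixed MY_ are illustrative):

    /* Sketch of bitmask-gated logging, mirroring PM8001_CHECK_LOGGING. */
    #include <linux/printk.h>
    #include <linux/types.h>

    #define MY_DEV_LOGGING   0x80
    #define MY_DEVIO_LOGGING 0x100

    struct my_hba {
            u32 logging_level;
    };

    #define MY_CHECK_LOGGING(hba, level, cmd)               \
    do {                                                    \
            if (unlikely((hba)->logging_level & (level)))   \
                    cmd;                                    \
    } while (0)

    #define MY_DEV_DBG(hba, cmd) MY_CHECK_LOGGING(hba, MY_DEV_LOGGING, cmd)

    /* Usage: the pr_info() argument is not evaluated at all unless the
     * MY_DEV_LOGGING bit is set in logging_level at run time. */
    static void example(struct my_hba *hba)
    {
            MY_DEV_DBG(hba, pr_info("device-level debug enabled\n"));
    }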
@@ -141,6 +152,8 @@ struct pm8001_ioctl_payload {
 #define MPI_FATAL_EDUMP_TABLE_HANDSHAKE		0x0C /* FDDHSHK */
 #define MPI_FATAL_EDUMP_TABLE_STATUS		0x10 /* FDDTSTAT */
 #define MPI_FATAL_EDUMP_TABLE_ACCUM_LEN		0x14 /* ACCDDLEN */
+#define MPI_FATAL_EDUMP_TABLE_TOTAL_LEN		0x18 /* TOTALLEN */
+#define MPI_FATAL_EDUMP_TABLE_SIGNATURE		0x1C /* SIGNITURE */
 #define MPI_FATAL_EDUMP_HANDSHAKE_RDY		0x1
 #define MPI_FATAL_EDUMP_HANDSHAKE_BUSY		0x0
 #define MPI_FATAL_EDUMP_TABLE_STAT_RSVD		0x0

@@ -496,6 +509,7 @@ struct pm8001_hba_info {
 	u32			forensic_last_offset;
 	u32			fatal_forensic_shift_offset;
 	u32			forensic_fatal_step;
+	u32			forensic_preserved_accumulated_transfer;
 	u32			evtlog_ib_offset;
 	u32			evtlog_ob_offset;
 	void __iomem	*msg_unit_tbl_addr;/*Message Unit Table Addr*/

@@ -530,11 +544,14 @@ struct pm8001_hba_info {
 	struct pm8001_ccb_info	*ccb_info;
 #ifdef PM8001_USE_MSIX
 	int			number_of_intr;/*will be used in remove()*/
+	char			intr_drvname[PM8001_MAX_MSIX_VEC]
+				[PM8001_NAME_LENGTH+1+3+1];
 #endif
 #ifdef PM8001_USE_TASKLET
 	struct tasklet_struct	tasklet[PM8001_MAX_MSIX_VEC];
 #endif
 	u32			logging_level;
+	u32			link_rate;
 	u32			fw_status;
 	u32			smp_exp_mode;
 	bool			controller_fatal_error;

@@ -663,7 +680,8 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
 void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha);
 int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
 			 struct inbound_queue_table *circularQ,
			 u32 opCode, void *payload, size_t nb,
			 u32 responseQueue);
 int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ,
			    u16 messageSize, void **messagePtr);
 u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
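The new size_t nb argument lets every caller pass sizeof(payload) for its request type. Presumably the MPI layer uses it to copy only the meaningful bytes into the fixed-size inbound message buffer and zero the rest of the slot, instead of copying a fixed byte count regardless of the real payload size; a sketch of that idea under that assumption, with illustrative names (fill_iomb, hdr_size), not the driver's literal code:

    /* Sketch: copy nb payload bytes into a fixed-size IOMB slot and
     * zero-fill the tail so stale data never reaches the firmware. */
    #include <linux/string.h>
    #include <linux/types.h>

    static void fill_iomb(void *slot, const void *payload, size_t nb,
                          size_t iomb_size, size_t hdr_size)
    {
            memcpy(slot, payload, nb);
            if (nb + hdr_size < iomb_size)
                    memset((char *)slot + nb, 0,
                           iomb_size - (nb + hdr_size));
    }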

diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c

@@ -37,6 +37,7 @@
  * POSSIBILITY OF SUCH DAMAGES.
  *
  */
+#include <linux/version.h>
 #include <linux/slab.h>
 #include "pm8001_sas.h"
 #include "pm80xx_hwi.h"

@@ -75,7 +76,7 @@ void pm80xx_pci_mem_copy(struct pm8001_hba_info *pm8001_ha, u32 soffset,
 	destination1 = (u32 *)destination;
 
 	for (index = 0; index < dw_count; index += 4, destination1++) {
-		offset = (soffset + index / 4);
+		offset = (soffset + index);
 		if (offset < (64 * 1024)) {
 			value = pm8001_cr32(pm8001_ha, bus_base_number, offset);
 			*destination1 = cpu_to_le32(value);
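Worth spelling out why the old arithmetic was wrong: the loop advances index by 4 for every 32-bit word copied, so index is already a byte offset into the BAR window. Dividing by 4 made the reads overlap and only ever covered the first quarter of the region:

    /* With dw_count = 16 (four 32-bit words to copy):
     *   old: offsets 0, 1, 2, 3  -- overlapping, misaligned reads
     *   new: offsets 0, 4, 8, 12 -- one properly spaced read per word
     */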
@@ -92,9 +93,12 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev,
 	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
 	void __iomem *fatal_table_address = pm8001_ha->fatal_tbl_addr;
 	u32 accum_len , reg_val, index, *temp;
+	u32 status = 1;
 	unsigned long start;
 	u8 *direct_data;
 	char *fatal_error_data = buf;
+	u32 length_to_read;
+	u32 offset;
 
 	pm8001_ha->forensic_info.data_buf.direct_data = buf;
 	if (pm8001_ha->chip_id == chip_8001) {

@@ -104,16 +108,35 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev,
 		return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
 			(char *)buf;
 	}
+	/* initialize variables for very first call from host application */
 	if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) {
 		PM8001_IO_DBG(pm8001_ha,
 		pm8001_printk("forensic_info TYPE_NON_FATAL..............\n"));
 		direct_data = (u8 *)fatal_error_data;
 		pm8001_ha->forensic_info.data_type = TYPE_NON_FATAL;
 		pm8001_ha->forensic_info.data_buf.direct_len = SYSFS_OFFSET;
-		pm8001_ha->forensic_info.data_buf.direct_offset = 0;
 		pm8001_ha->forensic_info.data_buf.read_len = 0;
+		pm8001_ha->forensic_preserved_accumulated_transfer = 0;
+
+		/* Write signature to fatal dump table */
+		pm8001_mw32(fatal_table_address,
+			MPI_FATAL_EDUMP_TABLE_SIGNATURE, 0x1234abcd);
 
 		pm8001_ha->forensic_info.data_buf.direct_data = direct_data;
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("ossaHwCB: status1 %d\n", status));
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("ossaHwCB: read_len 0x%x\n",
+			pm8001_ha->forensic_info.data_buf.read_len));
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("ossaHwCB: direct_len 0x%x\n",
+			pm8001_ha->forensic_info.data_buf.direct_len));
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("ossaHwCB: direct_offset 0x%x\n",
+			pm8001_ha->forensic_info.data_buf.direct_offset));
+	}
+	if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) {
 		/* start to get data */
 		/* Program the MEMBASE II Shifting Register with 0x00.*/
 		pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,

@@ -126,30 +149,66 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev,
 	/* Read until accum_len is retrived */
 	accum_len = pm8001_mr32(fatal_table_address,
 			MPI_FATAL_EDUMP_TABLE_ACCUM_LEN);
-	PM8001_IO_DBG(pm8001_ha, pm8001_printk("accum_len 0x%x\n",
-			accum_len));
+	/* Determine length of data between previously stored transfer length
+	 * and current accumulated transfer length
+	 */
+	length_to_read =
+		accum_len - pm8001_ha->forensic_preserved_accumulated_transfer;
+	PM8001_IO_DBG(pm8001_ha,
+		pm8001_printk("get_fatal_spcv: accum_len 0x%x\n", accum_len));
+	PM8001_IO_DBG(pm8001_ha,
+		pm8001_printk("get_fatal_spcv: length_to_read 0x%x\n",
+		length_to_read));
+	PM8001_IO_DBG(pm8001_ha,
+		pm8001_printk("get_fatal_spcv: last_offset 0x%x\n",
+		pm8001_ha->forensic_last_offset));
+	PM8001_IO_DBG(pm8001_ha,
+		pm8001_printk("get_fatal_spcv: read_len 0x%x\n",
+		pm8001_ha->forensic_info.data_buf.read_len));
+	PM8001_IO_DBG(pm8001_ha,
+		pm8001_printk("get_fatal_spcv:: direct_len 0x%x\n",
+		pm8001_ha->forensic_info.data_buf.direct_len));
+	PM8001_IO_DBG(pm8001_ha,
+		pm8001_printk("get_fatal_spcv:: direct_offset 0x%x\n",
+		pm8001_ha->forensic_info.data_buf.direct_offset));
+
+	/* If accumulated length failed to read correctly fail the attempt.*/
 	if (accum_len == 0xFFFFFFFF) {
 		PM8001_IO_DBG(pm8001_ha,
 			pm8001_printk("Possible PCI issue 0x%x not expected\n",
 				accum_len));
-		return -EIO;
+		return status;
 	}
-	if (accum_len == 0 || accum_len >= 0x100000) {
+	/* If accumulated length is zero fail the attempt */
+	if (accum_len == 0) {
 		pm8001_ha->forensic_info.data_buf.direct_data +=
 			sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
 				"%08x ", 0xFFFFFFFF);
 		return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
 			(char *)buf;
 	}
+	/* Accumulated length is good so start capturing the first data */
 	temp = (u32 *)pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr;
 	if (pm8001_ha->forensic_fatal_step == 0) {
 moreData:
+		/* If data to read is less than SYSFS_OFFSET then reduce the
+		 * length of dataLen
+		 */
+		if (pm8001_ha->forensic_last_offset + SYSFS_OFFSET
+				> length_to_read) {
+			pm8001_ha->forensic_info.data_buf.direct_len =
+				length_to_read -
+				pm8001_ha->forensic_last_offset;
+		} else {
+			pm8001_ha->forensic_info.data_buf.direct_len =
+				SYSFS_OFFSET;
+		}
 		if (pm8001_ha->forensic_info.data_buf.direct_data) {
 			/* Data is in bar, copy to host memory */
-			pm80xx_pci_mem_copy(pm8001_ha, pm8001_ha->fatal_bar_loc,
-			pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr,
-			pm8001_ha->forensic_info.data_buf.direct_len ,
-			1);
+			pm80xx_pci_mem_copy(pm8001_ha,
+				pm8001_ha->fatal_bar_loc,
+				pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr,
+				pm8001_ha->forensic_info.data_buf.direct_len, 1);
 		}
 		pm8001_ha->fatal_bar_loc +=
 			pm8001_ha->forensic_info.data_buf.direct_len;
@@ -160,21 +219,29 @@ moreData:
 		pm8001_ha->forensic_info.data_buf.read_len =
 			pm8001_ha->forensic_info.data_buf.direct_len;
 
-		if (pm8001_ha->forensic_last_offset >= accum_len) {
+		if (pm8001_ha->forensic_last_offset >= length_to_read) {
 			pm8001_ha->forensic_info.data_buf.direct_data +=
 			sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
 				"%08x ", 3);
-			for (index = 0; index < (SYSFS_OFFSET / 4); index++) {
+			for (index = 0; index <
+				(pm8001_ha->forensic_info.data_buf.direct_len
+				 / 4); index++) {
 				pm8001_ha->forensic_info.data_buf.direct_data +=
-				sprintf(pm8001_ha->
-					forensic_info.data_buf.direct_data,
+				sprintf(
+				pm8001_ha->forensic_info.data_buf.direct_data,
 					"%08x ", *(temp + index));
 			}
 			pm8001_ha->fatal_bar_loc = 0;
 			pm8001_ha->forensic_fatal_step = 1;
 			pm8001_ha->fatal_forensic_shift_offset = 0;
 			pm8001_ha->forensic_last_offset	= 0;
+			status = 0;
+			offset = (int)
+			((char *)pm8001_ha->forensic_info.data_buf.direct_data
+			- (char *)buf);
+			PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("get_fatal_spcv:return1 0x%x\n", offset));
 			return (char *)pm8001_ha->
 				forensic_info.data_buf.direct_data -
 				(char *)buf;

@@ -184,12 +251,20 @@ moreData:
 			sprintf(pm8001_ha->
 				forensic_info.data_buf.direct_data,
 				"%08x ", 2);
-			for (index = 0; index < (SYSFS_OFFSET / 4); index++) {
-				pm8001_ha->forensic_info.data_buf.direct_data +=
-				sprintf(pm8001_ha->
+			for (index = 0; index <
+				(pm8001_ha->forensic_info.data_buf.direct_len
+				 / 4); index++) {
+				pm8001_ha->forensic_info.data_buf.direct_data
+				+= sprintf(pm8001_ha->
 					forensic_info.data_buf.direct_data,
 					"%08x ", *(temp + index));
 			}
+			status = 0;
+			offset = (int)
+			((char *)pm8001_ha->forensic_info.data_buf.direct_data
+			- (char *)buf);
+			PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("get_fatal_spcv:return2 0x%x\n", offset));
 			return (char *)pm8001_ha->
 				forensic_info.data_buf.direct_data -
 				(char *)buf;

@@ -199,63 +274,122 @@ moreData:
 		pm8001_ha->forensic_info.data_buf.direct_data +=
 			sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
 				"%08x ", 2);
-		for (index = 0; index < 256; index++) {
+		for (index = 0; index <
+			(pm8001_ha->forensic_info.data_buf.direct_len
+			 / 4) ; index++) {
 			pm8001_ha->forensic_info.data_buf.direct_data +=
 				sprintf(pm8001_ha->
 					forensic_info.data_buf.direct_data,
 					"%08x ", *(temp + index));
 		}
 		pm8001_ha->fatal_forensic_shift_offset += 0x100;
 		pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
 			pm8001_ha->fatal_forensic_shift_offset);
 		pm8001_ha->fatal_bar_loc = 0;
+		status = 0;
+		offset = (int)
+		((char *)pm8001_ha->forensic_info.data_buf.direct_data
+		- (char *)buf);
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("get_fatal_spcv: return3 0x%x\n", offset));
 		return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
 			(char *)buf;
 	}
 	if (pm8001_ha->forensic_fatal_step == 1) {
-		pm8001_ha->fatal_forensic_shift_offset = 0;
-		/* Read 64K of the debug data. */
-		pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
-			pm8001_ha->fatal_forensic_shift_offset);
-		pm8001_mw32(fatal_table_address,
-			MPI_FATAL_EDUMP_TABLE_HANDSHAKE,
+		/* store previous accumulated length before triggering next
+		 * accumulated length update
+		 */
+		pm8001_ha->forensic_preserved_accumulated_transfer =
+			pm8001_mr32(fatal_table_address,
+				MPI_FATAL_EDUMP_TABLE_ACCUM_LEN);
+
+		/* continue capturing the fatal log until Dump status is 0x3 */
+		if (pm8001_mr32(fatal_table_address,
+			MPI_FATAL_EDUMP_TABLE_STATUS) <
+			MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE) {
+
+			/* reset fddstat bit by writing to zero*/
+			pm8001_mw32(fatal_table_address,
+				MPI_FATAL_EDUMP_TABLE_STATUS, 0x0);
+
+			/* set dump control value to '1' so that new data will
+			 * be transferred to shared memory
+			 */
+			pm8001_mw32(fatal_table_address,
+				MPI_FATAL_EDUMP_TABLE_HANDSHAKE,
 				MPI_FATAL_EDUMP_HANDSHAKE_RDY);
-		/* Poll FDDHSHK until clear */
-		start = jiffies + (2 * HZ); /* 2 sec */
 
-		do {
-			reg_val = pm8001_mr32(fatal_table_address,
+			/*Poll FDDHSHK until clear */
+			start = jiffies + (2 * HZ); /* 2 sec */
+
+			do {
+				reg_val = pm8001_mr32(fatal_table_address,
 					MPI_FATAL_EDUMP_TABLE_HANDSHAKE);
-		} while ((reg_val) && time_before(jiffies, start));
+			} while ((reg_val) && time_before(jiffies, start));
 
-		if (reg_val != 0) {
-			PM8001_FAIL_DBG(pm8001_ha,
-			pm8001_printk("TIMEOUT:MEMBASE_II_SHIFT_REGISTER"
-			" = 0x%x\n", reg_val));
-			return -EIO;
-		}
+			if (reg_val != 0) {
+				PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+				"TIMEOUT:MPI_FATAL_EDUMP_TABLE_HDSHAKE 0x%x\n",
+				reg_val));
+				/* Fail the dump if a timeout occurs */
+				pm8001_ha->forensic_info.data_buf.direct_data +=
+				sprintf(
+				pm8001_ha->forensic_info.data_buf.direct_data,
+				"%08x ", 0xFFFFFFFF);
+				return((char *)
+				pm8001_ha->forensic_info.data_buf.direct_data
+				- (char *)buf);
+			}
+			/* Poll status register until set to 2 or
+			 * 3 for up to 2 seconds
+			 */
+			start = jiffies + (2 * HZ); /* 2 sec */
 
-		/* Read the next 64K of the debug data. */
-		pm8001_ha->forensic_fatal_step = 0;
-		if (pm8001_mr32(fatal_table_address,
-			MPI_FATAL_EDUMP_TABLE_STATUS) !=
-				MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE) {
-			pm8001_mw32(fatal_table_address,
-				MPI_FATAL_EDUMP_TABLE_HANDSHAKE, 0);
-			goto moreData;
-		} else {
-			pm8001_ha->forensic_info.data_buf.direct_data +=
-				sprintf(pm8001_ha->
-					forensic_info.data_buf.direct_data,
-					"%08x ", 4);
-			pm8001_ha->forensic_info.data_buf.read_len = 0xFFFFFFFF;
-			pm8001_ha->forensic_info.data_buf.direct_len = 0;
-			pm8001_ha->forensic_info.data_buf.direct_offset = 0;
-			pm8001_ha->forensic_info.data_buf.read_len = 0;
+			do {
+				reg_val = pm8001_mr32(fatal_table_address,
+					MPI_FATAL_EDUMP_TABLE_STATUS);
+			} while (((reg_val != 2) || (reg_val != 3)) &&
+					time_before(jiffies, start));
+
+			if (reg_val < 2) {
+				PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+				"TIMEOUT:MPI_FATAL_EDUMP_TABLE_STATUS = 0x%x\n",
+				reg_val));
+				/* Fail the dump if a timeout occurs */
+				pm8001_ha->forensic_info.data_buf.direct_data +=
+				sprintf(
+				pm8001_ha->forensic_info.data_buf.direct_data,
+				"%08x ", 0xFFFFFFFF);
+				pm8001_cw32(pm8001_ha, 0,
+					MEMBASE_II_SHIFT_REGISTER,
+					pm8001_ha->fatal_forensic_shift_offset);
+			}
+			/* Read the next block of the debug data.*/
+			length_to_read = pm8001_mr32(fatal_table_address,
+				MPI_FATAL_EDUMP_TABLE_ACCUM_LEN) -
+				pm8001_ha->forensic_preserved_accumulated_transfer;
+			if (length_to_read != 0x0) {
+				pm8001_ha->forensic_fatal_step = 0;
+				goto moreData;
+			} else {
+				pm8001_ha->forensic_info.data_buf.direct_data +=
+				sprintf(
+				pm8001_ha->forensic_info.data_buf.direct_data,
+				"%08x ", 4);
+				pm8001_ha->forensic_info.data_buf.read_len
+					= 0xFFFFFFFF;
+				pm8001_ha->forensic_info.data_buf.direct_len
+					= 0;
+				pm8001_ha->forensic_info.data_buf.direct_offset
+					= 0;
+				pm8001_ha->forensic_info.data_buf.read_len = 0;
+			}
 		}
 	}
+	offset = (int)((char *)pm8001_ha->forensic_info.data_buf.direct_data
+			- (char *)buf);
+	PM8001_IO_DBG(pm8001_ha,
+		pm8001_printk("get_fatal_spcv: return4 0x%x\n", offset));
 	return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
 		(char *)buf;
 }
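Both waits in the hunk above use the same bounded-poll idiom: capture a deadline with jiffies + (2 * HZ), spin reading the register, and give up once time_before(jiffies, deadline) turns false. A standalone sketch of the pattern (read_reg stands in for pm8001_mr32):

    /* Sketch: poll a hardware register until it clears or a 2 s
     * deadline passes. */
    #include <linux/jiffies.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    static int poll_until_clear(u32 (*read_reg)(void))
    {
            unsigned long deadline = jiffies + 2 * HZ;  /* 2 seconds */
            u32 val;

            do {
                    val = read_reg();
            } while (val && time_before(jiffies, deadline));

            return val ? -ETIMEDOUT : 0;
    }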
@@ -317,6 +451,25 @@ static void read_main_config_table(struct pm8001_hba_info *pm8001_ha)
 		pm8001_mr32(address, MAIN_MPI_ILA_RELEASE_TYPE);
 	pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version =
 		pm8001_mr32(address, MAIN_MPI_INACTIVE_FW_VERSION);
+
+	PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+		"Main cfg table: sign:%x interface rev:%x fw_rev:%x\n",
+		pm8001_ha->main_cfg_tbl.pm80xx_tbl.signature,
+		pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev,
+		pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev));
+
+	PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+		"table offset: gst:%x iq:%x oq:%x int vec:%x phy attr:%x\n",
+		pm8001_ha->main_cfg_tbl.pm80xx_tbl.gst_offset,
+		pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_queue_offset,
+		pm8001_ha->main_cfg_tbl.pm80xx_tbl.outbound_queue_offset,
+		pm8001_ha->main_cfg_tbl.pm80xx_tbl.int_vec_table_offset,
+		pm8001_ha->main_cfg_tbl.pm80xx_tbl.phy_attr_table_offset));
+
+	PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+		"Main cfg table; ila rev:%x Inactive fw rev:%x\n",
+		pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version,
+		pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version));
 }
 
 /**

@@ -521,6 +674,11 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
 			pm8001_mr32(addressib, (offsetib + 0x18));
 		pm8001_ha->inbnd_q_tbl[i].producer_idx		= 0;
 		pm8001_ha->inbnd_q_tbl[i].consumer_index	= 0;
+
+		PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+			"IQ %d pi_bar 0x%x pi_offset 0x%x\n", i,
+			pm8001_ha->inbnd_q_tbl[i].pi_pci_bar,
+			pm8001_ha->inbnd_q_tbl[i].pi_offset));
 	}
 	for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) {
 		pm8001_ha->outbnd_q_tbl[i].element_size_cnt	=

@@ -549,6 +707,11 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
 			pm8001_mr32(addressob, (offsetob + 0x18));
 		pm8001_ha->outbnd_q_tbl[i].consumer_idx		= 0;
 		pm8001_ha->outbnd_q_tbl[i].producer_index	= 0;
+
+		PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+			"OQ %d ci_bar 0x%x ci_offset 0x%x\n", i,
+			pm8001_ha->outbnd_q_tbl[i].ci_pci_bar,
+			pm8001_ha->outbnd_q_tbl[i].ci_offset));
 	}
 }

@@ -582,6 +745,10 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
 		((pm8001_ha->number_of_intr - 1) << 8);
 	pm8001_mw32(address, MAIN_FATAL_ERROR_INTERRUPT,
 		pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt);
+	PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+		"Updated Fatal error interrupt vector 0x%x\n",
+		pm8001_mr32(address, MAIN_FATAL_ERROR_INTERRUPT)));
+
 	pm8001_mw32(address, MAIN_EVENT_CRC_CHECK,
 		pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump);

@@ -591,6 +758,9 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
 	pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping |= 0x20000000;
 	pm8001_mw32(address, MAIN_GPIO_LED_FLAGS_OFFSET,
 		pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping);
+	PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+		"Programming DW 0x21 in main cfg table with 0x%x\n",
+		pm8001_mr32(address, MAIN_GPIO_LED_FLAGS_OFFSET)));
 
 	pm8001_mw32(address, MAIN_PORT_RECOVERY_TIMER,
 		pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer);

@@ -629,6 +799,21 @@ static void update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
 		pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr);
 	pm8001_mw32(address, offset + IB_CI_BASE_ADDR_LO_OFFSET,
 		pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr);
+
+	PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+		"IQ %d: Element pri size 0x%x\n",
+		number,
+		pm8001_ha->inbnd_q_tbl[number].element_pri_size_cnt));
+
+	PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+		"IQ upr base addr 0x%x IQ lwr base addr 0x%x\n",
+		pm8001_ha->inbnd_q_tbl[number].upper_base_addr,
+		pm8001_ha->inbnd_q_tbl[number].lower_base_addr));
+
+	PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+		"CI upper base addr 0x%x CI lower base addr 0x%x\n",
+		pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr,
+		pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr));
 }
 
 /**

@@ -652,6 +837,21 @@ static void update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
 		pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr);
 	pm8001_mw32(address, offset + OB_INTERRUPT_COALES_OFFSET,
 		pm8001_ha->outbnd_q_tbl[number].interrup_vec_cnt_delay);
+
+	PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+		"OQ %d: Element pri size 0x%x\n",
+		number,
+		pm8001_ha->outbnd_q_tbl[number].element_size_cnt));
+
+	PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+		"OQ upr base addr 0x%x OQ lwr base addr 0x%x\n",
+		pm8001_ha->outbnd_q_tbl[number].upper_base_addr,
+		pm8001_ha->outbnd_q_tbl[number].lower_base_addr));
+
+	PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+		"PI upper base addr 0x%x PI lower base addr 0x%x\n",
+		pm8001_ha->outbnd_q_tbl[number].pi_upper_base_addr,
+		pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr));
 }
 
 /**

@@ -669,9 +869,9 @@ static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
 	pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_UPDATE);
 	/* wait until Inbound DoorBell Clear Register toggled */
 	if (IS_SPCV_12G(pm8001_ha->pdev)) {
-		max_wait_count = 4 * 1000 * 1000;/* 4 sec */
+		max_wait_count = SPCV_DOORBELL_CLEAR_TIMEOUT;
 	} else {
-		max_wait_count = 2 * 1000 * 1000;/* 2 sec */
+		max_wait_count = SPC_DOORBELL_CLEAR_TIMEOUT;
 	}
 	do {
 		udelay(1);
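The replaced literals and the new named constants are both iteration counts for the udelay(1) polling loop that follows, so each unit is roughly one microsecond of wait budget. A sketch of that loop shape (check_done stands in for reading the inbound doorbell register):

    /* Sketch: bounded busy-wait, about one microsecond per iteration. */
    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    static int wait_doorbell_clear(bool (*check_done)(void),
                                   u32 max_wait_count)
    {
            u32 count = 0;

            do {
                    udelay(1);
                    count++;
            } while (!check_done() && count < max_wait_count);

            return (count < max_wait_count) ? 0 : -ETIMEDOUT;
    }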
@@ -797,7 +997,7 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
 	value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0);
 	offset = value & 0x03FFFFFF; /* scratch pad 0 TBL address */
 
-	PM8001_INIT_DBG(pm8001_ha,
+	PM8001_DEV_DBG(pm8001_ha,
 		pm8001_printk("Scratchpad 0 Offset: 0x%x value 0x%x\n",
 			offset, value));
 	pcilogic = (value & 0xFC000000) >> 26;

@@ -885,7 +1085,12 @@ pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha)
 		(THERMAL_ENABLE << 8) | page_code;
 	payload.cfg_pg[1] = (LTEMPHIL << 24) | (RTEMPHIL << 8);
 
-	rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+	PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+		"Setting up thermal config. cfg_pg 0 0x%x cfg_pg 1 0x%x\n",
+		payload.cfg_pg[0], payload.cfg_pg[1]));
+
+	rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+			sizeof(payload), 0);
 	if (rc)
 		pm8001_tag_free(pm8001_ha, tag);
 	return rc;

@@ -967,7 +1172,8 @@ pm80xx_set_sas_protocol_timer_config(struct pm8001_hba_info *pm8001_ha)
 	memcpy(&payload.cfg_pg, &SASConfigPage,
 			sizeof(SASProtocolTimerConfig_t));
 
-	rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+	rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+			sizeof(payload), 0);
 	if (rc)
 		pm8001_tag_free(pm8001_ha, tag);

@@ -1090,7 +1296,12 @@ static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha)
 	payload.new_curidx_ksop = ((1 << 24) | (1 << 16) | (1 << 8) |
 					KEK_MGMT_SUBOP_KEYCARDUPDATE);
 
-	rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+	PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+		"Saving Encryption info to flash. payload 0x%x\n",
+		payload.new_curidx_ksop));
+
+	rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+			sizeof(payload), 0);
 	if (rc)
 		pm8001_tag_free(pm8001_ha, tag);

@@ -1241,7 +1452,7 @@ pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
 		pm8001_printk("reset register before write : 0x%x\n", regval));
 
 	pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, SPCv_NORMAL_RESET_VALUE);
-	mdelay(500);
+	msleep(500);
 
 	regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET);
 	PM8001_INIT_DBG(pm8001_ha,
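The mdelay-to-msleep switch here (and in hw_event_sas_phy_up below) matters for half a second of CPU time:

    /* mdelay(500): busy-spins the CPU for 500 ms -- only appropriate in
     * atomic context, where sleeping is illegal.
     * msleep(500): puts the task to sleep and lets the scheduler run
     * other work -- preferred in process context, as on this soft-reset
     * path. */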
@@ -1443,7 +1654,10 @@ static void pm80xx_send_abort_all(struct pm8001_hba_info *pm8001_ha,
 	task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
 	task_abort.tag = cpu_to_le32(ccb_tag);
 
-	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0);
+	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort,
+			sizeof(task_abort), 0);
+	PM8001_FAIL_DBG(pm8001_ha,
+		pm8001_printk("Executing abort task end\n"));
 	if (ret) {
 		sas_free_task(task);
 		pm8001_tag_free(pm8001_ha, ccb_tag);

@@ -1519,7 +1733,9 @@ static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha,
 	sata_cmd.ncqtag_atap_dir_m_dad |= ((0x1 << 7) | (0x5 << 9));
 	memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis));
 
-	res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0);
+	res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd,
+			sizeof(sata_cmd), 0);
+	PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("Executing read log end\n"));
 	if (res) {
 		sas_free_task(task);
 		pm8001_tag_free(pm8001_ha, ccb_tag);

@@ -1570,6 +1786,10 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
 	if (unlikely(!t || !t->lldd_task || !t->dev))
 		return;
 	ts = &t->task_status;
+
+	PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+		"tag::0x%x, status::0x%x task::0x%p\n", tag, status, t));
+
 	/* Print sas address of IO failed device */
 	if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
 		(status != IO_UNDERFLOW))

@@ -1772,7 +1992,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
 		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
 		break;
 	default:
-		PM8001_IO_DBG(pm8001_ha,
+		PM8001_DEVIO_DBG(pm8001_ha,
 			pm8001_printk("Unknown status 0x%x\n", status));
 		/* not allowed case. Therefore, return failed status */
 		ts->resp = SAS_TASK_COMPLETE;

@@ -1826,7 +2046,7 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
 	if (unlikely(!t || !t->lldd_task || !t->dev))
 		return;
 	ts = &t->task_status;
-	PM8001_IO_DBG(pm8001_ha,
+	PM8001_IOERR_DBG(pm8001_ha,
 		pm8001_printk("port_id:0x%x, tag:0x%x, event:0x%x\n",
 			port_id, tag, event));
 	switch (event) {

@@ -1963,7 +2183,7 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
 		ts->stat = SAS_DATA_OVERRUN;
 		break;
 	case IO_XFER_ERROR_INTERNAL_CRC_ERROR:
-		PM8001_IO_DBG(pm8001_ha,
+		PM8001_IOERR_DBG(pm8001_ha,
 			pm8001_printk("IO_XFR_ERROR_INTERNAL_CRC_ERROR\n"));
 		/* TBC: used default set values */
 		ts->resp = SAS_TASK_COMPLETE;

@@ -1974,7 +2194,7 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
 			pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n"));
 		return;
 	default:
-		PM8001_IO_DBG(pm8001_ha,
+		PM8001_DEVIO_DBG(pm8001_ha,
 			pm8001_printk("Unknown status 0x%x\n", event));
 		/* not allowed case. Therefore, return failed status */
 		ts->resp = SAS_TASK_COMPLETE;

@@ -2062,6 +2282,12 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 			pm8001_printk("ts null\n"));
 		return;
 	}
+
+	if (unlikely(status))
+		PM8001_IOERR_DBG(pm8001_ha, pm8001_printk(
+			"status:0x%x, tag:0x%x, task::0x%p\n",
+			status, tag, t));
+
 	/* Print sas address of IO failed device */
 	if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
 		(status != IO_UNDERFLOW)) {

@@ -2365,7 +2591,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
 		break;
 	default:
-		PM8001_IO_DBG(pm8001_ha,
+		PM8001_DEVIO_DBG(pm8001_ha,
 			pm8001_printk("Unknown status 0x%x\n", status));
 		/* not allowed case. Therefore, return failed status */
 		ts->resp = SAS_TASK_COMPLETE;

@@ -2382,6 +2608,8 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 			pm8001_printk("task 0x%p done with io_status 0x%x"
 			" resp 0x%x stat 0x%x but aborted by upper layer!\n",
 			t, status, ts->resp, ts->stat));
+		if (t->slow_task)
+			complete(&t->slow_task->completion);
 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 	} else {
 		spin_unlock_irqrestore(&t->task_state_lock, flags);
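Completing t->slow_task->completion on this aborted-by-upper-layer path is what wakes an internal, libsas-issued command that is blocked waiting synchronously; without it the issuer of an aborted internal SATA command would hang. A sketch of the waiting side of that handshake (issue_internal_task_and_wait is illustrative, not a driver function):

    /* Sketch: an internal task allocated with sas_alloc_slow_task()
     * blocks on slow_task->completion until the completion path -- or
     * the abort path above -- signals it. */
    #include <scsi/libsas.h>
    #include <linux/errno.h>

    static int issue_internal_task_and_wait(struct sas_task *task)
    {
            /* ... hand the task to the hardware ... */
            wait_for_completion(&task->slow_task->completion);
            return task->task_status.resp == SAS_TASK_COMPLETE ? 0 : -EIO;
    }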
@@ -2435,7 +2663,7 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
 	}
 
 	ts = &t->task_status;
-	PM8001_IO_DBG(pm8001_ha,
+	PM8001_IOERR_DBG(pm8001_ha,
 		pm8001_printk("port_id:0x%x, tag:0x%x, event:0x%x\n",
 			port_id, tag, event));
 	switch (event) {

@@ -2655,6 +2883,9 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 	if (unlikely(!t || !t->lldd_task || !t->dev))
 		return;
 
+	PM8001_DEV_DBG(pm8001_ha,
+		pm8001_printk("tag::0x%x status::0x%x\n", tag, status));
+
 	switch (status) {
 
 	case IO_SUCCESS:

@@ -2822,7 +3053,7 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
 		break;
 	default:
-		PM8001_IO_DBG(pm8001_ha,
+		PM8001_DEVIO_DBG(pm8001_ha,
 			pm8001_printk("Unknown status 0x%x\n", status));
 		ts->resp = SAS_TASK_COMPLETE;
 		ts->stat = SAS_DEV_NO_RESPONSE;

@@ -2873,7 +3104,8 @@ static void pm80xx_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha,
 		((phyId & 0xFF) << 24) | (port_id & 0xFF));
 	payload.param0 = cpu_to_le32(param0);
 	payload.param1 = cpu_to_le32(param1);
-	pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+	pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+			sizeof(payload), 0);
 }
 
 static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,

@@ -2964,7 +3196,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
 		pm8001_get_lrate_mode(phy, link_rate);
 		break;
 	default:
-		PM8001_MSG_DBG(pm8001_ha,
+		PM8001_DEVIO_DBG(pm8001_ha,
 			pm8001_printk("unknown device type(%x)\n", deviceType));
 		break;
 	}

@@ -2984,7 +3216,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
 	pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
 	spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
 	if (pm8001_ha->flags == PM8001F_RUN_TIME)
-		mdelay(200);/*delay a moment to wait disk to spinup*/
+		msleep(200);/*delay a moment to wait disk to spinup*/
 	pm8001_bytes_dmaed(pm8001_ha, phy_id);
 }

@@ -3013,7 +3245,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
 	struct sas_ha_struct *sas_ha = pm8001_ha->sas;
 	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
 	unsigned long flags;
-	PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+	PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
 		"port id %d, phy id %d link_rate %d portstate 0x%x\n",
 				port_id, phy_id, link_rate, portstate));

@@ -3101,7 +3333,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
 		break;
 	default:
 		port->port_attached = 0;
-		PM8001_MSG_DBG(pm8001_ha,
+		PM8001_DEVIO_DBG(pm8001_ha,
 			pm8001_printk(" Phy Down and(default) = 0x%x\n",
 				portstate));
 		break;

@@ -3130,8 +3362,10 @@ static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
 	if (status == 0) {
 		phy->phy_state = PHY_LINK_DOWN;
 		if (pm8001_ha->flags == PM8001F_RUN_TIME &&
-				phy->enable_completion != NULL)
+				phy->enable_completion != NULL) {
 			complete(phy->enable_completion);
+			phy->enable_completion = NULL;
+		}
 	}
 	return 0;

@@ -3191,7 +3425,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
 	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
 	struct pm8001_port *port = &pm8001_ha->port[port_id];
 	struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
-	PM8001_MSG_DBG(pm8001_ha,
+	PM8001_DEV_DBG(pm8001_ha,
 		pm8001_printk("portid:%d phyid:%d event:0x%x status:0x%x\n",
 			port_id, phy_id, eventType, status));

@@ -3376,7 +3610,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
 			pm8001_printk("EVENT_BROADCAST_ASYNCH_EVENT\n"));
 		break;
 	default:
-		PM8001_MSG_DBG(pm8001_ha,
+		PM8001_DEVIO_DBG(pm8001_ha,
 			pm8001_printk("Unknown event type 0x%x\n", eventType));
 		break;
 	}

@@ -3758,7 +3992,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
 		ssp_coalesced_comp_resp(pm8001_ha, piomb);
 		break;
 	default:
-		PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+		PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
 			"Unknown outbound Queue IOMB OPC = 0x%x\n", opc));
 		break;
 	}

@@ -3991,8 +4225,8 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
 
 	build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag,
 				&smp_cmd, pm8001_ha->smp_exp_mode, length);
-	rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
-					(u32 *)&smp_cmd, 0);
+	rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &smp_cmd,
+			sizeof(smp_cmd), 0);
 	if (rc)
 		goto err_out_2;
 	return 0;

@@ -4200,7 +4434,7 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
 	}
 	q_index = (u32) (pm8001_dev->id & 0x00ffffff) % PM8001_MAX_OUTB_NUM;
 	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
-			&ssp_cmd, q_index);
+			&ssp_cmd, sizeof(ssp_cmd), q_index);
 	return ret;
 }

@@ -4441,7 +4675,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
 	}
 	q_index = (u32) (pm8001_ha_dev->id & 0x00ffffff) % PM8001_MAX_OUTB_NUM;
 	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
-			&sata_cmd, q_index);
+			&sata_cmd, sizeof(sata_cmd), q_index);
 	return ret;
 }

@@ -4465,23 +4699,9 @@ pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
 	PM8001_INIT_DBG(pm8001_ha,
 		pm8001_printk("PHY START REQ for phy_id %d\n", phy_id));
 
-	/*
-	 ** [0:7]	PHY Identifier
-	 ** [8:11]	link rate 1.5G, 3G, 6G
-	 ** [12:13] link mode	01b SAS mode; 10b SATA mode; 11b Auto mode
-	 ** [14]	0b disable spin up hold; 1b enable spin up hold
-	 ** [15] ob no change in current PHY analig setup 1b enable using SPAST
-	 */
-	if (!IS_SPCV_12G(pm8001_ha->pdev))
-		payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
-				LINKMODE_AUTO | LINKRATE_15 |
-				LINKRATE_30 | LINKRATE_60 | phy_id);
-	else
-		payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
-				LINKMODE_AUTO | LINKRATE_15 |
-				LINKRATE_30 | LINKRATE_60 | LINKRATE_120 |
-				phy_id);
+	payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
+			LINKMODE_AUTO | pm8001_ha->link_rate | phy_id);
 	/* SSC Disable and SAS Analog ST configuration */
 	/**
 	payload.ase_sh_lm_slr_phyid =

@@ -4494,9 +4714,10 @@ pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
 	payload.sas_identify.dev_type = SAS_END_DEVICE;
 	payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
 	memcpy(payload.sas_identify.sas_addr,
-		&pm8001_ha->phy[phy_id].dev_sas_addr, SAS_ADDR_SIZE);
+		&pm8001_ha->sas_addr, SAS_ADDR_SIZE);
 	payload.sas_identify.phy_id = phy_id;
-	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
+	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload,
+			sizeof(payload), 0);
 	return ret;
 }
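The new pm8001_ha->link_rate field replaces the hard-coded LINKRATE_15 | LINKRATE_30 | LINKRATE_60 (| LINKRATE_120) mask, turning the advertised PHY rates into per-HBA policy. Presumably it is seeded from a module parameter at init time; a hypothetical sketch of that wiring, intended to live inside the driver (the parameter name, default, and validation here are illustrative):

    /* Hypothetical sketch: seed pm8001_ha->link_rate from a module
     * parameter, clamped to the rates the bit definitions allow. */
    #include <linux/module.h>

    static ulong link_rate = LINKRATE_15 | LINKRATE_30 | LINKRATE_60 |
                             LINKRATE_120;
    module_param(link_rate, ulong, 0644);
    MODULE_PARM_DESC(link_rate,
            "Enable link rate: 1=1.5G 2=3G 4=6G 8=12G");

    static void set_hba_link_rate(struct pm8001_hba_info *pm8001_ha)
    {
            pm8001_ha->link_rate = link_rate & (LINKRATE_15 | LINKRATE_30 |
                            LINKRATE_60 | LINKRATE_120);
    }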
@@ -4518,7 +4739,8 @@ static int pm80xx_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
 	memset(&payload, 0, sizeof(payload));
 	payload.tag = cpu_to_le32(tag);
 	payload.phy_id = cpu_to_le32(phy_id);
-	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
+	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload,
+			sizeof(payload), 0);
 	return ret;
 }

@@ -4584,7 +4806,8 @@ static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
 	memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
 		SAS_ADDR_SIZE);
 
-	rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+	rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+			sizeof(payload), 0);
 	if (rc)
 		pm8001_tag_free(pm8001_ha, tag);

@@ -4614,7 +4837,8 @@ static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
 	payload.tag = cpu_to_le32(tag);
 	payload.phyop_phyid =
 		cpu_to_le32(((phy_op & 0xFF) << 8) | (phyId & 0xFF));
-	return pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+	return pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+			sizeof(payload), 0);
 }
 
 static u32 pm80xx_chip_is_our_interrupt(struct pm8001_hba_info *pm8001_ha)

@@ -4641,6 +4865,9 @@ static irqreturn_t
 pm80xx_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec)
 {
 	pm80xx_chip_interrupt_disable(pm8001_ha, vec);
+	PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
+		"irq vec %d, ODMR:0x%x\n",
+		vec, pm8001_cr32(pm8001_ha, 0, 0x30)));
 	process_oq(pm8001_ha, vec);
 	pm80xx_chip_interrupt_enable(pm8001_ha, vec);
 	return IRQ_HANDLED;

@@ -4669,7 +4896,8 @@ void mpi_set_phy_profile_req(struct pm8001_hba_info *pm8001_ha,
 			payload.reserved[j] = cpu_to_le32(*((u32 *)buf + i));
 		j++;
 	}
-	rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+	rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+			sizeof(payload), 0);
 	if (rc)
 		pm8001_tag_free(pm8001_ha, tag);
 }

@@ -4711,7 +4939,8 @@ void pm8001_set_phy_profile_single(struct pm8001_hba_info *pm8001_ha,
 	for (i = 0; i < length; i++)
 		payload.reserved[i] = cpu_to_le32(*(buf + i));
 
-	rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+	rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+			sizeof(payload), 0);
 	if (rc)
 		pm8001_tag_free(pm8001_ha, tag);
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h

@@ -220,6 +220,9 @@
 #define SAS_DOPNRJT_RTRY_TMO            128
 #define SAS_COPNRJT_RTRY_TMO            128
 
+#define SPCV_DOORBELL_CLEAR_TIMEOUT	(30 * 1000 * 1000) /* 30 sec */
+#define SPC_DOORBELL_CLEAR_TIMEOUT	(15 * 1000 * 1000) /* 15 sec */
+
 /*
   Making ORR bigger than IT NEXUS LOSS which is 2000000us = 2 second.
   Assuming a bigger value 3 second, 3000000/128 = 23437.5 where 128
diff --git a/drivers/scsi/qedf/qedf_dbg.h b/drivers/scsi/qedf/qedf_dbg.h

@@ -42,7 +42,7 @@ extern uint qedf_debug;
 #define QEDF_LOG_LPORT	0x4000		/* lport logs */
 #define QEDF_LOG_ELS	0x8000		/* ELS logs */
 #define QEDF_LOG_NPIV	0x10000		/* NPIV logs */
-#define QEDF_LOG_SESS	0x20000		/* Conection setup, cleanup */
+#define QEDF_LOG_SESS	0x20000		/* Connection setup, cleanup */
 #define QEDF_LOG_TID	0x80000         /*
 					 * FW TID context acquire
 					 * free
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c

@@ -1926,6 +1926,13 @@ static int qedf_fcoe_reset(struct Scsi_Host *shost)
 	return 0;
 }
 
+static void qedf_get_host_port_id(struct Scsi_Host *shost)
+{
+	struct fc_lport *lport = shost_priv(shost);
+
+	fc_host_port_id(shost) = lport->port_id;
+}
+
 static struct fc_host_statistics *qedf_fc_get_host_stats(struct Scsi_Host
 	*shost)
 {

@@ -1996,6 +2003,7 @@ static struct fc_function_template qedf_fc_transport_fn = {
 	.show_host_active_fc4s = 1,
 	.show_host_maxframe_size = 1,
 
+	.get_host_port_id = qedf_get_host_port_id,
 	.show_host_port_id = 1,
 	.show_host_supported_speeds = 1,
 	.get_host_speed = fc_get_host_speed,
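Registering a get_host_port_id callback means the FC transport refreshes fc_host_port_id() from the live lport on each sysfs read, rather than exposing whatever stale value was cached at attach time. The same hook pattern, as a standalone sketch (my_ names are illustrative):

    /* Sketch: an fc_function_template refresh hook. The transport calls
     * ->get_host_port_id() before exposing the host's port_id attribute. */
    #include <scsi/scsi_host.h>
    #include <scsi/scsi_transport_fc.h>
    #include <scsi/libfc.h>

    static void my_get_host_port_id(struct Scsi_Host *shost)
    {
            struct fc_lport *lport = shost_priv(shost);

            fc_host_port_id(shost) = lport->port_id;   /* refresh cache */
    }

    static struct fc_function_template my_fc_template = {
            .get_host_port_id = my_get_host_port_id,
            .show_host_port_id = 1,
    };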

diff --git a/drivers/scsi/qedi/qedi_dbg.h b/drivers/scsi/qedi/qedi_dbg.h

@@ -44,7 +44,7 @@ extern uint qedi_dbg_log;
 #define QEDI_LOG_LPORT	0x4000		/* lport logs */
 #define QEDI_LOG_ELS	0x8000		/* ELS logs */
 #define QEDI_LOG_NPIV	0x10000		/* NPIV logs */
-#define QEDI_LOG_SESS	0x20000		/* Conection setup, cleanup */
+#define QEDI_LOG_SESS	0x20000		/* Connection setup, cleanup */
 #define QEDI_LOG_UIO	0x40000		/* iSCSI UIO logs */
 #define QEDI_LOG_TID	0x80000         /* FW TID context acquire,
 					 * free
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c

@@ -102,8 +102,10 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
 			qla8044_idc_lock(ha);
 			qla82xx_set_reset_owner(vha);
 			qla8044_idc_unlock(ha);
-		} else
+		} else {
+			ha->fw_dump_mpi = 1;
 			qla2x00_system_error(vha);
+		}
 		break;
 	case 4:
 		if (IS_P3P_TYPE(ha)) {

Some files were not shown because too many files have changed in this diff.