Merge branch 'hfi1-2' into k.o/for-4.7

Doug Ledford 2016-05-26 12:50:05 -04:00
commit 8779e7658d
78 changed files with 553 additions and 2996 deletions

View File

@ -5086,6 +5086,13 @@ F: drivers/block/cciss*
F: include/linux/cciss_ioctl.h
F: include/uapi/linux/cciss_ioctl.h
HFI1 DRIVER
M: Mike Marciniszyn <mike.marciniszyn@intel.com>
M: Dennis Dalessandro <dennis.dalessandro@intel.com>
L: linux-rdma@vger.kernel.org
S: Supported
F: drivers/infiniband/hw/hfi1
HFS FILESYSTEM
L: linux-fsdevel@vger.kernel.org
S: Orphan
@ -10661,12 +10668,6 @@ M: Arnaud Patard <arnaud.patard@rtp-net.org>
S: Odd Fixes
F: drivers/staging/xgifb/
HFI1 DRIVER
M: Mike Marciniszyn <infinipath@intel.com>
L: linux-rdma@vger.kernel.org
S: Supported
F: drivers/staging/rdma/hfi1
STARFIRE/DURALAN NETWORK DRIVER
M: Ion Badulescu <ionut@badula.org>
S: Odd Fixes

View File

@ -85,4 +85,6 @@ source "drivers/infiniband/ulp/isert/Kconfig"
source "drivers/infiniband/sw/rdmavt/Kconfig"
source "drivers/infiniband/hw/hfi1/Kconfig"
endif # INFINIBAND

View File

@ -8,3 +8,4 @@ obj-$(CONFIG_MLX5_INFINIBAND) += mlx5/
obj-$(CONFIG_INFINIBAND_NES) += nes/
obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma/
obj-$(CONFIG_INFINIBAND_USNIC) += usnic/
obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/

View File

@ -7,7 +7,7 @@
#
obj-$(CONFIG_INFINIBAND_HFI1) += hfi1.o
hfi1-y := affinity.o chip.o device.o diag.o driver.o efivar.o \
hfi1-y := affinity.o chip.o device.o driver.o efivar.o \
eprom.o file_ops.o firmware.o \
init.o intr.o mad.o mmu_rb.o pcie.o pio.o pio_copy.o platform.o \
qp.o qsfp.o rc.o ruc.o sdma.o sysfs.o trace.o twsi.o \

View File

@ -1037,6 +1037,7 @@ static void dc_shutdown(struct hfi1_devdata *);
static void dc_start(struct hfi1_devdata *);
static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
unsigned int *np);
static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd);
/*
* Error interrupt table entry. This is used as input to the interrupt
@ -6105,7 +6106,7 @@ int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
}
/* this access is valid only when the link is up */
if ((ppd->host_link_state & HLS_UP) == 0) {
if (ppd->host_link_state & HLS_DOWN) {
dd_dev_info(dd, "%s: link state %s not up\n",
__func__, link_state_name(ppd->host_link_state));
ret = -EBUSY;
@ -6961,6 +6962,8 @@ void handle_link_down(struct work_struct *work)
}
reset_neighbor_info(ppd);
if (ppd->mgmt_allowed)
remove_full_mgmt_pkey(ppd);
/* disable the port */
clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
@ -7069,6 +7072,12 @@ static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
}
static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd)
{
ppd->pkeys[2] = 0;
(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
}
/*
* Convert the given link width to the OPA link width bitmask.
*/
@ -7429,7 +7438,7 @@ void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
retry:
mutex_lock(&ppd->hls_lock);
/* only apply if the link is up */
if (!(ppd->host_link_state & HLS_UP)) {
if (ppd->host_link_state & HLS_DOWN) {
/* still going up..wait and retry */
if (ppd->host_link_state & HLS_GOING_UP) {
if (++tries < 1000) {
@ -9212,9 +9221,6 @@ void reset_qsfp(struct hfi1_pportdata *ppd)
/* Reset the QSFP */
mask = (u64)QSFP_HFI0_RESET_N;
qsfp_mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE);
qsfp_mask |= mask;
write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE, qsfp_mask);
qsfp_mask = read_csr(dd,
dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
@ -9252,6 +9258,12 @@ static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
dd_dev_info(dd, "%s: QSFP cable temperature too low\n",
__func__);
/*
* The remaining alarms/warnings don't matter if the link is down.
*/
if (ppd->host_link_state & HLS_DOWN)
return 0;
if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
(qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
dd_dev_info(dd, "%s: QSFP supply voltage too high\n",
@ -9346,9 +9358,8 @@ void qsfp_event(struct work_struct *work)
return;
/*
* Turn DC back on after cables has been
* re-inserted. Up until now, the DC has been in
* reset to save power.
* Turn DC back on after cable has been re-inserted. Up until
* now, the DC has been in reset to save power.
*/
dc_start(dd);
@ -9480,7 +9491,15 @@ int bringup_serdes(struct hfi1_pportdata *ppd)
return ret;
}
/* tune the SERDES to a ballpark setting for
get_port_type(ppd);
if (ppd->port_type == PORT_TYPE_QSFP) {
set_qsfp_int_n(ppd, 0);
wait_for_qsfp_init(ppd);
set_qsfp_int_n(ppd, 1);
}
/*
* Tune the SerDes to a ballpark setting for
* optimal signal and bit error rate
* Needs to be done before starting the link
*/
@ -10074,7 +10093,7 @@ u32 driver_physical_state(struct hfi1_pportdata *ppd)
*/
u32 driver_logical_state(struct hfi1_pportdata *ppd)
{
if (ppd->host_link_state && !(ppd->host_link_state & HLS_UP))
if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
return IB_PORT_DOWN;
switch (ppd->host_link_state & HLS_UP) {
@ -14578,7 +14597,7 @@ u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
(reason), (ret))
/*
* Initialize the Avago Thermal sensor.
* Initialize the thermal sensor.
*
* After initialization, enable polling of thermal sensor through
* SBus interface. In order for this to work, the SBus Master
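
Reviewer note: the repeated conversions above from "!(ppd->host_link_state & HLS_UP)" to "ppd->host_link_state & HLS_DOWN" rely on HLS_DOWN being defined as the complement of HLS_UP (see the hfi.h hunk later in this diff) and on host_link_state carrying exactly one state bit at a time. A standalone sketch of why the two tests agree; the HLS_* bit values here are illustrative stand-ins, not the driver's real encodings:

#include <assert.h>
#include <stdint.h>

#define HLS_UP_INIT    (1u << 0)
#define HLS_UP_ARMED   (1u << 1)
#define HLS_UP_ACTIVE  (1u << 2)
#define HLS_DN_OFFLINE (1u << 3)   /* one of several down states */
#define HLS_UP   (HLS_UP_INIT | HLS_UP_ARMED | HLS_UP_ACTIVE)
#define HLS_DOWN (~HLS_UP)

int main(void)
{
	const uint32_t states[] = { HLS_UP_INIT, HLS_UP_ARMED,
				    HLS_UP_ACTIVE, HLS_DN_OFFLINE };
	unsigned int i;

	/* with exactly one state bit set, the checks are equivalent */
	for (i = 0; i < 4; i++)
		assert(!!(states[i] & HLS_DOWN) == !(states[i] & HLS_UP));
	return 0;
}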

View File

@ -398,6 +398,12 @@
/* Lane ID for general configuration registers */
#define GENERAL_CONFIG 4
/* LINK_TUNING_PARAMETERS fields */
#define TUNING_METHOD_SHIFT 24
/* LINK_OPTIMIZATION_SETTINGS fields */
#define ENABLE_EXT_DEV_CONFIG_SHIFT 24
/* LOAD_DATA 8051 command shifts and fields */
#define LOAD_DATA_FIELD_ID_SHIFT 40
#define LOAD_DATA_FIELD_ID_MASK 0xfull

View File

@ -178,7 +178,8 @@
HFI1_CAP_PKEY_CHECK | \
HFI1_CAP_NO_INTEGRITY)
#define HFI1_USER_SWVERSION ((HFI1_USER_SWMAJOR << 16) | HFI1_USER_SWMINOR)
#define HFI1_USER_SWVERSION ((HFI1_USER_SWMAJOR << HFI1_SWMAJOR_SHIFT) | \
HFI1_USER_SWMINOR)
#ifndef HFI1_KERN_TYPE
#define HFI1_KERN_TYPE 0
@ -349,6 +350,8 @@ struct hfi1_message_header {
#define HFI1_BECN_MASK 1
#define HFI1_BECN_SMASK BIT(HFI1_BECN_SHIFT)
#define HFI1_PSM_IOC_BASE_SEQ 0x0
static inline __u64 rhf_to_cpu(const __le32 *rbuf)
{
return __le64_to_cpu(*((__le64 *)rbuf));

View File

@ -60,7 +60,8 @@ static dev_t hfi1_dev;
int hfi1_cdev_init(int minor, const char *name,
const struct file_operations *fops,
struct cdev *cdev, struct device **devp,
bool user_accessible)
bool user_accessible,
struct kobject *parent)
{
const dev_t dev = MKDEV(MAJOR(hfi1_dev), minor);
struct device *device = NULL;
@ -68,6 +69,7 @@ int hfi1_cdev_init(int minor, const char *name,
cdev_init(cdev, fops);
cdev->owner = THIS_MODULE;
cdev->kobj.parent = parent;
kobject_set_name(&cdev->kobj, name);
ret = cdev_add(cdev, dev, 1);
@ -82,13 +84,13 @@ int hfi1_cdev_init(int minor, const char *name,
else
device = device_create(class, NULL, dev, NULL, "%s", name);
if (!IS_ERR(device))
goto done;
ret = PTR_ERR(device);
device = NULL;
pr_err("Could not create device for minor %d, %s (err %d)\n",
minor, name, -ret);
cdev_del(cdev);
if (IS_ERR(device)) {
ret = PTR_ERR(device);
device = NULL;
pr_err("Could not create device for minor %d, %s (err %d)\n",
minor, name, -ret);
cdev_del(cdev);
}
done:
*devp = device;
return ret;
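
Reviewer note: the new parent argument is attached to the cdev's embedded kobject before cdev_add(), which takes a reference on kobj.parent, so the owning device data cannot be freed while the character device is registered. A hedged sketch of the pattern; my_cdev_init is a hypothetical helper, not the hfi1 source:

#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/kobject.h>
#include <linux/module.h>

static int my_cdev_init(dev_t devt, const char *name,
			const struct file_operations *fops,
			struct cdev *cdev, struct kobject *parent)
{
	cdev_init(cdev, fops);
	cdev->owner = THIS_MODULE;
	/* cdev_add() grabs a reference on the parent kobject, pinning
	 * the owning structure for the lifetime of the cdev */
	cdev->kobj.parent = parent;
	kobject_set_name(&cdev->kobj, "%s", name);
	return cdev_add(cdev, devt, 1);
}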

View File

@ -50,7 +50,8 @@
int hfi1_cdev_init(int minor, const char *name,
const struct file_operations *fops,
struct cdev *cdev, struct device **devp,
bool user_accessible);
bool user_accessible,
struct kobject *parent);
void hfi1_cdev_cleanup(struct cdev *cdev, struct device **devp);
const char *class_name(void);
int __init dev_init(void);

View File

@ -1161,7 +1161,7 @@ int hfi1_set_lid(struct hfi1_pportdata *ppd, u32 lid, u8 lmc)
ppd->lmc = lmc;
hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LIDLMC, 0);
dd_dev_info(dd, "IB%u:%u got a lid: 0x%x\n", dd->unit, ppd->port, lid);
dd_dev_info(dd, "port %u: got a lid: 0x%x\n", ppd->port, lid);
return 0;
}

View File

@ -0,0 +1,102 @@
/*
* Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <linux/delay.h>
#include "hfi.h"
#include "common.h"
#include "eprom.h"
#define CMD_SHIFT 24
#define CMD_RELEASE_POWERDOWN_NOID ((0xab << CMD_SHIFT))
/* controller interface speeds */
#define EP_SPEED_FULL 0x2 /* full speed */
/*
* How long to wait for the EPROM to become available, in ms.
* The spec 32 Mb EPROM takes around 40s to erase then write.
* Double it for safety.
*/
#define EPROM_TIMEOUT 80000 /* ms */
/*
* Initialize the EPROM handler.
*/
int eprom_init(struct hfi1_devdata *dd)
{
int ret = 0;
/* only the discrete chip has an EPROM */
if (dd->pcidev->device != PCI_DEVICE_ID_INTEL0)
return 0;
/*
* It is OK if both HFIs reset the EPROM as long as they don't
* do it at the same time.
*/
ret = acquire_chip_resource(dd, CR_EPROM, EPROM_TIMEOUT);
if (ret) {
dd_dev_err(dd,
"%s: unable to acquire EPROM resource, no EPROM support\n",
__func__);
goto done_asic;
}
/* reset EPROM to be sure it is in a good state */
/* set reset */
write_csr(dd, ASIC_EEP_CTL_STAT, ASIC_EEP_CTL_STAT_EP_RESET_SMASK);
/* clear reset, set speed */
write_csr(dd, ASIC_EEP_CTL_STAT,
EP_SPEED_FULL << ASIC_EEP_CTL_STAT_RATE_SPI_SHIFT);
/* wake the device with command "release powerdown NoID" */
write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_RELEASE_POWERDOWN_NOID);
dd->eprom_available = true;
release_chip_resource(dd, CR_EPROM);
done_asic:
return ret;
}

View File

@ -72,8 +72,6 @@
*/
static int hfi1_file_open(struct inode *, struct file *);
static int hfi1_file_close(struct inode *, struct file *);
static ssize_t hfi1_file_write(struct file *, const char __user *,
size_t, loff_t *);
static ssize_t hfi1_write_iter(struct kiocb *, struct iov_iter *);
static unsigned int hfi1_poll(struct file *, struct poll_table_struct *);
static int hfi1_file_mmap(struct file *, struct vm_area_struct *);
@ -86,8 +84,7 @@ static int get_ctxt_info(struct file *, void __user *, __u32);
static int get_base_info(struct file *, void __user *, __u32);
static int setup_ctxt(struct file *);
static int setup_subctxt(struct hfi1_ctxtdata *);
static int get_user_context(struct file *, struct hfi1_user_info *,
int, unsigned);
static int get_user_context(struct file *, struct hfi1_user_info *, int);
static int find_shared_ctxt(struct file *, const struct hfi1_user_info *);
static int allocate_ctxt(struct file *, struct hfi1_devdata *,
struct hfi1_user_info *);
@ -97,13 +94,15 @@ static int user_event_ack(struct hfi1_ctxtdata *, int, unsigned long);
static int set_ctxt_pkey(struct hfi1_ctxtdata *, unsigned, u16);
static int manage_rcvq(struct hfi1_ctxtdata *, unsigned, int);
static int vma_fault(struct vm_area_struct *, struct vm_fault *);
static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
unsigned long arg);
static const struct file_operations hfi1_file_ops = {
.owner = THIS_MODULE,
.write = hfi1_file_write,
.write_iter = hfi1_write_iter,
.open = hfi1_file_open,
.release = hfi1_file_close,
.unlocked_ioctl = hfi1_file_ioctl,
.poll = hfi1_poll,
.mmap = hfi1_file_mmap,
.llseek = noop_llseek,
@ -169,6 +168,13 @@ static inline int is_valid_mmap(u64 token)
static int hfi1_file_open(struct inode *inode, struct file *fp)
{
struct hfi1_devdata *dd = container_of(inode->i_cdev,
struct hfi1_devdata,
user_cdev);
/* Just take a ref now. Not all opens result in a context assign */
kobject_get(&dd->kobj);
/* The real work is performed later in assign_ctxt() */
fp->private_data = kzalloc(sizeof(struct hfi1_filedata), GFP_KERNEL);
if (fp->private_data) /* no cpu affinity by default */
@ -176,127 +182,59 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
return fp->private_data ? 0 : -ENOMEM;
}
static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
size_t count, loff_t *offset)
static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
unsigned long arg)
{
const struct hfi1_cmd __user *ucmd;
struct hfi1_filedata *fd = fp->private_data;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_cmd cmd;
struct hfi1_user_info uinfo;
struct hfi1_tid_info tinfo;
int ret = 0;
unsigned long addr;
ssize_t consumed = 0, copy = 0, ret = 0;
void *dest = NULL;
__u64 user_val = 0;
int uctxt_required = 1;
int must_be_root = 0;
int uval = 0;
unsigned long ul_uval = 0;
u16 uval16 = 0;
/* FIXME: This interface cannot continue out of staging */
if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
return -EACCES;
hfi1_cdbg(IOCTL, "IOCTL recv: 0x%x", cmd);
if (cmd != HFI1_IOCTL_ASSIGN_CTXT &&
cmd != HFI1_IOCTL_GET_VERS &&
!uctxt)
return -EINVAL;
if (count < sizeof(cmd)) {
ret = -EINVAL;
goto bail;
}
switch (cmd) {
case HFI1_IOCTL_ASSIGN_CTXT:
if (copy_from_user(&uinfo,
(struct hfi1_user_info __user *)arg,
sizeof(uinfo)))
return -EFAULT;
ucmd = (const struct hfi1_cmd __user *)data;
if (copy_from_user(&cmd, ucmd, sizeof(cmd))) {
ret = -EFAULT;
goto bail;
}
consumed = sizeof(cmd);
switch (cmd.type) {
case HFI1_CMD_ASSIGN_CTXT:
uctxt_required = 0; /* assigned user context not required */
copy = sizeof(uinfo);
dest = &uinfo;
break;
case HFI1_CMD_SDMA_STATUS_UPD:
case HFI1_CMD_CREDIT_UPD:
copy = 0;
break;
case HFI1_CMD_TID_UPDATE:
case HFI1_CMD_TID_FREE:
case HFI1_CMD_TID_INVAL_READ:
copy = sizeof(tinfo);
dest = &tinfo;
break;
case HFI1_CMD_USER_INFO:
case HFI1_CMD_RECV_CTRL:
case HFI1_CMD_POLL_TYPE:
case HFI1_CMD_ACK_EVENT:
case HFI1_CMD_CTXT_INFO:
case HFI1_CMD_SET_PKEY:
case HFI1_CMD_CTXT_RESET:
copy = 0;
user_val = cmd.addr;
break;
case HFI1_CMD_EP_INFO:
case HFI1_CMD_EP_ERASE_CHIP:
case HFI1_CMD_EP_ERASE_RANGE:
case HFI1_CMD_EP_READ_RANGE:
case HFI1_CMD_EP_WRITE_RANGE:
uctxt_required = 0; /* assigned user context not required */
must_be_root = 1; /* validate user */
copy = 0;
break;
default:
ret = -EINVAL;
goto bail;
}
/* If the command comes with user data, copy it. */
if (copy) {
if (copy_from_user(dest, (void __user *)cmd.addr, copy)) {
ret = -EFAULT;
goto bail;
}
consumed += copy;
}
/*
* Make sure there is a uctxt when needed.
*/
if (uctxt_required && !uctxt) {
ret = -EINVAL;
goto bail;
}
/* only root can do these operations */
if (must_be_root && !capable(CAP_SYS_ADMIN)) {
ret = -EPERM;
goto bail;
}
switch (cmd.type) {
case HFI1_CMD_ASSIGN_CTXT:
ret = assign_ctxt(fp, &uinfo);
if (ret < 0)
goto bail;
ret = setup_ctxt(fp);
return ret;
ret = setup_ctxt(fp);
if (ret)
goto bail;
return ret;
ret = user_init(fp);
break;
case HFI1_CMD_CTXT_INFO:
ret = get_ctxt_info(fp, (void __user *)(unsigned long)
user_val, cmd.len);
case HFI1_IOCTL_CTXT_INFO:
ret = get_ctxt_info(fp, (void __user *)(unsigned long)arg,
sizeof(struct hfi1_ctxt_info));
break;
case HFI1_CMD_USER_INFO:
ret = get_base_info(fp, (void __user *)(unsigned long)
user_val, cmd.len);
case HFI1_IOCTL_USER_INFO:
ret = get_base_info(fp, (void __user *)(unsigned long)arg,
sizeof(struct hfi1_base_info));
break;
case HFI1_CMD_SDMA_STATUS_UPD:
break;
case HFI1_CMD_CREDIT_UPD:
case HFI1_IOCTL_CREDIT_UPD:
if (uctxt && uctxt->sc)
sc_return_credits(uctxt->sc);
break;
case HFI1_CMD_TID_UPDATE:
case HFI1_IOCTL_TID_UPDATE:
if (copy_from_user(&tinfo,
(struct hfi1_tid_info __user *)arg,
sizeof(tinfo)))
return -EFAULT;
ret = hfi1_user_exp_rcv_setup(fp, &tinfo);
if (!ret) {
/*
@ -305,57 +243,82 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
* These fields are adjacent in the structure so
* we can copy them at the same time.
*/
addr = (unsigned long)cmd.addr +
offsetof(struct hfi1_tid_info, tidcnt);
addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
sizeof(tinfo.tidcnt) +
sizeof(tinfo.length)))
ret = -EFAULT;
}
break;
case HFI1_CMD_TID_INVAL_READ:
ret = hfi1_user_exp_rcv_invalid(fp, &tinfo);
if (ret)
break;
addr = (unsigned long)cmd.addr +
offsetof(struct hfi1_tid_info, tidcnt);
if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
sizeof(tinfo.tidcnt)))
ret = -EFAULT;
break;
case HFI1_CMD_TID_FREE:
case HFI1_IOCTL_TID_FREE:
if (copy_from_user(&tinfo,
(struct hfi1_tid_info __user *)arg,
sizeof(tinfo)))
return -EFAULT;
ret = hfi1_user_exp_rcv_clear(fp, &tinfo);
if (ret)
break;
addr = (unsigned long)cmd.addr +
offsetof(struct hfi1_tid_info, tidcnt);
addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
sizeof(tinfo.tidcnt)))
ret = -EFAULT;
break;
case HFI1_CMD_RECV_CTRL:
ret = manage_rcvq(uctxt, fd->subctxt, (int)user_val);
case HFI1_IOCTL_TID_INVAL_READ:
if (copy_from_user(&tinfo,
(struct hfi1_tid_info __user *)arg,
sizeof(tinfo)))
return -EFAULT;
ret = hfi1_user_exp_rcv_invalid(fp, &tinfo);
if (ret)
break;
addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
sizeof(tinfo.tidcnt)))
ret = -EFAULT;
break;
case HFI1_CMD_POLL_TYPE:
uctxt->poll_type = (typeof(uctxt->poll_type))user_val;
case HFI1_IOCTL_RECV_CTRL:
ret = get_user(uval, (int __user *)arg);
if (ret != 0)
return -EFAULT;
ret = manage_rcvq(uctxt, fd->subctxt, uval);
break;
case HFI1_CMD_ACK_EVENT:
ret = user_event_ack(uctxt, fd->subctxt, user_val);
case HFI1_IOCTL_POLL_TYPE:
ret = get_user(uval, (int __user *)arg);
if (ret != 0)
return -EFAULT;
uctxt->poll_type = (typeof(uctxt->poll_type))uval;
break;
case HFI1_CMD_SET_PKEY:
case HFI1_IOCTL_ACK_EVENT:
ret = get_user(ul_uval, (unsigned long __user *)arg);
if (ret != 0)
return -EFAULT;
ret = user_event_ack(uctxt, fd->subctxt, ul_uval);
break;
case HFI1_IOCTL_SET_PKEY:
ret = get_user(uval16, (u16 __user *)arg);
if (ret != 0)
return -EFAULT;
if (HFI1_CAP_IS_USET(PKEY_CHECK))
ret = set_ctxt_pkey(uctxt, fd->subctxt, user_val);
ret = set_ctxt_pkey(uctxt, fd->subctxt, uval16);
else
ret = -EPERM;
return -EPERM;
break;
case HFI1_CMD_CTXT_RESET: {
case HFI1_IOCTL_CTXT_RESET: {
struct send_context *sc;
struct hfi1_devdata *dd;
if (!uctxt || !uctxt->dd || !uctxt->sc) {
ret = -EINVAL;
break;
}
if (!uctxt || !uctxt->dd || !uctxt->sc)
return -EINVAL;
/*
* There is no protection here. User level has to
* guarantee that no one will be writing to the send
@ -373,10 +336,9 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
wait_event_interruptible_timeout(
sc->halt_wait, (sc->flags & SCF_HALTED),
msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
if (!(sc->flags & SCF_HALTED)) {
ret = -ENOLCK;
break;
}
if (!(sc->flags & SCF_HALTED))
return -ENOLCK;
/*
* If the send context was halted due to a Freeze,
* wait until the device has been "unfrozen" before
@ -387,18 +349,16 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
dd->event_queue,
!(ACCESS_ONCE(dd->flags) & HFI1_FROZEN),
msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
if (dd->flags & HFI1_FROZEN) {
ret = -ENOLCK;
break;
}
if (dd->flags & HFI1_FORCED_FREEZE) {
if (dd->flags & HFI1_FROZEN)
return -ENOLCK;
if (dd->flags & HFI1_FORCED_FREEZE)
/*
* Don't allow context reset if we are into
* forced freeze
*/
ret = -ENODEV;
break;
}
return -ENODEV;
sc_disable(sc);
ret = sc_enable(sc);
hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB,
@ -410,18 +370,17 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
sc_return_credits(sc);
break;
}
case HFI1_CMD_EP_INFO:
case HFI1_CMD_EP_ERASE_CHIP:
case HFI1_CMD_EP_ERASE_RANGE:
case HFI1_CMD_EP_READ_RANGE:
case HFI1_CMD_EP_WRITE_RANGE:
ret = handle_eprom_command(fp, &cmd);
case HFI1_IOCTL_GET_VERS:
uval = HFI1_USER_SWVERSION;
if (put_user(uval, (int __user *)arg))
return -EFAULT;
break;
default:
return -EINVAL;
}
if (ret >= 0)
ret = consumed;
bail:
return ret;
}
@ -738,7 +697,9 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
{
struct hfi1_filedata *fdata = fp->private_data;
struct hfi1_ctxtdata *uctxt = fdata->uctxt;
struct hfi1_devdata *dd;
struct hfi1_devdata *dd = container_of(inode->i_cdev,
struct hfi1_devdata,
user_cdev);
unsigned long flags, *ev;
fp->private_data = NULL;
@ -747,7 +708,6 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
goto done;
hfi1_cdbg(PROC, "freeing ctxt %u:%u", uctxt->ctxt, fdata->subctxt);
dd = uctxt->dd;
mutex_lock(&hfi1_mutex);
flush_wc();
@ -813,6 +773,7 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
mutex_unlock(&hfi1_mutex);
hfi1_free_ctxtdata(dd, uctxt);
done:
kobject_put(&dd->kobj);
kfree(fdata);
return 0;
}
@ -836,7 +797,7 @@ static u64 kvirt_to_phys(void *addr)
static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo)
{
int i_minor, ret = 0;
unsigned swmajor, swminor, alg = HFI1_ALG_ACROSS;
unsigned int swmajor, swminor;
swmajor = uinfo->userversion >> 16;
if (swmajor != HFI1_USER_SWMAJOR) {
@ -846,9 +807,6 @@ static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo)
swminor = uinfo->userversion & 0xffff;
if (uinfo->hfi1_alg < HFI1_ALG_COUNT)
alg = uinfo->hfi1_alg;
mutex_lock(&hfi1_mutex);
/* First, lets check if we need to setup a shared context? */
if (uinfo->subctxt_cnt) {
@ -868,7 +826,7 @@ static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo)
*/
if (!ret) {
i_minor = iminor(file_inode(fp)) - HFI1_USER_MINOR_BASE;
ret = get_user_context(fp, uinfo, i_minor - 1, alg);
ret = get_user_context(fp, uinfo, i_minor);
}
done_unlock:
mutex_unlock(&hfi1_mutex);
@ -876,71 +834,26 @@ done:
return ret;
}
/* return true if the device available for general use */
static int usable_device(struct hfi1_devdata *dd)
{
struct hfi1_pportdata *ppd = dd->pport;
return driver_lstate(ppd) == IB_PORT_ACTIVE;
}
static int get_user_context(struct file *fp, struct hfi1_user_info *uinfo,
int devno, unsigned alg)
int devno)
{
struct hfi1_devdata *dd = NULL;
int ret = 0, devmax, npresent, nup, dev;
int devmax, npresent, nup;
devmax = hfi1_count_units(&npresent, &nup);
if (!npresent) {
ret = -ENXIO;
goto done;
}
if (!nup) {
ret = -ENETDOWN;
goto done;
}
if (devno >= 0) {
dd = hfi1_lookup(devno);
if (!dd)
ret = -ENODEV;
else if (!dd->freectxts)
ret = -EBUSY;
} else {
struct hfi1_devdata *pdd;
if (!npresent)
return -ENXIO;
if (alg == HFI1_ALG_ACROSS) {
unsigned free = 0U;
if (!nup)
return -ENETDOWN;
for (dev = 0; dev < devmax; dev++) {
pdd = hfi1_lookup(dev);
if (!pdd)
continue;
if (!usable_device(pdd))
continue;
if (pdd->freectxts &&
pdd->freectxts > free) {
dd = pdd;
free = pdd->freectxts;
}
}
} else {
for (dev = 0; dev < devmax; dev++) {
pdd = hfi1_lookup(dev);
if (!pdd)
continue;
if (!usable_device(pdd))
continue;
if (pdd->freectxts) {
dd = pdd;
break;
}
}
}
if (!dd)
ret = -EBUSY;
}
done:
return ret ? ret : allocate_ctxt(fp, dd, uinfo);
dd = hfi1_lookup(devno);
if (!dd)
return -ENODEV;
else if (!dd->freectxts)
return -EBUSY;
return allocate_ctxt(fp, dd, uinfo);
}
static int find_shared_ctxt(struct file *fp,
@ -1546,170 +1459,10 @@ done:
return ret;
}
static int ui_open(struct inode *inode, struct file *filp)
{
struct hfi1_devdata *dd;
dd = container_of(inode->i_cdev, struct hfi1_devdata, ui_cdev);
filp->private_data = dd; /* for other methods */
return 0;
}
static int ui_release(struct inode *inode, struct file *filp)
{
/* nothing to do */
return 0;
}
static loff_t ui_lseek(struct file *filp, loff_t offset, int whence)
{
struct hfi1_devdata *dd = filp->private_data;
return fixed_size_llseek(filp, offset, whence,
(dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE);
}
/* NOTE: assumes unsigned long is 8 bytes */
static ssize_t ui_read(struct file *filp, char __user *buf, size_t count,
loff_t *f_pos)
{
struct hfi1_devdata *dd = filp->private_data;
void __iomem *base = dd->kregbase;
unsigned long total, csr_off,
barlen = (dd->kregend - dd->kregbase);
u64 data;
/* only read 8 byte quantities */
if ((count % 8) != 0)
return -EINVAL;
/* offset must be 8-byte aligned */
if ((*f_pos % 8) != 0)
return -EINVAL;
/* destination buffer must be 8-byte aligned */
if ((unsigned long)buf % 8 != 0)
return -EINVAL;
/* must be in range */
if (*f_pos + count > (barlen + DC8051_DATA_MEM_SIZE))
return -EINVAL;
/* only set the base if we are not starting past the BAR */
if (*f_pos < barlen)
base += *f_pos;
csr_off = *f_pos;
for (total = 0; total < count; total += 8, csr_off += 8) {
/* accessing LCB CSRs requires more checks */
if (is_lcb_offset(csr_off)) {
if (read_lcb_csr(dd, csr_off, (u64 *)&data))
break; /* failed */
}
/*
* Cannot read ASIC GPIO/QSFP* clear and force CSRs without a
* false parity error. Avoid the whole issue by not reading
* them. These registers are defined as having a read value
* of 0.
*/
else if (csr_off == ASIC_GPIO_CLEAR ||
csr_off == ASIC_GPIO_FORCE ||
csr_off == ASIC_QSFP1_CLEAR ||
csr_off == ASIC_QSFP1_FORCE ||
csr_off == ASIC_QSFP2_CLEAR ||
csr_off == ASIC_QSFP2_FORCE)
data = 0;
else if (csr_off >= barlen) {
/*
* read_8051_data can read more than just 8 bytes at
* a time. However, folding this into the loop and
* handling the reads in 8 byte increments allows us
* to smoothly transition from chip memory to 8051
* memory.
*/
if (read_8051_data(dd,
(u32)(csr_off - barlen),
sizeof(data), &data))
break; /* failed */
} else
data = readq(base + total);
if (put_user(data, (unsigned long __user *)(buf + total)))
break;
}
*f_pos += total;
return total;
}
/* NOTE: assumes unsigned long is 8 bytes */
static ssize_t ui_write(struct file *filp, const char __user *buf,
size_t count, loff_t *f_pos)
{
struct hfi1_devdata *dd = filp->private_data;
void __iomem *base;
unsigned long total, data, csr_off;
int in_lcb;
/* only write 8 byte quantities */
if ((count % 8) != 0)
return -EINVAL;
/* offset must be 8-byte aligned */
if ((*f_pos % 8) != 0)
return -EINVAL;
/* source buffer must be 8-byte aligned */
if ((unsigned long)buf % 8 != 0)
return -EINVAL;
/* must be in range */
if (*f_pos + count > dd->kregend - dd->kregbase)
return -EINVAL;
base = (void __iomem *)dd->kregbase + *f_pos;
csr_off = *f_pos;
in_lcb = 0;
for (total = 0; total < count; total += 8, csr_off += 8) {
if (get_user(data, (unsigned long __user *)(buf + total)))
break;
/* accessing LCB CSRs requires a special procedure */
if (is_lcb_offset(csr_off)) {
if (!in_lcb) {
int ret = acquire_lcb_access(dd, 1);
if (ret)
break;
in_lcb = 1;
}
} else {
if (in_lcb) {
release_lcb_access(dd, 1);
in_lcb = 0;
}
}
writeq(data, base + total);
}
if (in_lcb)
release_lcb_access(dd, 1);
*f_pos += total;
return total;
}
static const struct file_operations ui_file_ops = {
.owner = THIS_MODULE,
.llseek = ui_lseek,
.read = ui_read,
.write = ui_write,
.open = ui_open,
.release = ui_release,
};
#define UI_OFFSET 192 /* device minor offset for UI devices */
static int create_ui = 1;
static struct cdev wildcard_cdev;
static struct device *wildcard_device;
static atomic_t user_count = ATOMIC_INIT(0);
static void user_remove(struct hfi1_devdata *dd)
{
if (atomic_dec_return(&user_count) == 0)
hfi1_cdev_cleanup(&wildcard_cdev, &wildcard_device);
hfi1_cdev_cleanup(&dd->user_cdev, &dd->user_device);
hfi1_cdev_cleanup(&dd->ui_cdev, &dd->ui_device);
}
static int user_add(struct hfi1_devdata *dd)
@ -1717,34 +1470,13 @@ static int user_add(struct hfi1_devdata *dd)
char name[10];
int ret;
if (atomic_inc_return(&user_count) == 1) {
ret = hfi1_cdev_init(0, class_name(), &hfi1_file_ops,
&wildcard_cdev, &wildcard_device,
true);
if (ret)
goto done;
}
snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit);
ret = hfi1_cdev_init(dd->unit + 1, name, &hfi1_file_ops,
ret = hfi1_cdev_init(dd->unit, name, &hfi1_file_ops,
&dd->user_cdev, &dd->user_device,
true);
true, &dd->kobj);
if (ret)
goto done;
user_remove(dd);
if (create_ui) {
snprintf(name, sizeof(name),
"%s_ui%d", class_name(), dd->unit);
ret = hfi1_cdev_init(dd->unit + UI_OFFSET, name, &ui_file_ops,
&dd->ui_cdev, &dd->ui_device,
false);
if (ret)
goto done;
}
return 0;
done:
user_remove(dd);
return ret;
}
@ -1753,13 +1485,7 @@ done:
*/
int hfi1_device_create(struct hfi1_devdata *dd)
{
int r, ret;
r = user_add(dd);
ret = hfi1_diag_add(dd);
if (r && !ret)
ret = r;
return ret;
return user_add(dd);
}
/*
@ -1769,5 +1495,4 @@ int hfi1_device_create(struct hfi1_devdata *dd)
void hfi1_device_remove(struct hfi1_devdata *dd)
{
user_remove(dd);
hfi1_diag_remove(dd);
}
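
Reviewer note: the bulk of this file's change replaces the old write()-based command channel with unlocked_ioctl: scalar arguments now arrive via get_user() on the ioctl arg, structures via copy_from_user(), and the old struct hfi1_cmd envelope disappears. A minimal sketch of the dispatch shape; the command numbers, handler, and version word are hypothetical, not the hfi1 ABI:

#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#define MY_IOCTL_SET_VAL  _IOW('x', 1, int)	/* hypothetical numbers */
#define MY_IOCTL_GET_VERS _IOR('x', 2, int)

static long my_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
{
	int uval;

	switch (cmd) {
	case MY_IOCTL_SET_VAL:
		/* scalar argument: fetch directly from the ioctl arg */
		if (get_user(uval, (int __user *)arg))
			return -EFAULT;
		/* ... apply uval ... */
		return 0;
	case MY_IOCTL_GET_VERS:
		uval = 0x10001;		/* assumed version word */
		if (put_user(uval, (int __user *)arg))
			return -EFAULT;
		return 0;
	default:
		return -EINVAL;
	}
}

static const struct file_operations my_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= my_ioctl,
};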

View File

@ -453,6 +453,7 @@ struct rvt_sge_state;
#define HLS_LINK_COOLDOWN BIT(__HLS_LINK_COOLDOWN_BP)
#define HLS_UP (HLS_UP_INIT | HLS_UP_ARMED | HLS_UP_ACTIVE)
#define HLS_DOWN ~(HLS_UP)
/* use this MTU size if none other is given */
#define HFI1_DEFAULT_ACTIVE_MTU 10240
@ -1168,6 +1169,7 @@ struct hfi1_devdata {
atomic_t aspm_disabled_cnt;
struct hfi1_affinity *affinity;
struct kobject kobj;
};
/* 8051 firmware version helper */
@ -1882,9 +1884,8 @@ static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd)
get_unit_name((dd)->unit), ##__VA_ARGS__)
#define hfi1_dev_porterr(dd, port, fmt, ...) \
dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \
get_unit_name((dd)->unit), (dd)->unit, (port), \
##__VA_ARGS__)
dev_err(&(dd)->pcidev->dev, "%s: port %u: " fmt, \
get_unit_name((dd)->unit), (port), ##__VA_ARGS__)
/*
* this is used for formatting hw error messages...

View File

@ -732,12 +732,12 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
lastfail = hfi1_create_rcvhdrq(dd, rcd);
if (!lastfail)
lastfail = hfi1_setup_eagerbufs(rcd);
if (lastfail)
if (lastfail) {
dd_dev_err(dd,
"failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
ret = lastfail;
}
}
if (lastfail)
ret = lastfail;
/* Allocate enough memory for user event notification. */
len = PAGE_ALIGN(dd->chip_rcv_contexts * HFI1_MAX_SHARED_CTXTS *
@ -989,8 +989,10 @@ static void release_asic_data(struct hfi1_devdata *dd)
dd->asic_data = NULL;
}
void hfi1_free_devdata(struct hfi1_devdata *dd)
static void __hfi1_free_devdata(struct kobject *kobj)
{
struct hfi1_devdata *dd =
container_of(kobj, struct hfi1_devdata, kobj);
unsigned long flags;
spin_lock_irqsave(&hfi1_devs_lock, flags);
@ -1007,6 +1009,15 @@ void hfi1_free_devdata(struct hfi1_devdata *dd)
rvt_dealloc_device(&dd->verbs_dev.rdi);
}
static struct kobj_type hfi1_devdata_type = {
.release = __hfi1_free_devdata,
};
void hfi1_free_devdata(struct hfi1_devdata *dd)
{
kobject_put(&dd->kobj);
}
/*
* Allocate our primary per-unit data structure. Must be done via verbs
* allocator, because the verbs cleanup process both does cleanup and
@ -1102,6 +1113,7 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
&pdev->dev,
"Could not alloc cpulist info, cpu affinity might be wrong\n");
}
kobject_init(&dd->kobj, &hfi1_devdata_type);
return dd;
bail:
@ -1300,7 +1312,7 @@ static void cleanup_device_data(struct hfi1_devdata *dd)
spin_lock(&ppd->cc_state_lock);
cc_state = get_cc_state(ppd);
rcu_assign_pointer(ppd->cc_state, NULL);
RCU_INIT_POINTER(ppd->cc_state, NULL);
spin_unlock(&ppd->cc_state_lock);
if (cc_state)
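
Reviewer note: with the change in this file, hfi1_free_devdata() only drops a kobject reference; the actual free runs from the release callback once the last holder (for example an open /dev file, per the file_ops.c hunks above) lets go. A hedged sketch of that lifetime pattern; my_data is a hypothetical type, not the hfi1 source:

#include <linux/kobject.h>
#include <linux/slab.h>

struct my_data {
	struct kobject kobj;
	/* ... device state ... */
};

/* runs only when the last reference is dropped */
static void my_data_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct my_data, kobj));
}

static struct kobj_type my_data_type = {
	.release = my_data_release,
};

static struct my_data *my_data_alloc(void)
{
	struct my_data *d = kzalloc(sizeof(*d), GFP_KERNEL);

	if (d)
		kobject_init(&d->kobj, &my_data_type);	/* refcount = 1 */
	return d;
}

/* "free" just drops the allocator's reference */
static void my_data_free(struct my_data *d)
{
	kobject_put(&d->kobj);
}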

View File

@ -1402,6 +1402,12 @@ static int set_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
if (key == okey)
continue;
/*
* Don't update pkeys[2] when MgmtAllowed is not set for this HFI
* port and the neighbor is a switch.
*/
if (i == 2 && !ppd->mgmt_allowed && ppd->neighbor_type == 1)
continue;
/*
* The SM gives us the complete PKey table. We have
* to ensure that we put the PKeys in the matching
@ -3363,6 +3369,50 @@ static int __subn_get_opa_cong_setting(struct opa_smp *smp, u32 am,
return reply((struct ib_mad_hdr *)smp);
}
/*
* Apply congestion control information stored in the ppd to the
* active structure.
*/
static void apply_cc_state(struct hfi1_pportdata *ppd)
{
struct cc_state *old_cc_state, *new_cc_state;
new_cc_state = kzalloc(sizeof(*new_cc_state), GFP_KERNEL);
if (!new_cc_state)
return;
/*
* Hold the lock for updating *and* to prevent ppd information
* from changing during the update.
*/
spin_lock(&ppd->cc_state_lock);
old_cc_state = get_cc_state(ppd);
if (!old_cc_state) {
/* never active, or shutting down */
spin_unlock(&ppd->cc_state_lock);
kfree(new_cc_state);
return;
}
*new_cc_state = *old_cc_state;
new_cc_state->cct.ccti_limit = ppd->total_cct_entry - 1;
memcpy(new_cc_state->cct.entries, ppd->ccti_entries,
ppd->total_cct_entry * sizeof(struct ib_cc_table_entry));
new_cc_state->cong_setting.port_control = IB_CC_CCS_PC_SL_BASED;
new_cc_state->cong_setting.control_map = ppd->cc_sl_control_map;
memcpy(new_cc_state->cong_setting.entries, ppd->congestion_entries,
OPA_MAX_SLS * sizeof(struct opa_congestion_setting_entry));
rcu_assign_pointer(ppd->cc_state, new_cc_state);
spin_unlock(&ppd->cc_state_lock);
call_rcu(&old_cc_state->rcu, cc_state_reclaim);
}
static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
struct ib_device *ibdev, u8 port,
u32 *resp_len)
@ -3374,6 +3424,11 @@ static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
struct opa_congestion_setting_entry_shadow *entries;
int i;
/*
* Save details from packet into the ppd. Hold the cc_state_lock so
* our information is consistent with anyone trying to apply the state.
*/
spin_lock(&ppd->cc_state_lock);
ppd->cc_sl_control_map = be32_to_cpu(p->control_map);
entries = ppd->congestion_entries;
@ -3384,6 +3439,10 @@ static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
p->entries[i].trigger_threshold;
entries[i].ccti_min = p->entries[i].ccti_min;
}
spin_unlock(&ppd->cc_state_lock);
/* now apply the information */
apply_cc_state(ppd);
return __subn_get_opa_cong_setting(smp, am, data, ibdev, port,
resp_len);
@ -3526,7 +3585,6 @@ static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
int i, j;
u32 sentry, eentry;
u16 ccti_limit;
struct cc_state *old_cc_state, *new_cc_state;
/* sanity check n_blocks, start_block */
if (n_blocks == 0 ||
@ -3546,45 +3604,20 @@ static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
return reply((struct ib_mad_hdr *)smp);
}
new_cc_state = kzalloc(sizeof(*new_cc_state), GFP_KERNEL);
if (!new_cc_state)
goto getit;
/*
* Save details from packet into the ppd. Hold the cc_state_lock so
* our information is consistent with anyone trying to apply the state.
*/
spin_lock(&ppd->cc_state_lock);
old_cc_state = get_cc_state(ppd);
if (!old_cc_state) {
spin_unlock(&ppd->cc_state_lock);
kfree(new_cc_state);
return reply((struct ib_mad_hdr *)smp);
}
*new_cc_state = *old_cc_state;
new_cc_state->cct.ccti_limit = ccti_limit;
entries = ppd->ccti_entries;
ppd->total_cct_entry = ccti_limit + 1;
entries = ppd->ccti_entries;
for (j = 0, i = sentry; i < eentry; j++, i++)
entries[i].entry = be16_to_cpu(p->ccti_entries[j].entry);
memcpy(new_cc_state->cct.entries, entries,
eentry * sizeof(struct ib_cc_table_entry));
new_cc_state->cong_setting.port_control = IB_CC_CCS_PC_SL_BASED;
new_cc_state->cong_setting.control_map = ppd->cc_sl_control_map;
memcpy(new_cc_state->cong_setting.entries, ppd->congestion_entries,
OPA_MAX_SLS * sizeof(struct opa_congestion_setting_entry));
rcu_assign_pointer(ppd->cc_state, new_cc_state);
spin_unlock(&ppd->cc_state_lock);
call_rcu(&old_cc_state->rcu, cc_state_reclaim);
/* now apply the information */
apply_cc_state(ppd);
getit:
return __subn_get_opa_cc_table(smp, am, data, ibdev, port, resp_len);
}
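
Reviewer note: apply_cc_state() factors the two setters onto a classic RCU copy/update/publish sequence: build a new cc_state from the current one under cc_state_lock, publish it with rcu_assign_pointer(), and reclaim the old copy after a grace period so lockless readers never see a half-updated structure. A minimal sketch of the same pattern; the cfg type is hypothetical and kfree_rcu() stands in for the driver's call_rcu() callback:

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct cfg {
	struct rcu_head rcu;
	int value;
};

static struct cfg __rcu *active_cfg;
static DEFINE_SPINLOCK(cfg_lock);

static int cfg_update(int new_value)
{
	struct cfg *newc, *oldc;

	newc = kzalloc(sizeof(*newc), GFP_KERNEL);
	if (!newc)
		return -ENOMEM;

	spin_lock(&cfg_lock);
	oldc = rcu_dereference_protected(active_cfg,
					 lockdep_is_held(&cfg_lock));
	if (oldc)
		*newc = *oldc;		/* start from the current state */
	newc->value = new_value;
	rcu_assign_pointer(active_cfg, newc);	/* publish atomically */
	spin_unlock(&cfg_lock);

	if (oldc)
		kfree_rcu(oldc, rcu);	/* defer free past all readers */
	return 0;
}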

View File

@ -45,6 +45,7 @@
*
*/
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/interval_tree_generic.h>
@ -97,7 +98,6 @@ static unsigned long mmu_node_last(struct mmu_rb_node *node)
int hfi1_mmu_rb_register(struct rb_root *root, struct mmu_rb_ops *ops)
{
struct mmu_rb_handler *handlr;
unsigned long flags;
if (!ops->invalidate)
return -EINVAL;
@ -111,9 +111,9 @@ int hfi1_mmu_rb_register(struct rb_root *root, struct mmu_rb_ops *ops)
INIT_HLIST_NODE(&handlr->mn.hlist);
spin_lock_init(&handlr->lock);
handlr->mn.ops = &mn_opts;
spin_lock_irqsave(&mmu_rb_lock, flags);
list_add_tail(&handlr->list, &mmu_rb_handlers);
spin_unlock_irqrestore(&mmu_rb_lock, flags);
spin_lock(&mmu_rb_lock);
list_add_tail_rcu(&handlr->list, &mmu_rb_handlers);
spin_unlock(&mmu_rb_lock);
return mmu_notifier_register(&handlr->mn, current->mm);
}
@ -130,9 +130,10 @@ void hfi1_mmu_rb_unregister(struct rb_root *root)
if (current->mm)
mmu_notifier_unregister(&handler->mn, current->mm);
spin_lock_irqsave(&mmu_rb_lock, flags);
list_del(&handler->list);
spin_unlock_irqrestore(&mmu_rb_lock, flags);
spin_lock(&mmu_rb_lock);
list_del_rcu(&handler->list);
spin_unlock(&mmu_rb_lock);
synchronize_rcu();
spin_lock_irqsave(&handler->lock, flags);
if (!RB_EMPTY_ROOT(root)) {
@ -271,16 +272,15 @@ void hfi1_mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node)
static struct mmu_rb_handler *find_mmu_handler(struct rb_root *root)
{
struct mmu_rb_handler *handler;
unsigned long flags;
spin_lock_irqsave(&mmu_rb_lock, flags);
list_for_each_entry(handler, &mmu_rb_handlers, list) {
rcu_read_lock();
list_for_each_entry_rcu(handler, &mmu_rb_handlers, list) {
if (handler->root == root)
goto unlock;
}
handler = NULL;
unlock:
spin_unlock_irqrestore(&mmu_rb_lock, flags);
rcu_read_unlock();
return handler;
}
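
Reviewer note: the handler list drops the IRQ-saving spinlock on lookups in favor of RCU: writers still serialize on mmu_rb_lock, but find_mmu_handler() now walks the list under rcu_read_lock() only, and unregister waits out readers with synchronize_rcu(). A minimal sketch of the conversion; the names are hypothetical:

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>

struct handler {
	struct list_head list;
	void *key;
};

static LIST_HEAD(handlers);
static DEFINE_SPINLOCK(handlers_lock);

static void handler_add(struct handler *h)
{
	spin_lock(&handlers_lock);
	list_add_tail_rcu(&h->list, &handlers);
	spin_unlock(&handlers_lock);
}

static void handler_del(struct handler *h)
{
	spin_lock(&handlers_lock);
	list_del_rcu(&h->list);
	spin_unlock(&handlers_lock);
	synchronize_rcu();	/* wait out readers before reuse/free */
}

static struct handler *handler_find(void *key)
{
	struct handler *h, *found = NULL;

	rcu_read_lock();	/* lockless, safe against concurrent add/del */
	list_for_each_entry_rcu(h, &handlers, list) {
		if (h->key == key) {
			found = h;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}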

View File

@ -1835,8 +1835,7 @@ int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts)
struct pio_vl_map *oldmap, *newmap;
if (!vl_scontexts) {
/* send context 0 reserved for VL15 */
for (i = 1; i < dd->num_send_contexts; i++)
for (i = 0; i < dd->num_send_contexts; i++)
if (dd->send_contexts[i].type == SC_KERNEL)
num_kernel_send_contexts++;
/* truncate divide */

View File

@ -49,10 +49,10 @@
/* send context types */
#define SC_KERNEL 0
#define SC_ACK 1
#define SC_USER 2
#define SC_VL15 3
#define SC_MAX 4
#define SC_VL15 1
#define SC_ACK 2
#define SC_USER 3 /* must be last: it may take all remaining contexts */
#define SC_MAX 4 /* count of send context types */
/* invalid send context index */
#define INVALID_SCI 0xff

View File

@ -87,6 +87,17 @@ void free_platform_config(struct hfi1_devdata *dd)
*/
}
void get_port_type(struct hfi1_pportdata *ppd)
{
int ret;
ret = get_platform_config_field(ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
PORT_TABLE_PORT_TYPE, &ppd->port_type,
4);
if (ret)
ppd->port_type = PORT_TYPE_UNKNOWN;
}
int set_qsfp_tx(struct hfi1_pportdata *ppd, int on)
{
u8 tx_ctrl_byte = on ? 0x0 : 0xF;
@ -529,7 +540,8 @@ static void apply_tunings(
/* Enable external device config if channel is limiting active */
read_8051_config(ppd->dd, LINK_OPTIMIZATION_SETTINGS,
GENERAL_CONFIG, &config_data);
config_data |= limiting_active;
config_data &= ~(0xff << ENABLE_EXT_DEV_CONFIG_SHIFT);
config_data |= ((u32)limiting_active << ENABLE_EXT_DEV_CONFIG_SHIFT);
ret = load_8051_config(ppd->dd, LINK_OPTIMIZATION_SETTINGS,
GENERAL_CONFIG, config_data);
if (ret != HCMD_SUCCESS)
@ -542,7 +554,8 @@ static void apply_tunings(
/* Pass tuning method to 8051 */
read_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
&config_data);
config_data |= tuning_method;
config_data &= ~(0xff << TUNING_METHOD_SHIFT);
config_data |= ((u32)tuning_method << TUNING_METHOD_SHIFT);
ret = load_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
config_data);
if (ret != HCMD_SUCCESS)
@ -564,8 +577,8 @@ static void apply_tunings(
ret = read_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
GENERAL_CONFIG, &config_data);
/* Clear, then set the external device config field */
config_data &= ~(0xFF << 24);
config_data |= (external_device_config << 24);
config_data &= ~(u32)0xFF;
config_data |= external_device_config;
ret = load_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
GENERAL_CONFIG, config_data);
if (ret != HCMD_SUCCESS)
@ -784,12 +797,6 @@ void tune_serdes(struct hfi1_pportdata *ppd)
return;
}
ret = get_platform_config_field(ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
PORT_TABLE_PORT_TYPE, &ppd->port_type,
4);
if (ret)
ppd->port_type = PORT_TYPE_UNKNOWN;
switch (ppd->port_type) {
case PORT_TYPE_DISCONNECTED:
ppd->offline_disabled_reason =
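
Reviewer note: the two tuning hunks above fix a read-modify-write bug: the 8-bit tuning-method and external-device-config fields live at bit 24 of their GENERAL_CONFIG words, but the old code OR'ed the raw value into bits 0-7 and never cleared a stale field. A standalone demonstration of the clear-then-insert fix:

#include <assert.h>
#include <stdint.h>

#define TUNING_METHOD_SHIFT 24

static uint32_t set_tuning_method(uint32_t config, uint8_t method)
{
	config &= ~(0xffu << TUNING_METHOD_SHIFT);	/* clear stale field */
	config |= (uint32_t)method << TUNING_METHOD_SHIFT;
	return config;
}

int main(void)
{
	/* stale method 0x05 in the top byte, other fields populated */
	uint32_t cfg = 0x05aabbcc;

	cfg = set_tuning_method(cfg, 0x2);
	assert(cfg == 0x02aabbcc);	/* field replaced, low bits intact */
	return 0;
}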

View File

@ -298,6 +298,7 @@ enum link_tuning_encoding {
/* platform.c */
void get_platform_config(struct hfi1_devdata *dd);
void free_platform_config(struct hfi1_devdata *dd);
void get_port_type(struct hfi1_pportdata *ppd);
int set_qsfp_tx(struct hfi1_pportdata *ppd, int on);
void tune_serdes(struct hfi1_pportdata *ppd);

View File

@ -49,7 +49,6 @@
#include <linux/vmalloc.h>
#include <linux/hash.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>
@ -161,9 +160,6 @@ static inline int opa_mtu_enum_to_int(int mtu)
* This function is what we would push to the core layer if we wanted to be a
* "first class citizen". Instead we hide this here and rely on Verbs ULPs
* to blindly pass the MTU enum value from the PathRecord to us.
*
* The actual flag used to determine "8k MTU" will change and is currently
* unknown.
*/
static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
{
@ -516,6 +512,7 @@ static void iowait_wakeup(struct iowait *wait, int reason)
static void iowait_sdma_drained(struct iowait *wait)
{
struct rvt_qp *qp = iowait_to_qp(wait);
unsigned long flags;
/*
* This happens when the send engine notes
@ -523,12 +520,12 @@ static void iowait_sdma_drained(struct iowait *wait)
* do the flush work until that QP's
* sdma work has finished.
*/
spin_lock(&qp->s_lock);
spin_lock_irqsave(&qp->s_lock, flags);
if (qp->s_flags & RVT_S_WAIT_DMA) {
qp->s_flags &= ~RVT_S_WAIT_DMA;
hfi1_schedule_send(qp);
}
spin_unlock(&qp->s_lock);
spin_unlock_irqrestore(&qp->s_lock, flags);
}
/**
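
Reviewer note: iowait_sdma_drained() switches to the IRQ-saving lock variants because s_lock is also taken from interrupt-disabled contexts, so the function must preserve whatever interrupt state its caller had rather than assume process context. A hedged sketch of the safe form with hypothetical names:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(s_lock);
static unsigned int s_flags;
#define S_WAIT_DMA 0x1			/* hypothetical flag bit */

/* safe from any calling context: caller's IRQ state is saved/restored */
static void drained(void)
{
	unsigned long flags;

	spin_lock_irqsave(&s_lock, flags);
	if (s_flags & S_WAIT_DMA) {
		s_flags &= ~S_WAIT_DMA;
		/* reschedule the send engine here */
	}
	spin_unlock_irqrestore(&s_lock, flags);
}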

View File

@ -134,6 +134,7 @@ static const char * const sdma_state_names[] = {
[sdma_state_s99_running] = "s99_Running",
};
#ifdef CONFIG_SDMA_VERBOSITY
static const char * const sdma_event_names[] = {
[sdma_event_e00_go_hw_down] = "e00_GoHwDown",
[sdma_event_e10_go_hw_start] = "e10_GoHwStart",
@ -150,6 +151,7 @@ static const char * const sdma_event_names[] = {
[sdma_event_e85_link_down] = "e85_LinkDown",
[sdma_event_e90_sw_halted] = "e90_SwHalted",
};
#endif
static const struct sdma_set_state_action sdma_action_table[] = {
[sdma_state_s00_hw_down] = {
@ -376,7 +378,7 @@ static inline void complete_tx(struct sdma_engine *sde,
sdma_txclean(sde->dd, tx);
if (complete)
(*complete)(tx, res);
if (iowait_sdma_dec(wait) && wait)
if (wait && iowait_sdma_dec(wait))
iowait_drain_wakeup(wait);
}

View File

@ -721,8 +721,8 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
}
dd_dev_info(dd,
"IB%u: Congestion Control Agent enabled for port %d\n",
dd->unit, port_num);
"Congestion Control Agent enabled for port %d\n",
port_num);
return 0;

View File

@ -66,6 +66,7 @@ u8 ibhdr_exhdr_len(struct hfi1_ib_header *hdr)
#define RETH_PRN "reth vaddr 0x%.16llx rkey 0x%.8x dlen 0x%.8x"
#define AETH_PRN "aeth syn 0x%.2x %s msn 0x%.8x"
#define DETH_PRN "deth qkey 0x%.8x sqpn 0x%.6x"
#define IETH_PRN "ieth rkey 0x%.8x"
#define ATOMICACKETH_PRN "origdata %lld"
#define ATOMICETH_PRN "vaddr 0x%llx rkey 0x%.8x sdata %lld cdata %lld"
@ -166,6 +167,12 @@ const char *parse_everbs_hdrs(
be32_to_cpu(eh->ud.deth[0]),
be32_to_cpu(eh->ud.deth[1]) & RVT_QPN_MASK);
break;
/* ieth */
case OP(RC, SEND_LAST_WITH_INVALIDATE):
case OP(RC, SEND_ONLY_WITH_INVALIDATE):
trace_seq_printf(p, IETH_PRN,
be32_to_cpu(eh->ieth));
break;
}
trace_seq_putc(p, 0);
return ret;
@ -233,3 +240,4 @@ __hfi1_trace_fn(FIRMWARE);
__hfi1_trace_fn(RCVCTRL);
__hfi1_trace_fn(TID);
__hfi1_trace_fn(MMU);
__hfi1_trace_fn(IOCTL);

View File

@ -74,8 +74,8 @@ __print_symbolic(etype, \
TRACE_EVENT(hfi1_rcvhdr,
TP_PROTO(struct hfi1_devdata *dd,
u64 eflags,
u32 ctxt,
u64 eflags,
u32 etype,
u32 hlen,
u32 tlen,
@ -392,6 +392,8 @@ __print_symbolic(opcode, \
ib_opcode_name(RC_ATOMIC_ACKNOWLEDGE), \
ib_opcode_name(RC_COMPARE_SWAP), \
ib_opcode_name(RC_FETCH_ADD), \
ib_opcode_name(RC_SEND_LAST_WITH_INVALIDATE), \
ib_opcode_name(RC_SEND_ONLY_WITH_INVALIDATE), \
ib_opcode_name(UC_SEND_FIRST), \
ib_opcode_name(UC_SEND_MIDDLE), \
ib_opcode_name(UC_SEND_LAST), \
@ -1341,6 +1343,7 @@ __hfi1_trace_def(FIRMWARE);
__hfi1_trace_def(RCVCTRL);
__hfi1_trace_def(TID);
__hfi1_trace_def(MMU);
__hfi1_trace_def(IOCTL);
#define hfi1_cdbg(which, fmt, ...) \
__hfi1_trace_##which(__func__, fmt, ##__VA_ARGS__)

View File

@ -166,6 +166,8 @@ static unsigned initial_pkt_count = 8;
#define SDMA_IOWAIT_TIMEOUT 1000 /* in milliseconds */
struct sdma_mmu_node;
struct user_sdma_iovec {
struct list_head list;
struct iovec iov;
@ -178,6 +180,7 @@ struct user_sdma_iovec {
* which we last left off.
*/
u64 offset;
struct sdma_mmu_node *node;
};
#define SDMA_CACHE_NODE_EVICT BIT(0)
@ -507,6 +510,7 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
struct sdma_req_info info;
struct user_sdma_request *req;
u8 opcode, sc, vl;
int req_queued = 0;
if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) {
hfi1_cdbg(
@ -703,6 +707,7 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
atomic_inc(&pq->n_reqs);
req_queued = 1;
/* Send the first N packets in the request to buy us some time */
ret = user_sdma_send_pkts(req, pcount);
if (unlikely(ret < 0 && ret != -EBUSY)) {
@ -747,7 +752,8 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
return 0;
free_req:
user_sdma_free_request(req, true);
pq_update(pq);
if (req_queued)
pq_update(pq);
set_comp_state(pq, cq, info.comp_idx, ERROR, req->status);
return ret;
}
@ -1153,6 +1159,7 @@ retry:
}
iovec->pages = node->pages;
iovec->npages = npages;
iovec->node = node;
ret = hfi1_mmu_rb_insert(&req->pq->sdma_rb_root, &node->rb);
if (ret) {
@ -1519,18 +1526,13 @@ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
}
if (req->data_iovs) {
struct sdma_mmu_node *node;
struct mmu_rb_node *mnode;
int i;
for (i = 0; i < req->data_iovs; i++) {
mnode = hfi1_mmu_rb_search(
&req->pq->sdma_rb_root,
(unsigned long)req->iovs[i].iov.iov_base,
req->iovs[i].iov.iov_len);
if (!mnode || IS_ERR(mnode))
node = req->iovs[i].node;
if (!node)
continue;
node = container_of(mnode, struct sdma_mmu_node, rb);
if (unpin)
hfi1_mmu_rb_remove(&req->pq->sdma_rb_root,
&node->rb);

View File

@ -52,7 +52,6 @@
#include <linux/utsname.h>
#include <linux/rculist.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include "hfi.h"
@ -336,6 +335,8 @@ const u8 hdr_len_by_opcode[256] = {
[IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = 12 + 8 + 4,
[IB_OPCODE_RC_COMPARE_SWAP] = 12 + 8 + 28,
[IB_OPCODE_RC_FETCH_ADD] = 12 + 8 + 28,
[IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE] = 12 + 8 + 4,
[IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE] = 12 + 8 + 4,
/* UC */
[IB_OPCODE_UC_SEND_FIRST] = 12 + 8,
[IB_OPCODE_UC_SEND_MIDDLE] = 12 + 8,
@ -946,7 +947,6 @@ static int pio_wait(struct rvt_qp *qp,
dev->n_piowait += !!(flag & RVT_S_WAIT_PIO);
dev->n_piodrain += !!(flag & RVT_S_WAIT_PIO_DRAIN);
dev->n_piowait++;
qp->s_flags |= flag;
was_empty = list_empty(&sc->piowait);
list_add_tail(&priv->s_iowait.list, &sc->piowait);

View File

@ -152,6 +152,7 @@ union ib_ehdrs {
} at;
__be32 imm_data;
__be32 aeth;
__be32 ieth;
struct ib_atomic_eth atomic_eth;
} __packed;

View File

@ -1308,21 +1308,6 @@ static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
SYM_LSB(IntMask, fldname##17IntMask)), \
.msg = #fldname "_C", .sz = sizeof(#fldname "_C") }
static const struct qib_hwerror_msgs qib_7322_intr_msgs[] = {
INTR_AUTO_P(SDmaInt),
INTR_AUTO_P(SDmaProgressInt),
INTR_AUTO_P(SDmaIdleInt),
INTR_AUTO_P(SDmaCleanupDone),
INTR_AUTO_C(RcvUrg),
INTR_AUTO_P(ErrInt),
INTR_AUTO(ErrInt), /* non-port-specific errs */
INTR_AUTO(AssertGPIOInt),
INTR_AUTO_P(SendDoneInt),
INTR_AUTO(SendBufAvailInt),
INTR_AUTO_C(RcvAvail),
{ .mask = 0, .sz = 0 }
};
#define TXSYMPTOM_AUTO_P(fldname) \
{ .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \
.msg = #fldname, .sz = sizeof(#fldname) }

View File

@ -159,6 +159,7 @@ struct qib_other_headers {
} at;
__be32 imm_data;
__be32 aeth;
__be32 ieth;
struct ib_atomic_eth atomic_eth;
} u;
} __packed;

View File

@ -525,6 +525,7 @@ int rvt_driver_cq_init(struct rvt_dev_info *rdi)
return PTR_ERR(task);
}
set_user_nice(task, MIN_NICE);
cpu = cpumask_first(cpumask_of_node(rdi->dparms.node));
kthread_bind(task, cpu);
wake_up_process(task);
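
Reviewer note: the CQ completion kthread is now bound to a CPU on the device's NUMA node before it is first woken, keeping completion processing local to the device's memory. A hedged sketch of the create/nice/bind/wake sequence with a hypothetical worker:

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/topology.h>

static int my_worker(void *arg)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();		/* placeholder work loop */
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static struct task_struct *start_node_worker(int node)
{
	struct task_struct *task;

	task = kthread_create(my_worker, NULL, "my_worker/%d", node);
	if (IS_ERR(task))
		return task;
	set_user_nice(task, MIN_NICE);
	/* pin to a CPU local to the device's memory before first run */
	kthread_bind(task, cpumask_first(cpumask_of_node(node)));
	wake_up_process(task);
	return task;
}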

View File

@ -124,11 +124,13 @@ static int rvt_init_mregion(struct rvt_mregion *mr, struct ib_pd *pd,
int count)
{
int m, i = 0;
struct rvt_dev_info *dev = ib_to_rvt(pd->device);
mr->mapsz = 0;
m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ;
for (; i < m; i++) {
mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL);
mr->map[i] = kzalloc_node(sizeof(*mr->map[0]), GFP_KERNEL,
dev->dparms.node);
if (!mr->map[i]) {
rvt_deinit_mregion(mr);
return -ENOMEM;

View File

@ -397,6 +397,7 @@ static void free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
{
unsigned n;
struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
rvt_put_ss(&qp->s_rdma_read_sge);
@ -431,7 +432,7 @@ static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
if (qp->ibqp.qp_type != IB_QPT_RC)
return;
for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
for (n = 0; n < rvt_max_atomic(rdi); n++) {
struct rvt_ack_entry *e = &qp->s_ack_queue[n];
if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
@ -569,7 +570,12 @@ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
qp->s_ssn = 1;
qp->s_lsn = 0;
qp->s_mig_state = IB_MIG_MIGRATED;
memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
if (qp->s_ack_queue)
memset(
qp->s_ack_queue,
0,
rvt_max_atomic(rdi) *
sizeof(*qp->s_ack_queue));
qp->r_head_ack_queue = 0;
qp->s_tail_ack_queue = 0;
qp->s_num_rd_atomic = 0;
@ -653,9 +659,9 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
if (gfp == GFP_NOIO)
swq = __vmalloc(
(init_attr->cap.max_send_wr + 1) * sz,
gfp, PAGE_KERNEL);
gfp | __GFP_ZERO, PAGE_KERNEL);
else
swq = vmalloc_node(
swq = vzalloc_node(
(init_attr->cap.max_send_wr + 1) * sz,
rdi->dparms.node);
if (!swq)
@ -677,6 +683,16 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
goto bail_swq;
RCU_INIT_POINTER(qp->next, NULL);
if (init_attr->qp_type == IB_QPT_RC) {
qp->s_ack_queue =
kzalloc_node(
sizeof(*qp->s_ack_queue) *
rvt_max_atomic(rdi),
gfp,
rdi->dparms.node);
if (!qp->s_ack_queue)
goto bail_qp;
}
/*
* Driver needs to set up it's private QP structure and do any
@ -704,9 +720,9 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
qp->r_rq.wq = __vmalloc(
sizeof(struct rvt_rwq) +
qp->r_rq.size * sz,
gfp, PAGE_KERNEL);
gfp | __GFP_ZERO, PAGE_KERNEL);
else
qp->r_rq.wq = vmalloc_node(
qp->r_rq.wq = vzalloc_node(
sizeof(struct rvt_rwq) +
qp->r_rq.size * sz,
rdi->dparms.node);
@ -857,6 +873,7 @@ bail_driver_priv:
rdi->driver_f.qp_priv_free(rdi, qp);
bail_qp:
kfree(qp->s_ack_queue);
kfree(qp);
bail_swq:
@ -1284,6 +1301,7 @@ int rvt_destroy_qp(struct ib_qp *ibqp)
vfree(qp->r_rq.wq);
vfree(qp->s_wq);
rdi->driver_f.qp_priv_free(rdi, qp);
kfree(qp->s_ack_queue);
kfree(qp);
return 0;
}
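
Reviewer note: s_ack_queue stops being a fixed worst-case array inside rvt_qp. RC QPs now allocate it at create time, sized by rvt_max_atomic() and placed on the device's NUMA node, and free it on both the error and destroy paths above. A hedged sketch of the shape; the types are hypothetical and kcalloc_node() stands in for the driver's kzalloc_node() call:

#include <linux/slab.h>
#include <linux/types.h>

struct ack_entry {
	u32 opcode;
	/* ... */
};

struct my_qp {
	struct ack_entry *s_ack_queue;	/* was a fixed-size array member */
};

/* size by the driver's real atomic limit, not a compile-time maximum */
static int qp_alloc_ack_queue(struct my_qp *qp, unsigned int max_atomic,
			      int node)
{
	qp->s_ack_queue = kcalloc_node(max_atomic, sizeof(*qp->s_ack_queue),
				       GFP_KERNEL, node);
	return qp->s_ack_queue ? 0 : -ENOMEM;
}

/* needed on both the create-error and destroy paths */
static void qp_free_ack_queue(struct my_qp *qp)
{
	kfree(qp->s_ack_queue);
	qp->s_ack_queue = NULL;
}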

View File

@ -22,6 +22,4 @@ menuconfig STAGING_RDMA
# Please keep entries in alphabetic order
if STAGING_RDMA
source "drivers/staging/rdma/hfi1/Kconfig"
endif

View File

@ -1,2 +1 @@
# Entries for RDMA_STAGING tree
obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/

View File

@ -1,6 +0,0 @@
July, 2015
- Remove unneeded file entries in sysfs
- Remove software processing of IB protocol and place in library for use
by qib, ipath (if still present), hfi1, and eventually soft-roce
- Replace incorrect uAPI

File diff suppressed because it is too large

View File

@ -1,471 +0,0 @@
/*
* Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <linux/delay.h>
#include "hfi.h"
#include "common.h"
#include "eprom.h"
/*
* The EPROM is logically divided into three partitions:
* partition 0: the first 128K, visible from PCI ROM BAR
* partition 1: 4K config file (sector size)
* partition 2: the rest
*/
#define P0_SIZE (128 * 1024)
#define P1_SIZE (4 * 1024)
#define P1_START P0_SIZE
#define P2_START (P0_SIZE + P1_SIZE)
/* erase sizes supported by the controller */
#define SIZE_4KB (4 * 1024)
#define MASK_4KB (SIZE_4KB - 1)
#define SIZE_32KB (32 * 1024)
#define MASK_32KB (SIZE_32KB - 1)
#define SIZE_64KB (64 * 1024)
#define MASK_64KB (SIZE_64KB - 1)
/* controller page size, in bytes */
#define EP_PAGE_SIZE 256
#define EEP_PAGE_MASK (EP_PAGE_SIZE - 1)
/* controller commands */
#define CMD_SHIFT 24
#define CMD_NOP (0)
#define CMD_PAGE_PROGRAM(addr) ((0x02 << CMD_SHIFT) | addr)
#define CMD_READ_DATA(addr) ((0x03 << CMD_SHIFT) | addr)
#define CMD_READ_SR1 ((0x05 << CMD_SHIFT))
#define CMD_WRITE_ENABLE ((0x06 << CMD_SHIFT))
#define CMD_SECTOR_ERASE_4KB(addr) ((0x20 << CMD_SHIFT) | addr)
#define CMD_SECTOR_ERASE_32KB(addr) ((0x52 << CMD_SHIFT) | addr)
#define CMD_CHIP_ERASE ((0x60 << CMD_SHIFT))
#define CMD_READ_MANUF_DEV_ID ((0x90 << CMD_SHIFT))
#define CMD_RELEASE_POWERDOWN_NOID ((0xab << CMD_SHIFT))
#define CMD_SECTOR_ERASE_64KB(addr) ((0xd8 << CMD_SHIFT) | addr)
/* controller interface speeds */
#define EP_SPEED_FULL 0x2 /* full speed */
/* controller status register 1 bits */
#define SR1_BUSY 0x1ull /* the BUSY bit in SR1 */
/* sleep length while waiting for controller */
#define WAIT_SLEEP_US 100 /* must be larger than 5 (see usage) */
#define COUNT_DELAY_SEC(n) ((n) * (1000000 / WAIT_SLEEP_US))
/* GPIO pins */
#define EPROM_WP_N BIT_ULL(14) /* EPROM write line */
/*
* How long to wait for the EPROM to become available, in ms.
* The spec 32 Mb EPROM takes around 40s to erase then write.
* Double it for safety.
*/
#define EPROM_TIMEOUT 80000 /* ms */
/*
 * Turn on the external enable line that allows writing to the flash.
*/
static void write_enable(struct hfi1_devdata *dd)
{
/* raise signal */
write_csr(dd, ASIC_GPIO_OUT, read_csr(dd, ASIC_GPIO_OUT) | EPROM_WP_N);
/* raise enable */
write_csr(dd, ASIC_GPIO_OE, read_csr(dd, ASIC_GPIO_OE) | EPROM_WP_N);
}
/*
 * Turn off the external enable line that allows writing to the flash.
*/
static void write_disable(struct hfi1_devdata *dd)
{
/* lower signal */
write_csr(dd, ASIC_GPIO_OUT, read_csr(dd, ASIC_GPIO_OUT) & ~EPROM_WP_N);
/* lower enable */
write_csr(dd, ASIC_GPIO_OE, read_csr(dd, ASIC_GPIO_OE) & ~EPROM_WP_N);
}
/*
* Wait for the device to become not busy. Must be called after all
* write or erase operations.
*/
static int wait_for_not_busy(struct hfi1_devdata *dd)
{
unsigned long count = 0;
u64 reg;
int ret = 0;
/* starts page mode */
write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_READ_SR1);
while (1) {
		usleep_range(WAIT_SLEEP_US - 5, WAIT_SLEEP_US + 5);
count++;
reg = read_csr(dd, ASIC_EEP_DATA);
if ((reg & SR1_BUSY) == 0)
break;
/* 200s is the largest time for a 128Mb device */
if (count > COUNT_DELAY_SEC(200)) {
dd_dev_err(dd, "waited too long for SPI FLASH busy to clear - failing\n");
ret = -ETIMEDOUT;
break; /* break, not goto - must stop page mode */
}
}
/* stop page mode with a NOP */
write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_NOP);
return ret;
}
/*
* Read the device ID from the SPI controller.
*/
static u32 read_device_id(struct hfi1_devdata *dd)
{
	/* read the Manufacturer/Device ID */
write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_READ_MANUF_DEV_ID);
return (u32)read_csr(dd, ASIC_EEP_DATA);
}
/*
* Erase the whole flash.
*/
static int erase_chip(struct hfi1_devdata *dd)
{
int ret;
write_enable(dd);
write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_WRITE_ENABLE);
write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_CHIP_ERASE);
ret = wait_for_not_busy(dd);
write_disable(dd);
return ret;
}
/*
* Erase a range.
*/
static int erase_range(struct hfi1_devdata *dd, u32 start, u32 len)
{
u32 end = start + len;
int ret = 0;
if (end < start)
return -EINVAL;
/* check the end points for the minimum erase */
if ((start & MASK_4KB) || (end & MASK_4KB)) {
dd_dev_err(dd,
"%s: non-aligned range (0x%x,0x%x) for a 4KB erase\n",
__func__, start, end);
return -EINVAL;
}
write_enable(dd);
while (start < end) {
write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_WRITE_ENABLE);
/* check in order of largest to smallest */
if (((start & MASK_64KB) == 0) && (start + SIZE_64KB <= end)) {
write_csr(dd, ASIC_EEP_ADDR_CMD,
CMD_SECTOR_ERASE_64KB(start));
start += SIZE_64KB;
} else if (((start & MASK_32KB) == 0) &&
(start + SIZE_32KB <= end)) {
write_csr(dd, ASIC_EEP_ADDR_CMD,
CMD_SECTOR_ERASE_32KB(start));
start += SIZE_32KB;
} else { /* 4KB will work */
write_csr(dd, ASIC_EEP_ADDR_CMD,
CMD_SECTOR_ERASE_4KB(start));
start += SIZE_4KB;
}
ret = wait_for_not_busy(dd);
if (ret)
goto done;
}
done:
write_disable(dd);
return ret;
}
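/*
 * Worked example (illustrative only): erase_range(dd, 0x1000, 0x20000)
 * covers [0x1000, 0x21000) with seven 4KB erases up to 0x8000, one 32KB
 * erase to 0x10000, one 64KB erase to 0x20000, and one final 4KB erase;
 * each step picks the largest aligned size that still fits in the range.
 */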
/*
* Read a 256 byte (64 dword) EPROM page.
* All callers have verified the offset is at a page boundary.
*/
static void read_page(struct hfi1_devdata *dd, u32 offset, u32 *result)
{
int i;
write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_READ_DATA(offset));
for (i = 0; i < EP_PAGE_SIZE / sizeof(u32); i++)
result[i] = (u32)read_csr(dd, ASIC_EEP_DATA);
write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_NOP); /* close open page */
}
/*
 * Read len bytes starting at start. Copy to user address addr.
*/
static int read_length(struct hfi1_devdata *dd, u32 start, u32 len, u64 addr)
{
u32 offset;
u32 buffer[EP_PAGE_SIZE / sizeof(u32)];
int ret = 0;
/* reject anything not on an EPROM page boundary */
if ((start & EEP_PAGE_MASK) || (len & EEP_PAGE_MASK))
return -EINVAL;
for (offset = 0; offset < len; offset += EP_PAGE_SIZE) {
read_page(dd, start + offset, buffer);
if (copy_to_user((void __user *)(addr + offset),
buffer, EP_PAGE_SIZE)) {
ret = -EFAULT;
goto done;
}
}
done:
return ret;
}
/*
* Write a 256 byte (64 dword) EPROM page.
* All callers have verified the offset is at a page boundary.
*/
static int write_page(struct hfi1_devdata *dd, u32 offset, u32 *data)
{
int i;
write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_WRITE_ENABLE);
write_csr(dd, ASIC_EEP_DATA, data[0]);
write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_PAGE_PROGRAM(offset));
for (i = 1; i < EP_PAGE_SIZE / sizeof(u32); i++)
write_csr(dd, ASIC_EEP_DATA, data[i]);
/* will close the open page */
return wait_for_not_busy(dd);
}
/*
 * Write len bytes starting at start. Read from user address addr.
*/
static int write_length(struct hfi1_devdata *dd, u32 start, u32 len, u64 addr)
{
u32 offset;
u32 buffer[EP_PAGE_SIZE / sizeof(u32)];
int ret = 0;
/* reject anything not on an EPROM page boundary */
if ((start & EEP_PAGE_MASK) || (len & EEP_PAGE_MASK))
return -EINVAL;
write_enable(dd);
for (offset = 0; offset < len; offset += EP_PAGE_SIZE) {
if (copy_from_user(buffer, (void __user *)(addr + offset),
EP_PAGE_SIZE)) {
ret = -EFAULT;
goto done;
}
ret = write_page(dd, start + offset, buffer);
if (ret)
goto done;
}
done:
write_disable(dd);
return ret;
}
/* convert a range composite to a length, in bytes */
static inline u32 extract_rlen(u32 composite)
{
return (composite & 0xffff) * EP_PAGE_SIZE;
}
/* convert a range composite to a start, in bytes */
static inline u32 extract_rstart(u32 composite)
{
return (composite >> 16) * EP_PAGE_SIZE;
}
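/*
 * Worked example (illustrative only): a composite of 0x00100004 yields
 * rstart = 0x0010 * 256 = 4KB and rlen = 0x0004 * 256 = 1KB, both
 * EPROM-page aligned as the erase/read/write paths require.
 */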
/*
* Perform the given operation on the EPROM. Called from user space. The
* user credentials have already been checked.
*
* Return 0 on success, -ERRNO on error
*/
int handle_eprom_command(struct file *fp, const struct hfi1_cmd *cmd)
{
struct hfi1_devdata *dd;
u32 dev_id;
u32 rlen; /* range length */
u32 rstart; /* range start */
int i_minor;
int ret = 0;
/*
* Map the device file to device data using the relative minor.
* The device file minor number is the unit number + 1. 0 is
* the generic device file - reject it.
*/
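	/*
	 * For example, under this convention unit 0 is minor
	 * HFI1_USER_MINOR_BASE + 1, so i_minor computes to 1 below and
	 * the device data comes from hfi1_lookup(0).
	 */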
i_minor = iminor(file_inode(fp)) - HFI1_USER_MINOR_BASE;
if (i_minor <= 0)
return -EINVAL;
dd = hfi1_lookup(i_minor - 1);
if (!dd) {
pr_err("%s: cannot find unit %d!\n", __func__, i_minor);
return -EINVAL;
}
/* some devices do not have an EPROM */
if (!dd->eprom_available)
return -EOPNOTSUPP;
ret = acquire_chip_resource(dd, CR_EPROM, EPROM_TIMEOUT);
if (ret) {
dd_dev_err(dd, "%s: unable to acquire EPROM resource\n",
__func__);
goto done_asic;
}
dd_dev_info(dd, "%s: cmd: type %d, len 0x%x, addr 0x%016llx\n",
__func__, cmd->type, cmd->len, cmd->addr);
switch (cmd->type) {
case HFI1_CMD_EP_INFO:
if (cmd->len != sizeof(u32)) {
ret = -ERANGE;
break;
}
dev_id = read_device_id(dd);
/* addr points to a u32 user buffer */
if (copy_to_user((void __user *)cmd->addr, &dev_id,
sizeof(u32)))
ret = -EFAULT;
break;
case HFI1_CMD_EP_ERASE_CHIP:
ret = erase_chip(dd);
break;
case HFI1_CMD_EP_ERASE_RANGE:
rlen = extract_rlen(cmd->len);
rstart = extract_rstart(cmd->len);
ret = erase_range(dd, rstart, rlen);
break;
case HFI1_CMD_EP_READ_RANGE:
rlen = extract_rlen(cmd->len);
rstart = extract_rstart(cmd->len);
ret = read_length(dd, rstart, rlen, cmd->addr);
break;
case HFI1_CMD_EP_WRITE_RANGE:
rlen = extract_rlen(cmd->len);
rstart = extract_rstart(cmd->len);
ret = write_length(dd, rstart, rlen, cmd->addr);
break;
default:
dd_dev_err(dd, "%s: unexpected command %d\n",
__func__, cmd->type);
ret = -EINVAL;
break;
}
release_chip_resource(dd, CR_EPROM);
done_asic:
return ret;
}
/*
* Initialize the EPROM handler.
*/
int eprom_init(struct hfi1_devdata *dd)
{
int ret = 0;
/* only the discrete chip has an EPROM */
if (dd->pcidev->device != PCI_DEVICE_ID_INTEL0)
return 0;
/*
* It is OK if both HFIs reset the EPROM as long as they don't
* do it at the same time.
*/
ret = acquire_chip_resource(dd, CR_EPROM, EPROM_TIMEOUT);
if (ret) {
dd_dev_err(dd,
"%s: unable to acquire EPROM resource, no EPROM support\n",
__func__);
goto done_asic;
}
/* reset EPROM to be sure it is in a good state */
/* set reset */
write_csr(dd, ASIC_EEP_CTL_STAT, ASIC_EEP_CTL_STAT_EP_RESET_SMASK);
/* clear reset, set speed */
write_csr(dd, ASIC_EEP_CTL_STAT,
EP_SPEED_FULL << ASIC_EEP_CTL_STAT_RATE_SPI_SHIFT);
/* wake the device with command "release powerdown NoID" */
write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_RELEASE_POWERDOWN_NOID);
dd->eprom_available = true;
release_chip_resource(dd, CR_EPROM);
done_asic:
return ret;
}

View File

@@ -103,6 +103,9 @@ enum {
IB_OPCODE_ATOMIC_ACKNOWLEDGE = 0x12,
IB_OPCODE_COMPARE_SWAP = 0x13,
IB_OPCODE_FETCH_ADD = 0x14,
/* opcode 0x15 is reserved */
IB_OPCODE_SEND_LAST_WITH_INVALIDATE = 0x16,
IB_OPCODE_SEND_ONLY_WITH_INVALIDATE = 0x17,
/* real constants follow -- see the comment above about the
   IB_OPCODE() macro for more details */
@@ -129,6 +132,8 @@ enum {
IB_OPCODE(RC, ATOMIC_ACKNOWLEDGE),
IB_OPCODE(RC, COMPARE_SWAP),
IB_OPCODE(RC, FETCH_ADD),
IB_OPCODE(RC, SEND_LAST_WITH_INVALIDATE),
IB_OPCODE(RC, SEND_ONLY_WITH_INVALIDATE),
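	/* e.g. IB_OPCODE(RC, SEND_ONLY_WITH_INVALIDATE) expands to
	 * IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE == 0x00 + 0x17, assuming
	 * the RC transport base of 0x00 defined earlier in this header */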
/* UC */
IB_OPCODE(UC, SEND_FIRST),

View File

@@ -149,15 +149,15 @@ struct rvt_driver_params {
int qpn_res_end;
int nports;
int npkeys;
u8 qos_shift;
char cq_name[RVT_CQN_MAX];
int node;
int max_rdma_atomic;
int psn_mask;
int psn_shift;
int psn_modify_mask;
u32 core_cap_flags;
u32 max_mad_size;
u8 qos_shift;
u8 max_rdma_atomic;
};
/* Protection domain */
@@ -425,6 +425,15 @@ static inline unsigned rvt_get_npkeys(struct rvt_dev_info *rdi)
return rdi->dparms.npkeys;
}
/*
 * Return one more than the driver's max RDMA atomic count, which is
 * the number of entries needed for a QP's ack ring buffer.
*/
static inline unsigned int rvt_max_atomic(struct rvt_dev_info *rdi)
{
return rdi->dparms.max_rdma_atomic + 1;
}
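/*
 * Illustrative use (the allocation site is assumed, not shown here): a
 * driver can size the per-QP ack queue as
 *	qp->s_ack_queue = kcalloc(rvt_max_atomic(rdi),
 *				  sizeof(*qp->s_ack_queue), GFP_KERNEL);
 * so a max_rdma_atomic of 16 yields a 17-entry ring.
 */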
/*
* Return the indexed PKEY from the port PKEY table.
*/

View File

@@ -211,8 +211,6 @@ struct rvt_mmap_info {
unsigned size;
};
#define RVT_MAX_RDMA_ATOMIC 16
/*
* This structure holds the information that the send tasklet needs
* to send a RDMA read response or atomic operation.
@@ -282,8 +280,7 @@ struct rvt_qp {
atomic_t refcount ____cacheline_aligned_in_smp;
wait_queue_head_t wait;
struct rvt_ack_entry s_ack_queue[RVT_MAX_RDMA_ATOMIC + 1]
____cacheline_aligned_in_smp;
struct rvt_ack_entry *s_ack_queue;
struct rvt_sge_state s_rdma_read_sge;
spinlock_t r_lock ____cacheline_aligned_in_smp; /* used for APM */

View File

@@ -66,7 +66,7 @@
* The major version changes when data structures change in an incompatible
* way. The driver must be the same for initialization to succeed.
*/
#define HFI1_USER_SWMAJOR 5
#define HFI1_USER_SWMAJOR 6
/*
* Minor version differences are always compatible
@@ -75,7 +75,12 @@
* may not be implemented; the user code must deal with this if it
* cares, or it must abort after initialization reports the difference.
*/
#define HFI1_USER_SWMINOR 0
#define HFI1_USER_SWMINOR 1
/*
 * We will encode the major/minor inside a single 32-bit version number.
*/
#define HFI1_SWMAJOR_SHIFT 16
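/*
 * A sketch of the intended packing (the exact macro name is an
 * assumption, not shown in this hunk):
 * version = (HFI1_USER_SWMAJOR << HFI1_SWMAJOR_SHIFT) | HFI1_USER_SWMINOR,
 * so version 6.1 encodes as 0x00060001.
 */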
/*
* Set of HW and driver capability/feature bits.
@@ -107,19 +112,6 @@
#define HFI1_RCVHDR_ENTSIZE_16 (1UL << 1)
#define HFI1_RCVDHR_ENTSIZE_32 (1UL << 2)
/*
* If the unit is specified via open, HFI choice is fixed. If port is
* specified, it's also fixed. Otherwise we try to spread contexts
* across ports and HFIs, using different algorithms. WITHIN is
* the old default, prior to this mechanism.
*/
#define HFI1_ALG_ACROSS 0 /* round robin contexts across HFIs, then
* ports; this is the default */
#define HFI1_ALG_WITHIN 1 /* use all contexts on an HFI (round robin
* active ports within), then next HFI */
#define HFI1_ALG_COUNT 2 /* number of algorithm choices */
/* User commands. */
#define HFI1_CMD_ASSIGN_CTXT 1 /* allocate HFI and context */
#define HFI1_CMD_CTXT_INFO 2 /* find out what resources we got */
@@ -127,7 +119,6 @@
#define HFI1_CMD_TID_UPDATE 4 /* update expected TID entries */
#define HFI1_CMD_TID_FREE 5 /* free expected TID entries */
#define HFI1_CMD_CREDIT_UPD 6 /* force an update of PIO credit */
#define HFI1_CMD_SDMA_STATUS_UPD 7 /* force update of SDMA status ring */
#define HFI1_CMD_RECV_CTRL 8 /* control receipt of packets */
#define HFI1_CMD_POLL_TYPE 9 /* set the kind of polling we want */
@@ -135,13 +126,46 @@
#define HFI1_CMD_SET_PKEY 11 /* set context's pkey */
#define HFI1_CMD_CTXT_RESET 12 /* reset context's HW send context */
#define HFI1_CMD_TID_INVAL_READ 13 /* read TID cache invalidations */
/* separate EPROM commands from normal PSM commands */
#define HFI1_CMD_EP_INFO 64 /* read EPROM device ID */
#define HFI1_CMD_EP_ERASE_CHIP 65 /* erase whole EPROM */
/* range 66-74 no longer used */
#define HFI1_CMD_EP_ERASE_RANGE 75 /* erase EPROM range */
#define HFI1_CMD_EP_READ_RANGE 76 /* read EPROM range */
#define HFI1_CMD_EP_WRITE_RANGE 77 /* write EPROM range */
#define HFI1_CMD_GET_VERS 14 /* get the version of the user cdev */
/*
 * User IOCTLs cannot go above 128; if they do, see common.h and change
 * the base for the snoop ioctl.
*/
#define IB_IOCTL_MAGIC 0x1b /* See Documentation/ioctl/ioctl-number.txt */
/*
 * Make the ioctls occupy the last 0xe0-0xff portion of the IB range
*/
#define __NUM(cmd) (HFI1_CMD_##cmd + 0xe0)
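/*
 * Worked example (illustrative): HFI1_CMD_ASSIGN_CTXT == 1, so
 * __NUM(ASSIGN_CTXT) == 0xe1 and HFI1_IOCTL_ASSIGN_CTXT is ioctl
 * number 0xe1 under the 0x1b magic, keeping every entry in 0xe0-0xff.
 */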
struct hfi1_cmd;
#define HFI1_IOCTL_ASSIGN_CTXT \
_IOWR(IB_IOCTL_MAGIC, __NUM(ASSIGN_CTXT), struct hfi1_user_info)
#define HFI1_IOCTL_CTXT_INFO \
_IOW(IB_IOCTL_MAGIC, __NUM(CTXT_INFO), struct hfi1_ctxt_info)
#define HFI1_IOCTL_USER_INFO \
_IOW(IB_IOCTL_MAGIC, __NUM(USER_INFO), struct hfi1_base_info)
#define HFI1_IOCTL_TID_UPDATE \
_IOWR(IB_IOCTL_MAGIC, __NUM(TID_UPDATE), struct hfi1_tid_info)
#define HFI1_IOCTL_TID_FREE \
_IOWR(IB_IOCTL_MAGIC, __NUM(TID_FREE), struct hfi1_tid_info)
#define HFI1_IOCTL_CREDIT_UPD \
_IO(IB_IOCTL_MAGIC, __NUM(CREDIT_UPD))
#define HFI1_IOCTL_RECV_CTRL \
_IOW(IB_IOCTL_MAGIC, __NUM(RECV_CTRL), int)
#define HFI1_IOCTL_POLL_TYPE \
_IOW(IB_IOCTL_MAGIC, __NUM(POLL_TYPE), int)
#define HFI1_IOCTL_ACK_EVENT \
_IOW(IB_IOCTL_MAGIC, __NUM(ACK_EVENT), unsigned long)
#define HFI1_IOCTL_SET_PKEY \
_IOW(IB_IOCTL_MAGIC, __NUM(SET_PKEY), __u16)
#define HFI1_IOCTL_CTXT_RESET \
_IO(IB_IOCTL_MAGIC, __NUM(CTXT_RESET))
#define HFI1_IOCTL_TID_INVAL_READ \
_IOWR(IB_IOCTL_MAGIC, __NUM(TID_INVAL_READ), struct hfi1_tid_info)
#define HFI1_IOCTL_GET_VERS \
_IOR(IB_IOCTL_MAGIC, __NUM(GET_VERS), int)
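/*
 * Hypothetical user-space sketch (fd setup and error handling elided)
 * of the version query these definitions enable:
 *	int vers;
 *	if (ioctl(fd, HFI1_IOCTL_GET_VERS, &vers) == 0)
 *		printf("cdev version %d.%d\n", vers >> HFI1_SWMAJOR_SHIFT,
 *		       vers & 0xffff);
 */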
#define _HFI1_EVENT_FROZEN_BIT 0
#define _HFI1_EVENT_LINKDOWN_BIT 1
@@ -199,9 +223,7 @@ struct hfi1_user_info {
* Should be set to HFI1_USER_SWVERSION.
*/
__u32 userversion;
__u16 pad;
/* HFI selection algorithm, if unit has not selected */
__u16 hfi1_alg;
__u32 pad;
/*
* If two or more processes wish to share a context, each process
* must set the subcontext_cnt and subcontext_id to the same
@@ -243,12 +265,6 @@ struct hfi1_tid_info {
__u32 length;
};
struct hfi1_cmd {
__u32 type; /* command type */
	__u32 len;	/* length of struct pointed to by addr */
__u64 addr; /* pointer to user structure */
};
enum hfi1_sdma_comp_state {
FREE = 0,
QUEUED,