block-6.4-2023-05-20
Merge tag 'block-6.4-2023-05-20' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:

 - NVMe pull request via Keith:
      - More device quirks (Sagi, Hristo, Adrian, Daniel)
      - Controller delete race (Maurizo)
      - Multipath cleanup fix (Christoph)

 - Deny writeable mmap mapping on a readonly block device (Loic)

 - Kill unused define that got introduced by accident (Christoph)

 - Error handling fix for s390 dasd (Stefan)

 - ublk locking fix (Ming)

* tag 'block-6.4-2023-05-20' of git://git.kernel.dk/linux:
  block: remove NFL4_UFLG_MASK
  block: Deny writable memory mapping if block is read-only
  s390/dasd: fix command reject error on ESE devices
  nvme-pci: Add quirk for Teamgroup MP33 SSD
  ublk: fix AB-BA lockdep warning
  nvme: do not let the user delete a ctrl before a complete initialization
  nvme-multipath: don't call blk_mark_disk_dead in nvme_mpath_remove_disk
  nvme-pci: clamp max_hw_sectors based on DMA optimized limitation
  nvme-pci: add quirk for missing secondary temperature thresholds
  nvme-pci: add NVME_QUIRK_BOGUS_NID for HS-SSD-FUTURE 2048G
This commit is contained in:
commit 98be58a6e9
block/fops.c

@@ -678,6 +678,16 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
 	return error;
 }
 
+static int blkdev_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct inode *bd_inode = bdev_file_inode(file);
+
+	if (bdev_read_only(I_BDEV(bd_inode)))
+		return generic_file_readonly_mmap(file, vma);
+
+	return generic_file_mmap(file, vma);
+}
+
 const struct file_operations def_blk_fops = {
 	.open		= blkdev_open,
 	.release	= blkdev_close,
@@ -685,7 +695,7 @@ const struct file_operations def_blk_fops = {
 	.read_iter	= blkdev_read_iter,
 	.write_iter	= blkdev_write_iter,
 	.iopoll		= iocb_bio_iopoll,
-	.mmap		= generic_file_mmap,
+	.mmap		= blkdev_mmap,
 	.fsync		= blkdev_fsync,
 	.unlocked_ioctl	= blkdev_ioctl,
 #ifdef CONFIG_COMPAT
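For illustration only, not part of the commit: a minimal userspace sketch of the behaviour this hunk changes. The device path is hypothetical, and it assumes a block device that has been marked read-only at the block layer (for example with `blockdev --setro`) while its node permissions still allow a read-write open. With blkdev_mmap in place, the writable shared mapping is expected to be refused at mmap() time rather than allowing page-cache writes to a read-only device.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical device node; assumed set read-only at the block layer. */
	int fd = open("/dev/sdX", O_RDWR);
	if (fd < 0) {
		perror("open");	/* the open itself may already fail depending on setup */
		return 1;
	}

	/* Before the fix this writable shared mapping could succeed even though
	 * write() on the same fd would be rejected; with the fix it is expected
	 * to fail for a read-only block device. */
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		printf("writable mmap denied: %s\n", strerror(errno));
	else
		printf("writable mmap succeeded\n");

	close(fd);
	return 0;
}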
drivers/block/ublk_drv.c

@@ -1120,6 +1120,11 @@ static inline bool ublk_queue_ready(struct ublk_queue *ubq)
 	return ubq->nr_io_ready == ubq->q_depth;
 }
 
+static void ublk_cmd_cancel_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
+{
+	io_uring_cmd_done(cmd, UBLK_IO_RES_ABORT, 0, issue_flags);
+}
+
 static void ublk_cancel_queue(struct ublk_queue *ubq)
 {
 	int i;
@@ -1131,8 +1136,8 @@ static void ublk_cancel_queue(struct ublk_queue *ubq)
 		struct ublk_io *io = &ubq->ios[i];
 
 		if (io->flags & UBLK_IO_FLAG_ACTIVE)
-			io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0,
-					  IO_URING_F_UNLOCKED);
+			io_uring_cmd_complete_in_task(io->cmd,
+						      ublk_cmd_cancel_cb);
 	}
 
 	/* all io commands are canceled */
drivers/nvme/host/core.c

@@ -3585,6 +3585,9 @@ static ssize_t nvme_sysfs_delete(struct device *dev,
 {
 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
 
+	if (!test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags))
+		return -EBUSY;
+
 	if (device_remove_file_self(dev, attr))
 		nvme_delete_ctrl_sync(ctrl);
 	return count;
@@ -5045,7 +5048,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
 	 * that were missed. We identify persistent discovery controllers by
 	 * checking that they started once before, hence are reconnecting back.
 	 */
-	if (test_and_set_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) &&
+	if (test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) &&
 	    nvme_discovery_ctrl(ctrl))
 		nvme_change_uevent(ctrl, "NVME_EVENT=rediscover");
 
@@ -5056,6 +5059,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
 	}
 
 	nvme_change_uevent(ctrl, "NVME_EVENT=connected");
+	set_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags);
 }
 EXPORT_SYMBOL_GPL(nvme_start_ctrl);
 
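Again for illustration only: the userspace side of the delete race fixed above, as a hedged sketch. The controller instance in the path is hypothetical. Per the hunk, writing to the delete_controller attribute before the controller has completed its first start is now expected to fail with EBUSY instead of racing with initialization.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical controller instance. */
	const char *path = "/sys/class/nvme/nvme0/delete_controller";

	int fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* With the change above, this write is expected to fail with EBUSY
	 * if the controller has not yet completed a full initialization. */
	if (write(fd, "1", 1) < 0)
		printf("delete refused: %s\n", strerror(errno));
	else
		printf("controller deletion requested\n");

	close(fd);
	return 0;
}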
drivers/nvme/host/hwmon.c

@@ -163,7 +163,9 @@ static umode_t nvme_hwmon_is_visible(const void *_data,
 	case hwmon_temp_max:
 	case hwmon_temp_min:
 		if ((!channel && data->ctrl->wctemp) ||
-		    (channel && data->log->temp_sensor[channel - 1])) {
+		    (channel && data->log->temp_sensor[channel - 1] &&
+		     !(data->ctrl->quirks &
+		       NVME_QUIRK_NO_SECONDARY_TEMP_THRESH))) {
 			if (data->ctrl->quirks &
 			    NVME_QUIRK_NO_TEMP_THRESH_CHANGE)
 				return 0444;
drivers/nvme/host/multipath.c

@@ -884,7 +884,6 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
 {
 	if (!head->disk)
 		return;
-	blk_mark_disk_dead(head->disk);
 	/* make sure all pending bios are cleaned up */
 	kblockd_schedule_work(&head->requeue_work);
 	flush_work(&head->requeue_work);
drivers/nvme/host/nvme.h

@@ -149,6 +149,11 @@ enum nvme_quirks {
 	 * Reports garbage in the namespace identifiers (eui64, nguid, uuid).
 	 */
 	NVME_QUIRK_BOGUS_NID			= (1 << 18),
+
+	/*
+	 * No temperature thresholds for channels other than 0 (Composite).
+	 */
+	NVME_QUIRK_NO_SECONDARY_TEMP_THRESH	= (1 << 19),
 };
 
 /*
drivers/nvme/host/pci.c

@@ -2956,7 +2956,7 @@ static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev,
 	 * over a single page.
 	 */
 	dev->ctrl.max_hw_sectors = min_t(u32,
-		NVME_MAX_KB_SZ << 1, dma_max_mapping_size(&pdev->dev) >> 9);
+		NVME_MAX_KB_SZ << 1, dma_opt_mapping_size(&pdev->dev) >> 9);
 	dev->ctrl.max_segments = NVME_MAX_SEGS;
 
 	/*
@@ -3402,6 +3402,8 @@ static const struct pci_device_id nvme_id_table[] = {
 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
 	{ PCI_DEVICE(0x2646, 0x2263),	/* KINGSTON A2000 NVMe SSD */
 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+	{ PCI_DEVICE(0x2646, 0x5013),	/* Kingston KC3000, Kingston FURY Renegade */
+		.driver_data = NVME_QUIRK_NO_SECONDARY_TEMP_THRESH, },
 	{ PCI_DEVICE(0x2646, 0x5018),	/* KINGSTON OM8SFP4xxxxP OS21012 NVMe SSD */
 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
 	{ PCI_DEVICE(0x2646, 0x5016),	/* KINGSTON OM3PGP4xxxxP OS21011 NVMe SSD */
@@ -3441,6 +3443,10 @@ static const struct pci_device_id nvme_id_table[] = {
 		NVME_QUIRK_IGNORE_DEV_SUBNQN, },
 	{ PCI_DEVICE(0x10ec, 0x5763),	/* TEAMGROUP T-FORCE CARDEA ZERO Z330 SSD */
 		.driver_data = NVME_QUIRK_BOGUS_NID, },
+	{ PCI_DEVICE(0x1e4b, 0x1602),	/* HS-SSD-FUTURE 2048G */
+		.driver_data = NVME_QUIRK_BOGUS_NID, },
+	{ PCI_DEVICE(0x10ec, 0x5765),	/* TEAMGROUP MP33 2TB SSD */
+		.driver_data = NVME_QUIRK_BOGUS_NID, },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
 		.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
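Background sketch, not upstream code: the .driver_data value in each PCI ID entry above ends up as the controller's quirks bitmask, which feature code (such as the hwmon hunk earlier) then tests. The standalone toy below mimics that table-lookup-plus-bit-test pattern; the struct and function names are made up, and only the quirk constants and IDs echo the diff.

#include <stdio.h>

/* Quirk bits as defined in the nvme.h hunk above. */
#define NVME_QUIRK_BOGUS_NID			(1u << 18)
#define NVME_QUIRK_NO_SECONDARY_TEMP_THRESH	(1u << 19)

/* Toy stand-in for a PCI ID table entry; not the kernel's struct pci_device_id. */
struct toy_id {
	unsigned short vendor, device;
	unsigned long driver_data;
};

static const struct toy_id toy_table[] = {
	{ 0x2646, 0x5013, NVME_QUIRK_NO_SECONDARY_TEMP_THRESH },	/* Kingston KC3000 / FURY Renegade */
	{ 0x1e4b, 0x1602, NVME_QUIRK_BOGUS_NID },			/* HS-SSD-FUTURE 2048G */
	{ 0x10ec, 0x5765, NVME_QUIRK_BOGUS_NID },			/* TEAMGROUP MP33 2TB SSD */
	{ 0, 0, 0 },
};

static unsigned long toy_lookup_quirks(unsigned short vendor, unsigned short device)
{
	for (const struct toy_id *id = toy_table; id->vendor; id++)
		if (id->vendor == vendor && id->device == device)
			return id->driver_data;
	return 0;
}

int main(void)
{
	unsigned long quirks = toy_lookup_quirks(0x2646, 0x5013);

	/* Same kind of test the hwmon change performs on ctrl->quirks. */
	if (quirks & NVME_QUIRK_NO_SECONDARY_TEMP_THRESH)
		printf("skip secondary temperature thresholds for this device\n");
	return 0;
}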
drivers/s390/block/dasd_eckd.c

@@ -127,6 +127,8 @@ static int prepare_itcw(struct itcw *, unsigned int, unsigned int, int,
 			struct dasd_device *, struct dasd_device *,
 			unsigned int, int, unsigned int, unsigned int,
 			unsigned int, unsigned int);
+static int dasd_eckd_query_pprc_status(struct dasd_device *,
+				       struct dasd_pprc_data_sc4 *);
 
 /* initial attempt at a probe function. this can be simplified once
  * the other detection code is gone */
@@ -3733,6 +3735,26 @@ static int count_exts(unsigned int from, unsigned int to, int trks_per_ext)
 	return count;
 }
 
+static int dasd_in_copy_relation(struct dasd_device *device)
+{
+	struct dasd_pprc_data_sc4 *temp;
+	int rc;
+
+	if (!dasd_eckd_pprc_enabled(device))
+		return 0;
+
+	temp = kzalloc(sizeof(*temp), GFP_KERNEL);
+	if (!temp)
+		return -ENOMEM;
+
+	rc = dasd_eckd_query_pprc_status(device, temp);
+	if (!rc)
+		rc = temp->dev_info[0].state;
+
+	kfree(temp);
+	return rc;
+}
+
 /*
  * Release allocated space for a given range or an entire volume.
  */
@@ -3749,6 +3771,7 @@ dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block,
 	int cur_to_trk, cur_from_trk;
 	struct dasd_ccw_req *cqr;
 	u32 beg_cyl, end_cyl;
+	int copy_relation;
 	struct ccw1 *ccw;
 	int trks_per_ext;
 	size_t ras_size;
@@ -3760,6 +3783,10 @@ dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block,
 	if (dasd_eckd_ras_sanity_checks(device, first_trk, last_trk))
 		return ERR_PTR(-EINVAL);
 
+	copy_relation = dasd_in_copy_relation(device);
+	if (copy_relation < 0)
+		return ERR_PTR(copy_relation);
+
 	rq = req ? blk_mq_rq_to_pdu(req) : NULL;
 
 	features = &private->features;
@@ -3788,9 +3815,11 @@ dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block,
 	/*
 	 * This bit guarantees initialisation of tracks within an extent that is
 	 * not fully specified, but is only supported with a certain feature
-	 * subset.
+	 * subset and for devices not in a copy relation.
 	 */
-	ras_data->op_flags.guarantee_init = !!(features->feature[56] & 0x01);
+	if (features->feature[56] & 0x01 && !copy_relation)
+		ras_data->op_flags.guarantee_init = 1;
+
 	ras_data->lss = private->conf.ned->ID;
 	ras_data->dev_addr = private->conf.ned->unit_addr;
 	ras_data->nr_exts = nr_exts;
include/linux/blkdev.h

@@ -1376,8 +1376,6 @@ enum blk_unique_id {
 	BLK_UID_NAA	= 3,
 };
 
-#define NFL4_UFLG_MASK			0x0000003F
-
 struct block_device_operations {
 	void (*submit_bio)(struct bio *bio);
 	int (*poll_bio)(struct bio *bio, struct io_comp_batch *iob,