Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes. No conflicts.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Jakub Kicinski 2023-05-11 09:06:26 -07:00
commit bc88ba0cad
707 changed files with 178297 additions and 154844 deletions

View File

@ -213,7 +213,10 @@ Jeff Garzik <jgarzik@pretzel.yyz.us>
Jeff Layton <jlayton@kernel.org> <jlayton@poochiereds.net>
Jeff Layton <jlayton@kernel.org> <jlayton@primarydata.com>
Jeff Layton <jlayton@kernel.org> <jlayton@redhat.com>
Jens Axboe <axboe@suse.de>
Jens Axboe <axboe@kernel.dk> <axboe@suse.de>
Jens Axboe <axboe@kernel.dk> <jens.axboe@oracle.com>
Jens Axboe <axboe@kernel.dk> <axboe@fb.com>
Jens Axboe <axboe@kernel.dk> <axboe@meta.com>
Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
Jernej Skrabec <jernej.skrabec@gmail.com> <jernej.skrabec@siol.net>
Jessica Zhang <quic_jesszhan@quicinc.com> <jesszhan@codeaurora.org>

View File

@ -14,7 +14,7 @@ to borrow disk space from another computer.
Unlike NFS, it is possible to put any filesystem on it, etc.
For more information, or to download the nbd-client and nbd-server
tools, go to http://nbd.sf.net/.
tools, go to https://github.com/NetworkBlockDevice/nbd.
The nbd kernel module need only be installed on the client
system, as the nbd-server is completely in userspace. In fact,

View File

@ -16,14 +16,18 @@ description:
properties:
compatible:
enum:
- mediatek,mt6779-gce
- mediatek,mt8173-gce
- mediatek,mt8183-gce
- mediatek,mt8186-gce
- mediatek,mt8188-gce
- mediatek,mt8192-gce
- mediatek,mt8195-gce
oneOf:
- enum:
- mediatek,mt6779-gce
- mediatek,mt8173-gce
- mediatek,mt8183-gce
- mediatek,mt8186-gce
- mediatek,mt8188-gce
- mediatek,mt8192-gce
- mediatek,mt8195-gce
- items:
- const: mediatek,mt6795-gce
- const: mediatek,mt8173-gce
"#mbox-cells":
const: 2

View File

@ -19,22 +19,15 @@ properties:
- items:
- enum:
- qcom,ipq5332-apcs-apps-global
- qcom,ipq8074-apcs-apps-global
- qcom,ipq9574-apcs-apps-global
- const: qcom,ipq6018-apcs-apps-global
- items:
- enum:
- qcom,ipq6018-apcs-apps-global
- qcom,ipq8074-apcs-apps-global
- qcom,msm8996-apcs-hmss-global
- qcom,msm8998-apcs-hmss-global
- qcom,qcm2290-apcs-hmss-global
- qcom,sc7180-apss-shared
- qcom,sc8180x-apss-shared
- qcom,sdm660-apcs-hmss-global
- qcom,sdm845-apss-shared
- qcom,sm4250-apcs-hmss-global
- qcom,sm6125-apcs-hmss-global
- qcom,sm6115-apcs-hmss-global
- qcom,sm8150-apss-shared
- const: qcom,sdm845-apss-shared
- items:
- enum:
- qcom,msm8916-apcs-kpss-global
@ -45,6 +38,18 @@ properties:
- qcom,qcs404-apcs-apps-global
- qcom,sdx55-apcs-gcc
- const: syscon
- enum:
- qcom,ipq6018-apcs-apps-global
- qcom,ipq8074-apcs-apps-global
- qcom,msm8996-apcs-hmss-global
- qcom,msm8998-apcs-hmss-global
- qcom,qcm2290-apcs-hmss-global
- qcom,sdm660-apcs-hmss-global
- qcom,sdm845-apss-shared
- qcom,sm4250-apcs-hmss-global
- qcom,sm6115-apcs-hmss-global
- qcom,sm6125-apcs-hmss-global
reg:
maxItems: 1
@ -88,6 +93,7 @@ allOf:
items:
- const: pll
- const: aux
- if:
properties:
compatible:
@ -112,7 +118,6 @@ allOf:
contains:
enum:
- qcom,ipq6018-apcs-apps-global
- qcom,ipq8074-apcs-apps-global
then:
properties:
clocks:
@ -134,14 +139,11 @@ allOf:
- qcom,msm8996-apcs-hmss-global
- qcom,msm8998-apcs-hmss-global
- qcom,qcm2290-apcs-hmss-global
- qcom,sc7180-apss-shared
- qcom,sc8180x-apss-shared
- qcom,sdm660-apcs-hmss-global
- qcom,sdm845-apss-shared
- qcom,sm4250-apcs-hmss-global
- qcom,sm6115-apcs-hmss-global
- qcom,sm6125-apcs-hmss-global
- qcom,sm8150-apss-shared
then:
properties:
clocks: false
@ -153,7 +155,6 @@ allOf:
contains:
enum:
- qcom,ipq6018-apcs-apps-global
- qcom,ipq8074-apcs-apps-global
then:
properties:
'#clock-cells':

View File

@ -776,10 +776,11 @@ peer_notif_delay
Specify the delay, in milliseconds, between each peer
notification (gratuitous ARP and unsolicited IPv6 Neighbor
Advertisement) when they are issued after a failover event.
This delay should be a multiple of the link monitor interval
(arp_interval or miimon, whichever is active). The default
value is 0 which means to match the value of the link monitor
interval.
This delay should be a multiple of the MII link monitor interval
(miimon).
The valid range is 0 - 300000. The default value is 0, which means
to match the value of the MII link monitor interval.
prio
Slave priority. A higher number means higher priority.
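As an illustration of the peer_notif_delay option above, a minimal sketch (not part of this commit; the bond name is an assumption and the value is written through the usual bonding sysfs knob)::

    # with miimon=100 active, delay peer notifications by 200 ms
    echo 200 > /sys/class/net/bond0/bonding/peer_notif_delay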

View File

@ -116,8 +116,8 @@ Contents:
udplite
vrf
vxlan
x25-iface
x25
x25-iface
xfrm_device
xfrm_proc
xfrm_sync

View File

@ -1,8 +1,7 @@
.. SPDX-License-Identifier: GPL-2.0
============================-
X.25 Device Driver Interface
============================-
============================
Version 1.1

View File

@ -41,7 +41,7 @@ A consumer can enable its power supply by calling::
int regulator_enable(regulator);
NOTE:
The supply may already be enabled before regulator_enabled() is called.
The supply may already be enabled before regulator_enable() is called.
This may happen if the consumer shares the regulator or the regulator has been
previously enabled by bootloader or kernel board initialization code.

View File

@ -133,6 +133,19 @@ enable
enable card;
Default: enabled, for PCI and ISA PnP cards
These options are used either to specify the order of instances or to
control enabling and disabling of each device when multiple devices are
bound to the same driver. For example, many machines have two HD-audio
controllers (one for HDMI/DP audio and another for onboard analog). In
most cases the second one is the one in primary use, and people would
like to assign it as the first card. They can do so by specifying the
"index=1,0" module parameter, which swaps the assignment slots.
Today, with sound backends such as PulseAudio and PipeWire that support
dynamic configuration, this is of little use, but it was a help for
static configuration in the past.
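A minimal sketch of that static form (the file name is only an example; snd-hda-intel is the usual HD-audio controller driver)::

    # /etc/modprobe.d/local.conf
    # swap the card slots of the two HD-audio controllers
    options snd-hda-intel index=1,0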
Module snd-adlib
----------------

View File

@ -3994,21 +3994,21 @@ Driver with A Single Source File
Suppose you have a file xyz.c. Add the following two lines::
snd-xyz-objs := xyz.o
obj-$(CONFIG_SND_XYZ) += snd-xyz.o
snd-xyz-objs := xyz.o
obj-$(CONFIG_SND_XYZ) += snd-xyz.o
2. Create the Kconfig entry
Add the new entry of Kconfig for your xyz driver::
config SND_XYZ
tristate "Foobar XYZ"
depends on SND
select SND_PCM
help
Say Y here to include support for Foobar XYZ soundcard.
To compile this driver as a module, choose M here:
the module will be called snd-xyz.
config SND_XYZ
tristate "Foobar XYZ"
depends on SND
select SND_PCM
help
Say Y here to include support for Foobar XYZ soundcard.
To compile this driver as a module, choose M here:
the module will be called snd-xyz.
The line ``select SND_PCM`` specifies that the driver xyz supports PCM.
In addition to SND_PCM, the following components are supported for
@ -4032,7 +4032,7 @@ located in the new subdirectory, sound/pci/xyz.
1. Add a new directory (``sound/pci/xyz``) in ``sound/pci/Makefile``
as below::
obj-$(CONFIG_SND) += sound/pci/xyz/
obj-$(CONFIG_SND) += sound/pci/xyz/
2. Under the directory ``sound/pci/xyz``, create a Makefile::

File diff suppressed because it is too large.

View File

@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 3
PATCHLEVEL = 4
SUBLEVEL = 0
EXTRAVERSION =
EXTRAVERSION = -rc1
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*

View File

@ -418,8 +418,11 @@ struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
bdev->bd_partno = partno;
bdev->bd_inode = inode;
bdev->bd_queue = disk->queue;
if (partno)
bdev->bd_has_submit_bio = disk->part0->bd_has_submit_bio;
else
bdev->bd_has_submit_bio = false;
bdev->bd_stats = alloc_percpu(struct disk_stats);
bdev->bd_has_submit_bio = false;
if (!bdev->bd_stats) {
iput(inode);
return NULL;
@ -428,6 +431,14 @@ struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
return bdev;
}
void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors)
{
spin_lock(&bdev->bd_size_lock);
i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
bdev->bd_nr_sectors = sectors;
spin_unlock(&bdev->bd_size_lock);
}
void bdev_add(struct block_device *bdev, dev_t dev)
{
bdev->bd_dev = dev;

View File

@ -567,6 +567,9 @@ restart:
list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
struct blkcg *blkcg = blkg->blkcg;
if (hlist_unhashed(&blkg->blkcg_node))
continue;
spin_lock(&blkcg->lock);
blkg_destroy(blkg);
spin_unlock(&blkcg->lock);

View File

@ -212,61 +212,44 @@ bool blk_integrity_merge_bio(struct request_queue *q, struct request *req,
return true;
}
struct integrity_sysfs_entry {
struct attribute attr;
ssize_t (*show)(struct blk_integrity *, char *);
ssize_t (*store)(struct blk_integrity *, const char *, size_t);
};
static ssize_t integrity_attr_show(struct kobject *kobj, struct attribute *attr,
char *page)
static inline struct blk_integrity *dev_to_bi(struct device *dev)
{
struct gendisk *disk = container_of(kobj, struct gendisk, integrity_kobj);
struct blk_integrity *bi = &disk->queue->integrity;
struct integrity_sysfs_entry *entry =
container_of(attr, struct integrity_sysfs_entry, attr);
return entry->show(bi, page);
return &dev_to_disk(dev)->queue->integrity;
}
static ssize_t integrity_attr_store(struct kobject *kobj,
struct attribute *attr, const char *page,
size_t count)
static ssize_t format_show(struct device *dev, struct device_attribute *attr,
char *page)
{
struct gendisk *disk = container_of(kobj, struct gendisk, integrity_kobj);
struct blk_integrity *bi = &disk->queue->integrity;
struct integrity_sysfs_entry *entry =
container_of(attr, struct integrity_sysfs_entry, attr);
ssize_t ret = 0;
struct blk_integrity *bi = dev_to_bi(dev);
if (entry->store)
ret = entry->store(bi, page, count);
return ret;
}
static ssize_t integrity_format_show(struct blk_integrity *bi, char *page)
{
if (bi->profile && bi->profile->name)
return sprintf(page, "%s\n", bi->profile->name);
else
return sprintf(page, "none\n");
return sysfs_emit(page, "%s\n", bi->profile->name);
return sysfs_emit(page, "none\n");
}
static ssize_t integrity_tag_size_show(struct blk_integrity *bi, char *page)
static ssize_t tag_size_show(struct device *dev, struct device_attribute *attr,
char *page)
{
return sprintf(page, "%u\n", bi->tag_size);
struct blk_integrity *bi = dev_to_bi(dev);
return sysfs_emit(page, "%u\n", bi->tag_size);
}
static ssize_t integrity_interval_show(struct blk_integrity *bi, char *page)
static ssize_t protection_interval_bytes_show(struct device *dev,
struct device_attribute *attr,
char *page)
{
return sprintf(page, "%u\n",
bi->interval_exp ? 1 << bi->interval_exp : 0);
struct blk_integrity *bi = dev_to_bi(dev);
return sysfs_emit(page, "%u\n",
bi->interval_exp ? 1 << bi->interval_exp : 0);
}
static ssize_t integrity_verify_store(struct blk_integrity *bi,
const char *page, size_t count)
static ssize_t read_verify_store(struct device *dev,
struct device_attribute *attr,
const char *page, size_t count)
{
struct blk_integrity *bi = dev_to_bi(dev);
char *p = (char *) page;
unsigned long val = simple_strtoul(p, &p, 10);
@ -278,14 +261,20 @@ static ssize_t integrity_verify_store(struct blk_integrity *bi,
return count;
}
static ssize_t integrity_verify_show(struct blk_integrity *bi, char *page)
static ssize_t read_verify_show(struct device *dev,
struct device_attribute *attr, char *page)
{
return sprintf(page, "%d\n", (bi->flags & BLK_INTEGRITY_VERIFY) != 0);
struct blk_integrity *bi = dev_to_bi(dev);
return sysfs_emit(page, "%d\n", !!(bi->flags & BLK_INTEGRITY_VERIFY));
}
static ssize_t integrity_generate_store(struct blk_integrity *bi,
const char *page, size_t count)
static ssize_t write_generate_store(struct device *dev,
struct device_attribute *attr,
const char *page, size_t count)
{
struct blk_integrity *bi = dev_to_bi(dev);
char *p = (char *) page;
unsigned long val = simple_strtoul(p, &p, 10);
@ -297,68 +286,44 @@ static ssize_t integrity_generate_store(struct blk_integrity *bi,
return count;
}
static ssize_t integrity_generate_show(struct blk_integrity *bi, char *page)
static ssize_t write_generate_show(struct device *dev,
struct device_attribute *attr, char *page)
{
return sprintf(page, "%d\n", (bi->flags & BLK_INTEGRITY_GENERATE) != 0);
struct blk_integrity *bi = dev_to_bi(dev);
return sysfs_emit(page, "%d\n", !!(bi->flags & BLK_INTEGRITY_GENERATE));
}
static ssize_t integrity_device_show(struct blk_integrity *bi, char *page)
static ssize_t device_is_integrity_capable_show(struct device *dev,
struct device_attribute *attr,
char *page)
{
return sprintf(page, "%u\n",
(bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE) != 0);
struct blk_integrity *bi = dev_to_bi(dev);
return sysfs_emit(page, "%u\n",
!!(bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE));
}
static struct integrity_sysfs_entry integrity_format_entry = {
.attr = { .name = "format", .mode = 0444 },
.show = integrity_format_show,
};
static struct integrity_sysfs_entry integrity_tag_size_entry = {
.attr = { .name = "tag_size", .mode = 0444 },
.show = integrity_tag_size_show,
};
static struct integrity_sysfs_entry integrity_interval_entry = {
.attr = { .name = "protection_interval_bytes", .mode = 0444 },
.show = integrity_interval_show,
};
static struct integrity_sysfs_entry integrity_verify_entry = {
.attr = { .name = "read_verify", .mode = 0644 },
.show = integrity_verify_show,
.store = integrity_verify_store,
};
static struct integrity_sysfs_entry integrity_generate_entry = {
.attr = { .name = "write_generate", .mode = 0644 },
.show = integrity_generate_show,
.store = integrity_generate_store,
};
static struct integrity_sysfs_entry integrity_device_entry = {
.attr = { .name = "device_is_integrity_capable", .mode = 0444 },
.show = integrity_device_show,
};
static DEVICE_ATTR_RO(format);
static DEVICE_ATTR_RO(tag_size);
static DEVICE_ATTR_RO(protection_interval_bytes);
static DEVICE_ATTR_RW(read_verify);
static DEVICE_ATTR_RW(write_generate);
static DEVICE_ATTR_RO(device_is_integrity_capable);
static struct attribute *integrity_attrs[] = {
&integrity_format_entry.attr,
&integrity_tag_size_entry.attr,
&integrity_interval_entry.attr,
&integrity_verify_entry.attr,
&integrity_generate_entry.attr,
&integrity_device_entry.attr,
NULL,
};
ATTRIBUTE_GROUPS(integrity);
static const struct sysfs_ops integrity_ops = {
.show = &integrity_attr_show,
.store = &integrity_attr_store,
&dev_attr_format.attr,
&dev_attr_tag_size.attr,
&dev_attr_protection_interval_bytes.attr,
&dev_attr_read_verify.attr,
&dev_attr_write_generate.attr,
&dev_attr_device_is_integrity_capable.attr,
NULL
};
static const struct kobj_type integrity_ktype = {
.default_groups = integrity_groups,
.sysfs_ops = &integrity_ops,
const struct attribute_group blk_integrity_attr_group = {
.name = "integrity",
.attrs = integrity_attrs,
};
static blk_status_t blk_integrity_nop_fn(struct blk_integrity_iter *iter)
@ -437,21 +402,3 @@ void blk_integrity_unregister(struct gendisk *disk)
memset(bi, 0, sizeof(*bi));
}
EXPORT_SYMBOL(blk_integrity_unregister);
int blk_integrity_add(struct gendisk *disk)
{
int ret;
ret = kobject_init_and_add(&disk->integrity_kobj, &integrity_ktype,
&disk_to_dev(disk)->kobj, "%s", "integrity");
if (!ret)
kobject_uevent(&disk->integrity_kobj, KOBJ_ADD);
return ret;
}
void blk_integrity_del(struct gendisk *disk)
{
kobject_uevent(&disk->integrity_kobj, KOBJ_REMOVE);
kobject_del(&disk->integrity_kobj);
kobject_put(&disk->integrity_kobj);
}

View File

@ -214,8 +214,7 @@ static inline bool integrity_req_gap_front_merge(struct request *req,
bip_next->bip_vec[0].bv_offset);
}
int blk_integrity_add(struct gendisk *disk);
void blk_integrity_del(struct gendisk *);
extern const struct attribute_group blk_integrity_attr_group;
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
struct request *r1, struct request *r2)
@ -248,13 +247,6 @@ static inline bool bio_integrity_endio(struct bio *bio)
static inline void bio_integrity_free(struct bio *bio)
{
}
static inline int blk_integrity_add(struct gendisk *disk)
{
return 0;
}
static inline void blk_integrity_del(struct gendisk *disk)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */
unsigned long blk_rq_timeout(unsigned long timeout);
@ -419,6 +411,8 @@ int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
sector_t length);
void blk_drop_partitions(struct gendisk *disk);
void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors);
struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
struct lock_class_key *lkclass);

View File

@ -57,12 +57,7 @@ static DEFINE_IDA(ext_devt_ida);
void set_capacity(struct gendisk *disk, sector_t sectors)
{
struct block_device *bdev = disk->part0;
spin_lock(&bdev->bd_size_lock);
i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
bdev->bd_nr_sectors = sectors;
spin_unlock(&bdev->bd_size_lock);
bdev_set_nr_sectors(disk->part0, sectors);
}
EXPORT_SYMBOL(set_capacity);
@ -487,15 +482,11 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
*/
pm_runtime_set_memalloc_noio(ddev, true);
ret = blk_integrity_add(disk);
if (ret)
goto out_del_block_link;
disk->part0->bd_holder_dir =
kobject_create_and_add("holders", &ddev->kobj);
if (!disk->part0->bd_holder_dir) {
ret = -ENOMEM;
goto out_del_integrity;
goto out_del_block_link;
}
disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj);
if (!disk->slave_dir) {
@ -558,8 +549,6 @@ out_put_slave_dir:
disk->slave_dir = NULL;
out_put_holder_dir:
kobject_put(disk->part0->bd_holder_dir);
out_del_integrity:
blk_integrity_del(disk);
out_del_block_link:
sysfs_remove_link(block_depr, dev_name(ddev));
out_device_del:
@ -621,7 +610,6 @@ void del_gendisk(struct gendisk *disk)
if (WARN_ON_ONCE(!disk_live(disk) && !(disk->flags & GENHD_FL_HIDDEN)))
return;
blk_integrity_del(disk);
disk_del_events(disk);
mutex_lock(&disk->open_mutex);
@ -1155,6 +1143,9 @@ static const struct attribute_group *disk_attr_groups[] = {
&disk_attr_group,
#ifdef CONFIG_BLK_DEV_IO_TRACE
&blk_trace_attr_group,
#endif
#ifdef CONFIG_BLK_DEV_INTEGRITY
&blk_integrity_attr_group,
#endif
NULL
};

View File

@ -85,14 +85,6 @@ static int (*check_part[])(struct parsed_partitions *) = {
NULL
};
static void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors)
{
spin_lock(&bdev->bd_size_lock);
i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
bdev->bd_nr_sectors = sectors;
spin_unlock(&bdev->bd_size_lock);
}
static struct parsed_partitions *allocate_partitions(struct gendisk *hd)
{
struct parsed_partitions *state;

View File

@ -125,7 +125,7 @@ static const struct crypto_type crypto_acomp_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_acomp_show,
#endif
#ifdef CONFIG_CRYPTO_USER
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_acomp_report,
#endif
#ifdef CONFIG_CRYPTO_STATS

View File

@ -242,7 +242,7 @@ static const struct crypto_type crypto_aead_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_aead_show,
#endif
#ifdef CONFIG_CRYPTO_USER
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_aead_report,
#endif
#ifdef CONFIG_CRYPTO_STATS

View File

@ -509,7 +509,7 @@ static const struct crypto_type crypto_ahash_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_ahash_show,
#endif
#ifdef CONFIG_CRYPTO_USER
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_ahash_report,
#endif
#ifdef CONFIG_CRYPTO_STATS

View File

@ -98,7 +98,7 @@ static const struct crypto_type crypto_akcipher_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_akcipher_show,
#endif
#ifdef CONFIG_CRYPTO_USER
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_akcipher_report,
#endif
#ifdef CONFIG_CRYPTO_STATS

View File

@ -961,6 +961,9 @@ EXPORT_SYMBOL_GPL(crypto_enqueue_request);
void crypto_enqueue_request_head(struct crypto_queue *queue,
struct crypto_async_request *request)
{
if (unlikely(queue->qlen >= queue->max_qlen))
queue->backlog = queue->backlog->prev;
queue->qlen++;
list_add(&request->list, &queue->list);
}

View File

@ -129,9 +129,6 @@ start_request:
if (!engine->retry_support)
engine->cur_req = async_req;
if (backlog)
crypto_request_complete(backlog, -EINPROGRESS);
if (engine->busy)
was_busy = true;
else
@ -217,6 +214,9 @@ req_err_2:
crypto_request_complete(async_req, ret);
retry:
if (backlog)
crypto_request_complete(backlog, -EINPROGRESS);
/* If retry mechanism is supported, send new requests to engine */
if (engine->retry_support) {
spin_lock_irqsave(&engine->queue_lock, flags);

View File

@ -96,7 +96,7 @@ static const struct crypto_type crypto_kpp_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_kpp_show,
#endif
#ifdef CONFIG_CRYPTO_USER
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_kpp_report,
#endif
#ifdef CONFIG_CRYPTO_STATS

View File

@ -118,7 +118,7 @@ static const struct crypto_type crypto_rng_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_rng_show,
#endif
#ifdef CONFIG_CRYPTO_USER
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_rng_report,
#endif
#ifdef CONFIG_CRYPTO_STATS

View File

@ -240,7 +240,7 @@ static const struct crypto_type crypto_scomp_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_scomp_show,
#endif
#ifdef CONFIG_CRYPTO_USER
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_scomp_report,
#endif
#ifdef CONFIG_CRYPTO_STATS

View File

@ -548,7 +548,7 @@ static const struct crypto_type crypto_shash_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_shash_show,
#endif
#ifdef CONFIG_CRYPTO_USER
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_shash_report,
#endif
#ifdef CONFIG_CRYPTO_STATS

View File

@ -776,7 +776,7 @@ static const struct crypto_type crypto_skcipher_type = {
#ifdef CONFIG_PROC_FS
.show = crypto_skcipher_show,
#endif
#ifdef CONFIG_CRYPTO_USER
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_skcipher_report,
#endif
#ifdef CONFIG_CRYPTO_STATS

View File

@ -404,7 +404,6 @@ static int brd_alloc(int i)
/* Tell the block layer that this is not a rotational device */
blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, disk->queue);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
blk_queue_flag_set(QUEUE_FLAG_NOWAIT, disk->queue);
err = add_disk(disk);
if (err)

View File

@ -1283,7 +1283,7 @@ static void one_flush_endio(struct bio *bio)
static void submit_one_flush(struct drbd_device *device, struct issue_flush_context *ctx)
{
struct bio *bio = bio_alloc(device->ldev->backing_bdev, 0,
REQ_OP_FLUSH | REQ_PREFLUSH, GFP_NOIO);
REQ_OP_WRITE | REQ_PREFLUSH, GFP_NOIO);
struct one_flush_context *octx = kmalloc(sizeof(*octx), GFP_NOIO);
if (!octx) {

View File

@ -609,7 +609,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
request.len = htonl(size);
}
handle = nbd_cmd_handle(cmd);
memcpy(request.handle, &handle, sizeof(handle));
request.cookie = cpu_to_be64(handle);
trace_nbd_send_request(&request, nbd->index, blk_mq_rq_from_pdu(cmd));
@ -621,7 +621,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
trace_nbd_header_sent(req, handle);
if (result < 0) {
if (was_interrupted(result)) {
/* If we havne't sent anything we can just return BUSY,
/* If we haven't sent anything we can just return BUSY,
* however if we have sent something we need to make
* sure we only allow this req to be sent until we are
* completely done.
@ -735,7 +735,7 @@ static struct nbd_cmd *nbd_handle_reply(struct nbd_device *nbd, int index,
u32 tag;
int ret = 0;
memcpy(&handle, reply->handle, sizeof(handle));
handle = be64_to_cpu(reply->cookie);
tag = nbd_handle_to_tag(handle);
hwq = blk_mq_unique_tag_to_hwq(tag);
if (hwq < nbd->tag_set.nr_hw_queues)
@ -1805,7 +1805,6 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
* Tell the block layer that we are not a rotational device
*/
blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
disk->queue->limits.discard_granularity = 0;
blk_queue_max_discard_sectors(disk->queue, 0);
blk_queue_max_segment_size(disk->queue, UINT_MAX);

View File

@ -2144,7 +2144,6 @@ static int null_add_dev(struct nullb_device *dev)
nullb->q->queuedata = nullb;
blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q);
mutex_lock(&lock);
rv = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);

View File

@ -129,6 +129,7 @@ struct ublk_queue {
unsigned long io_addr; /* mapped vm address */
unsigned int max_io_sz;
bool force_abort;
bool timeout;
unsigned short nr_io_ready; /* how many ios setup */
struct ublk_device *dev;
struct ublk_io ios[];
@ -898,6 +899,22 @@ static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
}
}
static enum blk_eh_timer_return ublk_timeout(struct request *rq)
{
struct ublk_queue *ubq = rq->mq_hctx->driver_data;
if (ubq->flags & UBLK_F_UNPRIVILEGED_DEV) {
if (!ubq->timeout) {
send_sig(SIGKILL, ubq->ubq_daemon, 0);
ubq->timeout = true;
}
return BLK_EH_DONE;
}
return BLK_EH_RESET_TIMER;
}
static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@ -957,6 +974,7 @@ static const struct blk_mq_ops ublk_mq_ops = {
.queue_rq = ublk_queue_rq,
.init_hctx = ublk_init_hctx,
.init_request = ublk_init_rq,
.timeout = ublk_timeout,
};
static int ublk_ch_open(struct inode *inode, struct file *filp)
@ -1017,7 +1035,7 @@ static int ublk_ch_mmap(struct file *filp, struct vm_area_struct *vma)
}
static void ublk_commit_completion(struct ublk_device *ub,
struct ublksrv_io_cmd *ub_cmd)
const struct ublksrv_io_cmd *ub_cmd)
{
u32 qid = ub_cmd->q_id, tag = ub_cmd->tag;
struct ublk_queue *ubq = ublk_get_queue(ub, qid);
@ -1274,7 +1292,7 @@ static inline int ublk_check_cmd_op(u32 cmd_op)
static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
unsigned int issue_flags,
struct ublksrv_io_cmd *ub_cmd)
const struct ublksrv_io_cmd *ub_cmd)
{
struct ublk_device *ub = cmd->file->private_data;
struct ublk_queue *ubq;
@ -1381,17 +1399,17 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
struct ublksrv_io_cmd *ub_src = (struct ublksrv_io_cmd *) cmd->cmd;
struct ublksrv_io_cmd ub_cmd;
/*
* Not necessary for async retry, but let's keep it simple and always
* copy the values to avoid any potential reuse.
*/
ub_cmd.q_id = READ_ONCE(ub_src->q_id);
ub_cmd.tag = READ_ONCE(ub_src->tag);
ub_cmd.result = READ_ONCE(ub_src->result);
ub_cmd.addr = READ_ONCE(ub_src->addr);
const struct ublksrv_io_cmd *ub_src = io_uring_sqe_cmd(cmd->sqe);
const struct ublksrv_io_cmd ub_cmd = {
.q_id = READ_ONCE(ub_src->q_id),
.tag = READ_ONCE(ub_src->tag),
.result = READ_ONCE(ub_src->result),
.addr = READ_ONCE(ub_src->addr)
};
return __ublk_ch_uring_cmd(cmd, issue_flags, &ub_cmd);
}
@ -1601,7 +1619,7 @@ static struct ublk_device *ublk_get_device_from_id(int idx)
static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
{
struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
int ublksrv_pid = (int)header->data[0];
struct gendisk *disk;
int ret = -EINVAL;
@ -1664,7 +1682,7 @@ out_unlock:
static int ublk_ctrl_get_queue_affinity(struct ublk_device *ub,
struct io_uring_cmd *cmd)
{
struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
cpumask_var_t cpumask;
unsigned long queue;
@ -1715,7 +1733,7 @@ static inline void ublk_dump_dev_info(struct ublksrv_ctrl_dev_info *info)
static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
{
struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
struct ublksrv_ctrl_dev_info info;
struct ublk_device *ub;
@ -1737,6 +1755,18 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
else if (!(info.flags & UBLK_F_UNPRIVILEGED_DEV))
return -EPERM;
/*
* unprivileged device can't be trusted, but RECOVERY and
* RECOVERY_REISSUE still may hang error handling, so can't
* support recovery features for unprivileged ublk now
*
* TODO: provide forward progress for RECOVERY handler, so that
* unprivileged device can benefit from it
*/
if (info.flags & UBLK_F_UNPRIVILEGED_DEV)
info.flags &= ~(UBLK_F_USER_RECOVERY_REISSUE |
UBLK_F_USER_RECOVERY);
/* the created device is always owned by current user */
ublk_store_owner_uid_gid(&info.owner_uid, &info.owner_gid);
@ -1880,7 +1910,7 @@ static int ublk_ctrl_del_dev(struct ublk_device **p_ub)
static inline void ublk_ctrl_cmd_dump(struct io_uring_cmd *cmd)
{
struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
pr_devel("%s: cmd_op %x, dev id %d qid %d data %llx buf %llx len %u\n",
__func__, cmd->cmd_op, header->dev_id, header->queue_id,
@ -1899,7 +1929,7 @@ static int ublk_ctrl_stop_dev(struct ublk_device *ub)
static int ublk_ctrl_get_dev_info(struct ublk_device *ub,
struct io_uring_cmd *cmd)
{
struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
if (header->len < sizeof(struct ublksrv_ctrl_dev_info) || !header->addr)
@ -1930,7 +1960,7 @@ static void ublk_ctrl_fill_params_devt(struct ublk_device *ub)
static int ublk_ctrl_get_params(struct ublk_device *ub,
struct io_uring_cmd *cmd)
{
struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
struct ublk_params_header ph;
int ret;
@ -1961,7 +1991,7 @@ static int ublk_ctrl_get_params(struct ublk_device *ub,
static int ublk_ctrl_set_params(struct ublk_device *ub,
struct io_uring_cmd *cmd)
{
struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
void __user *argp = (void __user *)(unsigned long)header->addr;
struct ublk_params_header ph;
int ret = -EFAULT;
@ -2007,6 +2037,7 @@ static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
put_task_struct(ubq->ubq_daemon);
/* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
ubq->ubq_daemon = NULL;
ubq->timeout = false;
for (i = 0; i < ubq->q_depth; i++) {
struct ublk_io *io = &ubq->ios[i];
@ -2021,7 +2052,7 @@ static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
static int ublk_ctrl_start_recovery(struct ublk_device *ub,
struct io_uring_cmd *cmd)
{
struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
int ret = -EINVAL;
int i;
@ -2063,7 +2094,7 @@ static int ublk_ctrl_start_recovery(struct ublk_device *ub,
static int ublk_ctrl_end_recovery(struct ublk_device *ub,
struct io_uring_cmd *cmd)
{
struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
int ublksrv_pid = (int)header->data[0];
int ret = -EINVAL;
@ -2130,7 +2161,7 @@ exit:
static int ublk_ctrl_uring_cmd_permission(struct ublk_device *ub,
struct io_uring_cmd *cmd)
{
struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)io_uring_sqe_cmd(cmd->sqe);
bool unprivileged = ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV;
void __user *argp = (void __user *)(unsigned long)header->addr;
char *dev_path = NULL;
@ -2209,7 +2240,7 @@ exit:
static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
unsigned int issue_flags)
{
struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
struct ublk_device *ub = NULL;
u32 cmd_op = cmd->cmd_op;
int ret = -EINVAL;

View File

@ -2215,7 +2215,6 @@ static int zram_add(void)
/* zram devices sort of resembles non-rotational disks */
blk_queue_flag_set(QUEUE_FLAG_NONROT, zram->disk->queue);
blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, zram->disk->queue);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
/*
* To ensure that we always get PAGE_SIZE aligned

View File

@ -41,7 +41,7 @@ enum {
/* HIWORD_MASK FIELD_PREP */
#define HWM_FIELD_PREP(mask, value) \
({ \
u32 _m = mask; \
u64 _m = mask; \
(_m << 16) | FIELD_PREP(_m, value); \
})

View File

@ -119,17 +119,10 @@ static int imx8m_clk_composite_divider_set_rate(struct clk_hw *hw,
return ret;
}
static int imx8m_clk_divider_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
return clk_divider_ops.determine_rate(hw, req);
}
static const struct clk_ops imx8m_clk_composite_divider_ops = {
.recalc_rate = imx8m_clk_composite_divider_recalc_rate,
.round_rate = imx8m_clk_composite_divider_round_rate,
.set_rate = imx8m_clk_composite_divider_set_rate,
.determine_rate = imx8m_clk_divider_determine_rate,
};
static u8 imx8m_clk_composite_mux_get_parent(struct clk_hw *hw)

View File

@ -26,7 +26,7 @@ config CLK_STARFIVE_JH7110_SYS
depends on ARCH_STARFIVE || COMPILE_TEST
select AUXILIARY_BUS
select CLK_STARFIVE_JH71X0
select RESET_STARFIVE_JH7110
select RESET_STARFIVE_JH7110 if RESET_CONTROLLER
default ARCH_STARFIVE
help
Say yes here to support the system clock controller on the
@ -35,9 +35,6 @@ config CLK_STARFIVE_JH7110_SYS
config CLK_STARFIVE_JH7110_AON
tristate "StarFive JH7110 always-on clock support"
depends on CLK_STARFIVE_JH7110_SYS
select AUXILIARY_BUS
select CLK_STARFIVE_JH71X0
select RESET_STARFIVE_JH7110
default m if ARCH_STARFIVE
help
Say yes here to support the always-on clock controller on the

View File

@ -151,7 +151,7 @@ static int sun8i_ss_setup_ivs(struct skcipher_request *areq)
}
rctx->p_iv[i] = a;
/* we need to setup all others IVs only in the decrypt way */
if (rctx->op_dir & SS_ENCRYPTION)
if (rctx->op_dir == SS_ENCRYPTION)
return 0;
todo = min(len, sg_dma_len(sg));
len -= todo;

View File

@ -176,7 +176,7 @@ config MAILBOX_TEST
config POLARFIRE_SOC_MAILBOX
tristate "PolarFire SoC (MPFS) Mailbox"
depends on HAS_IOMEM
depends on SOC_MICROCHIP_POLARFIRE || COMPILE_TEST
depends on ARCH_MICROCHIP_POLARFIRE || COMPILE_TEST
help
This driver adds support for the PolarFire SoC (MPFS) mailbox controller.

View File

@ -1635,7 +1635,7 @@ static struct platform_driver pdc_mbox_driver = {
.remove = pdc_remove,
.driver = {
.name = "brcm-iproc-pdc-mbox",
.of_match_table = of_match_ptr(pdc_mbox_of_match),
.of_match_table = pdc_mbox_of_match,
},
};
module_platform_driver(pdc_mbox_driver);

View File

@ -325,10 +325,7 @@ static int hi6220_mbox_probe(struct platform_device *pdev)
writel(~0x0, ACK_INT_CLR_REG(mbox->ipc));
/* use interrupt for tx's ack */
if (of_find_property(node, "hi6220,mbox-tx-noirq", NULL))
mbox->tx_irq_mode = false;
else
mbox->tx_irq_mode = true;
mbox->tx_irq_mode = !of_property_read_bool(node, "hi6220,mbox-tx-noirq");
if (mbox->tx_irq_mode)
mbox->controller.txdone_irq = true;

View File

@ -12,10 +12,12 @@
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/sched/signal.h>
@ -38,6 +40,7 @@ struct mbox_test_device {
char *signal;
char *message;
spinlock_t lock;
struct mutex mutex;
wait_queue_head_t waitq;
struct fasync_struct *async_queue;
struct dentry *root_debugfs_dir;
@ -110,6 +113,8 @@ static ssize_t mbox_test_message_write(struct file *filp,
return -EINVAL;
}
mutex_lock(&tdev->mutex);
tdev->message = kzalloc(MBOX_MAX_MSG_LEN, GFP_KERNEL);
if (!tdev->message)
return -ENOMEM;
@ -144,6 +149,8 @@ out:
kfree(tdev->message);
tdev->signal = NULL;
mutex_unlock(&tdev->mutex);
return ret < 0 ? ret : count;
}
@ -392,6 +399,7 @@ static int mbox_test_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, tdev);
spin_lock_init(&tdev->lock);
mutex_init(&tdev->mutex);
if (tdev->rx_channel) {
tdev->rx_buffer = devm_kzalloc(&pdev->dev,

View File

@ -317,6 +317,71 @@ int mbox_flush(struct mbox_chan *chan, unsigned long timeout)
}
EXPORT_SYMBOL_GPL(mbox_flush);
static int __mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
{
struct device *dev = cl->dev;
unsigned long flags;
int ret;
if (chan->cl || !try_module_get(chan->mbox->dev->driver->owner)) {
dev_dbg(dev, "%s: mailbox not free\n", __func__);
return -EBUSY;
}
spin_lock_irqsave(&chan->lock, flags);
chan->msg_free = 0;
chan->msg_count = 0;
chan->active_req = NULL;
chan->cl = cl;
init_completion(&chan->tx_complete);
if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
chan->txdone_method = TXDONE_BY_ACK;
spin_unlock_irqrestore(&chan->lock, flags);
if (chan->mbox->ops->startup) {
ret = chan->mbox->ops->startup(chan);
if (ret) {
dev_err(dev, "Unable to startup the chan (%d)\n", ret);
mbox_free_channel(chan);
return ret;
}
}
return 0;
}
/**
* mbox_bind_client - Request a mailbox channel.
* @chan: The mailbox channel to bind the client to.
* @cl: Identity of the client requesting the channel.
*
* The Client specifies its requirements and capabilities while asking for
* a mailbox channel. It can't be called from atomic context.
* The channel is exclusively allocated and can't be used by another
* client before the owner calls mbox_free_channel.
* After assignment, any packet received on this channel will be
* handed over to the client via the 'rx_callback'.
* The framework holds reference to the client, so the mbox_client
* structure shouldn't be modified until the mbox_free_channel returns.
*
* Return: 0 if the channel was assigned to the client successfully.
* <0 for request failure.
*/
int mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
{
int ret;
mutex_lock(&con_mutex);
ret = __mbox_bind_client(chan, cl);
mutex_unlock(&con_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(mbox_bind_client);
/**
* mbox_request_channel - Request a mailbox channel.
* @cl: Identity of the client requesting the channel.
@ -340,7 +405,6 @@ struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
struct mbox_controller *mbox;
struct of_phandle_args spec;
struct mbox_chan *chan;
unsigned long flags;
int ret;
if (!dev || !dev->of_node) {
@ -372,33 +436,9 @@ struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
return chan;
}
if (chan->cl || !try_module_get(mbox->dev->driver->owner)) {
dev_dbg(dev, "%s: mailbox not free\n", __func__);
mutex_unlock(&con_mutex);
return ERR_PTR(-EBUSY);
}
spin_lock_irqsave(&chan->lock, flags);
chan->msg_free = 0;
chan->msg_count = 0;
chan->active_req = NULL;
chan->cl = cl;
init_completion(&chan->tx_complete);
if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
chan->txdone_method = TXDONE_BY_ACK;
spin_unlock_irqrestore(&chan->lock, flags);
if (chan->mbox->ops->startup) {
ret = chan->mbox->ops->startup(chan);
if (ret) {
dev_err(dev, "Unable to startup the chan (%d)\n", ret);
mbox_free_channel(chan);
chan = ERR_PTR(ret);
}
}
ret = __mbox_bind_client(chan, cl);
if (ret)
chan = ERR_PTR(ret);
mutex_unlock(&con_mutex);
return chan;
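As an aside on the new mbox_bind_client() API above, a minimal client-side sketch (illustrative only, not part of this merge; the helper names, field values and error handling are assumptions) for a driver that already holds a struct mbox_chan pointer instead of looking one up via DT:

	#include <linux/device.h>
	#include <linux/mailbox_client.h>
	#include <linux/slab.h>

	static void demo_rx(struct mbox_client *cl, void *msg)
	{
		dev_dbg(cl->dev, "rx message %p\n", msg);
	}

	/* 'chan' was located by controller-specific means, not via a DT phandle. */
	static int demo_bind(struct device *dev, struct mbox_chan *chan)
	{
		struct mbox_client *cl;

		cl = devm_kzalloc(dev, sizeof(*cl), GFP_KERNEL);
		if (!cl)
			return -ENOMEM;

		cl->dev = dev;
		cl->rx_callback = demo_rx;
		cl->tx_block = true;
		cl->tx_tout = 500;	/* ms */

		/* Exclusive ownership of the channel until mbox_free_channel(chan). */
		return mbox_bind_client(chan, cl);
	}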

View File

@ -417,8 +417,6 @@ struct mbox_chan *omap_mbox_request_channel(struct mbox_client *cl,
struct device *dev = cl->dev;
struct omap_mbox *mbox = NULL;
struct omap_mbox_device *mdev;
struct mbox_chan *chan;
unsigned long flags;
int ret;
if (!dev)
@ -441,23 +439,11 @@ struct mbox_chan *omap_mbox_request_channel(struct mbox_client *cl,
if (!mbox || !mbox->chan)
return ERR_PTR(-ENOENT);
chan = mbox->chan;
spin_lock_irqsave(&chan->lock, flags);
chan->msg_free = 0;
chan->msg_count = 0;
chan->active_req = NULL;
chan->cl = cl;
init_completion(&chan->tx_complete);
spin_unlock_irqrestore(&chan->lock, flags);
ret = mbox_bind_client(mbox->chan, cl);
if (ret)
return ERR_PTR(ret);
ret = chan->mbox->ops->startup(chan);
if (ret) {
pr_err("Unable to startup the chan (%d)\n", ret);
mbox_free_channel(chan);
chan = ERR_PTR(ret);
}
return chan;
return mbox->chan;
}
EXPORT_SYMBOL(omap_mbox_request_channel);
@ -763,8 +749,7 @@ static int omap_mbox_probe(struct platform_device *pdev)
finfo->name = child->name;
if (of_find_property(child, "ti,mbox-send-noirq", NULL))
finfo->send_no_irq = true;
finfo->send_no_irq = of_property_read_bool(child, "ti,mbox-send-noirq");
if (finfo->tx_id >= num_fifos || finfo->rx_id >= num_fifos ||
finfo->tx_usr >= num_users || finfo->rx_usr >= num_users)

View File

@ -282,8 +282,7 @@ pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id)
{
struct pcc_chan_info *pchan;
struct mbox_chan *chan;
struct device *dev;
unsigned long flags;
int rc;
if (subspace_id < 0 || subspace_id >= pcc_chan_count)
return ERR_PTR(-ENOENT);
@ -294,32 +293,10 @@ pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id)
pr_err("Channel not found for idx: %d\n", subspace_id);
return ERR_PTR(-EBUSY);
}
dev = chan->mbox->dev;
spin_lock_irqsave(&chan->lock, flags);
chan->msg_free = 0;
chan->msg_count = 0;
chan->active_req = NULL;
chan->cl = cl;
init_completion(&chan->tx_complete);
if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
chan->txdone_method = TXDONE_BY_ACK;
spin_unlock_irqrestore(&chan->lock, flags);
if (pchan->plat_irq > 0) {
int rc;
rc = devm_request_irq(dev, pchan->plat_irq, pcc_mbox_irq, 0,
MBOX_IRQ_NAME, chan);
if (unlikely(rc)) {
dev_err(dev, "failed to register PCC interrupt %d\n",
pchan->plat_irq);
pcc_mbox_free_channel(&pchan->chan);
return ERR_PTR(rc);
}
}
rc = mbox_bind_client(chan, cl);
if (rc)
return ERR_PTR(rc);
return &pchan->chan;
}
@ -333,23 +310,12 @@ EXPORT_SYMBOL_GPL(pcc_mbox_request_channel);
*/
void pcc_mbox_free_channel(struct pcc_mbox_chan *pchan)
{
struct pcc_chan_info *pchan_info = to_pcc_chan_info(pchan);
struct mbox_chan *chan = pchan->mchan;
unsigned long flags;
if (!chan || !chan->cl)
return;
if (pchan_info->plat_irq > 0)
devm_free_irq(chan->mbox->dev, pchan_info->plat_irq, chan);
spin_lock_irqsave(&chan->lock, flags);
chan->cl = NULL;
chan->active_req = NULL;
if (chan->txdone_method == TXDONE_BY_ACK)
chan->txdone_method = TXDONE_BY_POLL;
spin_unlock_irqrestore(&chan->lock, flags);
mbox_free_channel(chan);
}
EXPORT_SYMBOL_GPL(pcc_mbox_free_channel);
@ -377,8 +343,48 @@ static int pcc_send_data(struct mbox_chan *chan, void *data)
return pcc_chan_reg_read_modify_write(&pchan->db);
}
/**
* pcc_startup - Called from Mailbox Controller code. Used here
* to request the interrupt.
* @chan: Pointer to Mailbox channel to startup.
*
* Return: Err if something failed else 0 for success.
*/
static int pcc_startup(struct mbox_chan *chan)
{
struct pcc_chan_info *pchan = chan->con_priv;
int rc;
if (pchan->plat_irq > 0) {
rc = devm_request_irq(chan->mbox->dev, pchan->plat_irq, pcc_mbox_irq, 0,
MBOX_IRQ_NAME, chan);
if (unlikely(rc)) {
dev_err(chan->mbox->dev, "failed to register PCC interrupt %d\n",
pchan->plat_irq);
return rc;
}
}
return 0;
}
/**
* pcc_shutdown - Called from Mailbox Controller code. Used here
* to free the interrupt.
* @chan: Pointer to Mailbox channel to shutdown.
*/
static void pcc_shutdown(struct mbox_chan *chan)
{
struct pcc_chan_info *pchan = chan->con_priv;
if (pchan->plat_irq > 0)
devm_free_irq(chan->mbox->dev, pchan->plat_irq, chan);
}
static const struct mbox_chan_ops pcc_chan_ops = {
.send_data = pcc_send_data,
.startup = pcc_startup,
.shutdown = pcc_shutdown,
};
/**

View File

@ -141,9 +141,7 @@ static int qcom_apcs_ipc_remove(struct platform_device *pdev)
/* .data is the offset of the ipc register within the global block */
static const struct of_device_id qcom_apcs_ipc_of_match[] = {
{ .compatible = "qcom,ipq5332-apcs-apps-global", .data = &ipq6018_apcs_data },
{ .compatible = "qcom,ipq6018-apcs-apps-global", .data = &ipq6018_apcs_data },
{ .compatible = "qcom,ipq8074-apcs-apps-global", .data = &ipq6018_apcs_data },
{ .compatible = "qcom,msm8916-apcs-kpss-global", .data = &msm8916_apcs_data },
{ .compatible = "qcom,msm8939-apcs-kpss-global", .data = &msm8916_apcs_data },
{ .compatible = "qcom,msm8953-apcs-kpss-global", .data = &msm8994_apcs_data },
@ -153,15 +151,18 @@ static const struct of_device_id qcom_apcs_ipc_of_match[] = {
{ .compatible = "qcom,msm8998-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,qcm2290-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,qcs404-apcs-apps-global", .data = &msm8916_apcs_data },
{ .compatible = "qcom,sc7180-apss-shared", .data = &apps_shared_apcs_data },
{ .compatible = "qcom,sc8180x-apss-shared", .data = &apps_shared_apcs_data },
{ .compatible = "qcom,sdm660-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,sdm845-apss-shared", .data = &apps_shared_apcs_data },
{ .compatible = "qcom,sm4250-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,sm6125-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,sm8150-apss-shared", .data = &apps_shared_apcs_data },
{ .compatible = "qcom,sm6115-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,sdx55-apcs-gcc", .data = &sdx55_apcs_data },
/* Do not add any more entries using existing driver data */
{ .compatible = "qcom,ipq5332-apcs-apps-global", .data = &ipq6018_apcs_data },
{ .compatible = "qcom,ipq8074-apcs-apps-global", .data = &ipq6018_apcs_data },
{ .compatible = "qcom,sc7180-apss-shared", .data = &apps_shared_apcs_data },
{ .compatible = "qcom,sc8180x-apss-shared", .data = &apps_shared_apcs_data },
{ .compatible = "qcom,sm8150-apss-shared", .data = &apps_shared_apcs_data },
{}
};
MODULE_DEVICE_TABLE(of, qcom_apcs_ipc_of_match);

View File

@ -248,7 +248,7 @@ static struct platform_driver rockchip_mbox_driver = {
.probe = rockchip_mbox_probe,
.driver = {
.name = "rockchip-mailbox",
.of_match_table = of_match_ptr(rockchip_mbox_of_match),
.of_match_table = rockchip_mbox_of_match,
},
};

View File

@ -971,7 +971,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
}
blk_queue_flag_set(QUEUE_FLAG_NONROT, d->disk->queue);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, d->disk->queue);
blk_queue_write_cache(q, true, true);

View File

@ -219,7 +219,7 @@ static unsigned int optimal_io_size(struct block_device *bdev,
}
static unsigned int bitmap_io_size(unsigned int io_size, unsigned int opt_size,
sector_t start, sector_t boundary)
loff_t start, loff_t boundary)
{
if (io_size != opt_size &&
start + opt_size / SECTOR_SIZE <= boundary)
@ -237,8 +237,8 @@ static int __write_sb_page(struct md_rdev *rdev, struct bitmap *bitmap,
struct block_device *bdev;
struct mddev *mddev = bitmap->mddev;
struct bitmap_storage *store = &bitmap->storage;
sector_t offset = mddev->bitmap_info.offset;
sector_t ps, sboff, doff;
loff_t sboff, offset = mddev->bitmap_info.offset;
sector_t ps, doff;
unsigned int size = PAGE_SIZE;
unsigned int opt_size = PAGE_SIZE;

View File

@ -6079,6 +6079,38 @@ out_release:
return ret;
}
/*
* If the bio covers multiple data disks, find sector within the bio that has
* the lowest chunk offset in the first chunk.
*/
static sector_t raid5_bio_lowest_chunk_sector(struct r5conf *conf,
struct bio *bi)
{
int sectors_per_chunk = conf->chunk_sectors;
int raid_disks = conf->raid_disks;
int dd_idx;
struct stripe_head sh;
unsigned int chunk_offset;
sector_t r_sector = bi->bi_iter.bi_sector & ~((sector_t)RAID5_STRIPE_SECTORS(conf)-1);
sector_t sector;
/* We pass in fake stripe_head to get back parity disk numbers */
sector = raid5_compute_sector(conf, r_sector, 0, &dd_idx, &sh);
chunk_offset = sector_div(sector, sectors_per_chunk);
if (sectors_per_chunk - chunk_offset >= bio_sectors(bi))
return r_sector;
/*
* Bio crosses to the next data disk. Check whether it's in the same
* chunk.
*/
dd_idx++;
while (dd_idx == sh.pd_idx || dd_idx == sh.qd_idx)
dd_idx++;
if (dd_idx >= raid_disks)
return r_sector;
return r_sector + sectors_per_chunk - chunk_offset;
}
static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
@ -6150,6 +6182,17 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
}
md_account_bio(mddev, &bi);
/*
* Let's start with the stripe with the lowest chunk offset in the first
* chunk. That has the best chances of creating IOs adjacent to
* previous IOs in case of sequential IO and thus creates the most
* sequential IO pattern. We don't bother with the optimization when
* reshaping as the performance benefit is not worth the complexity.
*/
if (likely(conf->reshape_progress == MaxSector))
logical_sector = raid5_bio_lowest_chunk_sector(conf, bi);
s = (logical_sector - ctx.first_sector) >> RAID5_STRIPE_SHIFT(conf);
add_wait_queue(&conf->wait_for_overlap, &wait);
while (1) {
res = make_stripe_request(mddev, conf, &ctx, logical_sector,
@ -6178,7 +6221,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
continue;
}
s = find_first_bit(ctx.sectors_to_do, stripe_cnt);
s = find_next_bit_wrap(ctx.sectors_to_do, stripe_cnt, s);
if (s == stripe_cnt)
break;
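To make the lowest-chunk-sector logic above concrete (numbers assumed, not from the commit): with 128-sector chunks and an r_sector that lands at chunk offset 100, a 60-sector bio does not fit in the remaining 28 sectors of that chunk, so raid5_bio_lowest_chunk_sector() returns r_sector + 128 - 100 = r_sector + 28, which is the first sector of the bio that begins a fresh chunk on the next data disk; raid5_make_request() then starts issuing stripes from there, keeping the generated IO adjacent to earlier sequential IO.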

View File

@ -1035,7 +1035,6 @@ static int mdp_comp_sub_create(struct mdp_dev *mdp)
{
struct device *dev = &mdp->pdev->dev;
struct device_node *node, *parent;
const struct mtk_mdp_driver_data *data = mdp->mdp_data;
parent = dev->of_node->parent;
@ -1045,7 +1044,7 @@ static int mdp_comp_sub_create(struct mdp_dev *mdp)
int id, alias_id;
struct mdp_comp *comp;
of_id = of_match_node(data->mdp_sub_comp_dt_ids, node);
of_id = of_match_node(mdp->mdp_data->mdp_sub_comp_dt_ids, node);
if (!of_id)
continue;
if (!of_device_is_available(node)) {

View File

@ -378,8 +378,8 @@ static int mxc_isi_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops mxc_isi_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(mxc_isi_pm_suspend, mxc_isi_pm_resume)
SET_RUNTIME_PM_OPS(mxc_isi_runtime_suspend, mxc_isi_runtime_resume, NULL)
SYSTEM_SLEEP_PM_OPS(mxc_isi_pm_suspend, mxc_isi_pm_resume)
RUNTIME_PM_OPS(mxc_isi_runtime_suspend, mxc_isi_runtime_resume, NULL)
};
/* -----------------------------------------------------------------------------
@ -528,7 +528,7 @@ static struct platform_driver mxc_isi_driver = {
.driver = {
.of_match_table = mxc_isi_of_match,
.name = MXC_ISI_DRIVER_NAME,
.pm = &mxc_isi_pm_ops,
.pm = pm_ptr(&mxc_isi_pm_ops),
}
};
module_platform_driver(mxc_isi_driver);

View File

@ -29,11 +29,10 @@ static inline void mxc_isi_write(struct mxc_isi_pipe *pipe, u32 reg, u32 val)
void mxc_isi_channel_set_inbuf(struct mxc_isi_pipe *pipe, dma_addr_t dma_addr)
{
mxc_isi_write(pipe, CHNL_IN_BUF_ADDR, dma_addr);
#if CONFIG_ARCH_DMA_ADDR_T_64BIT
mxc_isi_write(pipe, CHNL_IN_BUF_ADDR, lower_32_bits(dma_addr));
if (pipe->isi->pdata->has_36bit_dma)
mxc_isi_write(pipe, CHNL_IN_BUF_XTND_ADDR, dma_addr >> 32);
#endif
mxc_isi_write(pipe, CHNL_IN_BUF_XTND_ADDR,
upper_32_bits(dma_addr));
}
void mxc_isi_channel_set_outbuf(struct mxc_isi_pipe *pipe,
@ -45,34 +44,36 @@ void mxc_isi_channel_set_outbuf(struct mxc_isi_pipe *pipe,
val = mxc_isi_read(pipe, CHNL_OUT_BUF_CTRL);
if (buf_id == MXC_ISI_BUF1) {
mxc_isi_write(pipe, CHNL_OUT_BUF1_ADDR_Y, dma_addrs[0]);
mxc_isi_write(pipe, CHNL_OUT_BUF1_ADDR_U, dma_addrs[1]);
mxc_isi_write(pipe, CHNL_OUT_BUF1_ADDR_V, dma_addrs[2]);
#if CONFIG_ARCH_DMA_ADDR_T_64BIT
mxc_isi_write(pipe, CHNL_OUT_BUF1_ADDR_Y,
lower_32_bits(dma_addrs[0]));
mxc_isi_write(pipe, CHNL_OUT_BUF1_ADDR_U,
lower_32_bits(dma_addrs[1]));
mxc_isi_write(pipe, CHNL_OUT_BUF1_ADDR_V,
lower_32_bits(dma_addrs[2]));
if (pipe->isi->pdata->has_36bit_dma) {
mxc_isi_write(pipe, CHNL_Y_BUF1_XTND_ADDR,
dma_addrs[0] >> 32);
upper_32_bits(dma_addrs[0]));
mxc_isi_write(pipe, CHNL_U_BUF1_XTND_ADDR,
dma_addrs[1] >> 32);
upper_32_bits(dma_addrs[1]));
mxc_isi_write(pipe, CHNL_V_BUF1_XTND_ADDR,
dma_addrs[2] >> 32);
upper_32_bits(dma_addrs[2]));
}
#endif
val ^= CHNL_OUT_BUF_CTRL_LOAD_BUF1_ADDR;
} else {
mxc_isi_write(pipe, CHNL_OUT_BUF2_ADDR_Y, dma_addrs[0]);
mxc_isi_write(pipe, CHNL_OUT_BUF2_ADDR_U, dma_addrs[1]);
mxc_isi_write(pipe, CHNL_OUT_BUF2_ADDR_V, dma_addrs[2]);
#if CONFIG_ARCH_DMA_ADDR_T_64BIT
mxc_isi_write(pipe, CHNL_OUT_BUF2_ADDR_Y,
lower_32_bits(dma_addrs[0]));
mxc_isi_write(pipe, CHNL_OUT_BUF2_ADDR_U,
lower_32_bits(dma_addrs[1]));
mxc_isi_write(pipe, CHNL_OUT_BUF2_ADDR_V,
lower_32_bits(dma_addrs[2]));
if (pipe->isi->pdata->has_36bit_dma) {
mxc_isi_write(pipe, CHNL_Y_BUF2_XTND_ADDR,
dma_addrs[0] >> 32);
upper_32_bits(dma_addrs[0]));
mxc_isi_write(pipe, CHNL_U_BUF2_XTND_ADDR,
dma_addrs[1] >> 32);
upper_32_bits(dma_addrs[1]));
mxc_isi_write(pipe, CHNL_V_BUF2_XTND_ADDR,
dma_addrs[2] >> 32);
upper_32_bits(dma_addrs[2]));
}
#endif
val ^= CHNL_OUT_BUF_CTRL_LOAD_BUF2_ADDR;
}

View File

@ -728,11 +728,9 @@ static int rvin_setup(struct rvin_dev *vin)
case V4L2_FIELD_SEQ_TB:
case V4L2_FIELD_SEQ_BT:
case V4L2_FIELD_NONE:
vnmc = VNMC_IM_ODD_EVEN;
progressive = true;
break;
case V4L2_FIELD_ALTERNATE:
vnmc = VNMC_IM_ODD_EVEN;
progressive = true;
break;
default:
vnmc = VNMC_IM_ODD;
@ -1312,12 +1310,23 @@ static int rvin_mc_validate_format(struct rvin_dev *vin, struct v4l2_subdev *sd,
}
if (rvin_scaler_needed(vin)) {
/* Gen3 can't scale NV12 */
if (vin->info->model == RCAR_GEN3 &&
vin->format.pixelformat == V4L2_PIX_FMT_NV12)
return -EPIPE;
if (!vin->scaler)
return -EPIPE;
} else {
if (fmt.format.width != vin->format.width ||
fmt.format.height != vin->format.height)
return -EPIPE;
if (vin->format.pixelformat == V4L2_PIX_FMT_NV12) {
if (ALIGN(fmt.format.width, 32) != vin->format.width ||
ALIGN(fmt.format.height, 32) != vin->format.height)
return -EPIPE;
} else {
if (fmt.format.width != vin->format.width ||
fmt.format.height != vin->format.height)
return -EPIPE;
}
}
if (fmt.format.code != vin->mbus_code)

View File

@ -84,6 +84,11 @@ nla_put_failure:
return -EMSGSIZE;
}
/* Limit the max delay range to 300s */
static struct netlink_range_validation delay_range = {
.max = 300000,
};
static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
[IFLA_BOND_MODE] = { .type = NLA_U8 },
[IFLA_BOND_ACTIVE_SLAVE] = { .type = NLA_U32 },
@ -114,7 +119,7 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
[IFLA_BOND_AD_ACTOR_SYSTEM] = { .type = NLA_BINARY,
.len = ETH_ALEN },
[IFLA_BOND_TLB_DYNAMIC_LB] = { .type = NLA_U8 },
[IFLA_BOND_PEER_NOTIF_DELAY] = { .type = NLA_U32 },
[IFLA_BOND_PEER_NOTIF_DELAY] = NLA_POLICY_FULL_RANGE(NLA_U32, &delay_range),
[IFLA_BOND_MISSED_MAX] = { .type = NLA_U8 },
[IFLA_BOND_NS_IP6_TARGET] = { .type = NLA_NESTED },
};

View File

@ -169,6 +169,12 @@ static const struct bond_opt_value bond_num_peer_notif_tbl[] = {
{ NULL, -1, 0}
};
static const struct bond_opt_value bond_peer_notif_delay_tbl[] = {
{ "off", 0, 0},
{ "maxval", 300000, BOND_VALFLAG_MAX},
{ NULL, -1, 0}
};
static const struct bond_opt_value bond_primary_reselect_tbl[] = {
{ "always", BOND_PRI_RESELECT_ALWAYS, BOND_VALFLAG_DEFAULT},
{ "better", BOND_PRI_RESELECT_BETTER, 0},
@ -488,7 +494,7 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.id = BOND_OPT_PEER_NOTIF_DELAY,
.name = "peer_notif_delay",
.desc = "Delay between each peer notification on failover event, in milliseconds",
.values = bond_intmax_tbl,
.values = bond_peer_notif_delay_tbl,
.set = bond_option_peer_notif_delay_set
}
};

View File

@ -294,19 +294,6 @@ static int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
bool reschedule = false;
int work_done = 0;
/* Clear PCI MSI-X Pending Bit Array (PBA)
*
* This bit is set if an interrupt event occurs while the vector is
* masked. If this bit is set and we reenable the interrupt, it will
* fire again. Since we're just about to poll the queue state, we don't
* need it to fire again.
*
* Under high softirq load, it's possible that the interrupt condition
* is triggered twice before we got the chance to process it.
*/
gve_write_irq_doorbell_dqo(priv, block,
GVE_ITR_NO_UPDATE_DQO | GVE_ITR_CLEAR_PBA_BIT_DQO);
if (block->tx)
reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);

View File

@ -654,7 +654,7 @@ __mtk_wed_detach(struct mtk_wed_device *dev)
BIT(hw->index), BIT(hw->index));
}
if (!hw_list[!hw->index]->wed_dev &&
if ((!hw_list[!hw->index] || !hw_list[!hw->index]->wed_dev) &&
hw->eth->dma_dev != hw->eth->dev)
mtk_eth_set_dma_device(hw->eth, hw->eth->dev);

View File

@ -307,15 +307,15 @@ static const u32 vsc7514_sys_regmap[] = {
REG(SYS_COUNT_DROP_YELLOW_PRIO_4, 0x000218),
REG(SYS_COUNT_DROP_YELLOW_PRIO_5, 0x00021c),
REG(SYS_COUNT_DROP_YELLOW_PRIO_6, 0x000220),
REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000214),
REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000218),
REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00021c),
REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000220),
REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000224),
REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000228),
REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00022c),
REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000230),
REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000234),
REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000224),
REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000228),
REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00022c),
REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000230),
REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000234),
REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000238),
REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00023c),
REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000240),
REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000244),
REG(SYS_RESET_CFG, 0x000508),
REG(SYS_CMID, 0x00050c),
REG(SYS_VLAN_ETYPE_CFG, 0x000510),


@ -181,6 +181,7 @@ enum power_event {
#define GMAC4_LPI_CTRL_STATUS 0xd0
#define GMAC4_LPI_TIMER_CTRL 0xd4
#define GMAC4_LPI_ENTRY_TIMER 0xd8
#define GMAC4_MAC_ONEUS_TIC_COUNTER 0xdc
/* LPI control and status defines */
#define GMAC4_LPI_CTRL_STATUS_LPITCSE BIT(21) /* LPI Tx Clock Stop Enable */


@ -25,6 +25,7 @@ static void dwmac4_core_init(struct mac_device_info *hw,
struct stmmac_priv *priv = netdev_priv(dev);
void __iomem *ioaddr = hw->pcsr;
u32 value = readl(ioaddr + GMAC_CONFIG);
u32 clk_rate;
value |= GMAC_CORE_INIT;
@ -47,6 +48,10 @@ static void dwmac4_core_init(struct mac_device_info *hw,
writel(value, ioaddr + GMAC_CONFIG);
/* Configure LPI 1us counter to number of CSR clock ticks in 1us - 1 */
clk_rate = clk_get_rate(priv->plat->stmmac_clk);
writel((clk_rate / 1000000) - 1, ioaddr + GMAC4_MAC_ONEUS_TIC_COUNTER);
/* Enable GMAC interrupts */
value = GMAC_INT_DEFAULT_ENABLE;


@ -436,6 +436,9 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb)
goto err;
}
skb_dst_set(skb, &rt->dst);
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
err = ip_local_out(net, skb->sk, skb);
if (unlikely(net_xmit_eval(err)))
dev->stats.tx_errors++;
@ -474,6 +477,9 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
goto err;
}
skb_dst_set(skb, dst);
memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
err = ip6_local_out(net, skb->sk, skb);
if (unlikely(net_xmit_eval(err)))
dev->stats.tx_errors++;


@ -67,6 +67,7 @@ static int mvusb_mdio_probe(struct usb_interface *interface,
struct device *dev = &interface->dev;
struct mvusb_mdio *mvusb;
struct mii_bus *mdio;
int ret;
mdio = devm_mdiobus_alloc_size(dev, sizeof(*mvusb));
if (!mdio)
@ -87,7 +88,15 @@ static int mvusb_mdio_probe(struct usb_interface *interface,
mdio->write = mvusb_mdio_write;
usb_set_intfdata(interface, mvusb);
return of_mdiobus_register(mdio, dev->of_node);
ret = of_mdiobus_register(mdio, dev->of_node);
if (ret)
goto put_dev;
return 0;
put_dev:
usb_put_dev(mvusb->udev);
return ret;
}
static void mvusb_mdio_disconnect(struct usb_interface *interface)


@ -1203,7 +1203,7 @@ static const struct xpcs_compat synopsys_xpcs_compat[DW_XPCS_INTERFACE_MAX] = {
[DW_XPCS_2500BASEX] = {
.supported = xpcs_2500basex_features,
.interface = xpcs_2500basex_interfaces,
.num_interfaces = ARRAY_SIZE(xpcs_2500basex_features),
.num_interfaces = ARRAY_SIZE(xpcs_2500basex_interfaces),
.an_mode = DW_2500BASEX,
},
};


@ -40,6 +40,11 @@ static inline int bcm_phy_write_exp_sel(struct phy_device *phydev,
return bcm_phy_write_exp(phydev, reg | MII_BCM54XX_EXP_SEL_ER, val);
}
static inline int bcm_phy_read_exp_sel(struct phy_device *phydev, u16 reg)
{
return bcm_phy_read_exp(phydev, reg | MII_BCM54XX_EXP_SEL_ER);
}
int bcm54xx_auxctl_write(struct phy_device *phydev, u16 regnum, u16 val);
int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum);


@ -486,7 +486,7 @@ static int bcm7xxx_16nm_ephy_afe_config(struct phy_device *phydev)
bcm_phy_write_misc(phydev, 0x0038, 0x0002, 0xede0);
/* Read CORE_EXPA9 */
tmp = bcm_phy_read_exp(phydev, 0x00a9);
tmp = bcm_phy_read_exp_sel(phydev, 0x00a9);
/* CORE_EXPA9[6:1] is rcalcode[5:0] */
rcalcode = (tmp & 0x7e) / 2;
/* Correct RCAL code + 1 is -1% rprogr, LP: +16 */


@ -742,7 +742,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
/* Move network header to the right position for VLAN tagged packets */
if (eth_type_vlan(skb->protocol) &&
__vlan_get_protocol(skb, skb->protocol, &depth) != 0)
vlan_get_protocol_and_depth(skb, skb->protocol, &depth) != 0)
skb_set_network_header(skb, depth);
/* copy skb_ubuf_info for callback when skb has no error */
@ -1197,7 +1197,7 @@ static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
/* Move network header to the right position for VLAN tagged packets */
if (eth_type_vlan(skb->protocol) &&
__vlan_get_protocol(skb, skb->protocol, &depth) != 0)
vlan_get_protocol_and_depth(skb, skb->protocol, &depth) != 0)
skb_set_network_header(skb, depth);
rcu_read_lock();


@ -552,7 +552,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
struct io_uring_cmd *ioucmd, unsigned int issue_flags, bool vec)
{
struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
const struct nvme_uring_cmd *cmd = ioucmd->cmd;
const struct nvme_uring_cmd *cmd = io_uring_sqe_cmd(ioucmd->sqe);
struct request_queue *q = ns ? ns->queue : ctrl->admin_q;
struct nvme_uring_data d;
struct nvme_command c;


@ -784,7 +784,7 @@ static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
fifo = vring->fifo;
/* Return if vdev is not ready. */
if (!fifo->vdev[devid])
if (!fifo || !fifo->vdev[devid])
return;
/* Return if another vring is running. */
@ -980,9 +980,13 @@ static int mlxbf_tmfifo_virtio_find_vqs(struct virtio_device *vdev,
vq->num_max = vring->num;
vq->priv = vring;
/* Make vq update visible before using it. */
virtio_mb(false);
vqs[i] = vq;
vring->vq = vq;
vq->priv = vring;
}
return 0;
@ -1302,6 +1306,9 @@ static int mlxbf_tmfifo_probe(struct platform_device *pdev)
mod_timer(&fifo->timer, jiffies + MLXBF_TMFIFO_TIMER_INTERVAL);
/* Make all updates visible before setting the 'is_ready' flag. */
virtio_mb(false);
fifo->is_ready = true;
return 0;


@ -211,6 +211,7 @@ struct bios_rfkill2_state {
static const struct key_entry hp_wmi_keymap[] = {
{ KE_KEY, 0x02, { KEY_BRIGHTNESSUP } },
{ KE_KEY, 0x03, { KEY_BRIGHTNESSDOWN } },
{ KE_KEY, 0x270, { KEY_MICMUTE } },
{ KE_KEY, 0x20e6, { KEY_PROG1 } },
{ KE_KEY, 0x20e8, { KEY_MEDIA } },
{ KE_KEY, 0x2142, { KEY_MEDIA } },


@ -44,14 +44,18 @@ static ssize_t store_min_max_freq_khz(struct uncore_data *data,
int min_max)
{
unsigned int input;
int ret;
if (kstrtouint(buf, 10, &input))
return -EINVAL;
mutex_lock(&uncore_lock);
uncore_write(data, input, min_max);
ret = uncore_write(data, input, min_max);
mutex_unlock(&uncore_lock);
if (ret)
return ret;
return count;
}


@ -34,6 +34,7 @@ static int intel_scu_pci_probe(struct pci_dev *pdev,
static const struct pci_device_id pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x080e) },
{ PCI_VDEVICE(INTEL, 0x082a) },
{ PCI_VDEVICE(INTEL, 0x08ea) },
{ PCI_VDEVICE(INTEL, 0x0a94) },
{ PCI_VDEVICE(INTEL, 0x11a0) },


@ -10318,6 +10318,7 @@ static atomic_t dytc_ignore_event = ATOMIC_INIT(0);
static DEFINE_MUTEX(dytc_mutex);
static int dytc_capabilities;
static bool dytc_mmc_get_available;
static int profile_force;
static int convert_dytc_to_profile(int funcmode, int dytcmode,
enum platform_profile_option *profile)
@ -10580,6 +10581,21 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
if (err)
return err;
/* Check if user wants to override the profile selection */
if (profile_force) {
switch (profile_force) {
case -1:
dytc_capabilities = 0;
break;
case 1:
dytc_capabilities = BIT(DYTC_FC_MMC);
break;
case 2:
dytc_capabilities = BIT(DYTC_FC_PSC);
break;
}
pr_debug("Profile selection forced: 0x%x\n", dytc_capabilities);
}
if (dytc_capabilities & BIT(DYTC_FC_MMC)) { /* MMC MODE */
pr_debug("MMC is supported\n");
/*
@ -10593,11 +10609,6 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
dytc_mmc_get_available = true;
}
} else if (dytc_capabilities & BIT(DYTC_FC_PSC)) { /* PSC MODE */
/* Support for this only works on AMD platforms */
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
dbg_printk(TPACPI_DBG_INIT, "PSC not support on Intel platforms\n");
return -ENODEV;
}
pr_debug("PSC is supported\n");
} else {
dbg_printk(TPACPI_DBG_INIT, "No DYTC support available\n");
@ -11646,6 +11657,9 @@ MODULE_PARM_DESC(uwb_state,
"Initial state of the emulated UWB switch");
#endif
module_param(profile_force, int, 0444);
MODULE_PARM_DESC(profile_force, "Force profile mode. -1=off, 1=MMC, 2=PSC");
static void thinkpad_acpi_module_exit(void)
{
struct ibm_struct *ibm, *itmp;


@ -336,6 +336,22 @@ static const struct ts_dmi_data dexp_ursus_7w_data = {
.properties = dexp_ursus_7w_props,
};
static const struct property_entry dexp_ursus_kx210i_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 5),
PROPERTY_ENTRY_U32("touchscreen-min-y", 2),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1720),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1137),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-dexp-ursus-kx210i.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
static const struct ts_dmi_data dexp_ursus_kx210i_data = {
.acpi_name = "MSSL1680:00",
.properties = dexp_ursus_kx210i_props,
};
static const struct property_entry digma_citi_e200_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
@ -378,6 +394,11 @@ static const struct ts_dmi_data gdix1001_01_upside_down_data = {
.properties = gdix1001_upside_down_props,
};
static const struct ts_dmi_data gdix1002_00_upside_down_data = {
.acpi_name = "GDIX1002:00",
.properties = gdix1001_upside_down_props,
};
static const struct property_entry gp_electronic_t701_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 960),
PROPERTY_ENTRY_U32("touchscreen-size-y", 640),
@ -1185,6 +1206,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "7W"),
},
},
{
/* DEXP Ursus KX210i */
.driver_data = (void *)&dexp_ursus_kx210i_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "INSYDE Corp."),
DMI_MATCH(DMI_PRODUCT_NAME, "S107I"),
},
},
{
/* Digma Citi E200 */
.driver_data = (void *)&digma_citi_e200_data,
@ -1295,6 +1324,18 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
DMI_MATCH(DMI_BIOS_VERSION, "jumperx.T87.KFBNEEA"),
},
},
{
/* Juno Tablet */
.driver_data = (void *)&gdix1002_00_upside_down_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Default string"),
/* Both product- and board-name being "Default string" is somewhat rare */
DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
DMI_MATCH(DMI_BOARD_NAME, "Default string"),
/* Above matches are too generic, add partial bios-version match */
DMI_MATCH(DMI_BIOS_VERSION, "JP2V1."),
},
},
{
/* Mediacom WinPad 7.0 W700 (same hw as Wintron surftab 7") */
.driver_data = (void *)&trekstor_surftab_wintron70_data,


@ -971,8 +971,7 @@ config SCSI_SYM53C8XX_MMIO
config SCSI_IPR
tristate "IBM Power Linux RAID adapter support"
depends on PCI && SCSI && ATA
select SATA_HOST
depends on PCI && SCSI
select FW_LOADER
select IRQ_POLL
select SGL_ALLOC

File diff suppressed because it is too large


@ -16,7 +16,6 @@
#include <asm/unaligned.h>
#include <linux/types.h>
#include <linux/completion.h>
#include <linux/libata.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/irq_poll.h>
@ -35,7 +34,6 @@
* This can be adjusted at runtime through sysfs device attributes.
*/
#define IPR_MAX_CMD_PER_LUN 6
#define IPR_MAX_CMD_PER_ATA_LUN 1
/*
* IPR_NUM_BASE_CMD_BLKS: This defines the maximum number of
@ -197,7 +195,6 @@
#define IPR_LUN_RESET 0x40
#define IPR_TARGET_RESET 0x20
#define IPR_BUS_RESET 0x10
#define IPR_ATA_PHY_RESET 0x80
#define IPR_ID_HOST_RR_Q 0xC4
#define IPR_QUERY_IOA_CONFIG 0xC5
#define IPR_CANCEL_ALL_REQUESTS 0xCE
@ -521,7 +518,6 @@ struct ipr_cmd_pkt {
#define IPR_RQTYPE_SCSICDB 0x00
#define IPR_RQTYPE_IOACMD 0x01
#define IPR_RQTYPE_HCAM 0x02
#define IPR_RQTYPE_ATA_PASSTHRU 0x04
#define IPR_RQTYPE_PIPE 0x05
u8 reserved2;
@ -546,30 +542,6 @@ struct ipr_cmd_pkt {
__be16 timeout;
}__attribute__ ((packed, aligned(4)));
struct ipr_ioarcb_ata_regs { /* 22 bytes */
u8 flags;
#define IPR_ATA_FLAG_PACKET_CMD 0x80
#define IPR_ATA_FLAG_XFER_TYPE_DMA 0x40
#define IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION 0x20
u8 reserved[3];
__be16 data;
u8 feature;
u8 nsect;
u8 lbal;
u8 lbam;
u8 lbah;
u8 device;
u8 command;
u8 reserved2[3];
u8 hob_feature;
u8 hob_nsect;
u8 hob_lbal;
u8 hob_lbam;
u8 hob_lbah;
u8 ctl;
}__attribute__ ((packed, aligned(2)));
struct ipr_ioadl_desc {
__be32 flags_and_data_len;
#define IPR_IOADL_FLAGS_MASK 0xff000000
@ -591,15 +563,8 @@ struct ipr_ioadl64_desc {
__be64 address;
}__attribute__((packed, aligned (16)));
struct ipr_ata64_ioadl {
struct ipr_ioarcb_ata_regs regs;
u16 reserved[5];
struct ipr_ioadl64_desc ioadl64[IPR_NUM_IOADL_ENTRIES];
}__attribute__((packed, aligned (16)));
struct ipr_ioarcb_add_data {
union {
struct ipr_ioarcb_ata_regs regs;
struct ipr_ioadl_desc ioadl[5];
__be32 add_cmd_parms[10];
} u;
@ -665,21 +630,6 @@ struct ipr_ioasa_gpdd {
__be32 ioa_data[2];
}__attribute__((packed, aligned (4)));
struct ipr_ioasa_gata {
u8 error;
u8 nsect; /* Interrupt reason */
u8 lbal;
u8 lbam;
u8 lbah;
u8 device;
u8 status;
u8 alt_status; /* ATA CTL */
u8 hob_nsect;
u8 hob_lbal;
u8 hob_lbam;
u8 hob_lbah;
}__attribute__((packed, aligned (4)));
struct ipr_auto_sense {
__be16 auto_sense_len;
__be16 ioa_data_len;
@ -713,7 +663,6 @@ struct ipr_ioasa_hdr {
__be32 ioasc_specific; /* status code specific field */
#define IPR_ADDITIONAL_STATUS_FMT 0x80000000
#define IPR_AUTOSENSE_VALID 0x40000000
#define IPR_ATA_DEVICE_WAS_RESET 0x20000000
#define IPR_IOASC_SPECIFIC_MASK 0x00ffffff
#define IPR_FIELD_POINTER_VALID (0x80000000 >> 8)
#define IPR_FIELD_POINTER_MASK 0x0000ffff
@ -727,7 +676,6 @@ struct ipr_ioasa {
struct ipr_ioasa_vset vset;
struct ipr_ioasa_af_dasd dasd;
struct ipr_ioasa_gpdd gpdd;
struct ipr_ioasa_gata gata;
} u;
struct ipr_auto_sense auto_sense;
@ -741,7 +689,6 @@ struct ipr_ioasa64 {
struct ipr_ioasa_vset vset;
struct ipr_ioasa_af_dasd dasd;
struct ipr_ioasa_gpdd gpdd;
struct ipr_ioasa_gata gata;
} u;
struct ipr_auto_sense auto_sense;
@ -1279,13 +1226,6 @@ struct ipr_bus_attributes {
u32 max_xfer_rate;
};
struct ipr_sata_port {
struct ipr_ioa_cfg *ioa_cfg;
struct ata_port *ap;
struct ipr_resource_entry *res;
struct ipr_ioasa_gata ioasa;
};
struct ipr_resource_entry {
u8 needs_sync_complete:1;
u8 in_erp:1;
@ -1323,7 +1263,6 @@ struct ipr_resource_entry {
struct ipr_ioa_cfg *ioa_cfg;
struct scsi_device *sdev;
struct ipr_sata_port *sata_port;
struct list_head queue;
}; /* struct ipr_resource_entry */
@ -1582,7 +1521,6 @@ struct ipr_ioa_cfg {
struct ipr_cmnd *reset_cmd;
int (*reset) (struct ipr_cmnd *);
struct ata_host ata_host;
char ipr_cmd_label[8];
#define IPR_CMD_LABEL "ipr_cmd"
u32 max_cmds;
@ -1604,7 +1542,6 @@ struct ipr_cmnd {
union {
struct ipr_ioadl_desc ioadl[IPR_NUM_IOADL_ENTRIES];
struct ipr_ioadl64_desc ioadl64[IPR_NUM_IOADL_ENTRIES];
struct ipr_ata64_ioadl ata_ioadl;
} i;
union {
struct ipr_ioasa ioasa;
@ -1612,7 +1549,6 @@ struct ipr_cmnd {
} s;
struct list_head queue;
struct scsi_cmnd *scsi_cmd;
struct ata_queued_cmd *qc;
struct completion completion;
struct timer_list timer;
struct work_struct work;


@ -3362,8 +3362,9 @@ int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_dev = ccb->device;
status = le32_to_cpu(registerRespPayload->status);
device_id = le32_to_cpu(registerRespPayload->device_id);
pm8001_dbg(pm8001_ha, MSG, " register device is status = %d\n",
status);
pm8001_dbg(pm8001_ha, INIT,
"register device status %d phy_id 0x%x device_id %d\n",
status, pm8001_dev->attached_phy, device_id);
switch (status) {
case DEVREG_SUCCESS:
pm8001_dbg(pm8001_ha, MSG, "DEVREG_SUCCESS\n");
@ -4278,7 +4279,7 @@ int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha,
memset(&payload, 0, sizeof(payload));
payload.tag = cpu_to_le32(1);
payload.device_id = cpu_to_le32(device_id);
pm8001_dbg(pm8001_ha, MSG, "unregister device device_id = %d\n",
pm8001_dbg(pm8001_ha, INIT, "unregister device device_id %d\n",
device_id);
return pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,


@ -2450,6 +2450,9 @@ static void __qedi_remove(struct pci_dev *pdev, int mode)
qedi_ops->ll2->stop(qedi->cdev);
}
cancel_delayed_work_sync(&qedi->recovery_work);
cancel_delayed_work_sync(&qedi->board_disable_work);
qedi_free_iscsi_pf_param(qedi);
rval = qedi_ops->common->update_drv_state(qedi->cdev, false);


@ -5291,6 +5291,26 @@ static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
return SUCCESS;
}
static bool scsi_debug_stop_all_queued_iter(struct request *rq, void *data)
{
struct scsi_device *sdp = data;
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
if (scmd->device == sdp)
scsi_debug_abort_cmnd(scmd);
return true;
}
/* Deletes (stops) timers or work queues of all queued commands per sdev */
static void scsi_debug_stop_all_queued(struct scsi_device *sdp)
{
struct Scsi_Host *shost = sdp->host;
blk_mq_tagset_busy_iter(&shost->tag_set,
scsi_debug_stop_all_queued_iter, sdp);
}
static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
{
struct scsi_device *sdp = SCpnt->device;
@ -5300,6 +5320,8 @@ static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
scsi_debug_stop_all_queued(sdp);
if (devip)
set_bit(SDEBUG_UA_POR, devip->uas_bm);


@ -299,11 +299,11 @@ EXPORT_SYMBOL_GPL(ufshcd_mcq_poll_cqe_nolock);
unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
struct ufs_hw_queue *hwq)
{
unsigned long completed_reqs;
unsigned long completed_reqs, flags;
spin_lock(&hwq->cq_lock);
spin_lock_irqsave(&hwq->cq_lock, flags);
completed_reqs = ufshcd_mcq_poll_cqe_nolock(hba, hwq);
spin_unlock(&hwq->cq_lock);
spin_unlock_irqrestore(&hwq->cq_lock, flags);
return completed_reqs;
}


@ -115,11 +115,12 @@ static struct folio *afs_dir_get_folio(struct afs_vnode *vnode, pgoff_t index)
folio = __filemap_get_folio(mapping, index,
FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
mapping->gfp_mask);
if (IS_ERR(folio))
if (IS_ERR(folio)) {
clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
else if (folio && !folio_test_private(folio))
return NULL;
}
if (!folio_test_private(folio))
folio_attach_private(folio, (void *)1);
return folio;
}


@ -124,7 +124,8 @@ static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
} else {
num_bytes = 0;
}
if (block_rsv->qgroup_rsv_reserved >= block_rsv->qgroup_rsv_size) {
if (qgroup_to_release_ret &&
block_rsv->qgroup_rsv_reserved >= block_rsv->qgroup_rsv_size) {
qgroup_to_release = block_rsv->qgroup_rsv_reserved -
block_rsv->qgroup_rsv_size;
block_rsv->qgroup_rsv_reserved = block_rsv->qgroup_rsv_size;


@ -2627,6 +2627,10 @@ static bool check_sibling_keys(struct extent_buffer *left,
}
if (btrfs_comp_cpu_keys(&left_last, &right_first) >= 0) {
btrfs_crit(left->fs_info, "left extent buffer:");
btrfs_print_tree(left, false);
btrfs_crit(left->fs_info, "right extent buffer:");
btrfs_print_tree(right, false);
btrfs_crit(left->fs_info,
"bad key order, sibling blocks, left last (%llu %u %llu) right first (%llu %u %llu)",
left_last.objectid, left_last.type,
@ -3215,6 +3219,7 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
if (check_sibling_keys(left, right)) {
ret = -EUCLEAN;
btrfs_abort_transaction(trans, ret);
btrfs_tree_unlock(right);
free_extent_buffer(right);
return ret;
@ -3433,6 +3438,7 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
if (check_sibling_keys(left, right)) {
ret = -EUCLEAN;
btrfs_abort_transaction(trans, ret);
goto out;
}
return __push_leaf_left(trans, path, min_data_size, empty, left,
@ -4478,10 +4484,12 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
struct btrfs_key key;
struct btrfs_key orig_key;
struct btrfs_disk_key found_key;
int ret;
btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
orig_key = key;
if (key.offset > 0) {
key.offset--;
@ -4498,8 +4506,36 @@ int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
btrfs_release_path(path);
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
if (ret <= 0)
return ret;
/*
* Previous key not found. Even if we were at slot 0 of the leaf we had
* before releasing the path and calling btrfs_search_slot(), we now may
* be in a slot pointing to the same original key - this can happen if
* after we released the path, one or more items were moved from a
* sibling leaf into the front of the leaf we had due to an insertion
* (see push_leaf_right()).
* If we hit this case and our slot is > 0, just decrement the slot
* so that the caller does not process the same key again, which may or
* may not break the caller, depending on its logic.
*/
if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) {
btrfs_item_key(path->nodes[0], &found_key, path->slots[0]);
ret = comp_keys(&found_key, &orig_key);
if (ret == 0) {
if (path->slots[0] > 0) {
path->slots[0]--;
return 0;
}
/*
* At slot 0, same key as before, it means orig_key is
* the lowest, leftmost, key in the tree. We're done.
*/
return 1;
}
}
btrfs_item_key(path->nodes[0], &found_key, 0);
ret = comp_keys(&found_key, &key);
/*


@ -52,13 +52,13 @@ void btrfs_inode_safe_disk_i_size_write(struct btrfs_inode *inode, u64 new_i_siz
u64 start, end, i_size;
int ret;
spin_lock(&inode->lock);
i_size = new_i_size ?: i_size_read(&inode->vfs_inode);
if (btrfs_fs_incompat(fs_info, NO_HOLES)) {
inode->disk_i_size = i_size;
return;
goto out_unlock;
}
spin_lock(&inode->lock);
ret = find_contiguous_extent_bit(&inode->file_extent_tree, 0, &start,
&end, EXTENT_DIRTY);
if (!ret && start == 0)
@ -66,6 +66,7 @@ void btrfs_inode_safe_disk_i_size_write(struct btrfs_inode *inode, u64 new_i_siz
else
i_size = 0;
inode->disk_i_size = i_size;
out_unlock:
spin_unlock(&inode->lock);
}


@ -454,7 +454,9 @@ void btrfs_exclop_balance(struct btrfs_fs_info *fs_info,
case BTRFS_EXCLOP_BALANCE_PAUSED:
spin_lock(&fs_info->super_lock);
ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE ||
fs_info->exclusive_operation == BTRFS_EXCLOP_DEV_ADD);
fs_info->exclusive_operation == BTRFS_EXCLOP_DEV_ADD ||
fs_info->exclusive_operation == BTRFS_EXCLOP_NONE ||
fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED);
fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE_PAUSED;
spin_unlock(&fs_info->super_lock);
break;


@ -826,7 +826,12 @@ out:
!btrfs_test_opt(info, CLEAR_CACHE)) {
btrfs_err(info, "cannot disable free space tree");
ret = -EINVAL;
}
if (btrfs_fs_compat_ro(info, BLOCK_GROUP_TREE) &&
(btrfs_test_opt(info, CLEAR_CACHE) ||
!btrfs_test_opt(info, FREE_SPACE_TREE))) {
btrfs_err(info, "cannot disable free space tree with block-group-tree feature");
ret = -EINVAL;
}
if (!ret)
ret = btrfs_check_mountopts_zoned(info);


@ -395,6 +395,7 @@ void btrfs_free_device(struct btrfs_device *device)
{
WARN_ON(!list_empty(&device->post_commit_list));
rcu_string_free(device->name);
extent_io_tree_release(&device->alloc_state);
btrfs_destroy_dev_zone_info(device);
kfree(device);
}


@ -1168,12 +1168,12 @@ int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size)
return -ERANGE;
/* All the zones are conventional */
if (find_next_bit(zinfo->seq_zones, begin, end) == end)
if (find_next_bit(zinfo->seq_zones, end, begin) == end)
return 0;
/* All the zones are sequential and empty */
if (find_next_zero_bit(zinfo->seq_zones, begin, end) == end &&
find_next_zero_bit(zinfo->empty_zones, begin, end) == end)
if (find_next_zero_bit(zinfo->seq_zones, end, begin) == end &&
find_next_zero_bit(zinfo->empty_zones, end, begin) == end)
return 0;
for (pos = start; pos < start + size; pos += zinfo->zone_size) {


@ -280,8 +280,10 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
seq_printf(m, "\n%d) ConnectionId: 0x%llx ",
c, server->conn_id);
spin_lock(&server->srv_lock);
if (server->hostname)
seq_printf(m, "Hostname: %s ", server->hostname);
spin_unlock(&server->srv_lock);
#ifdef CONFIG_CIFS_SMB_DIRECT
if (!server->rdma)
goto skip_rdma;
@ -623,10 +625,13 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
server->fastest_cmd[j],
server->slowest_cmd[j]);
for (j = 0; j < NUMBER_OF_SMB2_COMMANDS; j++)
if (atomic_read(&server->smb2slowcmd[j]))
if (atomic_read(&server->smb2slowcmd[j])) {
spin_lock(&server->srv_lock);
seq_printf(m, " %d slow responses from %s for command %d\n",
atomic_read(&server->smb2slowcmd[j]),
server->hostname, j);
spin_unlock(&server->srv_lock);
}
#endif /* STATS2 */
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {


@ -81,19 +81,19 @@ do { \
#define cifs_server_dbg_func(ratefunc, type, fmt, ...) \
do { \
const char *sn = ""; \
if (server && server->hostname) \
sn = server->hostname; \
spin_lock(&server->srv_lock); \
if ((type) & FYI && cifsFYI & CIFS_INFO) { \
pr_debug_ ## ratefunc("%s: \\\\%s " fmt, \
__FILE__, sn, ##__VA_ARGS__); \
__FILE__, server->hostname, \
##__VA_ARGS__); \
} else if ((type) & VFS) { \
pr_err_ ## ratefunc("VFS: \\\\%s " fmt, \
sn, ##__VA_ARGS__); \
server->hostname, ##__VA_ARGS__); \
} else if ((type) & NOISY && (NOISY != 0)) { \
pr_debug_ ## ratefunc("\\\\%s " fmt, \
sn, ##__VA_ARGS__); \
server->hostname, ##__VA_ARGS__); \
} \
spin_unlock(&server->srv_lock); \
} while (0)
#define cifs_server_dbg(type, fmt, ...) \


@ -874,14 +874,12 @@ cifs_smb3_do_mount(struct file_system_type *fs_type,
struct cifs_mnt_data mnt_data;
struct dentry *root;
/*
* Prints in Kernel / CIFS log the attempted mount operation
* If CIFS_DEBUG && cifs_FYI
*/
if (cifsFYI)
cifs_dbg(FYI, "Devname: %s flags: %d\n", old_ctx->UNC, flags);
else
cifs_info("Attempting to mount %s\n", old_ctx->UNC);
if (cifsFYI) {
cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__,
old_ctx->source, flags);
} else {
cifs_info("Attempting to mount %s\n", old_ctx->source);
}
cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
if (cifs_sb == NULL) {


@ -736,17 +736,23 @@ struct TCP_Server_Info {
#endif
struct mutex refpath_lock; /* protects leaf_fullpath */
/*
* Canonical DFS full paths that were used to chase referrals in mount and reconnect.
* origin_fullpath: Canonical copy of smb3_fs_context::source.
* It is used for matching existing DFS tcons.
*
* origin_fullpath: first or original referral path
* leaf_fullpath: last referral path (might be changed due to nested links in reconnect)
* leaf_fullpath: Canonical DFS referral path related to this
* connection.
* It is used in DFS cache refresher, reconnect and may
* change due to nested DFS links.
*
* current_fullpath: pointer to either origin_fullpath or leaf_fullpath
* NOTE: cannot be accessed outside cifs_reconnect() and smb2_reconnect()
* Both protected by @refpath_lock and @srv_lock. The @refpath_lock is
* mostly used for not requiring a copy of @leaf_fullpath when getting
* cached or new DFS referrals (which might also sleep during I/O).
* While @srv_lock is held for making string and NULL comparisons against
* both fields as in mount(2) and cache refresh.
*
* format: \\HOST\SHARE\[OPTIONAL PATH]
* format: \\HOST\SHARE[\OPTIONAL PATH]
*/
char *origin_fullpath, *leaf_fullpath, *current_fullpath;
char *origin_fullpath, *leaf_fullpath;
};
static inline bool is_smb1(struct TCP_Server_Info *server)
@ -1232,8 +1238,8 @@ struct cifs_tcon {
struct cached_fids *cfids;
/* BB add field for back pointer to sb struct(s)? */
#ifdef CONFIG_CIFS_DFS_UPCALL
struct list_head ulist; /* cache update list */
struct list_head dfs_ses_list;
struct delayed_work dfs_cache_work;
#endif
struct delayed_work query_interfaces; /* query interfaces workqueue job */
};
@ -1750,7 +1756,6 @@ struct cifs_mount_ctx {
struct TCP_Server_Info *server;
struct cifs_ses *ses;
struct cifs_tcon *tcon;
char *origin_fullpath, *leaf_fullpath;
struct list_head dfs_ses_list;
};


@ -8,6 +8,7 @@
#ifndef _CIFSPROTO_H
#define _CIFSPROTO_H
#include <linux/nls.h>
#include <linux/ctype.h>
#include "trace.h"
#ifdef CONFIG_CIFS_DFS_UPCALL
#include "dfs_cache.h"
@ -572,7 +573,7 @@ extern int E_md4hash(const unsigned char *passwd, unsigned char *p16,
extern struct TCP_Server_Info *
cifs_find_tcp_session(struct smb3_fs_context *ctx);
extern void cifs_put_smb_ses(struct cifs_ses *ses);
void __cifs_put_smb_ses(struct cifs_ses *ses);
extern struct cifs_ses *
cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx);
@ -696,4 +697,45 @@ struct super_block *cifs_get_tcon_super(struct cifs_tcon *tcon);
void cifs_put_tcon_super(struct super_block *sb);
int cifs_wait_for_server_reconnect(struct TCP_Server_Info *server, bool retry);
/* Put references of @ses and @ses->dfs_root_ses */
static inline void cifs_put_smb_ses(struct cifs_ses *ses)
{
struct cifs_ses *rses = ses->dfs_root_ses;
__cifs_put_smb_ses(ses);
if (rses)
__cifs_put_smb_ses(rses);
}
/* Get an active reference of @ses and @ses->dfs_root_ses.
*
* NOTE: make sure to call this function when incrementing reference count of
* @ses to ensure that any DFS root session attached to it (@ses->dfs_root_ses)
* will also get its reference count incremented.
*
* cifs_put_smb_ses() will put both references, so call it when you're done.
*/
static inline void cifs_smb_ses_inc_refcount(struct cifs_ses *ses)
{
lockdep_assert_held(&cifs_tcp_ses_lock);
ses->ses_count++;
if (ses->dfs_root_ses)
ses->dfs_root_ses->ses_count++;
}
static inline bool dfs_src_pathname_equal(const char *s1, const char *s2)
{
if (strlen(s1) != strlen(s2))
return false;
for (; *s1; s1++, s2++) {
if (*s1 == '/' || *s1 == '\\') {
if (*s2 != '/' && *s2 != '\\')
return false;
} else if (tolower(*s1) != tolower(*s2))
return false;
}
return true;
}
#endif /* _CIFSPROTO_H */


@ -403,8 +403,10 @@ static int __reconnect_target_unlocked(struct TCP_Server_Info *server, const cha
if (server->hostname != target) {
hostname = extract_hostname(target);
if (!IS_ERR(hostname)) {
spin_lock(&server->srv_lock);
kfree(server->hostname);
server->hostname = hostname;
spin_unlock(&server->srv_lock);
} else {
cifs_dbg(FYI, "%s: couldn't extract hostname or address from dfs target: %ld\n",
__func__, PTR_ERR(hostname));
@ -452,7 +454,6 @@ static int reconnect_target_unlocked(struct TCP_Server_Info *server, struct dfs_
static int reconnect_dfs_server(struct TCP_Server_Info *server)
{
int rc = 0;
const char *refpath = server->current_fullpath + 1;
struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
struct dfs_cache_tgt_iterator *target_hint = NULL;
int num_targets = 0;
@ -465,8 +466,10 @@ static int reconnect_dfs_server(struct TCP_Server_Info *server)
* through /proc/fs/cifs/dfscache or the target list is empty due to server settings after
* refreshing the referral, so, in this case, default it to 1.
*/
if (!dfs_cache_noreq_find(refpath, NULL, &tl))
mutex_lock(&server->refpath_lock);
if (!dfs_cache_noreq_find(server->leaf_fullpath + 1, NULL, &tl))
num_targets = dfs_cache_get_nr_tgts(&tl);
mutex_unlock(&server->refpath_lock);
if (!num_targets)
num_targets = 1;
@ -510,7 +513,9 @@ static int reconnect_dfs_server(struct TCP_Server_Info *server)
mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
} while (server->tcpStatus == CifsNeedReconnect);
dfs_cache_noreq_update_tgthint(refpath, target_hint);
mutex_lock(&server->refpath_lock);
dfs_cache_noreq_update_tgthint(server->leaf_fullpath + 1, target_hint);
mutex_unlock(&server->refpath_lock);
dfs_cache_free_tgts(&tl);
/* Need to set up echo worker again once connection has been established */
@ -561,9 +566,7 @@ cifs_echo_request(struct work_struct *work)
goto requeue_echo;
rc = server->ops->echo ? server->ops->echo(server) : -ENOSYS;
if (rc)
cifs_dbg(FYI, "Unable to send echo request to server: %s\n",
server->hostname);
cifs_server_dbg(FYI, "send echo request: rc = %d\n", rc);
/* Check witness registrations */
cifs_swn_check();
@ -993,10 +996,8 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
*/
}
#ifdef CONFIG_CIFS_DFS_UPCALL
kfree(server->origin_fullpath);
kfree(server->leaf_fullpath);
#endif
kfree(server);
length = atomic_dec_return(&tcpSesAllocCount);
@ -1384,26 +1385,13 @@ match_security(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
return true;
}
static bool dfs_src_pathname_equal(const char *s1, const char *s2)
{
if (strlen(s1) != strlen(s2))
return false;
for (; *s1; s1++, s2++) {
if (*s1 == '/' || *s1 == '\\') {
if (*s2 != '/' && *s2 != '\\')
return false;
} else if (tolower(*s1) != tolower(*s2))
return false;
}
return true;
}
/* this function must be called with srv_lock held */
static int match_server(struct TCP_Server_Info *server, struct smb3_fs_context *ctx,
bool dfs_super_cmp)
static int match_server(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
{
struct sockaddr *addr = (struct sockaddr *)&ctx->dstaddr;
lockdep_assert_held(&server->srv_lock);
if (ctx->nosharesock)
return 0;
@ -1429,27 +1417,41 @@ static int match_server(struct TCP_Server_Info *server, struct smb3_fs_context *
(struct sockaddr *)&server->srcaddr))
return 0;
/*
* When matching DFS superblocks, we only check for original source pathname as the
* currently connected target might be different than the one parsed earlier in i.e.
* mount.cifs(8).
* - Match for a DFS tcon (@server->origin_fullpath).
* - Match for a DFS root server connection (@server->leaf_fullpath).
* - If none of the above and @ctx->leaf_fullpath is set, then
* it is a new DFS connection.
* - If 'nodfs' mount option was passed, then match only connections
* that have no DFS referrals set
* (e.g. can't failover to other targets).
*/
if (dfs_super_cmp) {
if (!ctx->source || !server->origin_fullpath ||
!dfs_src_pathname_equal(server->origin_fullpath, ctx->source))
return 0;
} else {
/* Skip addr, hostname and port matching for DFS connections */
if (server->leaf_fullpath) {
if (!ctx->leaf_fullpath ||
strcasecmp(server->leaf_fullpath, ctx->leaf_fullpath))
if (!ctx->nodfs) {
if (ctx->source && server->origin_fullpath) {
if (!dfs_src_pathname_equal(ctx->source,
server->origin_fullpath))
return 0;
} else if (strcasecmp(server->hostname, ctx->server_hostname) ||
!match_server_address(server, addr) ||
!match_port(server, addr)) {
} else if (server->leaf_fullpath) {
if (!ctx->leaf_fullpath ||
strcasecmp(server->leaf_fullpath,
ctx->leaf_fullpath))
return 0;
} else if (ctx->leaf_fullpath) {
return 0;
}
} else if (server->origin_fullpath || server->leaf_fullpath) {
return 0;
}
/*
* Match for a regular connection (address/hostname/port) which has no
* DFS referrals set.
*/
if (!server->origin_fullpath && !server->leaf_fullpath &&
(strcasecmp(server->hostname, ctx->server_hostname) ||
!match_server_address(server, addr) ||
!match_port(server, addr)))
return 0;
if (!match_security(server, ctx))
return 0;
@ -1480,7 +1482,7 @@ cifs_find_tcp_session(struct smb3_fs_context *ctx)
* Skip ses channels since they're only handled in lower layers
* (e.g. cifs_send_recv).
*/
if (CIFS_SERVER_IS_CHAN(server) || !match_server(server, ctx, false)) {
if (CIFS_SERVER_IS_CHAN(server) || !match_server(server, ctx)) {
spin_unlock(&server->srv_lock);
continue;
}
@ -1580,7 +1582,6 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
rc = -ENOMEM;
goto out_err;
}
tcp_ses->current_fullpath = tcp_ses->leaf_fullpath;
}
if (ctx->nosharesock)
@ -1810,7 +1811,9 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx)
if (tcon == NULL)
return -ENOMEM;
spin_lock(&server->srv_lock);
scnprintf(unc, sizeof(unc), "\\\\%s\\IPC$", server->hostname);
spin_unlock(&server->srv_lock);
xid = get_xid();
tcon->ses = ses;
@ -1863,7 +1866,7 @@ cifs_free_ipc(struct cifs_ses *ses)
static struct cifs_ses *
cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
{
struct cifs_ses *ses;
struct cifs_ses *ses, *ret = NULL;
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
@ -1873,23 +1876,22 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
continue;
}
spin_lock(&ses->chan_lock);
if (!match_session(ses, ctx)) {
if (match_session(ses, ctx)) {
spin_unlock(&ses->chan_lock);
spin_unlock(&ses->ses_lock);
continue;
ret = ses;
break;
}
spin_unlock(&ses->chan_lock);
spin_unlock(&ses->ses_lock);
++ses->ses_count;
spin_unlock(&cifs_tcp_ses_lock);
return ses;
}
if (ret)
cifs_smb_ses_inc_refcount(ret);
spin_unlock(&cifs_tcp_ses_lock);
return NULL;
return ret;
}
void cifs_put_smb_ses(struct cifs_ses *ses)
void __cifs_put_smb_ses(struct cifs_ses *ses)
{
unsigned int rc, xid;
unsigned int chan_count;
@ -2244,6 +2246,8 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
*/
spin_lock(&cifs_tcp_ses_lock);
ses->dfs_root_ses = ctx->dfs_root_ses;
if (ses->dfs_root_ses)
ses->dfs_root_ses->ses_count++;
list_add(&ses->smb_ses_list, &server->smb_ses_list);
spin_unlock(&cifs_tcp_ses_lock);
@ -2260,12 +2264,15 @@ get_ses_fail:
}
/* this function must be called with tc_lock held */
static int match_tcon(struct cifs_tcon *tcon, struct smb3_fs_context *ctx, bool dfs_super_cmp)
static int match_tcon(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
{
struct TCP_Server_Info *server = tcon->ses->server;
if (tcon->status == TID_EXITING)
return 0;
/* Skip UNC validation when matching DFS superblocks */
if (!dfs_super_cmp && strncmp(tcon->tree_name, ctx->UNC, MAX_TREE_SIZE))
/* Skip UNC validation when matching DFS connections or superblocks */
if (!server->origin_fullpath && !server->leaf_fullpath &&
strncmp(tcon->tree_name, ctx->UNC, MAX_TREE_SIZE))
return 0;
if (tcon->seal != ctx->seal)
return 0;
@ -2288,7 +2295,7 @@ cifs_find_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
spin_lock(&tcon->tc_lock);
if (!match_tcon(tcon, ctx, false)) {
if (!match_tcon(tcon, ctx)) {
spin_unlock(&tcon->tc_lock);
continue;
}
@ -2334,6 +2341,9 @@ cifs_put_tcon(struct cifs_tcon *tcon)
/* cancel polling of interfaces */
cancel_delayed_work_sync(&tcon->query_interfaces);
#ifdef CONFIG_CIFS_DFS_UPCALL
cancel_delayed_work_sync(&tcon->dfs_cache_work);
#endif
if (tcon->use_witness) {
int rc;
@ -2581,7 +2591,9 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
(SMB_INTERFACE_POLL_INTERVAL * HZ));
}
#ifdef CONFIG_CIFS_DFS_UPCALL
INIT_DELAYED_WORK(&tcon->dfs_cache_work, dfs_cache_refresh);
#endif
spin_lock(&cifs_tcp_ses_lock);
list_add(&tcon->tcon_list, &ses->tcon_list);
spin_unlock(&cifs_tcp_ses_lock);
@ -2659,9 +2671,11 @@ compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
return 1;
}
static int
match_prepath(struct super_block *sb, struct cifs_mnt_data *mnt_data)
static int match_prepath(struct super_block *sb,
struct TCP_Server_Info *server,
struct cifs_mnt_data *mnt_data)
{
struct smb3_fs_context *ctx = mnt_data->ctx;
struct cifs_sb_info *old = CIFS_SB(sb);
struct cifs_sb_info *new = mnt_data->cifs_sb;
bool old_set = (old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
@ -2669,6 +2683,10 @@ match_prepath(struct super_block *sb, struct cifs_mnt_data *mnt_data)
bool new_set = (new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
new->prepath;
if (server->origin_fullpath &&
dfs_src_pathname_equal(server->origin_fullpath, ctx->source))
return 1;
if (old_set && new_set && !strcmp(new->prepath, old->prepath))
return 1;
else if (!old_set && !new_set)
@ -2687,7 +2705,6 @@ cifs_match_super(struct super_block *sb, void *data)
struct cifs_ses *ses;
struct cifs_tcon *tcon;
struct tcon_link *tlink;
bool dfs_super_cmp;
int rc = 0;
spin_lock(&cifs_tcp_ses_lock);
@ -2702,18 +2719,16 @@ cifs_match_super(struct super_block *sb, void *data)
ses = tcon->ses;
tcp_srv = ses->server;
dfs_super_cmp = IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) && tcp_srv->origin_fullpath;
ctx = mnt_data->ctx;
spin_lock(&tcp_srv->srv_lock);
spin_lock(&ses->ses_lock);
spin_lock(&ses->chan_lock);
spin_lock(&tcon->tc_lock);
if (!match_server(tcp_srv, ctx, dfs_super_cmp) ||
if (!match_server(tcp_srv, ctx) ||
!match_session(ses, ctx) ||
!match_tcon(tcon, ctx, dfs_super_cmp) ||
!match_prepath(sb, mnt_data)) {
!match_tcon(tcon, ctx) ||
!match_prepath(sb, tcp_srv, mnt_data)) {
rc = 0;
goto out;
}
@ -3458,8 +3473,6 @@ out:
error:
dfs_put_root_smb_sessions(&mnt_ctx.dfs_ses_list);
kfree(mnt_ctx.origin_fullpath);
kfree(mnt_ctx.leaf_fullpath);
cifs_mount_put_conns(&mnt_ctx);
return rc;
}


@ -99,7 +99,7 @@ static int get_session(struct cifs_mount_ctx *mnt_ctx, const char *full_path)
return rc;
}
static int get_root_smb_session(struct cifs_mount_ctx *mnt_ctx)
static int add_root_smb_session(struct cifs_mount_ctx *mnt_ctx)
{
struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
struct dfs_root_ses *root_ses;
@ -127,7 +127,7 @@ static int get_dfs_conn(struct cifs_mount_ctx *mnt_ctx, const char *ref_path, co
{
struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
struct dfs_info3_param ref = {};
bool is_refsrv = false;
bool is_refsrv;
int rc, rc2;
rc = dfs_cache_get_tgt_referral(ref_path + 1, tit, &ref);
@ -157,8 +157,10 @@ static int get_dfs_conn(struct cifs_mount_ctx *mnt_ctx, const char *ref_path, co
rc = cifs_is_path_remote(mnt_ctx);
}
dfs_cache_noreq_update_tgthint(ref_path + 1, tit);
if (rc == -EREMOTE && is_refsrv) {
rc2 = get_root_smb_session(mnt_ctx);
rc2 = add_root_smb_session(mnt_ctx);
if (rc2)
rc = rc2;
}
@ -248,16 +250,19 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
tcon = mnt_ctx->tcon;
mutex_lock(&server->refpath_lock);
spin_lock(&server->srv_lock);
if (!server->origin_fullpath) {
server->origin_fullpath = origin_fullpath;
server->current_fullpath = server->leaf_fullpath;
origin_fullpath = NULL;
}
spin_unlock(&server->srv_lock);
mutex_unlock(&server->refpath_lock);
if (list_empty(&tcon->dfs_ses_list)) {
list_replace_init(&mnt_ctx->dfs_ses_list,
&tcon->dfs_ses_list);
queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
dfs_cache_get_ttl() * HZ);
} else {
dfs_put_root_smb_sessions(&mnt_ctx->dfs_ses_list);
}
@ -272,15 +277,21 @@ out:
int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs)
{
struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
struct cifs_ses *ses;
char *source = ctx->source;
bool nodfs = ctx->nodfs;
int rc;
*isdfs = false;
/* Temporarily set @ctx->source to NULL as we're not matching DFS
* superblocks yet. See cifs_match_super() and match_server().
*/
ctx->source = NULL;
rc = get_session(mnt_ctx, NULL);
if (rc)
return rc;
goto out;
ctx->dfs_root_ses = mnt_ctx->ses;
/*
* If called with 'nodfs' mount option, then skip DFS resolving. Otherwise unconditionally
@ -289,23 +300,41 @@ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs)
* Skip prefix path to provide support for DFS referrals from w2k8 servers which don't seem
* to respond with PATH_NOT_COVERED to requests that include the prefix.
*/
if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) ||
dfs_get_referral(mnt_ctx, ctx->UNC + 1, NULL, NULL)) {
if (!nodfs) {
rc = dfs_get_referral(mnt_ctx, ctx->UNC + 1, NULL, NULL);
if (rc) {
if (rc != -ENOENT && rc != -EOPNOTSUPP)
goto out;
nodfs = true;
}
}
if (nodfs) {
rc = cifs_mount_get_tcon(mnt_ctx);
if (rc)
return rc;
rc = cifs_is_path_remote(mnt_ctx);
if (!rc || rc != -EREMOTE)
return rc;
if (!rc)
rc = cifs_is_path_remote(mnt_ctx);
goto out;
}
*isdfs = true;
rc = get_root_smb_session(mnt_ctx);
if (rc)
return rc;
return __dfs_mount_share(mnt_ctx);
/*
* Prevent DFS root session from being put in the first call to
* cifs_mount_put_conns(). If another DFS root server was not found
* while chasing the referrals (@ctx->dfs_root_ses == @ses), then we
* can safely put extra refcount of @ses.
*/
ses = mnt_ctx->ses;
mnt_ctx->ses = NULL;
mnt_ctx->server = NULL;
rc = __dfs_mount_share(mnt_ctx);
if (ses == ctx->dfs_root_ses)
cifs_put_smb_ses(ses);
out:
/*
* Restore previous value of @ctx->source so DFS superblock can be
* matched in cifs_match_super().
*/
ctx->source = source;
return rc;
}
/* Update dfs referral path of superblock */
@ -342,10 +371,11 @@ static int update_server_fullpath(struct TCP_Server_Info *server, struct cifs_sb
rc = PTR_ERR(npath);
} else {
mutex_lock(&server->refpath_lock);
spin_lock(&server->srv_lock);
kfree(server->leaf_fullpath);
server->leaf_fullpath = npath;
spin_unlock(&server->srv_lock);
mutex_unlock(&server->refpath_lock);
server->current_fullpath = server->leaf_fullpath;
}
return rc;
}
@ -374,6 +404,54 @@ static int target_share_matches_server(struct TCP_Server_Info *server, char *sha
return rc;
}
static void __tree_connect_ipc(const unsigned int xid, char *tree,
struct cifs_sb_info *cifs_sb,
struct cifs_ses *ses)
{
struct TCP_Server_Info *server = ses->server;
struct cifs_tcon *tcon = ses->tcon_ipc;
int rc;
spin_lock(&ses->ses_lock);
spin_lock(&ses->chan_lock);
if (cifs_chan_needs_reconnect(ses, server) ||
ses->ses_status != SES_GOOD) {
spin_unlock(&ses->chan_lock);
spin_unlock(&ses->ses_lock);
cifs_server_dbg(FYI, "%s: skipping ipc reconnect due to disconnected ses\n",
__func__);
return;
}
spin_unlock(&ses->chan_lock);
spin_unlock(&ses->ses_lock);
cifs_server_lock(server);
scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", server->hostname);
cifs_server_unlock(server);
rc = server->ops->tree_connect(xid, ses, tree, tcon,
cifs_sb->local_nls);
cifs_server_dbg(FYI, "%s: tree_reconnect %s: %d\n", __func__, tree, rc);
spin_lock(&tcon->tc_lock);
if (rc) {
tcon->status = TID_NEED_TCON;
} else {
tcon->status = TID_GOOD;
tcon->need_reconnect = false;
}
spin_unlock(&tcon->tc_lock);
}
static void tree_connect_ipc(const unsigned int xid, char *tree,
struct cifs_sb_info *cifs_sb,
struct cifs_tcon *tcon)
{
struct cifs_ses *ses = tcon->ses;
__tree_connect_ipc(xid, tree, cifs_sb, ses);
__tree_connect_ipc(xid, tree, cifs_sb, CIFS_DFS_ROOT_SES(ses));
}
static int __tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb, char *tree, bool islink,
struct dfs_cache_tgt_list *tl)
@ -382,7 +460,6 @@ static int __tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *t
struct TCP_Server_Info *server = tcon->ses->server;
const struct smb_version_operations *ops = server->ops;
struct cifs_ses *root_ses = CIFS_DFS_ROOT_SES(tcon->ses);
struct cifs_tcon *ipc = root_ses->tcon_ipc;
char *share = NULL, *prefix = NULL;
struct dfs_cache_tgt_iterator *tit;
bool target_match;
@ -403,7 +480,7 @@ static int __tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *t
share = prefix = NULL;
/* Check if share matches with tcp ses */
rc = dfs_cache_get_tgt_share(server->current_fullpath + 1, tit, &share, &prefix);
rc = dfs_cache_get_tgt_share(server->leaf_fullpath + 1, tit, &share, &prefix);
if (rc) {
cifs_dbg(VFS, "%s: failed to parse target share: %d\n", __func__, rc);
break;
@ -417,19 +494,15 @@ static int __tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *t
continue;
}
dfs_cache_noreq_update_tgthint(server->current_fullpath + 1, tit);
if (ipc->need_reconnect) {
scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", server->hostname);
rc = ops->tree_connect(xid, ipc->ses, tree, ipc, cifs_sb->local_nls);
cifs_dbg(FYI, "%s: reconnect ipc: %d\n", __func__, rc);
}
dfs_cache_noreq_update_tgthint(server->leaf_fullpath + 1, tit);
tree_connect_ipc(xid, tree, cifs_sb, tcon);
scnprintf(tree, MAX_TREE_SIZE, "\\%s", share);
if (!islink) {
rc = ops->tree_connect(xid, tcon->ses, tree, tcon, cifs_sb->local_nls);
break;
}
/*
* If no dfs referrals were returned from link target, then just do a TREE_CONNECT
* to it. Otherwise, cache the dfs referral and then mark current tcp ses for
@ -539,8 +612,8 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
cifs_sb = CIFS_SB(sb);
/* If it is not dfs or there was no cached dfs referral, then reconnect to same share */
if (!server->current_fullpath ||
dfs_cache_noreq_find(server->current_fullpath + 1, &ref, &tl)) {
if (!server->leaf_fullpath ||
dfs_cache_noreq_find(server->leaf_fullpath + 1, &ref, &tl)) {
rc = ops->tree_connect(xid, tcon->ses, tcon->tree_name, tcon, cifs_sb->local_nls);
goto out;
}


@ -43,8 +43,12 @@ static inline char *dfs_get_automount_devname(struct dentry *dentry, void *page)
size_t len;
char *s;
if (unlikely(!server->origin_fullpath))
spin_lock(&server->srv_lock);
if (unlikely(!server->origin_fullpath)) {
spin_unlock(&server->srv_lock);
return ERR_PTR(-EREMOTE);
}
spin_unlock(&server->srv_lock);
s = dentry_path_raw(dentry, page, PATH_MAX);
if (IS_ERR(s))
@ -53,13 +57,18 @@ static inline char *dfs_get_automount_devname(struct dentry *dentry, void *page)
if (!s[1])
s++;
spin_lock(&server->srv_lock);
len = strlen(server->origin_fullpath);
if (s < (char *)page + len)
if (s < (char *)page + len) {
spin_unlock(&server->srv_lock);
return ERR_PTR(-ENAMETOOLONG);
}
s -= len;
memcpy(s, server->origin_fullpath, len);
spin_unlock(&server->srv_lock);
convert_delimiter(s, '/');
return s;
}


@ -20,12 +20,14 @@
#include "cifs_unicode.h"
#include "smb2glob.h"
#include "dns_resolve.h"
#include "dfs.h"
#include "dfs_cache.h"
#define CACHE_HTABLE_SIZE 32
#define CACHE_MAX_ENTRIES 64
#define CACHE_MIN_TTL 120 /* 2 minutes */
#define CACHE_HTABLE_SIZE 32
#define CACHE_MAX_ENTRIES 64
#define CACHE_MIN_TTL 120 /* 2 minutes */
#define CACHE_DEFAULT_TTL 300 /* 5 minutes */
#define IS_DFS_INTERLINK(v) (((v) & DFSREF_REFERRAL_SERVER) && !((v) & DFSREF_STORAGE_SERVER))
@ -50,10 +52,9 @@ struct cache_entry {
};
static struct kmem_cache *cache_slab __read_mostly;
static struct workqueue_struct *dfscache_wq __read_mostly;
struct workqueue_struct *dfscache_wq;
static int cache_ttl;
static DEFINE_SPINLOCK(cache_ttl_lock);
atomic_t dfs_cache_ttl;
static struct nls_table *cache_cp;
@ -65,10 +66,6 @@ static atomic_t cache_count;
static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
static DECLARE_RWSEM(htable_rw_lock);
static void refresh_cache_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker);
/**
* dfs_cache_canonical_path - get a canonical DFS path
*
@ -290,7 +287,9 @@ int dfs_cache_init(void)
int rc;
int i;
dfscache_wq = alloc_workqueue("cifs-dfscache", WQ_FREEZABLE | WQ_UNBOUND, 1);
dfscache_wq = alloc_workqueue("cifs-dfscache",
WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM,
0);
if (!dfscache_wq)
return -ENOMEM;
@ -306,6 +305,7 @@ int dfs_cache_init(void)
INIT_HLIST_HEAD(&cache_htable[i]);
atomic_set(&cache_count, 0);
atomic_set(&dfs_cache_ttl, CACHE_DEFAULT_TTL);
cache_cp = load_nls("utf8");
if (!cache_cp)
cache_cp = load_nls_default();
@ -480,6 +480,7 @@ static struct cache_entry *add_cache_entry_locked(struct dfs_info3_param *refs,
int rc;
struct cache_entry *ce;
unsigned int hash;
int ttl;
WARN_ON(!rwsem_is_locked(&htable_rw_lock));
@ -496,15 +497,8 @@ static struct cache_entry *add_cache_entry_locked(struct dfs_info3_param *refs,
if (IS_ERR(ce))
return ce;
spin_lock(&cache_ttl_lock);
if (!cache_ttl) {
cache_ttl = ce->ttl;
queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
} else {
cache_ttl = min_t(int, cache_ttl, ce->ttl);
mod_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
}
spin_unlock(&cache_ttl_lock);
ttl = min_t(int, atomic_read(&dfs_cache_ttl), ce->ttl);
atomic_set(&dfs_cache_ttl, ttl);
hlist_add_head(&ce->hlist, &cache_htable[hash]);
dump_ce(ce);
@ -616,7 +610,6 @@ static struct cache_entry *lookup_cache_entry(const char *path)
*/
void dfs_cache_destroy(void)
{
cancel_delayed_work_sync(&refresh_task);
unload_nls(cache_cp);
flush_cache_ents();
kmem_cache_destroy(cache_slab);
@ -1142,6 +1135,7 @@ static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, c
* target shares in @refs.
*/
static void mark_for_reconnect_if_needed(struct TCP_Server_Info *server,
const char *path,
struct dfs_cache_tgt_list *old_tl,
struct dfs_cache_tgt_list *new_tl)
{
@ -1153,8 +1147,10 @@ static void mark_for_reconnect_if_needed(struct TCP_Server_Info *server,
nit = dfs_cache_get_next_tgt(new_tl, nit)) {
if (target_share_equal(server,
dfs_cache_get_tgt_name(oit),
dfs_cache_get_tgt_name(nit)))
dfs_cache_get_tgt_name(nit))) {
dfs_cache_noreq_update_tgthint(path, nit);
return;
}
}
}
@ -1162,13 +1158,28 @@ static void mark_for_reconnect_if_needed(struct TCP_Server_Info *server,
cifs_signal_cifsd_for_reconnect(server, true);
}
static bool is_ses_good(struct cifs_ses *ses)
{
struct TCP_Server_Info *server = ses->server;
struct cifs_tcon *tcon = ses->tcon_ipc;
bool ret;
spin_lock(&ses->ses_lock);
spin_lock(&ses->chan_lock);
ret = !cifs_chan_needs_reconnect(ses, server) &&
ses->ses_status == SES_GOOD &&
!tcon->need_reconnect;
spin_unlock(&ses->chan_lock);
spin_unlock(&ses->ses_lock);
return ret;
}
/* Refresh dfs referral of tcon and mark it for reconnect if needed */
static int __refresh_tcon(const char *path, struct cifs_tcon *tcon, bool force_refresh)
static int __refresh_tcon(const char *path, struct cifs_ses *ses, bool force_refresh)
{
struct dfs_cache_tgt_list old_tl = DFS_CACHE_TGT_LIST_INIT(old_tl);
struct dfs_cache_tgt_list new_tl = DFS_CACHE_TGT_LIST_INIT(new_tl);
struct cifs_ses *ses = CIFS_DFS_ROOT_SES(tcon->ses);
struct cifs_tcon *ipc = ses->tcon_ipc;
struct TCP_Server_Info *server = ses->server;
bool needs_refresh = false;
struct cache_entry *ce;
unsigned int xid;
@ -1190,20 +1201,19 @@ static int __refresh_tcon(const char *path, struct cifs_tcon *tcon, bool force_r
goto out;
}
spin_lock(&ipc->tc_lock);
if (ipc->status != TID_GOOD) {
spin_unlock(&ipc->tc_lock);
cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n", __func__);
ses = CIFS_DFS_ROOT_SES(ses);
if (!is_ses_good(ses)) {
cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n",
__func__);
goto out;
}
spin_unlock(&ipc->tc_lock);
ce = cache_refresh_path(xid, ses, path, true);
if (!IS_ERR(ce)) {
rc = get_targets(ce, &new_tl);
up_read(&htable_rw_lock);
cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
mark_for_reconnect_if_needed(tcon->ses->server, &old_tl, &new_tl);
mark_for_reconnect_if_needed(server, path, &old_tl, &new_tl);
}
out:
@ -1216,10 +1226,11 @@ out:
static int refresh_tcon(struct cifs_tcon *tcon, bool force_refresh)
{
struct TCP_Server_Info *server = tcon->ses->server;
struct cifs_ses *ses = tcon->ses;
mutex_lock(&server->refpath_lock);
if (server->leaf_fullpath)
__refresh_tcon(server->leaf_fullpath + 1, tcon, force_refresh);
__refresh_tcon(server->leaf_fullpath + 1, ses, force_refresh);
mutex_unlock(&server->refpath_lock);
return 0;
}
@ -1263,56 +1274,32 @@ int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
return refresh_tcon(tcon, true);
}
/*
* Worker that will refresh DFS cache from all active mounts based on lowest TTL value
* from a DFS referral.
*/
static void refresh_cache_worker(struct work_struct *work)
/* Refresh all DFS referrals related to DFS tcon */
void dfs_cache_refresh(struct work_struct *work)
{
struct TCP_Server_Info *server;
struct cifs_tcon *tcon, *ntcon;
struct list_head tcons;
struct dfs_root_ses *rses;
struct cifs_tcon *tcon;
struct cifs_ses *ses;
INIT_LIST_HEAD(&tcons);
tcon = container_of(work, struct cifs_tcon, dfs_cache_work.work);
ses = tcon->ses;
server = ses->server;
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
if (!server->leaf_fullpath)
continue;
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
if (ses->tcon_ipc) {
ses->ses_count++;
list_add_tail(&ses->tcon_ipc->ulist, &tcons);
}
list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
if (!tcon->ipc) {
tcon->tc_count++;
list_add_tail(&tcon->ulist, &tcons);
}
}
}
}
spin_unlock(&cifs_tcp_ses_lock);
list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
struct TCP_Server_Info *server = tcon->ses->server;
list_del_init(&tcon->ulist);
mutex_lock(&server->refpath_lock);
if (server->leaf_fullpath)
__refresh_tcon(server->leaf_fullpath + 1, ses, false);
mutex_unlock(&server->refpath_lock);
list_for_each_entry(rses, &tcon->dfs_ses_list, list) {
ses = rses->ses;
server = ses->server;
mutex_lock(&server->refpath_lock);
if (server->leaf_fullpath)
__refresh_tcon(server->leaf_fullpath + 1, tcon, false);
__refresh_tcon(server->leaf_fullpath + 1, ses, false);
mutex_unlock(&server->refpath_lock);
if (tcon->ipc)
cifs_put_smb_ses(tcon->ses);
else
cifs_put_tcon(tcon);
}
spin_lock(&cache_ttl_lock);
queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
spin_unlock(&cache_ttl_lock);
queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
atomic_read(&dfs_cache_ttl) * HZ);
}


@ -13,6 +13,9 @@
#include <linux/uuid.h>
#include "cifsglob.h"
extern struct workqueue_struct *dfscache_wq;
extern atomic_t dfs_cache_ttl;
#define DFS_CACHE_TGT_LIST_INIT(var) { .tl_numtgts = 0, .tl_list = LIST_HEAD_INIT((var).tl_list), }
struct dfs_cache_tgt_list {
@ -42,6 +45,7 @@ int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it,
char **prefix);
char *dfs_cache_canonical_path(const char *path, const struct nls_table *cp, int remap);
int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb);
void dfs_cache_refresh(struct work_struct *work);
static inline struct dfs_cache_tgt_iterator *
dfs_cache_get_next_tgt(struct dfs_cache_tgt_list *tl,
@ -89,4 +93,9 @@ dfs_cache_get_nr_tgts(const struct dfs_cache_tgt_list *tl)
return tl ? tl->tl_numtgts : 0;
}
static inline int dfs_cache_get_ttl(void)
{
return atomic_read(&dfs_cache_ttl);
}
#endif /* _CIFS_DFS_CACHE_H */

Some files were not shown because too many files have changed in this diff