Two fixups

- Fix a potential error pointer dereference by checking the return value
  of the exynos_drm_crtc_get_by_type() function before accessing the crtc
  object.
- Fix incorrect error checking in the exynos_drm_dma.c module, reported
  by Dan [1].

[1] https://lore.kernel.org/all/33e52277-1349-472b-a55b-ab5c3462bfcf@moroto.mountain/
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEENRKOoF7NhdPGpscnVzg0iQxDErgFAmVxRrkACgkQVzg0iQxD
 Erh+6A//flOVFtqI8xPWpiFqGzKCqvK7qWPwGeaCgyzeu9AJialfchCnqqDNUTKB
 SnyqMHLDUaYAAml4765ApjVzPSv+QHzGbRGrTOfARg6WggiReoqvVLm9+FajE97T
 QLBiHunH6D7WxKUZZ9V2fjKB1DU3qqU+Xpxn0yV0z0UMD2UjjQ/WnJB9142TBphl
 QK5qfSTG7zynWz/vPxIEe/cxZfEyVO9lUArp9DpX70VL4ptGcd3qjNTDisV1U2NF
 ArT3Ou/68qN4tITPzymORikU6b7Wfgim52Eewtz2AI/S7g4rR+2rhEB2ayp0OlRK
 gU8iRQDp1EJEysk/WuBg7VPaXeG9dbSjEkj0JgLPoSxMvKh1zvo83z9yu8nQimmY
 zHolHF+m95eFPQH2UINvz6QTMcPBWQnolafQDGAU9KDDyuZ9c8pXLWPtNBdTXiig
 HQTctfYWCEVOBastjj5srblLHPSDiGViE0lFbxKnX9RoxCXgvMaka3XoXqk6wKwl
 svyGJlHzDyTZO5Fpe52qwivOythcqqX7JsMSPuTSo/AotUH/9zEkINqURNJlwEgb
 KwUJ+QnZESb01nEWFVtPdqtyclXEI2oQGx13AHsMTVdOQXWYhV6bg7FucMUTurzC
 rgFJ/jJPx74SLbwKBbbdmmdKlfK7v3bmEIaJPOsGpQTPgn037Sk=
 =Abe4
 -----END PGP SIGNATURE-----
gpgsig -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEGaM000JtH4Vhbu4NZMZxZtnUbQkFAmV33EcACgkQZMZxZtnU
 bQlcXhAAnRtyDLFZKdnAN5guf4aSUW1WOYXaZ7IuPHfrlEMMATBTtV3VjoZdQFGD
 b7Sp0qlMwkqGSvDp4bPlkHJXQABz2R2GBs63d4byq5HM3wVsEqZRuh4IV9zbtg/3
 CKMUVkXE5G+V6QX2WpxDuuRT0wHnvnv69VxtqkuYpZri/eXcT1IrO+9oesSCLL/c
 4XO7NhD687d7CezPG9thhIWxGA2lA+9l5pQc8oHpYewNfuP7jqwhLKmdLcuXWp45
 /6jTGr5JzCjYBpituY/ttR+hMZm5LweEgwqepjnG3208gSjNT+fog54YmD5SqH/P
 3sIg8B2BjTzzkHDO56G0t556qFzildOm64e7ZYq9BUOUZewJWt/+WZHT7DnPbgZB
 CkIGfGp/klAuO5kwzER5sL3tJF61FUEk2TJEMvUBRmSiVoXFmVWkzlkOzEbI9w7D
 xZjd8PDGoOEROBdztBTaKkTqPf5ZVm8P3BgwyX2LFdRobTpzMa/jm6yZiPU8yFq6
 3wJPnKgXE01Kgk3XVjoXCP4USx8Am5OxSsYT7MSsY6s3bzf3+vpa2wpDwYeA83RO
 1GhSoUS4nbsbv+HbY/Xdk3ANMrkvRduPmJpK1+GcSbsk0gnrWMPypmilUi0fmF2k
 0ljgSpkY6hbyofirc3deiym6iITwkqeB5ru4pnB9GNoDBtZJ/fY=
 =jFrM
 -----END PGP SIGNATURE-----

Merge tag 'exynos-drm-next-for-v6.7-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos into exynos-drm-next

Two fixups
- Fix a potential error pointer dereference by checking the return value
  of the exynos_drm_crtc_get_by_type() function before accessing the crtc
  object.
- Fix incorrect error checking in the exynos_drm_dma.c module, reported
  by Dan [1].

[1] https://lore.kernel.org/all/33e52277-1349-472b-a55b-ab5c3462bfcf@moroto.mountain/
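
For context, the first fix boils down to the standard kernel error-pointer check before the returned crtc is used. A minimal sketch based on the hdmi_bind() hunk in the diff below (only the relevant lines are shown):

	crtc = exynos_drm_crtc_get_by_type(drm_dev, EXYNOS_DISPLAY_TYPE_HDMI);
	if (IS_ERR(crtc))		/* error pointer, not a usable crtc */
		return PTR_ERR(crtc);	/* propagate instead of dereferencing */
	crtc->pipe_clk = &hdata->phy_clk;

The second fix adjusts exynos_drm_register_dma(): iommu_get_domain_for_dev() signals failure by returning NULL rather than an ERR_PTR, so the mapping pointer is initialized to NULL and checked with !mapping (returning -ENODEV) instead of IS_ERR(), as shown in the corresponding hunk below.
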
Inki Dae 2023-12-12 13:06:29 +09:00
commit a2f8994c10
268 changed files with 3242 additions and 1431 deletions


@ -59,15 +59,6 @@ Description:
brightness. Reading this file when no hw brightness change
event has happened will return an ENODATA error.
What: /sys/class/leds/<led>/color
Date: June 2023
KernelVersion: 6.5
Description:
Color of the LED.
This is a read-only file. Reading this file returns the color
of the LED as a string (e.g: "red", "green", "multicolor").
What: /sys/class/leds/<led>/trigger
Date: March 2006
KernelVersion: 2.6.17


@ -9,7 +9,7 @@ title: NXP S32G2 pin controller
maintainers:
- Ghennadi Procopciuc <Ghennadi.Procopciuc@oss.nxp.com>
- Chester Lin <clin@suse.com>
- Chester Lin <chester62515@gmail.com>
description: |
S32G2 pinmux is implemented in SIUL2 (System Integration Unit Lite2),


@ -5076,7 +5076,6 @@ CLANG CONTROL FLOW INTEGRITY SUPPORT
M: Sami Tolvanen <samitolvanen@google.com>
M: Kees Cook <keescook@chromium.org>
R: Nathan Chancellor <nathan@kernel.org>
R: Nick Desaulniers <ndesaulniers@google.com>
L: llvm@lists.linux.dev
S: Supported
B: https://github.com/ClangBuiltLinux/linux/issues
@ -5091,8 +5090,9 @@ F: .clang-format
CLANG/LLVM BUILD SUPPORT
M: Nathan Chancellor <nathan@kernel.org>
M: Nick Desaulniers <ndesaulniers@google.com>
R: Tom Rix <trix@redhat.com>
R: Nick Desaulniers <ndesaulniers@google.com>
R: Bill Wendling <morbo@google.com>
R: Justin Stitt <justinstitt@google.com>
L: llvm@lists.linux.dev
S: Supported
W: https://clangbuiltlinux.github.io/
@ -5242,7 +5242,6 @@ F: drivers/platform/x86/compal-laptop.c
COMPILER ATTRIBUTES
M: Miguel Ojeda <ojeda@kernel.org>
R: Nick Desaulniers <ndesaulniers@google.com>
S: Maintained
F: include/linux/compiler_attributes.h
@ -11524,7 +11523,6 @@ F: fs/autofs/
KERNEL BUILD + files below scripts/ (unless maintained elsewhere)
M: Masahiro Yamada <masahiroy@kernel.org>
R: Nathan Chancellor <nathan@kernel.org>
R: Nick Desaulniers <ndesaulniers@google.com>
R: Nicolas Schier <nicolas@fjasle.eu>
L: linux-kbuild@vger.kernel.org
S: Maintained
@ -17956,6 +17954,8 @@ L: iommu@lists.linux.dev
L: linux-arm-msm@vger.kernel.org
S: Maintained
F: drivers/iommu/arm/arm-smmu/qcom_iommu.c
F: drivers/iommu/arm/arm-smmu/arm-smmu-qcom*
F: drivers/iommu/msm_iommu*
QUALCOMM IPC ROUTER (QRTR) DRIVER
M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>


@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 7
SUBLEVEL = 0
EXTRAVERSION = -rc3
EXTRAVERSION = -rc4
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*


@ -1839,6 +1839,10 @@ static int __init __kpti_install_ng_mappings(void *__unused)
static void __init kpti_install_ng_mappings(void)
{
/* Check whether KPTI is going to be used */
if (!cpus_have_cap(ARM64_UNMAP_KERNEL_AT_EL0))
return;
/*
* We don't need to rewrite the page-tables if either we've done
* it already or we have KASLR enabled and therefore have not


@ -23,6 +23,15 @@
#include <asm/feature-fixups.h>
#ifdef CONFIG_VSX
#define __REST_1FPVSR(n,c,base) \
BEGIN_FTR_SECTION \
b 2f; \
END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
REST_FPR(n,base); \
b 3f; \
2: REST_VSR(n,c,base); \
3:
#define __REST_32FPVSRS(n,c,base) \
BEGIN_FTR_SECTION \
b 2f; \
@ -41,9 +50,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
2: SAVE_32VSRS(n,c,base); \
3:
#else
#define __REST_1FPVSR(n,b,base) REST_FPR(n, base)
#define __REST_32FPVSRS(n,b,base) REST_32FPRS(n, base)
#define __SAVE_32FPVSRS(n,b,base) SAVE_32FPRS(n, base)
#endif
#define REST_1FPVSR(n,c,base) __REST_1FPVSR(n,__REG_##c,__REG_##base)
#define REST_32FPVSRS(n,c,base) __REST_32FPVSRS(n,__REG_##c,__REG_##base)
#define SAVE_32FPVSRS(n,c,base) __SAVE_32FPVSRS(n,__REG_##c,__REG_##base)
@ -67,6 +78,7 @@ _GLOBAL(store_fp_state)
SAVE_32FPVSRS(0, R4, R3)
mffs fr0
stfd fr0,FPSTATE_FPSCR(r3)
REST_1FPVSR(0, R4, R3)
blr
EXPORT_SYMBOL(store_fp_state)
@ -138,4 +150,5 @@ _GLOBAL(save_fpu)
2: SAVE_32FPVSRS(0, R4, R6)
mffs fr0
stfd fr0,FPSTATE_FPSCR(r6)
REST_1FPVSR(0, R4, R6)
blr


@ -1198,11 +1198,11 @@ void kvmppc_save_user_regs(void)
usermsr = current->thread.regs->msr;
/* Caller has enabled FP/VEC/VSX/TM in MSR */
if (usermsr & MSR_FP)
save_fpu(current);
__giveup_fpu(current);
if (usermsr & MSR_VEC)
save_altivec(current);
__giveup_altivec(current);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (usermsr & MSR_TM) {


@ -33,6 +33,7 @@ _GLOBAL(store_vr_state)
mfvscr v0
li r4, VRSTATE_VSCR
stvx v0, r4, r3
lvx v0, 0, r3
blr
EXPORT_SYMBOL(store_vr_state)
@ -109,6 +110,7 @@ _GLOBAL(save_altivec)
mfvscr v0
li r4,VRSTATE_VSCR
stvx v0,r4,r7
lvx v0,0,r7
blr
#ifdef CONFIG_VSX


@ -33,9 +33,12 @@ EXPORT_SYMBOL_GPL(hypercall_page);
* and xen_vcpu_setup for details. By default it points to share_info->vcpu_info
* but during boot it is switched to point to xen_vcpu_info.
* The pointer is used in xen_evtchn_do_upcall to acknowledge pending events.
* Make sure that xen_vcpu_info doesn't cross a page boundary by making it
* cache-line aligned (the struct is guaranteed to have a size of 64 bytes,
* which matches the cache line size of 64-bit x86 processors).
*/
DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
DEFINE_PER_CPU_ALIGNED(struct vcpu_info, xen_vcpu_info);
/* Linux <-> Xen vCPU id mapping */
DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
@ -160,6 +163,7 @@ void xen_vcpu_setup(int cpu)
int err;
struct vcpu_info *vcpup;
BUILD_BUG_ON(sizeof(*vcpup) > SMP_CACHE_BYTES);
BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
/*


@ -21,7 +21,7 @@ extern void *xen_initial_gdt;
struct trap_info;
void xen_copy_trap_info(struct trap_info *traps);
DECLARE_PER_CPU(struct vcpu_info, xen_vcpu_info);
DECLARE_PER_CPU_ALIGNED(struct vcpu_info, xen_vcpu_info);
DECLARE_PER_CPU(unsigned long, xen_cr3);
DECLARE_PER_CPU(unsigned long, xen_current_cr3);


@ -501,9 +501,17 @@ static inline void bio_check_ro(struct bio *bio)
if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
return;
pr_warn_ratelimited("Trying to write to read-only block-device %pg\n",
bio->bi_bdev);
/* Older lvm-tools actually trigger this */
if (bio->bi_bdev->bd_ro_warned)
return;
bio->bi_bdev->bd_ro_warned = true;
/*
* Use ioctl to set underlying disk of raid/dm to read-only
* will trigger this.
*/
pr_warn("Trying to write to read-only block-device %pg\n",
bio->bi_bdev);
}
}


@ -1512,14 +1512,26 @@ void blk_mq_delay_kick_requeue_list(struct request_queue *q,
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
static bool blk_is_flush_data_rq(struct request *rq)
{
return (rq->rq_flags & RQF_FLUSH_SEQ) && !is_flush_rq(rq);
}
static bool blk_mq_rq_inflight(struct request *rq, void *priv)
{
/*
* If we find a request that isn't idle we know the queue is busy
* as it's checked in the iter.
* Return false to stop the iteration.
*
* In case of queue quiesce, if one flush data request is completed,
* don't count it as inflight given the flush sequence is suspended,
* and the original flush data request is invisible to driver, just
* like other pending requests because of quiesce
*/
if (blk_mq_request_started(rq)) {
if (blk_mq_request_started(rq) && !(blk_queue_quiesced(rq->q) &&
blk_is_flush_data_rq(rq) &&
blk_mq_request_completed(rq))) {
bool *busy = priv;
*busy = true;


@ -615,6 +615,7 @@ static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
#endif
/* Common attributes for bio-based and request-based queues. */
static struct attribute *queue_attrs[] = {
&queue_ra_entry.attr,
&queue_max_hw_sectors_entry.attr,
@ -659,6 +660,7 @@ static struct attribute *queue_attrs[] = {
NULL,
};
/* Request-based queue attributes that are not relevant for bio-based queues. */
static struct attribute *blk_mq_queue_attrs[] = {
&queue_requests_entry.attr,
&elv_iosched_entry.attr,


@ -253,8 +253,7 @@ static const struct backlight_ops acpi_backlight_ops = {
static int video_get_max_state(struct thermal_cooling_device *cooling_dev,
unsigned long *state)
{
struct acpi_device *device = cooling_dev->devdata;
struct acpi_video_device *video = acpi_driver_data(device);
struct acpi_video_device *video = cooling_dev->devdata;
*state = video->brightness->count - ACPI_VIDEO_FIRST_LEVEL - 1;
return 0;
@ -263,8 +262,7 @@ static int video_get_max_state(struct thermal_cooling_device *cooling_dev,
static int video_get_cur_state(struct thermal_cooling_device *cooling_dev,
unsigned long *state)
{
struct acpi_device *device = cooling_dev->devdata;
struct acpi_video_device *video = acpi_driver_data(device);
struct acpi_video_device *video = cooling_dev->devdata;
unsigned long long level;
int offset;
@ -283,8 +281,7 @@ static int video_get_cur_state(struct thermal_cooling_device *cooling_dev,
static int
video_set_cur_state(struct thermal_cooling_device *cooling_dev, unsigned long state)
{
struct acpi_device *device = cooling_dev->devdata;
struct acpi_video_device *video = acpi_driver_data(device);
struct acpi_video_device *video = cooling_dev->devdata;
int level;
if (state >= video->brightness->count - ACPI_VIDEO_FIRST_LEVEL)
@ -1125,7 +1122,6 @@ static int acpi_video_bus_get_one_device(struct acpi_device *device, void *arg)
strcpy(acpi_device_name(device), ACPI_VIDEO_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_VIDEO_CLASS);
device->driver_data = data;
data->device_id = device_id;
data->video = video;
@ -1747,8 +1743,8 @@ static void acpi_video_dev_register_backlight(struct acpi_video_device *device)
device->backlight->props.brightness =
acpi_video_get_brightness(device->backlight);
device->cooling_dev = thermal_cooling_device_register("LCD",
device->dev, &video_cooling_ops);
device->cooling_dev = thermal_cooling_device_register("LCD", device,
&video_cooling_ops);
if (IS_ERR(device->cooling_dev)) {
/*
* Set cooling_dev to NULL so we don't crash trying to free it.


@ -1568,17 +1568,22 @@ static const struct iommu_ops *acpi_iommu_configure_id(struct device *dev,
int err;
const struct iommu_ops *ops;
/* Serialise to make dev->iommu stable under our potential fwspec */
mutex_lock(&iommu_probe_device_lock);
/*
* If we already translated the fwspec there is nothing left to do,
* return the iommu_ops.
*/
ops = acpi_iommu_fwspec_ops(dev);
if (ops)
if (ops) {
mutex_unlock(&iommu_probe_device_lock);
return ops;
}
err = iort_iommu_configure_id(dev, id_in);
if (err && err != -EPROBE_DEFER)
err = viot_iommu_configure(dev);
mutex_unlock(&iommu_probe_device_lock);
/*
* If we have reason to believe the IOMMU driver missed the initial


@ -1055,9 +1055,14 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
* Ask the sd driver to issue START STOP UNIT on runtime suspend
* and resume and shutdown only. For system level suspend/resume,
* devices power state is handled directly by libata EH.
* Given that disks are always spun up on system resume, also
* make sure that the sd driver forces runtime suspended disks
* to be resumed to correctly reflect the power state of the
* device.
*/
sdev->manage_runtime_start_stop = true;
sdev->manage_shutdown = true;
sdev->manage_runtime_start_stop = 1;
sdev->manage_shutdown = 1;
sdev->force_runtime_start_on_system_start = 1;
}
/*


@ -307,11 +307,11 @@ static int pstate_init_perf(struct amd_cpudata *cpudata)
highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
WRITE_ONCE(cpudata->highest_perf, highest_perf);
WRITE_ONCE(cpudata->max_limit_perf, highest_perf);
WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));
WRITE_ONCE(cpudata->min_limit_perf, AMD_CPPC_LOWEST_PERF(cap1));
return 0;
}
@ -329,11 +329,12 @@ static int cppc_init_perf(struct amd_cpudata *cpudata)
highest_perf = cppc_perf.highest_perf;
WRITE_ONCE(cpudata->highest_perf, highest_perf);
WRITE_ONCE(cpudata->max_limit_perf, highest_perf);
WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
WRITE_ONCE(cpudata->lowest_nonlinear_perf,
cppc_perf.lowest_nonlinear_perf);
WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf);
WRITE_ONCE(cpudata->min_limit_perf, cppc_perf.lowest_perf);
if (cppc_state == AMD_PSTATE_ACTIVE)
return 0;
@ -432,6 +433,10 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
u64 prev = READ_ONCE(cpudata->cppc_req_cached);
u64 value = prev;
min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
cpudata->max_limit_perf);
max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
cpudata->max_limit_perf);
des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
if ((cppc_state == AMD_PSTATE_GUIDED) && (gov_flags & CPUFREQ_GOV_DYNAMIC_SWITCHING)) {
@ -470,6 +475,22 @@ static int amd_pstate_verify(struct cpufreq_policy_data *policy)
return 0;
}
static int amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
{
u32 max_limit_perf, min_limit_perf;
struct amd_cpudata *cpudata = policy->driver_data;
max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
WRITE_ONCE(cpudata->max_limit_freq, policy->max);
WRITE_ONCE(cpudata->min_limit_freq, policy->min);
return 0;
}
static int amd_pstate_update_freq(struct cpufreq_policy *policy,
unsigned int target_freq, bool fast_switch)
{
@ -480,6 +501,9 @@ static int amd_pstate_update_freq(struct cpufreq_policy *policy,
if (!cpudata->max_freq)
return -ENODEV;
if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
amd_pstate_update_min_max_limit(policy);
cap_perf = READ_ONCE(cpudata->highest_perf);
min_perf = READ_ONCE(cpudata->lowest_perf);
max_perf = cap_perf;
@ -518,7 +542,9 @@ static int amd_pstate_target(struct cpufreq_policy *policy,
static unsigned int amd_pstate_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{
return amd_pstate_update_freq(policy, target_freq, true);
if (!amd_pstate_update_freq(policy, target_freq, true))
return target_freq;
return policy->cur;
}
static void amd_pstate_adjust_perf(unsigned int cpu,
@ -532,6 +558,10 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
struct amd_cpudata *cpudata = policy->driver_data;
unsigned int target_freq;
if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
amd_pstate_update_min_max_limit(policy);
cap_perf = READ_ONCE(cpudata->highest_perf);
lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
max_freq = READ_ONCE(cpudata->max_freq);
@ -745,6 +775,8 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
/* Initial processor data capability frequencies */
cpudata->max_freq = max_freq;
cpudata->min_freq = min_freq;
cpudata->max_limit_freq = max_freq;
cpudata->min_limit_freq = min_freq;
cpudata->nominal_freq = nominal_freq;
cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;
@ -850,11 +882,16 @@ static ssize_t show_energy_performance_available_preferences(
{
int i = 0;
int offset = 0;
struct amd_cpudata *cpudata = policy->driver_data;
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
return sysfs_emit_at(buf, offset, "%s\n",
energy_perf_strings[EPP_INDEX_PERFORMANCE]);
while (energy_perf_strings[i] != NULL)
offset += sysfs_emit_at(buf, offset, "%s ", energy_perf_strings[i++]);
sysfs_emit_at(buf, offset, "\n");
offset += sysfs_emit_at(buf, offset, "\n");
return offset;
}
@ -1183,16 +1220,25 @@ static int amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
return 0;
}
static void amd_pstate_epp_init(unsigned int cpu)
static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
{
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
struct amd_cpudata *cpudata = policy->driver_data;
u32 max_perf, min_perf;
u32 max_perf, min_perf, min_limit_perf, max_limit_perf;
u64 value;
s16 epp;
max_perf = READ_ONCE(cpudata->highest_perf);
min_perf = READ_ONCE(cpudata->lowest_perf);
max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
cpudata->max_limit_perf);
min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
cpudata->max_limit_perf);
WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
value = READ_ONCE(cpudata->cppc_req_cached);
@ -1210,9 +1256,6 @@ static void amd_pstate_epp_init(unsigned int cpu)
value &= ~AMD_CPPC_DES_PERF(~0L);
value |= AMD_CPPC_DES_PERF(0);
if (cpudata->epp_policy == cpudata->policy)
goto skip_epp;
cpudata->epp_policy = cpudata->policy;
/* Get BIOS pre-defined epp value */
@ -1222,7 +1265,7 @@ static void amd_pstate_epp_init(unsigned int cpu)
* This return value can only be negative for shared_memory
* systems where EPP register read/write not supported.
*/
goto skip_epp;
return;
}
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
@ -1236,8 +1279,6 @@ static void amd_pstate_epp_init(unsigned int cpu)
WRITE_ONCE(cpudata->cppc_req_cached, value);
amd_pstate_set_epp(cpudata, epp);
skip_epp:
cpufreq_cpu_put(policy);
}
static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
@ -1252,7 +1293,7 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
cpudata->policy = policy->policy;
amd_pstate_epp_init(policy->cpu);
amd_pstate_epp_update_limit(policy);
return 0;
}


@ -327,7 +327,7 @@ static int imx6ul_opp_check_speed_grading(struct device *dev)
imx6x_disable_freq_in_opp(dev, 696000000);
if (of_machine_is_compatible("fsl,imx6ull")) {
if (val != OCOTP_CFG3_6ULL_SPEED_792MHZ)
if (val < OCOTP_CFG3_6ULL_SPEED_792MHZ)
imx6x_disable_freq_in_opp(dev, 792000000);
if (val != OCOTP_CFG3_6ULL_SPEED_900MHZ)


@ -23,8 +23,10 @@
#include <linux/nvmem-consumer.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/soc/qcom/smem.h>
@ -55,6 +57,7 @@ struct qcom_cpufreq_match_data {
struct qcom_cpufreq_drv_cpu {
int opp_token;
struct device **virt_devs;
};
struct qcom_cpufreq_drv {
@ -424,6 +427,30 @@ static const struct qcom_cpufreq_match_data match_data_ipq8074 = {
.get_version = qcom_cpufreq_ipq8074_name_version,
};
static void qcom_cpufreq_suspend_virt_devs(struct qcom_cpufreq_drv *drv, unsigned int cpu)
{
const char * const *name = drv->data->genpd_names;
int i;
if (!drv->cpus[cpu].virt_devs)
return;
for (i = 0; *name; i++, name++)
device_set_awake_path(drv->cpus[cpu].virt_devs[i]);
}
static void qcom_cpufreq_put_virt_devs(struct qcom_cpufreq_drv *drv, unsigned int cpu)
{
const char * const *name = drv->data->genpd_names;
int i;
if (!drv->cpus[cpu].virt_devs)
return;
for (i = 0; *name; i++, name++)
pm_runtime_put(drv->cpus[cpu].virt_devs[i]);
}
static int qcom_cpufreq_probe(struct platform_device *pdev)
{
struct qcom_cpufreq_drv *drv;
@ -478,6 +505,7 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
of_node_put(np);
for_each_possible_cpu(cpu) {
struct device **virt_devs = NULL;
struct dev_pm_opp_config config = {
.supported_hw = NULL,
};
@ -498,7 +526,7 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
if (drv->data->genpd_names) {
config.genpd_names = drv->data->genpd_names;
config.virt_devs = NULL;
config.virt_devs = &virt_devs;
}
if (config.supported_hw || config.genpd_names) {
@ -509,6 +537,27 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
goto free_opp;
}
}
if (virt_devs) {
const char * const *name = config.genpd_names;
int i, j;
for (i = 0; *name; i++, name++) {
ret = pm_runtime_resume_and_get(virt_devs[i]);
if (ret) {
dev_err(cpu_dev, "failed to resume %s: %d\n",
*name, ret);
/* Rollback previous PM runtime calls */
name = config.genpd_names;
for (j = 0; *name && j < i; j++, name++)
pm_runtime_put(virt_devs[j]);
goto free_opp;
}
}
drv->cpus[cpu].virt_devs = virt_devs;
}
}
cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1,
@ -522,8 +571,10 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
dev_err(cpu_dev, "Failed to register platform device\n");
free_opp:
for_each_possible_cpu(cpu)
for_each_possible_cpu(cpu) {
qcom_cpufreq_put_virt_devs(drv, cpu);
dev_pm_opp_clear_config(drv->cpus[cpu].opp_token);
}
return ret;
}
@ -534,15 +585,31 @@ static void qcom_cpufreq_remove(struct platform_device *pdev)
platform_device_unregister(cpufreq_dt_pdev);
for_each_possible_cpu(cpu)
for_each_possible_cpu(cpu) {
qcom_cpufreq_put_virt_devs(drv, cpu);
dev_pm_opp_clear_config(drv->cpus[cpu].opp_token);
}
}
static int qcom_cpufreq_suspend(struct device *dev)
{
struct qcom_cpufreq_drv *drv = dev_get_drvdata(dev);
unsigned int cpu;
for_each_possible_cpu(cpu)
qcom_cpufreq_suspend_virt_devs(drv, cpu);
return 0;
}
static DEFINE_SIMPLE_DEV_PM_OPS(qcom_cpufreq_pm_ops, qcom_cpufreq_suspend, NULL);
static struct platform_driver qcom_cpufreq_driver = {
.probe = qcom_cpufreq_probe,
.remove_new = qcom_cpufreq_remove,
.driver = {
.name = "qcom-cpufreq-nvmem",
.pm = pm_sleep_ptr(&qcom_cpufreq_pm_ops),
},
};


@ -301,7 +301,7 @@ void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
dma_resv_list_entry(fobj, i, obj, &old, &old_usage);
if ((old->context == fence->context && old_usage >= usage &&
dma_fence_is_later(fence, old)) ||
dma_fence_is_later_or_same(fence, old)) ||
dma_fence_is_signaled(old)) {
dma_resv_list_set(fobj, i, fence, usage);
dma_fence_put(old);


@ -717,14 +717,11 @@ static void create_units(struct fw_device *device)
fw_unit_attributes,
&unit->attribute_group);
if (device_register(&unit->device) < 0)
goto skip_unit;
fw_device_get(device);
continue;
skip_unit:
kfree(unit);
if (device_register(&unit->device) < 0) {
put_device(&unit->device);
continue;
}
}
}


@ -1519,9 +1519,9 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
sdev->use_10_for_rw = 1;
if (sbp2_param_exclusive_login) {
sdev->manage_system_start_stop = true;
sdev->manage_runtime_start_stop = true;
sdev->manage_shutdown = true;
sdev->manage_system_start_stop = 1;
sdev->manage_runtime_start_stop = 1;
sdev->manage_shutdown = 1;
}
if (sdev->type == TYPE_ROM)


@ -101,7 +101,7 @@ retry:
* overlap on physical address level.
*/
list_for_each_entry(entry, &accepting_list, list) {
if (entry->end < range.start)
if (entry->end <= range.start)
continue;
if (entry->start >= range.end)
continue;


@ -4,8 +4,6 @@
* Copyright (C) 2017 Broadcom
*/
#include <linux/device.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
@ -21,7 +19,6 @@ struct panel_bridge {
struct drm_bridge bridge;
struct drm_connector connector;
struct drm_panel *panel;
struct device_link *link;
u32 connector_type;
};
@ -63,24 +60,13 @@ static int panel_bridge_attach(struct drm_bridge *bridge,
{
struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
struct drm_connector *connector = &panel_bridge->connector;
struct drm_panel *panel = panel_bridge->panel;
struct drm_device *drm_dev = bridge->dev;
int ret;
panel_bridge->link = device_link_add(drm_dev->dev, panel->dev,
DL_FLAG_STATELESS);
if (!panel_bridge->link) {
DRM_ERROR("Failed to add device link between %s and %s\n",
dev_name(drm_dev->dev), dev_name(panel->dev));
return -EINVAL;
}
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
return 0;
if (!bridge->encoder) {
DRM_ERROR("Missing encoder\n");
device_link_del(panel_bridge->link);
return -ENODEV;
}
@ -92,7 +78,6 @@ static int panel_bridge_attach(struct drm_bridge *bridge,
panel_bridge->connector_type);
if (ret) {
DRM_ERROR("Failed to initialize connector\n");
device_link_del(panel_bridge->link);
return ret;
}
@ -115,8 +100,6 @@ static void panel_bridge_detach(struct drm_bridge *bridge)
struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
struct drm_connector *connector = &panel_bridge->connector;
device_link_del(panel_bridge->link);
/*
* Cleanup the connector if we know it was initialized.
*


@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
// SPDX-License-Identifier: GPL-2.0-only OR MIT
/*
* Copyright (c) 2022 Red Hat.
*


@ -278,7 +278,7 @@ void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);
/*
/**
* drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
* @dev: drm_device to import into
* @file_priv: drm file-private structure
@ -292,9 +292,9 @@ EXPORT_SYMBOL(drm_gem_dmabuf_release);
*
* Returns 0 on success or a negative error code on failure.
*/
static int drm_gem_prime_fd_to_handle(struct drm_device *dev,
struct drm_file *file_priv, int prime_fd,
uint32_t *handle)
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
struct drm_file *file_priv, int prime_fd,
uint32_t *handle)
{
struct dma_buf *dma_buf;
struct drm_gem_object *obj;
@ -360,6 +360,7 @@ out_put:
dma_buf_put(dma_buf);
return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@ -408,7 +409,7 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev,
return dmabuf;
}
/*
/**
* drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
* @dev: dev to export the buffer from
* @file_priv: drm file-private structure
@ -421,10 +422,10 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev,
* The actual exporting from GEM object to a dma-buf is done through the
* &drm_gem_object_funcs.export callback.
*/
static int drm_gem_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv, uint32_t handle,
uint32_t flags,
int *prime_fd)
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv, uint32_t handle,
uint32_t flags,
int *prime_fd)
{
struct drm_gem_object *obj;
int ret = 0;
@ -506,6 +507,7 @@ out_unlock:
return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@ -864,9 +866,9 @@ EXPORT_SYMBOL(drm_prime_get_contiguous_size);
* @obj: GEM object to export
* @flags: flags like DRM_CLOEXEC and DRM_RDWR
*
* This is the implementation of the &drm_gem_object_funcs.export functions
* for GEM drivers using the PRIME helpers. It is used as the default for
* drivers that do not set their own.
* This is the implementation of the &drm_gem_object_funcs.export functions for GEM drivers
* using the PRIME helpers. It is used as the default in
* drm_gem_prime_handle_to_fd().
*/
struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
int flags)
@ -962,9 +964,10 @@ EXPORT_SYMBOL(drm_gem_prime_import_dev);
* @dev: drm_device to import into
* @dma_buf: dma-buf object to import
*
* This is the implementation of the gem_prime_import functions for GEM
* drivers using the PRIME helpers. It is the default for drivers that do
* not set their own &drm_driver.gem_prime_import.
* This is the implementation of the gem_prime_import functions for GEM drivers
* using the PRIME helpers. Drivers can use this as their
* &drm_driver.gem_prime_import implementation. It is used as the default
* implementation in drm_gem_prime_fd_to_handle().
*
* Drivers must arrange to call drm_prime_gem_destroy() from their
* &drm_gem_object_funcs.free hook when using this function.


@ -107,18 +107,16 @@ int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
return 0;
if (!priv->mapping) {
void *mapping;
void *mapping = NULL;
if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
mapping = arm_iommu_create_mapping(&platform_bus_type,
EXYNOS_DEV_ADDR_START, EXYNOS_DEV_ADDR_SIZE);
else if (IS_ENABLED(CONFIG_IOMMU_DMA))
mapping = iommu_get_domain_for_dev(priv->dma_dev);
else
mapping = ERR_PTR(-ENODEV);
if (IS_ERR(mapping))
return PTR_ERR(mapping);
if (!mapping)
return -ENODEV;
priv->mapping = mapping;
}


@ -1861,6 +1861,8 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
return ret;
crtc = exynos_drm_crtc_get_by_type(drm_dev, EXYNOS_DISPLAY_TYPE_HDMI);
if (IS_ERR(crtc))
return PTR_ERR(crtc);
crtc->pipe_clk = &hdata->phy_clk;
ret = hdmi_create_connector(encoder);


@ -41,12 +41,15 @@ void intel_engine_add_user(struct intel_engine_cs *engine)
llist_add(&engine->uabi_llist, &engine->i915->uabi_engines_llist);
}
static const u8 uabi_classes[] = {
#define I915_NO_UABI_CLASS ((u16)(-1))
static const u16 uabi_classes[] = {
[RENDER_CLASS] = I915_ENGINE_CLASS_RENDER,
[COPY_ENGINE_CLASS] = I915_ENGINE_CLASS_COPY,
[VIDEO_DECODE_CLASS] = I915_ENGINE_CLASS_VIDEO,
[VIDEO_ENHANCEMENT_CLASS] = I915_ENGINE_CLASS_VIDEO_ENHANCE,
[COMPUTE_CLASS] = I915_ENGINE_CLASS_COMPUTE,
[OTHER_CLASS] = I915_NO_UABI_CLASS, /* Not exposed to users, no uabi class. */
};
static int engine_cmp(void *priv, const struct list_head *A,
@ -200,6 +203,7 @@ static void engine_rename(struct intel_engine_cs *engine, const char *name, u16
void intel_engines_driver_register(struct drm_i915_private *i915)
{
u16 name_instance, other_instance = 0;
struct legacy_ring ring = {};
struct list_head *it, *next;
struct rb_node **p, *prev;
@ -216,27 +220,28 @@ void intel_engines_driver_register(struct drm_i915_private *i915)
if (intel_gt_has_unrecoverable_error(engine->gt))
continue; /* ignore incomplete engines */
/*
* We don't want to expose the GSC engine to the users, but we
* still rename it so it is easier to identify in the debug logs
*/
if (engine->id == GSC0) {
engine_rename(engine, "gsc", 0);
continue;
}
GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes));
engine->uabi_class = uabi_classes[engine->class];
if (engine->uabi_class == I915_NO_UABI_CLASS) {
name_instance = other_instance++;
} else {
GEM_BUG_ON(engine->uabi_class >=
ARRAY_SIZE(i915->engine_uabi_class_count));
name_instance =
i915->engine_uabi_class_count[engine->uabi_class]++;
}
engine->uabi_instance = name_instance;
GEM_BUG_ON(engine->uabi_class >=
ARRAY_SIZE(i915->engine_uabi_class_count));
engine->uabi_instance =
i915->engine_uabi_class_count[engine->uabi_class]++;
/* Replace the internal name with the final user facing name */
/*
* Replace the internal name with the final user and log facing
* name.
*/
engine_rename(engine,
intel_engine_class_repr(engine->class),
engine->uabi_instance);
name_instance);
if (engine->uabi_class == I915_NO_UABI_CLASS)
continue;
rb_link_node(&engine->uabi_node, prev, p);
rb_insert_color(&engine->uabi_node, &i915->uabi_engines);


@ -38,7 +38,7 @@ typedef struct PACKED_REGISTRY_TABLE
{
NvU32 size;
NvU32 numEntries;
PACKED_REGISTRY_ENTRY entries[0];
PACKED_REGISTRY_ENTRY entries[] __counted_by(numEntries);
} PACKED_REGISTRY_TABLE;
#endif


@ -325,8 +325,9 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
(!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
continue;
if (pi < 0)
pi = i;
/* pick the last one as it will be smallest. */
pi = i;
/* Stop once the buffer is larger than the current page size. */
if (*size >= 1ULL << vmm->page[i].shift)
break;


@ -365,10 +365,8 @@ r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc)
}
ret = r535_gsp_cmdq_push(gsp, rpc);
if (ret) {
mutex_unlock(&gsp->cmdq.mutex);
if (ret)
return ERR_PTR(ret);
}
if (wait) {
msg = r535_gsp_msg_recv(gsp, fn, repc);
@ -1048,7 +1046,7 @@ r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp)
char *strings;
int str_offset;
int i;
size_t rpc_size = sizeof(*rpc) + sizeof(rpc->entries[0]) * NV_GSP_REG_NUM_ENTRIES;
size_t rpc_size = struct_size(rpc, entries, NV_GSP_REG_NUM_ENTRIES);
/* add strings + null terminator */
for (i = 0; i < NV_GSP_REG_NUM_ENTRIES; i++)


@ -1764,6 +1764,7 @@ static const struct panel_desc starry_qfh032011_53g_desc = {
.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
MIPI_DSI_MODE_LPM,
.init_cmds = starry_qfh032011_53g_init_cmd,
.lp11_before_reset = true,
};
static const struct drm_display_mode starry_himax83102_j02_default_mode = {


@ -1254,9 +1254,9 @@ static int nt36523_probe(struct mipi_dsi_device *dsi)
return dev_err_probe(dev, -EPROBE_DEFER, "cannot get secondary DSI host\n");
pinfo->dsi[1] = mipi_dsi_device_register_full(dsi1_host, info);
if (!pinfo->dsi[1]) {
if (IS_ERR(pinfo->dsi[1])) {
dev_err(dev, "cannot get secondary DSI device\n");
return -ENODEV;
return PTR_ERR(pinfo->dsi[1]);
}
}


@ -1522,6 +1522,15 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
{
struct qi_desc desc;
/*
* VT-d spec, section 4.3:
*
* Software is recommended to not submit any Device-TLB invalidation
* requests while address remapping hardware is disabled.
*/
if (!(iommu->gcmd & DMA_GCMD_TE))
return;
if (mask) {
addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
desc.qw1 = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
@ -1587,6 +1596,15 @@ void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
unsigned long mask = 1UL << (VTD_PAGE_SHIFT + size_order - 1);
struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};
/*
* VT-d spec, section 4.3:
*
* Software is recommended to not submit any Device-TLB invalidation
* requests while address remapping hardware is disabled.
*/
if (!(iommu->gcmd & DMA_GCMD_TE))
return;
desc.qw0 = QI_DEV_EIOTLB_PASID(pasid) | QI_DEV_EIOTLB_SID(sid) |
QI_DEV_EIOTLB_QDEP(qdep) | QI_DEIOTLB_TYPE |
QI_DEV_IOTLB_PFSID(pfsid);


@ -299,7 +299,7 @@ static int iommu_skip_te_disable;
#define IDENTMAP_AZALIA 4
const struct iommu_ops intel_iommu_ops;
const struct iommu_dirty_ops intel_dirty_ops;
static const struct iommu_dirty_ops intel_dirty_ops;
static bool translation_pre_enabled(struct intel_iommu *iommu)
{
@ -2207,6 +2207,8 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
attr |= DMA_FL_PTE_DIRTY;
}
domain->has_mappings = true;
pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | attr;
while (nr_pages > 0) {
@ -2490,7 +2492,8 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
return ret;
}
iommu_enable_pci_caps(info);
if (sm_supported(info->iommu) || !domain_type_is_si(info->domain))
iommu_enable_pci_caps(info);
return 0;
}
@ -3925,8 +3928,8 @@ static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *op
*/
static void domain_context_clear(struct device_domain_info *info)
{
if (!info->iommu || !info->dev || !dev_is_pci(info->dev))
return;
if (!dev_is_pci(info->dev))
domain_context_clear_one(info, info->bus, info->devfn);
pci_for_each_dma_alias(to_pci_dev(info->dev),
&domain_context_clear_one_cb, info);
@ -4360,7 +4363,8 @@ static bool intel_iommu_enforce_cache_coherency(struct iommu_domain *domain)
return true;
spin_lock_irqsave(&dmar_domain->lock, flags);
if (!domain_support_force_snooping(dmar_domain)) {
if (!domain_support_force_snooping(dmar_domain) ||
(!dmar_domain->use_first_level && dmar_domain->has_mappings)) {
spin_unlock_irqrestore(&dmar_domain->lock, flags);
return false;
}
@ -4925,7 +4929,7 @@ static int intel_iommu_read_and_clear_dirty(struct iommu_domain *domain,
return 0;
}
const struct iommu_dirty_ops intel_dirty_ops = {
static const struct iommu_dirty_ops intel_dirty_ops = {
.set_dirty_tracking = intel_iommu_set_dirty_tracking,
.read_and_clear_dirty = intel_iommu_read_and_clear_dirty,
};
@ -5073,7 +5077,7 @@ static void quirk_igfx_skip_te_disable(struct pci_dev *dev)
ver = (dev->device >> 8) & 0xff;
if (ver != 0x45 && ver != 0x46 && ver != 0x4c &&
ver != 0x4e && ver != 0x8a && ver != 0x98 &&
ver != 0x9a && ver != 0xa7)
ver != 0x9a && ver != 0xa7 && ver != 0x7d)
return;
if (risky_device(dev))


@ -602,6 +602,9 @@ struct dmar_domain {
*/
u8 dirty_tracking:1; /* Dirty tracking is enabled */
u8 nested_parent:1; /* Has other domains nested on it */
u8 has_mappings:1; /* Has mappings configured through
* iommu_map() interface.
*/
spinlock_t lock; /* Protect device tracking lists */
struct list_head devices; /* all devices' list */


@ -216,6 +216,27 @@ static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
rcu_read_unlock();
}
static void intel_flush_svm_all(struct intel_svm *svm)
{
struct device_domain_info *info;
struct intel_svm_dev *sdev;
rcu_read_lock();
list_for_each_entry_rcu(sdev, &svm->devs, list) {
info = dev_iommu_priv_get(sdev->dev);
qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, 0, -1UL, 0);
if (info->ats_enabled) {
qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
svm->pasid, sdev->qdep,
0, 64 - VTD_PAGE_SHIFT);
quirk_extra_dev_tlb_flush(info, 0, 64 - VTD_PAGE_SHIFT,
svm->pasid, sdev->qdep);
}
}
rcu_read_unlock();
}
/* Pages have been freed at this point */
static void intel_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
struct mm_struct *mm,
@ -223,6 +244,11 @@ static void intel_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
{
struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
if (start == 0 && end == -1UL) {
intel_flush_svm_all(svm);
return;
}
intel_flush_svm_range(svm, start,
(end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0);
}


@ -485,11 +485,12 @@ static void iommu_deinit_device(struct device *dev)
dev_iommu_free(dev);
}
DEFINE_MUTEX(iommu_probe_device_lock);
static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
{
const struct iommu_ops *ops = dev->bus->iommu_ops;
struct iommu_group *group;
static DEFINE_MUTEX(iommu_probe_device_lock);
struct group_device *gdev;
int ret;
@ -502,17 +503,15 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
* probably be able to use device_lock() here to minimise the scope,
* but for now enforcing a simple global ordering is fine.
*/
mutex_lock(&iommu_probe_device_lock);
lockdep_assert_held(&iommu_probe_device_lock);
/* Device is probed already if in a group */
if (dev->iommu_group) {
ret = 0;
goto out_unlock;
}
if (dev->iommu_group)
return 0;
ret = iommu_init_device(dev, ops);
if (ret)
goto out_unlock;
return ret;
group = dev->iommu_group;
gdev = iommu_group_alloc_device(group, dev);
@ -548,7 +547,6 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
list_add_tail(&group->entry, group_list);
}
mutex_unlock(&group->mutex);
mutex_unlock(&iommu_probe_device_lock);
if (dev_is_pci(dev))
iommu_dma_set_pci_32bit_workaround(dev);
@ -562,8 +560,6 @@ err_put_group:
iommu_deinit_device(dev);
mutex_unlock(&group->mutex);
iommu_group_put(group);
out_unlock:
mutex_unlock(&iommu_probe_device_lock);
return ret;
}
@ -573,7 +569,9 @@ int iommu_probe_device(struct device *dev)
const struct iommu_ops *ops;
int ret;
mutex_lock(&iommu_probe_device_lock);
ret = __iommu_probe_device(dev, NULL);
mutex_unlock(&iommu_probe_device_lock);
if (ret)
return ret;
@ -1788,7 +1786,7 @@ iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
*/
if (ops->default_domain) {
if (req_type)
return NULL;
return ERR_PTR(-EINVAL);
return ops->default_domain;
}
@ -1797,15 +1795,15 @@ iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
/* The driver gave no guidance on what type to use, try the default */
dom = __iommu_group_alloc_default_domain(group, iommu_def_domain_type);
if (dom)
if (!IS_ERR(dom))
return dom;
/* Otherwise IDENTITY and DMA_FQ defaults will try DMA */
if (iommu_def_domain_type == IOMMU_DOMAIN_DMA)
return NULL;
return ERR_PTR(-EINVAL);
dom = __iommu_group_alloc_default_domain(group, IOMMU_DOMAIN_DMA);
if (!dom)
return NULL;
if (IS_ERR(dom))
return dom;
pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA",
iommu_def_domain_type, group->name);
@ -1822,7 +1820,9 @@ static int probe_iommu_group(struct device *dev, void *data)
struct list_head *group_list = data;
int ret;
mutex_lock(&iommu_probe_device_lock);
ret = __iommu_probe_device(dev, group_list);
mutex_unlock(&iommu_probe_device_lock);
if (ret == -ENODEV)
ret = 0;
@ -2094,10 +2094,17 @@ static struct iommu_domain *__iommu_domain_alloc(const struct iommu_ops *ops,
else if (ops->domain_alloc)
domain = ops->domain_alloc(alloc_type);
else
return NULL;
return ERR_PTR(-EOPNOTSUPP);
/*
* Many domain_alloc ops now return ERR_PTR, make things easier for the
* driver by accepting ERR_PTR from all domain_alloc ops instead of
* having two rules.
*/
if (IS_ERR(domain))
return domain;
if (!domain)
return NULL;
return ERR_PTR(-ENOMEM);
domain->type = type;
/*
@ -2110,9 +2117,14 @@ static struct iommu_domain *__iommu_domain_alloc(const struct iommu_ops *ops,
if (!domain->ops)
domain->ops = ops->default_domain_ops;
if (iommu_is_dma_domain(domain) && iommu_get_dma_cookie(domain)) {
iommu_domain_free(domain);
domain = NULL;
if (iommu_is_dma_domain(domain)) {
int rc;
rc = iommu_get_dma_cookie(domain);
if (rc) {
iommu_domain_free(domain);
return ERR_PTR(rc);
}
}
return domain;
}
@ -2129,10 +2141,15 @@ __iommu_group_domain_alloc(struct iommu_group *group, unsigned int type)
struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus)
{
struct iommu_domain *domain;
if (bus == NULL || bus->iommu_ops == NULL)
return NULL;
return __iommu_domain_alloc(bus->iommu_ops, NULL,
domain = __iommu_domain_alloc(bus->iommu_ops, NULL,
IOMMU_DOMAIN_UNMANAGED);
if (IS_ERR(domain))
return NULL;
return domain;
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);
@ -3041,8 +3058,8 @@ static int iommu_setup_default_domain(struct iommu_group *group,
return -EINVAL;
dom = iommu_group_alloc_default_domain(group, req_type);
if (!dom)
return -ENODEV;
if (IS_ERR(dom))
return PTR_ERR(dom);
if (group->default_domain == dom)
return 0;
@ -3243,21 +3260,23 @@ void iommu_device_unuse_default_domain(struct device *dev)
static int __iommu_group_alloc_blocking_domain(struct iommu_group *group)
{
struct iommu_domain *domain;
if (group->blocking_domain)
return 0;
group->blocking_domain =
__iommu_group_domain_alloc(group, IOMMU_DOMAIN_BLOCKED);
if (!group->blocking_domain) {
domain = __iommu_group_domain_alloc(group, IOMMU_DOMAIN_BLOCKED);
if (IS_ERR(domain)) {
/*
* For drivers that do not yet understand IOMMU_DOMAIN_BLOCKED
* create an empty domain instead.
*/
group->blocking_domain = __iommu_group_domain_alloc(
group, IOMMU_DOMAIN_UNMANAGED);
if (!group->blocking_domain)
return -EINVAL;
domain = __iommu_group_domain_alloc(group,
IOMMU_DOMAIN_UNMANAGED);
if (IS_ERR(domain))
return PTR_ERR(domain);
}
group->blocking_domain = domain;
return 0;
}


@ -112,16 +112,20 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
const u32 *id)
{
const struct iommu_ops *ops = NULL;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct iommu_fwspec *fwspec;
int err = NO_IOMMU;
if (!master_np)
return NULL;
/* Serialise to make dev->iommu stable under our potential fwspec */
mutex_lock(&iommu_probe_device_lock);
fwspec = dev_iommu_fwspec_get(dev);
if (fwspec) {
if (fwspec->ops)
if (fwspec->ops) {
mutex_unlock(&iommu_probe_device_lock);
return fwspec->ops;
}
/* In the deferred case, start again from scratch */
iommu_fwspec_free(dev);
}
@ -155,6 +159,8 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
fwspec = dev_iommu_fwspec_get(dev);
ops = fwspec->ops;
}
mutex_unlock(&iommu_probe_device_lock);
/*
* If we have reason to believe the IOMMU driver missed the initial
* probe for dev, replay it to get things in order.
@ -191,7 +197,7 @@ iommu_resv_region_get_type(struct device *dev,
if (start == phys->start && end == phys->end)
return IOMMU_RESV_DIRECT;
dev_warn(dev, "treating non-direct mapping [%pr] -> [%pap-%pap] as reservation\n", &phys,
dev_warn(dev, "treating non-direct mapping [%pr] -> [%pap-%pap] as reservation\n", phys,
&start, &end);
return IOMMU_RESV_RESERVED;
}


@ -75,19 +75,6 @@ static ssize_t max_brightness_show(struct device *dev,
}
static DEVICE_ATTR_RO(max_brightness);
static ssize_t color_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
const char *color_text = "invalid";
struct led_classdev *led_cdev = dev_get_drvdata(dev);
if (led_cdev->color < LED_COLOR_ID_MAX)
color_text = led_colors[led_cdev->color];
return sysfs_emit(buf, "%s\n", color_text);
}
static DEVICE_ATTR_RO(color);
#ifdef CONFIG_LEDS_TRIGGERS
static BIN_ATTR(trigger, 0644, led_trigger_read, led_trigger_write, 0);
static struct bin_attribute *led_trigger_bin_attrs[] = {
@ -102,7 +89,6 @@ static const struct attribute_group led_trigger_group = {
static struct attribute *led_class_attrs[] = {
&dev_attr_brightness.attr,
&dev_attr_max_brightness.attr,
&dev_attr_color.attr,
NULL,
};


@ -293,16 +293,16 @@ static void btree_complete_write(struct btree *b, struct btree_write *w)
w->journal = NULL;
}
static void btree_node_write_unlock(struct closure *cl)
static CLOSURE_CALLBACK(btree_node_write_unlock)
{
struct btree *b = container_of(cl, struct btree, io);
closure_type(b, struct btree, io);
up(&b->io_mutex);
}
static void __btree_node_write_done(struct closure *cl)
static CLOSURE_CALLBACK(__btree_node_write_done)
{
struct btree *b = container_of(cl, struct btree, io);
closure_type(b, struct btree, io);
struct btree_write *w = btree_prev_write(b);
bch_bbio_free(b->bio, b->c);
@ -315,12 +315,12 @@ static void __btree_node_write_done(struct closure *cl)
closure_return_with_destructor(cl, btree_node_write_unlock);
}
static void btree_node_write_done(struct closure *cl)
static CLOSURE_CALLBACK(btree_node_write_done)
{
struct btree *b = container_of(cl, struct btree, io);
closure_type(b, struct btree, io);
bio_free_pages(b->bio);
__btree_node_write_done(cl);
__btree_node_write_done(&cl->work);
}
static void btree_node_write_endio(struct bio *bio)
@ -1522,7 +1522,7 @@ out_nocoalesce:
bch_keylist_free(&keylist);
for (i = 0; i < nodes; i++)
if (!IS_ERR(new_nodes[i])) {
if (!IS_ERR_OR_NULL(new_nodes[i])) {
btree_node_free(new_nodes[i]);
rw_unlock(true, new_nodes[i]);
}


@ -723,11 +723,11 @@ static void journal_write_endio(struct bio *bio)
closure_put(&w->c->journal.io);
}
static void journal_write(struct closure *cl);
static CLOSURE_CALLBACK(journal_write);
static void journal_write_done(struct closure *cl)
static CLOSURE_CALLBACK(journal_write_done)
{
struct journal *j = container_of(cl, struct journal, io);
closure_type(j, struct journal, io);
struct journal_write *w = (j->cur == j->w)
? &j->w[1]
: &j->w[0];
@ -736,19 +736,19 @@ static void journal_write_done(struct closure *cl)
continue_at_nobarrier(cl, journal_write, bch_journal_wq);
}
static void journal_write_unlock(struct closure *cl)
static CLOSURE_CALLBACK(journal_write_unlock)
__releases(&c->journal.lock)
{
struct cache_set *c = container_of(cl, struct cache_set, journal.io);
closure_type(c, struct cache_set, journal.io);
c->journal.io_in_flight = 0;
spin_unlock(&c->journal.lock);
}
static void journal_write_unlocked(struct closure *cl)
static CLOSURE_CALLBACK(journal_write_unlocked)
__releases(c->journal.lock)
{
struct cache_set *c = container_of(cl, struct cache_set, journal.io);
closure_type(c, struct cache_set, journal.io);
struct cache *ca = c->cache;
struct journal_write *w = c->journal.cur;
struct bkey *k = &c->journal.key;
@ -823,12 +823,12 @@ static void journal_write_unlocked(struct closure *cl)
continue_at(cl, journal_write_done, NULL);
}
static void journal_write(struct closure *cl)
static CLOSURE_CALLBACK(journal_write)
{
struct cache_set *c = container_of(cl, struct cache_set, journal.io);
closure_type(c, struct cache_set, journal.io);
spin_lock(&c->journal.lock);
journal_write_unlocked(cl);
journal_write_unlocked(&cl->work);
}
static void journal_try_write(struct cache_set *c)


@ -35,16 +35,16 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)
/* Moving GC - IO loop */
static void moving_io_destructor(struct closure *cl)
static CLOSURE_CALLBACK(moving_io_destructor)
{
struct moving_io *io = container_of(cl, struct moving_io, cl);
closure_type(io, struct moving_io, cl);
kfree(io);
}
static void write_moving_finish(struct closure *cl)
static CLOSURE_CALLBACK(write_moving_finish)
{
struct moving_io *io = container_of(cl, struct moving_io, cl);
closure_type(io, struct moving_io, cl);
struct bio *bio = &io->bio.bio;
bio_free_pages(bio);
@ -89,9 +89,9 @@ static void moving_init(struct moving_io *io)
bch_bio_map(bio, NULL);
}
static void write_moving(struct closure *cl)
static CLOSURE_CALLBACK(write_moving)
{
struct moving_io *io = container_of(cl, struct moving_io, cl);
closure_type(io, struct moving_io, cl);
struct data_insert_op *op = &io->op;
if (!op->status) {
@ -113,9 +113,9 @@ static void write_moving(struct closure *cl)
continue_at(cl, write_moving_finish, op->wq);
}
static void read_moving_submit(struct closure *cl)
static CLOSURE_CALLBACK(read_moving_submit)
{
struct moving_io *io = container_of(cl, struct moving_io, cl);
closure_type(io, struct moving_io, cl);
struct bio *bio = &io->bio.bio;
bch_submit_bbio(bio, io->op.c, &io->w->key, 0);


@ -25,7 +25,7 @@
struct kmem_cache *bch_search_cache;
static void bch_data_insert_start(struct closure *cl);
static CLOSURE_CALLBACK(bch_data_insert_start);
static unsigned int cache_mode(struct cached_dev *dc)
{
@ -55,9 +55,9 @@ static void bio_csum(struct bio *bio, struct bkey *k)
/* Insert data into cache */
static void bch_data_insert_keys(struct closure *cl)
static CLOSURE_CALLBACK(bch_data_insert_keys)
{
struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
closure_type(op, struct data_insert_op, cl);
atomic_t *journal_ref = NULL;
struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
int ret;
@ -136,9 +136,9 @@ out:
continue_at(cl, bch_data_insert_keys, op->wq);
}
static void bch_data_insert_error(struct closure *cl)
static CLOSURE_CALLBACK(bch_data_insert_error)
{
struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
closure_type(op, struct data_insert_op, cl);
/*
* Our data write just errored, which means we've got a bunch of keys to
@ -163,7 +163,7 @@ static void bch_data_insert_error(struct closure *cl)
op->insert_keys.top = dst;
bch_data_insert_keys(cl);
bch_data_insert_keys(&cl->work);
}
static void bch_data_insert_endio(struct bio *bio)
@ -184,9 +184,9 @@ static void bch_data_insert_endio(struct bio *bio)
bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
}
static void bch_data_insert_start(struct closure *cl)
static CLOSURE_CALLBACK(bch_data_insert_start)
{
struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
closure_type(op, struct data_insert_op, cl);
struct bio *bio = op->bio, *n;
if (op->bypass)
@ -305,16 +305,16 @@ err:
* If op->bypass is true, instead of inserting the data it invalidates the
* region of the cache represented by op->bio and op->inode.
*/
void bch_data_insert(struct closure *cl)
CLOSURE_CALLBACK(bch_data_insert)
{
struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
closure_type(op, struct data_insert_op, cl);
trace_bcache_write(op->c, op->inode, op->bio,
op->writeback, op->bypass);
bch_keylist_init(&op->insert_keys);
bio_get(op->bio);
bch_data_insert_start(cl);
bch_data_insert_start(&cl->work);
}
/*
@ -575,9 +575,9 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
return n == bio ? MAP_DONE : MAP_CONTINUE;
}
static void cache_lookup(struct closure *cl)
static CLOSURE_CALLBACK(cache_lookup)
{
struct search *s = container_of(cl, struct search, iop.cl);
closure_type(s, struct search, iop.cl);
struct bio *bio = &s->bio.bio;
struct cached_dev *dc;
int ret;
@ -698,9 +698,9 @@ static void do_bio_hook(struct search *s,
bio_cnt_set(bio, 3);
}
static void search_free(struct closure *cl)
static CLOSURE_CALLBACK(search_free)
{
struct search *s = container_of(cl, struct search, cl);
closure_type(s, struct search, cl);
atomic_dec(&s->iop.c->search_inflight);
@ -749,20 +749,20 @@ static inline struct search *search_alloc(struct bio *bio,
/* Cached devices */
static void cached_dev_bio_complete(struct closure *cl)
static CLOSURE_CALLBACK(cached_dev_bio_complete)
{
struct search *s = container_of(cl, struct search, cl);
closure_type(s, struct search, cl);
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
cached_dev_put(dc);
search_free(cl);
search_free(&cl->work);
}
/* Process reads */
static void cached_dev_read_error_done(struct closure *cl)
static CLOSURE_CALLBACK(cached_dev_read_error_done)
{
struct search *s = container_of(cl, struct search, cl);
closure_type(s, struct search, cl);
if (s->iop.replace_collision)
bch_mark_cache_miss_collision(s->iop.c, s->d);
@ -770,12 +770,12 @@ static void cached_dev_read_error_done(struct closure *cl)
if (s->iop.bio)
bio_free_pages(s->iop.bio);
cached_dev_bio_complete(cl);
cached_dev_bio_complete(&cl->work);
}
static void cached_dev_read_error(struct closure *cl)
static CLOSURE_CALLBACK(cached_dev_read_error)
{
struct search *s = container_of(cl, struct search, cl);
closure_type(s, struct search, cl);
struct bio *bio = &s->bio.bio;
/*
@ -801,9 +801,9 @@ static void cached_dev_read_error(struct closure *cl)
continue_at(cl, cached_dev_read_error_done, NULL);
}
static void cached_dev_cache_miss_done(struct closure *cl)
static CLOSURE_CALLBACK(cached_dev_cache_miss_done)
{
struct search *s = container_of(cl, struct search, cl);
closure_type(s, struct search, cl);
struct bcache_device *d = s->d;
if (s->iop.replace_collision)
@ -812,13 +812,13 @@ static void cached_dev_cache_miss_done(struct closure *cl)
if (s->iop.bio)
bio_free_pages(s->iop.bio);
cached_dev_bio_complete(cl);
cached_dev_bio_complete(&cl->work);
closure_put(&d->cl);
}
static void cached_dev_read_done(struct closure *cl)
static CLOSURE_CALLBACK(cached_dev_read_done)
{
struct search *s = container_of(cl, struct search, cl);
closure_type(s, struct search, cl);
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
/*
@ -858,9 +858,9 @@ static void cached_dev_read_done(struct closure *cl)
continue_at(cl, cached_dev_cache_miss_done, NULL);
}
static void cached_dev_read_done_bh(struct closure *cl)
static CLOSURE_CALLBACK(cached_dev_read_done_bh)
{
struct search *s = container_of(cl, struct search, cl);
closure_type(s, struct search, cl);
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
bch_mark_cache_accounting(s->iop.c, s->d,
@ -955,13 +955,13 @@ static void cached_dev_read(struct cached_dev *dc, struct search *s)
/* Process writes */
static void cached_dev_write_complete(struct closure *cl)
static CLOSURE_CALLBACK(cached_dev_write_complete)
{
struct search *s = container_of(cl, struct search, cl);
closure_type(s, struct search, cl);
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
up_read_non_owner(&dc->writeback_lock);
cached_dev_bio_complete(cl);
cached_dev_bio_complete(&cl->work);
}
static void cached_dev_write(struct cached_dev *dc, struct search *s)
@ -1048,9 +1048,9 @@ insert_data:
continue_at(cl, cached_dev_write_complete, NULL);
}
static void cached_dev_nodata(struct closure *cl)
static CLOSURE_CALLBACK(cached_dev_nodata)
{
struct search *s = container_of(cl, struct search, cl);
closure_type(s, struct search, cl);
struct bio *bio = &s->bio.bio;
if (s->iop.flush_journal)
@ -1265,9 +1265,9 @@ static int flash_dev_cache_miss(struct btree *b, struct search *s,
return MAP_CONTINUE;
}
static void flash_dev_nodata(struct closure *cl)
static CLOSURE_CALLBACK(flash_dev_nodata)
{
struct search *s = container_of(cl, struct search, cl);
closure_type(s, struct search, cl);
if (s->iop.flush_journal)
bch_journal_meta(s->iop.c, cl);

View File

@ -34,7 +34,7 @@ struct data_insert_op {
};
unsigned int bch_get_congested(const struct cache_set *c);
void bch_data_insert(struct closure *cl);
CLOSURE_CALLBACK(bch_data_insert);
void bch_cached_dev_request_init(struct cached_dev *dc);
void cached_dev_submit_bio(struct bio *bio);

View File

@ -327,9 +327,9 @@ static void __write_super(struct cache_sb *sb, struct cache_sb_disk *out,
submit_bio(bio);
}
static void bch_write_bdev_super_unlock(struct closure *cl)
static CLOSURE_CALLBACK(bch_write_bdev_super_unlock)
{
struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);
closure_type(dc, struct cached_dev, sb_write);
up(&dc->sb_write_mutex);
}
@ -363,9 +363,9 @@ static void write_super_endio(struct bio *bio)
closure_put(&ca->set->sb_write);
}
static void bcache_write_super_unlock(struct closure *cl)
static CLOSURE_CALLBACK(bcache_write_super_unlock)
{
struct cache_set *c = container_of(cl, struct cache_set, sb_write);
closure_type(c, struct cache_set, sb_write);
up(&c->sb_write_mutex);
}
@ -407,9 +407,9 @@ static void uuid_endio(struct bio *bio)
closure_put(cl);
}
static void uuid_io_unlock(struct closure *cl)
static CLOSURE_CALLBACK(uuid_io_unlock)
{
struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
closure_type(c, struct cache_set, uuid_write);
up(&c->uuid_write_mutex);
}
@ -1344,9 +1344,9 @@ void bch_cached_dev_release(struct kobject *kobj)
module_put(THIS_MODULE);
}
static void cached_dev_free(struct closure *cl)
static CLOSURE_CALLBACK(cached_dev_free)
{
struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
closure_type(dc, struct cached_dev, disk.cl);
if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
cancel_writeback_rate_update_dwork(dc);
@ -1378,9 +1378,9 @@ static void cached_dev_free(struct closure *cl)
kobject_put(&dc->disk.kobj);
}
static void cached_dev_flush(struct closure *cl)
static CLOSURE_CALLBACK(cached_dev_flush)
{
struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
closure_type(dc, struct cached_dev, disk.cl);
struct bcache_device *d = &dc->disk;
mutex_lock(&bch_register_lock);
@ -1499,9 +1499,9 @@ void bch_flash_dev_release(struct kobject *kobj)
kfree(d);
}
static void flash_dev_free(struct closure *cl)
static CLOSURE_CALLBACK(flash_dev_free)
{
struct bcache_device *d = container_of(cl, struct bcache_device, cl);
closure_type(d, struct bcache_device, cl);
mutex_lock(&bch_register_lock);
atomic_long_sub(bcache_dev_sectors_dirty(d),
@ -1512,9 +1512,9 @@ static void flash_dev_free(struct closure *cl)
kobject_put(&d->kobj);
}
static void flash_dev_flush(struct closure *cl)
static CLOSURE_CALLBACK(flash_dev_flush)
{
struct bcache_device *d = container_of(cl, struct bcache_device, cl);
closure_type(d, struct bcache_device, cl);
mutex_lock(&bch_register_lock);
bcache_device_unlink(d);
@ -1670,9 +1670,9 @@ void bch_cache_set_release(struct kobject *kobj)
module_put(THIS_MODULE);
}
static void cache_set_free(struct closure *cl)
static CLOSURE_CALLBACK(cache_set_free)
{
struct cache_set *c = container_of(cl, struct cache_set, cl);
closure_type(c, struct cache_set, cl);
struct cache *ca;
debugfs_remove(c->debug);
@ -1711,9 +1711,9 @@ static void cache_set_free(struct closure *cl)
kobject_put(&c->kobj);
}
static void cache_set_flush(struct closure *cl)
static CLOSURE_CALLBACK(cache_set_flush)
{
struct cache_set *c = container_of(cl, struct cache_set, caching);
closure_type(c, struct cache_set, caching);
struct cache *ca = c->cache;
struct btree *b;
@ -1808,9 +1808,9 @@ static void conditional_stop_bcache_device(struct cache_set *c,
}
}
static void __cache_set_unregister(struct closure *cl)
static CLOSURE_CALLBACK(__cache_set_unregister)
{
struct cache_set *c = container_of(cl, struct cache_set, caching);
closure_type(c, struct cache_set, caching);
struct cached_dev *dc;
struct bcache_device *d;
size_t i;

View File

@ -341,16 +341,16 @@ static void dirty_init(struct keybuf_key *w)
bch_bio_map(bio, NULL);
}
static void dirty_io_destructor(struct closure *cl)
static CLOSURE_CALLBACK(dirty_io_destructor)
{
struct dirty_io *io = container_of(cl, struct dirty_io, cl);
closure_type(io, struct dirty_io, cl);
kfree(io);
}
static void write_dirty_finish(struct closure *cl)
static CLOSURE_CALLBACK(write_dirty_finish)
{
struct dirty_io *io = container_of(cl, struct dirty_io, cl);
closure_type(io, struct dirty_io, cl);
struct keybuf_key *w = io->bio.bi_private;
struct cached_dev *dc = io->dc;
@ -400,9 +400,9 @@ static void dirty_endio(struct bio *bio)
closure_put(&io->cl);
}
static void write_dirty(struct closure *cl)
static CLOSURE_CALLBACK(write_dirty)
{
struct dirty_io *io = container_of(cl, struct dirty_io, cl);
closure_type(io, struct dirty_io, cl);
struct keybuf_key *w = io->bio.bi_private;
struct cached_dev *dc = io->dc;
@ -462,9 +462,9 @@ static void read_dirty_endio(struct bio *bio)
dirty_endio(bio);
}
static void read_dirty_submit(struct closure *cl)
static CLOSURE_CALLBACK(read_dirty_submit)
{
struct dirty_io *io = container_of(cl, struct dirty_io, cl);
closure_type(io, struct dirty_io, cl);
closure_bio_submit(io->dc->disk.c, &io->bio, cl);

View File

@ -434,7 +434,7 @@ static struct bio *clone_bio(struct dm_target *ti, struct flakey_c *fc, struct b
remaining_size = size;
order = MAX_ORDER - 1;
order = MAX_ORDER;
while (remaining_size) {
struct page *pages;
unsigned size_to_add, to_copy;

View File

@ -24,7 +24,8 @@ bool verity_fec_is_enabled(struct dm_verity *v)
*/
static inline struct dm_verity_fec_io *fec_io(struct dm_verity_io *io)
{
return (struct dm_verity_fec_io *) verity_io_digest_end(io->v, io);
return (struct dm_verity_fec_io *)
((char *)io + io->v->ti->per_io_data_size - sizeof(struct dm_verity_fec_io));
}
/*

View File

@ -642,7 +642,6 @@ static void verity_work(struct work_struct *w)
io->in_tasklet = false;
verity_fec_init_io(io);
verity_finish_io(io, errno_to_blk_status(verity_verify_io(io)));
}
@ -668,7 +667,9 @@ static void verity_end_io(struct bio *bio)
struct dm_verity_io *io = bio->bi_private;
if (bio->bi_status &&
(!verity_fec_is_enabled(io->v) || verity_is_system_shutting_down())) {
(!verity_fec_is_enabled(io->v) ||
verity_is_system_shutting_down() ||
(bio->bi_opf & REQ_RAHEAD))) {
verity_finish_io(io, bio->bi_status);
return;
}
@ -792,6 +793,8 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
bio->bi_private = io;
io->iter = bio->bi_iter;
verity_fec_init_io(io);
verity_submit_prefetch(v, io);
submit_bio_noacct(bio);

View File

@ -115,12 +115,6 @@ static inline u8 *verity_io_want_digest(struct dm_verity *v,
return (u8 *)(io + 1) + v->ahash_reqsize + v->digest_size;
}
static inline u8 *verity_io_digest_end(struct dm_verity *v,
struct dm_verity_io *io)
{
return verity_io_want_digest(v, io) + v->digest_size;
}
extern int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
struct bvec_iter *iter,
int (*process)(struct dm_verity *v,

View File

@ -2,6 +2,7 @@
config VIDEO_MGB4
tristate "Digiteq Automotive MGB4 support"
depends on VIDEO_DEV && PCI && I2C && DMADEVICES && SPI && MTD && IIO
depends on COMMON_CLK
select VIDEOBUF2_DMA_SG
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER

View File

@ -42,6 +42,10 @@
#define MGB4_USER_IRQS 16
#define DIGITEQ_VID 0x1ed8
#define T100_DID 0x0101
#define T200_DID 0x0201
ATTRIBUTE_GROUPS(mgb4_pci);
static int flashid;
@ -151,7 +155,7 @@ static struct spi_master *get_spi_adap(struct platform_device *pdev)
return dev ? container_of(dev, struct spi_master, dev) : NULL;
}
static int init_spi(struct mgb4_dev *mgbdev)
static int init_spi(struct mgb4_dev *mgbdev, u32 devid)
{
struct resource spi_resources[] = {
{
@ -213,8 +217,13 @@ static int init_spi(struct mgb4_dev *mgbdev)
snprintf(mgbdev->fw_part_name, sizeof(mgbdev->fw_part_name),
"mgb4-fw.%d", flashid);
mgbdev->partitions[0].name = mgbdev->fw_part_name;
mgbdev->partitions[0].size = 0x400000;
mgbdev->partitions[0].offset = 0x400000;
if (devid == T200_DID) {
mgbdev->partitions[0].size = 0x950000;
mgbdev->partitions[0].offset = 0x1000000;
} else {
mgbdev->partitions[0].size = 0x400000;
mgbdev->partitions[0].offset = 0x400000;
}
mgbdev->partitions[0].mask_flags = 0;
snprintf(mgbdev->data_part_name, sizeof(mgbdev->data_part_name),
@ -551,7 +560,7 @@ static int mgb4_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_video_regs;
/* SPI FLASH */
rv = init_spi(mgbdev);
rv = init_spi(mgbdev, id->device);
if (rv < 0)
goto err_cmt_regs;
@ -666,7 +675,8 @@ static void mgb4_remove(struct pci_dev *pdev)
}
static const struct pci_device_id mgb4_pci_ids[] = {
{ PCI_DEVICE(0x1ed8, 0x0101), },
{ PCI_DEVICE(DIGITEQ_VID, T100_DID), },
{ PCI_DEVICE(DIGITEQ_VID, T200_DID), },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, mgb4_pci_ids);

View File

@ -373,7 +373,7 @@ int vsp1_pipeline_stop(struct vsp1_pipeline *pipe)
(7 << VI6_DPR_SMPPT_TGW_SHIFT) |
(VI6_DPR_NODE_UNUSED << VI6_DPR_SMPPT_PT_SHIFT));
v4l2_subdev_call(&pipe->output->entity.subdev, video, s_stream, 0);
vsp1_wpf_stop(pipe->output);
return ret;
}

View File

@ -43,14 +43,6 @@ static inline void vsp1_rpf_write(struct vsp1_rwpf *rpf,
data);
}
/* -----------------------------------------------------------------------------
* V4L2 Subdevice Operations
*/
static const struct v4l2_subdev_ops rpf_ops = {
.pad = &vsp1_rwpf_pad_ops,
};
/* -----------------------------------------------------------------------------
* VSP1 Entity Operations
*/
@ -411,7 +403,7 @@ struct vsp1_rwpf *vsp1_rpf_create(struct vsp1_device *vsp1, unsigned int index)
rpf->entity.index = index;
sprintf(name, "rpf.%u", index);
ret = vsp1_entity_init(vsp1, &rpf->entity, name, 2, &rpf_ops,
ret = vsp1_entity_init(vsp1, &rpf->entity, name, 2, &vsp1_rwpf_subdev_ops,
MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER);
if (ret < 0)
return ERR_PTR(ret);

View File

@ -24,7 +24,7 @@ struct v4l2_rect *vsp1_rwpf_get_crop(struct vsp1_rwpf *rwpf,
}
/* -----------------------------------------------------------------------------
* V4L2 Subdevice Pad Operations
* V4L2 Subdevice Operations
*/
static int vsp1_rwpf_enum_mbus_code(struct v4l2_subdev *subdev,
@ -243,7 +243,7 @@ done:
return ret;
}
const struct v4l2_subdev_pad_ops vsp1_rwpf_pad_ops = {
static const struct v4l2_subdev_pad_ops vsp1_rwpf_pad_ops = {
.init_cfg = vsp1_entity_init_cfg,
.enum_mbus_code = vsp1_rwpf_enum_mbus_code,
.enum_frame_size = vsp1_rwpf_enum_frame_size,
@ -253,6 +253,10 @@ const struct v4l2_subdev_pad_ops vsp1_rwpf_pad_ops = {
.set_selection = vsp1_rwpf_set_selection,
};
const struct v4l2_subdev_ops vsp1_rwpf_subdev_ops = {
.pad = &vsp1_rwpf_pad_ops,
};
/* -----------------------------------------------------------------------------
* Controls
*/

View File

@ -79,9 +79,11 @@ static inline struct vsp1_rwpf *entity_to_rwpf(struct vsp1_entity *entity)
struct vsp1_rwpf *vsp1_rpf_create(struct vsp1_device *vsp1, unsigned int index);
struct vsp1_rwpf *vsp1_wpf_create(struct vsp1_device *vsp1, unsigned int index);
void vsp1_wpf_stop(struct vsp1_rwpf *wpf);
int vsp1_rwpf_init_ctrls(struct vsp1_rwpf *rwpf, unsigned int ncontrols);
extern const struct v4l2_subdev_pad_ops vsp1_rwpf_pad_ops;
extern const struct v4l2_subdev_ops vsp1_rwpf_subdev_ops;
struct v4l2_rect *vsp1_rwpf_get_crop(struct vsp1_rwpf *rwpf,
struct v4l2_subdev_state *sd_state);

View File

@ -186,17 +186,13 @@ static int wpf_init_controls(struct vsp1_rwpf *wpf)
}
/* -----------------------------------------------------------------------------
* V4L2 Subdevice Core Operations
* VSP1 Entity Operations
*/
static int wpf_s_stream(struct v4l2_subdev *subdev, int enable)
void vsp1_wpf_stop(struct vsp1_rwpf *wpf)
{
struct vsp1_rwpf *wpf = to_rwpf(subdev);
struct vsp1_device *vsp1 = wpf->entity.vsp1;
if (enable)
return 0;
/*
* Write to registers directly when stopping the stream as there will be
* no pipeline run to apply the display list.
@ -204,27 +200,8 @@ static int wpf_s_stream(struct v4l2_subdev *subdev, int enable)
vsp1_write(vsp1, VI6_WPF_IRQ_ENB(wpf->entity.index), 0);
vsp1_write(vsp1, wpf->entity.index * VI6_WPF_OFFSET +
VI6_WPF_SRCRPF, 0);
return 0;
}
/* -----------------------------------------------------------------------------
* V4L2 Subdevice Operations
*/
static const struct v4l2_subdev_video_ops wpf_video_ops = {
.s_stream = wpf_s_stream,
};
static const struct v4l2_subdev_ops wpf_ops = {
.video = &wpf_video_ops,
.pad = &vsp1_rwpf_pad_ops,
};
/* -----------------------------------------------------------------------------
* VSP1 Entity Operations
*/
static void vsp1_wpf_destroy(struct vsp1_entity *entity)
{
struct vsp1_rwpf *wpf = entity_to_rwpf(entity);
@ -583,7 +560,7 @@ struct vsp1_rwpf *vsp1_wpf_create(struct vsp1_device *vsp1, unsigned int index)
wpf->entity.index = index;
sprintf(name, "wpf.%u", index);
ret = vsp1_entity_init(vsp1, &wpf->entity, name, 2, &wpf_ops,
ret = vsp1_entity_init(vsp1, &wpf->entity, name, 2, &vsp1_rwpf_subdev_ops,
MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER);
if (ret < 0)
return ERR_PTR(ret);

View File

@ -1482,6 +1482,8 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
blk_mq_requeue_request(req, true);
else
__blk_mq_end_request(req, BLK_STS_OK);
} else if (mq->in_recovery) {
blk_mq_requeue_request(req, true);
} else {
blk_mq_end_request(req, BLK_STS_OK);
}

View File

@ -551,7 +551,9 @@ int mmc_cqe_recovery(struct mmc_host *host)
cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
mmc_wait_for_cmd(host, &cmd, 0);
mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
mmc_poll_for_busy(host->card, MMC_CQE_RECOVERY_TIMEOUT, true, MMC_BUSY_IO);
memset(&cmd, 0, sizeof(cmd));
cmd.opcode = MMC_CMDQ_TASK_MGMT;
@ -559,10 +561,13 @@ int mmc_cqe_recovery(struct mmc_host *host)
cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
err = mmc_wait_for_cmd(host, &cmd, 0);
err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
host->cqe_ops->cqe_recovery_finish(host);
if (err)
err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
mmc_retune_release(host);
return err;

View File

@ -942,8 +942,8 @@ static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
ret = cqhci_tasks_cleared(cq_host);
if (!ret)
pr_debug("%s: cqhci: Failed to clear tasks\n",
mmc_hostname(mmc));
pr_warn("%s: cqhci: Failed to clear tasks\n",
mmc_hostname(mmc));
return ret;
}
@ -976,7 +976,7 @@ static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
ret = cqhci_halted(cq_host);
if (!ret)
pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));
pr_warn("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));
return ret;
}
@ -984,10 +984,10 @@ static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
/*
* After halting we expect to be able to use the command line. We interpret the
* failure to halt to mean the data lines might still be in use (and the upper
* layers will need to send a STOP command), so we set the timeout based on a
* generous command timeout.
* layers will need to send a STOP command), however failing to halt complicates
* the recovery, so set a timeout that would reasonably allow I/O to complete.
*/
#define CQHCI_START_HALT_TIMEOUT 5
#define CQHCI_START_HALT_TIMEOUT 500
static void cqhci_recovery_start(struct mmc_host *mmc)
{
@ -1075,28 +1075,28 @@ static void cqhci_recovery_finish(struct mmc_host *mmc)
ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
ok = false;
/*
* The specification contradicts itself, by saying that tasks cannot be
* cleared if CQHCI does not halt, but if CQHCI does not halt, it should
* be disabled/re-enabled, but not to disable before clearing tasks.
* Have a go anyway.
*/
if (!ok) {
pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc));
cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
cqcfg &= ~CQHCI_ENABLE;
cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
cqcfg |= CQHCI_ENABLE;
cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
/* Be sure that there are no tasks */
ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
ok = false;
WARN_ON(!ok);
}
if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
ok = false;
/* Disable to make sure tasks really are cleared */
cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
cqcfg &= ~CQHCI_ENABLE;
cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
cqcfg |= CQHCI_ENABLE;
cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
if (!ok)
cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT);
cqhci_recover_mrqs(cq_host);

View File

@ -1189,6 +1189,32 @@ static void gl9763e_hs400_enhanced_strobe(struct mmc_host *mmc,
sdhci_writel(host, val, SDHCI_GLI_9763E_HS400_ES_REG);
}
static void gl9763e_set_low_power_negotiation(struct sdhci_pci_slot *slot,
bool enable)
{
struct pci_dev *pdev = slot->chip->pdev;
u32 value;
pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
value &= ~GLI_9763E_VHS_REV;
value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_W);
pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
pci_read_config_dword(pdev, PCIE_GLI_9763E_CFG, &value);
if (enable)
value &= ~GLI_9763E_CFG_LPSN_DIS;
else
value |= GLI_9763E_CFG_LPSN_DIS;
pci_write_config_dword(pdev, PCIE_GLI_9763E_CFG, value);
pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
value &= ~GLI_9763E_VHS_REV;
value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_R);
pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
}
static void sdhci_set_gl9763e_signaling(struct sdhci_host *host,
unsigned int timing)
{
@ -1297,6 +1323,9 @@ static int gl9763e_add_host(struct sdhci_pci_slot *slot)
if (ret)
goto cleanup;
/* Disable LPM negotiation to avoid entering L1 state. */
gl9763e_set_low_power_negotiation(slot, false);
return 0;
cleanup:
@ -1340,31 +1369,6 @@ static void gli_set_gl9763e(struct sdhci_pci_slot *slot)
}
#ifdef CONFIG_PM
static void gl9763e_set_low_power_negotiation(struct sdhci_pci_slot *slot, bool enable)
{
struct pci_dev *pdev = slot->chip->pdev;
u32 value;
pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
value &= ~GLI_9763E_VHS_REV;
value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_W);
pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
pci_read_config_dword(pdev, PCIE_GLI_9763E_CFG, &value);
if (enable)
value &= ~GLI_9763E_CFG_LPSN_DIS;
else
value |= GLI_9763E_CFG_LPSN_DIS;
pci_write_config_dword(pdev, PCIE_GLI_9763E_CFG, value);
pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
value &= ~GLI_9763E_VHS_REV;
value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_R);
pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
}
static int gl9763e_runtime_suspend(struct sdhci_pci_chip *chip)
{
struct sdhci_pci_slot *slot = chip->slots[0];

View File

@ -416,12 +416,33 @@ static void sdhci_sprd_request_done(struct sdhci_host *host,
mmc_request_done(host->mmc, mrq);
}
static void sdhci_sprd_set_power(struct sdhci_host *host, unsigned char mode,
unsigned short vdd)
{
struct mmc_host *mmc = host->mmc;
switch (mode) {
case MMC_POWER_OFF:
mmc_regulator_set_ocr(host->mmc, mmc->supply.vmmc, 0);
mmc_regulator_disable_vqmmc(mmc);
break;
case MMC_POWER_ON:
mmc_regulator_enable_vqmmc(mmc);
break;
case MMC_POWER_UP:
mmc_regulator_set_ocr(host->mmc, mmc->supply.vmmc, vdd);
break;
}
}
static struct sdhci_ops sdhci_sprd_ops = {
.read_l = sdhci_sprd_readl,
.write_l = sdhci_sprd_writel,
.write_w = sdhci_sprd_writew,
.write_b = sdhci_sprd_writeb,
.set_clock = sdhci_sprd_set_clock,
.set_power = sdhci_sprd_set_power,
.get_max_clock = sdhci_sprd_get_max_clock,
.get_min_clock = sdhci_sprd_get_min_clock,
.set_bus_width = sdhci_set_bus_width,
@ -823,6 +844,10 @@ static int sdhci_sprd_probe(struct platform_device *pdev)
host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
SDHCI_SUPPORT_DDR50);
ret = mmc_regulator_get_supply(host->mmc);
if (ret)
goto pm_runtime_disable;
ret = sdhci_setup_host(host);
if (ret)
goto pm_runtime_disable;

View File

@ -577,6 +577,18 @@ static void mv88e6250_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100;
}
static void mv88e6351_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
struct phylink_config *config)
{
unsigned long *supported = config->supported_interfaces;
/* Translate the default cmode */
mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
MAC_1000FD;
}
static int mv88e6352_get_port4_serdes_cmode(struct mv88e6xxx_chip *chip)
{
u16 reg, val;
@ -3880,7 +3892,8 @@ static int mv88e6xxx_port_setup(struct dsa_switch *ds, int port)
struct mv88e6xxx_chip *chip = ds->priv;
int err;
if (chip->info->ops->pcs_ops->pcs_init) {
if (chip->info->ops->pcs_ops &&
chip->info->ops->pcs_ops->pcs_init) {
err = chip->info->ops->pcs_ops->pcs_init(chip, port);
if (err)
return err;
@ -3895,7 +3908,8 @@ static void mv88e6xxx_port_teardown(struct dsa_switch *ds, int port)
mv88e6xxx_teardown_devlink_regions_port(ds, port);
if (chip->info->ops->pcs_ops->pcs_teardown)
if (chip->info->ops->pcs_ops &&
chip->info->ops->pcs_ops->pcs_teardown)
chip->info->ops->pcs_ops->pcs_teardown(chip, port);
}
@ -4340,7 +4354,7 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.stu_getnext = mv88e6352_g1_stu_getnext,
.stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.phylink_get_caps = mv88e6185_phylink_get_caps,
.phylink_get_caps = mv88e6351_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6172_ops = {
@ -4440,7 +4454,7 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.stu_getnext = mv88e6352_g1_stu_getnext,
.stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.phylink_get_caps = mv88e6185_phylink_get_caps,
.phylink_get_caps = mv88e6351_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6176_ops = {
@ -5069,7 +5083,7 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.stu_getnext = mv88e6352_g1_stu_getnext,
.stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.phylink_get_caps = mv88e6185_phylink_get_caps,
.phylink_get_caps = mv88e6351_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6351_ops = {
@ -5117,7 +5131,7 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
.stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
.phylink_get_caps = mv88e6185_phylink_get_caps,
.phylink_get_caps = mv88e6351_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6352_ops = {

View File

@ -516,8 +516,6 @@ struct sk_buff *dpaa2_eth_alloc_skb(struct dpaa2_eth_priv *priv,
memcpy(skb->data, fd_vaddr + fd_offset, fd_length);
dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd));
return skb;
}
@ -589,6 +587,7 @@ void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
struct rtnl_link_stats64 *percpu_stats;
struct dpaa2_eth_drv_stats *percpu_extras;
struct device *dev = priv->net_dev->dev.parent;
bool recycle_rx_buf = false;
void *buf_data;
u32 xdp_act;
@ -618,6 +617,8 @@ void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
dma_unmap_page(dev, addr, priv->rx_buf_size,
DMA_BIDIRECTIONAL);
skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
} else {
recycle_rx_buf = true;
}
} else if (fd_format == dpaa2_fd_sg) {
WARN_ON(priv->xdp_prog);
@ -637,6 +638,9 @@ void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
goto err_build_skb;
dpaa2_eth_receive_skb(priv, ch, fd, vaddr, fq, percpu_stats, skb);
if (recycle_rx_buf)
dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd));
return;
err_build_skb:
@ -1073,14 +1077,12 @@ static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
dma_addr_t addr;
buffer_start = skb->data - dpaa2_eth_needed_headroom(skb);
/* If there's enough room to align the FD address, do it.
* It will help hardware optimize accesses.
*/
aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
DPAA2_ETH_TX_BUF_ALIGN);
if (aligned_start >= skb->head)
buffer_start = aligned_start;
else
return -ENOMEM;
/* Store a backpointer to the skb at the beginning of the buffer
* (in the private data area) such that we can release it
@ -4967,6 +4969,8 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
if (err)
goto err_dl_port_add;
net_dev->needed_headroom = DPAA2_ETH_SWA_SIZE + DPAA2_ETH_TX_BUF_ALIGN;
err = register_netdev(net_dev);
if (err < 0) {
dev_err(dev, "register_netdev() failed\n");

View File

@ -740,7 +740,7 @@ static inline bool dpaa2_eth_rx_pause_enabled(u64 link_options)
static inline unsigned int dpaa2_eth_needed_headroom(struct sk_buff *skb)
{
unsigned int headroom = DPAA2_ETH_SWA_SIZE;
unsigned int headroom = DPAA2_ETH_SWA_SIZE + DPAA2_ETH_TX_BUF_ALIGN;
/* If we don't have an skb (e.g. XDP buffer), we only need space for
* the software annotation area

View File

@ -569,6 +569,50 @@ resume_traffic:
dev_dbg(dev, "Problem restarting traffic for LAG node move\n");
}
/**
* ice_lag_build_netdev_list - populate the lag struct's netdev list
* @lag: local lag struct
* @ndlist: pointer to netdev list to populate
*/
static void ice_lag_build_netdev_list(struct ice_lag *lag,
struct ice_lag_netdev_list *ndlist)
{
struct ice_lag_netdev_list *nl;
struct net_device *tmp_nd;
INIT_LIST_HEAD(&ndlist->node);
rcu_read_lock();
for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
nl = kzalloc(sizeof(*nl), GFP_ATOMIC);
if (!nl)
break;
nl->netdev = tmp_nd;
list_add(&nl->node, &ndlist->node);
}
rcu_read_unlock();
lag->netdev_head = &ndlist->node;
}
/**
* ice_lag_destroy_netdev_list - free lag struct's netdev list
* @lag: pointer to local lag struct
* @ndlist: pointer to lag struct netdev list
*/
static void ice_lag_destroy_netdev_list(struct ice_lag *lag,
struct ice_lag_netdev_list *ndlist)
{
struct ice_lag_netdev_list *entry, *n;
rcu_read_lock();
list_for_each_entry_safe(entry, n, &ndlist->node, node) {
list_del(&entry->node);
kfree(entry);
}
rcu_read_unlock();
lag->netdev_head = NULL;
}
/**
* ice_lag_move_single_vf_nodes - Move Tx scheduling nodes for single VF
* @lag: primary interface LAG struct
@ -597,7 +641,6 @@ ice_lag_move_single_vf_nodes(struct ice_lag *lag, u8 oldport, u8 newport,
void ice_lag_move_new_vf_nodes(struct ice_vf *vf)
{
struct ice_lag_netdev_list ndlist;
struct list_head *tmp, *n;
u8 pri_port, act_port;
struct ice_lag *lag;
struct ice_vsi *vsi;
@ -621,38 +664,15 @@ void ice_lag_move_new_vf_nodes(struct ice_vf *vf)
pri_port = pf->hw.port_info->lport;
act_port = lag->active_port;
if (lag->upper_netdev) {
struct ice_lag_netdev_list *nl;
struct net_device *tmp_nd;
INIT_LIST_HEAD(&ndlist.node);
rcu_read_lock();
for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
nl = kzalloc(sizeof(*nl), GFP_ATOMIC);
if (!nl)
break;
nl->netdev = tmp_nd;
list_add(&nl->node, &ndlist.node);
}
rcu_read_unlock();
}
lag->netdev_head = &ndlist.node;
if (lag->upper_netdev)
ice_lag_build_netdev_list(lag, &ndlist);
if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) &&
lag->bonded && lag->primary && pri_port != act_port &&
!list_empty(lag->netdev_head))
ice_lag_move_single_vf_nodes(lag, pri_port, act_port, vsi->idx);
list_for_each_safe(tmp, n, &ndlist.node) {
struct ice_lag_netdev_list *entry;
entry = list_entry(tmp, struct ice_lag_netdev_list, node);
list_del(&entry->node);
kfree(entry);
}
lag->netdev_head = NULL;
ice_lag_destroy_netdev_list(lag, &ndlist);
new_vf_unlock:
mutex_unlock(&pf->lag_mutex);
@ -679,6 +699,29 @@ static void ice_lag_move_vf_nodes(struct ice_lag *lag, u8 oldport, u8 newport)
ice_lag_move_single_vf_nodes(lag, oldport, newport, i);
}
/**
* ice_lag_move_vf_nodes_cfg - move vf nodes outside LAG netdev event context
* @lag: local lag struct
* @src_prt: lport value for source port
* @dst_prt: lport value for destination port
*
* This function is used to move nodes during an out-of-netdev-event situation,
* primarily when the driver needs to reconfigure or recreate resources.
*
* Must be called while holding the lag_mutex to avoid lag events from
* processing while out-of-sync moves are happening. Also, paired moves,
* such as used in a reset flow, should both be called under the same mutex
* lock to avoid changes between start of reset and end of reset.
*/
void ice_lag_move_vf_nodes_cfg(struct ice_lag *lag, u8 src_prt, u8 dst_prt)
{
struct ice_lag_netdev_list ndlist;
ice_lag_build_netdev_list(lag, &ndlist);
ice_lag_move_vf_nodes(lag, src_prt, dst_prt);
ice_lag_destroy_netdev_list(lag, &ndlist);
}
#define ICE_LAG_SRIOV_CP_RECIPE 10
#define ICE_LAG_SRIOV_TRAIN_PKT_LEN 16
@ -2051,7 +2094,6 @@ void ice_lag_rebuild(struct ice_pf *pf)
{
struct ice_lag_netdev_list ndlist;
struct ice_lag *lag, *prim_lag;
struct list_head *tmp, *n;
u8 act_port, loc_port;
if (!pf->lag || !pf->lag->bonded)
@ -2063,21 +2105,7 @@ void ice_lag_rebuild(struct ice_pf *pf)
if (lag->primary) {
prim_lag = lag;
} else {
struct ice_lag_netdev_list *nl;
struct net_device *tmp_nd;
INIT_LIST_HEAD(&ndlist.node);
rcu_read_lock();
for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
nl = kzalloc(sizeof(*nl), GFP_ATOMIC);
if (!nl)
break;
nl->netdev = tmp_nd;
list_add(&nl->node, &ndlist.node);
}
rcu_read_unlock();
lag->netdev_head = &ndlist.node;
ice_lag_build_netdev_list(lag, &ndlist);
prim_lag = ice_lag_find_primary(lag);
}
@ -2107,13 +2135,7 @@ void ice_lag_rebuild(struct ice_pf *pf)
ice_clear_rdma_cap(pf);
lag_rebuild_out:
list_for_each_safe(tmp, n, &ndlist.node) {
struct ice_lag_netdev_list *entry;
entry = list_entry(tmp, struct ice_lag_netdev_list, node);
list_del(&entry->node);
kfree(entry);
}
ice_lag_destroy_netdev_list(lag, &ndlist);
mutex_unlock(&pf->lag_mutex);
}

View File

@ -65,4 +65,5 @@ int ice_init_lag(struct ice_pf *pf);
void ice_deinit_lag(struct ice_pf *pf);
void ice_lag_rebuild(struct ice_pf *pf);
bool ice_lag_is_switchdev_running(struct ice_pf *pf);
void ice_lag_move_vf_nodes_cfg(struct ice_lag *lag, u8 src_prt, u8 dst_prt);
#endif /* _ICE_LAG_H_ */

View File

@ -829,12 +829,16 @@ static void ice_notify_vf_reset(struct ice_vf *vf)
int ice_reset_vf(struct ice_vf *vf, u32 flags)
{
struct ice_pf *pf = vf->pf;
struct ice_lag *lag;
struct ice_vsi *vsi;
u8 act_prt, pri_prt;
struct device *dev;
int err = 0;
bool rsd;
dev = ice_pf_to_dev(pf);
act_prt = ICE_LAG_INVALID_PORT;
pri_prt = pf->hw.port_info->lport;
if (flags & ICE_VF_RESET_NOTIFY)
ice_notify_vf_reset(vf);
@ -845,6 +849,17 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
return 0;
}
lag = pf->lag;
mutex_lock(&pf->lag_mutex);
if (lag && lag->bonded && lag->primary) {
act_prt = lag->active_port;
if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
lag->upper_netdev)
ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
else
act_prt = ICE_LAG_INVALID_PORT;
}
if (flags & ICE_VF_RESET_LOCK)
mutex_lock(&vf->cfg_lock);
else
@ -937,6 +952,11 @@ out_unlock:
if (flags & ICE_VF_RESET_LOCK)
mutex_unlock(&vf->cfg_lock);
if (lag && lag->bonded && lag->primary &&
act_prt != ICE_LAG_INVALID_PORT)
ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
mutex_unlock(&pf->lag_mutex);
return err;
}

View File

@ -1603,9 +1603,24 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
(struct virtchnl_vsi_queue_config_info *)msg;
struct virtchnl_queue_pair_info *qpi;
struct ice_pf *pf = vf->pf;
struct ice_lag *lag;
struct ice_vsi *vsi;
u8 act_prt, pri_prt;
int i = -1, q_idx;
lag = pf->lag;
mutex_lock(&pf->lag_mutex);
act_prt = ICE_LAG_INVALID_PORT;
pri_prt = pf->hw.port_info->lport;
if (lag && lag->bonded && lag->primary) {
act_prt = lag->active_port;
if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
lag->upper_netdev)
ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
else
act_prt = ICE_LAG_INVALID_PORT;
}
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
goto error_param;
@ -1729,6 +1744,11 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
}
}
if (lag && lag->bonded && lag->primary &&
act_prt != ICE_LAG_INVALID_PORT)
ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
mutex_unlock(&pf->lag_mutex);
/* send the response to the VF */
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
VIRTCHNL_STATUS_SUCCESS, NULL, 0);
@ -1743,6 +1763,11 @@ error_param:
vf->vf_id, i);
}
if (lag && lag->bonded && lag->primary &&
act_prt != ICE_LAG_INVALID_PORT)
ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
mutex_unlock(&pf->lag_mutex);
ice_lag_move_new_vf_nodes(vf);
/* send the response to the VF */

View File

@ -5505,6 +5505,8 @@ int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
ipolicer = &nix_hw->ipolicer[layer];
for (idx = 0; idx < req->prof_count[layer]; idx++) {
if (idx == MAX_BANDPROF_PER_PFFUNC)
break;
prof_idx = req->prof_idx[layer][idx];
if (prof_idx >= ipolicer->band_prof.max ||
ipolicer->pfvf_map[prof_idx] != pcifunc)
@ -5518,8 +5520,6 @@ int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
ipolicer->pfvf_map[prof_idx] = 0x00;
ipolicer->match_id[prof_idx] = 0;
rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
if (idx == MAX_BANDPROF_PER_PFFUNC)
break;
}
}
mutex_unlock(&rvu->rsrc_lock);

View File

@ -450,6 +450,9 @@ int cn10k_set_ipolicer_rate(struct otx2_nic *pfvf, u16 profile,
aq->prof.pebs_mantissa = 0;
aq->prof_mask.pebs_mantissa = 0xFF;
aq->prof.hl_en = 0;
aq->prof_mask.hl_en = 1;
/* Fill AQ info */
aq->qidx = profile;
aq->ctype = NIX_AQ_CTYPE_BANDPROF;

View File

@ -1070,6 +1070,8 @@ int otx2_init_tc(struct otx2_nic *nic);
void otx2_shutdown_tc(struct otx2_nic *nic);
int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
void *type_data);
void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic);
/* CGX/RPM DMAC filters support */
int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);

View File

@ -566,7 +566,9 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr);
otx2_queue_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr,
TYPE_PFVF);
vfs -= 64;
if (intr)
trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
vfs = 64;
}
intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0));
@ -574,7 +576,8 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF);
trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
if (intr)
trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
return IRQ_HANDLED;
}
@ -1870,6 +1873,8 @@ int otx2_open(struct net_device *netdev)
if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
otx2_dmacflt_reinstall_flows(pf);
otx2_tc_apply_ingress_police_rules(pf);
err = otx2_rxtx_enable(pf, true);
/* If a mbox communication error happens at this point then interface
* will end up in a state such that it is in down state but hardware

View File

@ -47,6 +47,9 @@ struct otx2_tc_flow {
bool is_act_police;
u32 prio;
struct npc_install_flow_req req;
u64 rate;
u32 burst;
bool is_pps;
};
static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst,
@ -284,6 +287,41 @@ static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic,
return err;
}
static int otx2_tc_act_set_hw_police(struct otx2_nic *nic,
struct otx2_tc_flow *node)
{
int rc;
mutex_lock(&nic->mbox.lock);
rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile);
if (rc) {
mutex_unlock(&nic->mbox.lock);
return rc;
}
rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile,
node->burst, node->rate, node->is_pps);
if (rc)
goto free_leaf;
rc = cn10k_map_unmap_rq_policer(nic, node->rq, node->leaf_profile, true);
if (rc)
goto free_leaf;
mutex_unlock(&nic->mbox.lock);
return 0;
free_leaf:
if (cn10k_free_leaf_profile(nic, node->leaf_profile))
netdev_err(nic->netdev,
"Unable to free leaf bandwidth profile(%d)\n",
node->leaf_profile);
mutex_unlock(&nic->mbox.lock);
return rc;
}
static int otx2_tc_act_set_police(struct otx2_nic *nic,
struct otx2_tc_flow *node,
struct flow_cls_offload *f,
@ -300,39 +338,20 @@ static int otx2_tc_act_set_police(struct otx2_nic *nic,
return -EINVAL;
}
mutex_lock(&nic->mbox.lock);
rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile);
if (rc) {
mutex_unlock(&nic->mbox.lock);
return rc;
}
rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile, burst, rate, pps);
if (rc)
goto free_leaf;
rc = cn10k_map_unmap_rq_policer(nic, rq_idx, node->leaf_profile, true);
if (rc)
goto free_leaf;
mutex_unlock(&nic->mbox.lock);
req->match_id = mark & 0xFFFFULL;
req->index = rq_idx;
req->op = NIX_RX_ACTIONOP_UCAST;
set_bit(rq_idx, &nic->rq_bmap);
node->is_act_police = true;
node->rq = rq_idx;
node->burst = burst;
node->rate = rate;
node->is_pps = pps;
return 0;
rc = otx2_tc_act_set_hw_police(nic, node);
if (!rc)
set_bit(rq_idx, &nic->rq_bmap);
free_leaf:
if (cn10k_free_leaf_profile(nic, node->leaf_profile))
netdev_err(nic->netdev,
"Unable to free leaf bandwidth profile(%d)\n",
node->leaf_profile);
mutex_unlock(&nic->mbox.lock);
return rc;
}
@ -1044,6 +1063,11 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
}
if (flow_node->is_act_police) {
__clear_bit(flow_node->rq, &nic->rq_bmap);
if (nic->flags & OTX2_FLAG_INTF_DOWN)
goto free_mcam_flow;
mutex_lock(&nic->mbox.lock);
err = cn10k_map_unmap_rq_policer(nic, flow_node->rq,
@ -1059,11 +1083,10 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
"Unable to free leaf bandwidth profile(%d)\n",
flow_node->leaf_profile);
__clear_bit(flow_node->rq, &nic->rq_bmap);
mutex_unlock(&nic->mbox.lock);
}
free_mcam_flow:
otx2_del_mcam_flow_entry(nic, flow_node->entry, NULL);
otx2_tc_update_mcam_table(nic, flow_cfg, flow_node, false);
kfree_rcu(flow_node, rcu);
@ -1083,6 +1106,11 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
return -ENOMEM;
if (nic->flags & OTX2_FLAG_INTF_DOWN) {
NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
return -EINVAL;
}
if (flow_cfg->nr_flows == flow_cfg->max_flows) {
NL_SET_ERR_MSG_MOD(extack,
"Free MCAM entry not available to add the flow");
@ -1442,3 +1470,45 @@ void otx2_shutdown_tc(struct otx2_nic *nic)
otx2_destroy_tc_flow_list(nic);
}
EXPORT_SYMBOL(otx2_shutdown_tc);
static void otx2_tc_config_ingress_rule(struct otx2_nic *nic,
struct otx2_tc_flow *node)
{
struct npc_install_flow_req *req;
if (otx2_tc_act_set_hw_police(nic, node))
return;
mutex_lock(&nic->mbox.lock);
req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
if (!req)
goto err;
memcpy(req, &node->req, sizeof(struct npc_install_flow_req));
if (otx2_sync_mbox_msg(&nic->mbox))
netdev_err(nic->netdev,
"Failed to install MCAM flow entry for ingress rule");
err:
mutex_unlock(&nic->mbox.lock);
}
void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic)
{
struct otx2_flow_config *flow_cfg = nic->flow_cfg;
struct otx2_tc_flow *node;
/* If any ingress policer rules exist for the interface then
* apply those rules. Ingress policer rules depend on bandwidth
* profiles linked to the receive queues. Since no receive queues
* exist when interface is down, ingress policer rules are stored
* and configured in hardware after all receive queues are allocated
* in otx2_open.
*/
list_for_each_entry(node, &flow_cfg->flow_list_tc, list) {
if (node->is_act_police)
otx2_tc_config_ingress_rule(nic, node);
}
}
EXPORT_SYMBOL(otx2_tc_apply_ingress_police_rules);

View File

@ -579,6 +579,7 @@ struct rtl8169_tc_offsets {
enum rtl_flag {
RTL_FLAG_TASK_ENABLED = 0,
RTL_FLAG_TASK_RESET_PENDING,
RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE,
RTL_FLAG_TASK_TX_TIMEOUT,
RTL_FLAG_MAX
};
@ -4582,6 +4583,8 @@ static void rtl_task(struct work_struct *work)
reset:
rtl_reset_work(tp);
netif_wake_queue(tp->dev);
} else if (test_and_clear_bit(RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE, tp->wk.flags)) {
rtl_reset_work(tp);
}
out_unlock:
rtnl_unlock();
@ -4615,7 +4618,7 @@ static void r8169_phylink_handler(struct net_device *ndev)
} else {
/* In few cases rx is broken after link-down otherwise */
if (rtl_is_8125(tp))
rtl_reset_work(tp);
rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE);
pm_runtime_idle(d);
}
@ -4691,7 +4694,7 @@ static int rtl8169_close(struct net_device *dev)
rtl8169_down(tp);
rtl8169_rx_clear(tp);
cancel_work_sync(&tp->wk.work);
cancel_work(&tp->wk.work);
free_irq(tp->irq, tp);
@ -4925,6 +4928,8 @@ static void rtl_remove_one(struct pci_dev *pdev)
if (pci_dev_run_wake(pdev))
pm_runtime_get_noresume(&pdev->dev);
cancel_work_sync(&tp->wk.work);
unregister_netdev(tp->dev);
if (tp->dash_type != RTL_DASH_NONE)

View File

@ -515,6 +515,15 @@ static void ravb_emac_init_gbeth(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
if (priv->phy_interface == PHY_INTERFACE_MODE_MII) {
ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_MII, CXR35);
ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, 0);
} else {
ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_RGMII, CXR35);
ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1,
CXR31_SEL_LINK0);
}
/* Receive frame limit set register */
ravb_write(ndev, GBETH_RX_BUFF_MAX + ETH_FCS_LEN, RFLR);
@ -537,14 +546,6 @@ static void ravb_emac_init_gbeth(struct net_device *ndev)
/* E-MAC interrupt enable register */
ravb_write(ndev, ECSIPR_ICDIP, ECSIPR);
if (priv->phy_interface == PHY_INTERFACE_MODE_MII) {
ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, 0);
ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_MII, CXR35);
} else {
ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1,
CXR31_SEL_LINK0);
}
}
static void ravb_emac_init_rcar(struct net_device *ndev)
@ -1811,19 +1812,20 @@ static int ravb_open(struct net_device *ndev)
if (info->gptp)
ravb_ptp_init(ndev, priv->pdev);
netif_tx_start_all_queues(ndev);
/* PHY control start */
error = ravb_phy_start(ndev);
if (error)
goto out_ptp_stop;
netif_tx_start_all_queues(ndev);
return 0;
out_ptp_stop:
/* Stop PTP Clock driver */
if (info->gptp)
ravb_ptp_stop(ndev);
ravb_stop_dma(ndev);
out_free_irq_mgmta:
if (!info->multi_irqs)
goto out_free_irq;
@ -1874,6 +1876,12 @@ static void ravb_tx_timeout_work(struct work_struct *work)
struct net_device *ndev = priv->ndev;
int error;
if (!rtnl_trylock()) {
usleep_range(1000, 2000);
schedule_work(&priv->work);
return;
}
netif_tx_stop_all_queues(ndev);
/* Stop PTP Clock driver */
@ -1907,7 +1915,7 @@ static void ravb_tx_timeout_work(struct work_struct *work)
*/
netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n",
__func__, error);
return;
goto out_unlock;
}
ravb_emac_init(ndev);
@ -1917,6 +1925,9 @@ out:
ravb_ptp_init(ndev, priv->pdev);
netif_tx_start_all_queues(ndev);
out_unlock:
rtnl_unlock();
}
/* Packet transmit function for Ethernet AVB */
@ -2645,9 +2656,14 @@ static int ravb_probe(struct platform_device *pdev)
ndev->features = info->net_features;
ndev->hw_features = info->net_hw_features;
reset_control_deassert(rstc);
error = reset_control_deassert(rstc);
if (error)
goto out_free_netdev;
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
error = pm_runtime_resume_and_get(&pdev->dev);
if (error < 0)
goto out_rpm_disable;
if (info->multi_irqs) {
if (info->err_mgmt_irqs)
@ -2872,11 +2888,12 @@ out_disable_gptp_clk:
out_disable_refclk:
clk_disable_unprepare(priv->refclk);
out_release:
free_netdev(ndev);
pm_runtime_put(&pdev->dev);
out_rpm_disable:
pm_runtime_disable(&pdev->dev);
reset_control_assert(rstc);
out_free_netdev:
free_netdev(ndev);
return error;
}
@ -2886,22 +2903,26 @@ static void ravb_remove(struct platform_device *pdev)
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
/* Stop PTP Clock driver */
if (info->ccc_gac)
ravb_ptp_stop(ndev);
clk_disable_unprepare(priv->gptp_clk);
clk_disable_unprepare(priv->refclk);
/* Set reset mode */
ravb_write(ndev, CCC_OPC_RESET, CCC);
unregister_netdev(ndev);
if (info->nc_queues)
netif_napi_del(&priv->napi[RAVB_NC]);
netif_napi_del(&priv->napi[RAVB_BE]);
ravb_mdio_release(priv);
/* Stop PTP Clock driver */
if (info->ccc_gac)
ravb_ptp_stop(ndev);
dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
priv->desc_bat_dma);
/* Set reset mode */
ravb_write(ndev, CCC_OPC_RESET, CCC);
clk_disable_unprepare(priv->gptp_clk);
clk_disable_unprepare(priv->refclk);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
reset_control_assert(priv->rstc);

View File

@ -1504,8 +1504,8 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
{
struct rswitch_device *rdev = netdev_priv(ndev);
struct rswitch_gwca_queue *gq = rdev->tx_queue;
netdev_tx_t ret = NETDEV_TX_OK;
struct rswitch_ext_desc *desc;
int ret = NETDEV_TX_OK;
dma_addr_t dma_addr;
if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - 1) {
@ -1517,10 +1517,8 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
return ret;
dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE);
if (dma_mapping_error(ndev->dev.parent, dma_addr)) {
dev_kfree_skb_any(skb);
return ret;
}
if (dma_mapping_error(ndev->dev.parent, dma_addr))
goto err_kfree;
gq->skbs[gq->cur] = skb;
desc = &gq->tx_ring[gq->cur];
@ -1533,10 +1531,8 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
struct rswitch_gwca_ts_info *ts_info;
ts_info = kzalloc(sizeof(*ts_info), GFP_ATOMIC);
if (!ts_info) {
dma_unmap_single(ndev->dev.parent, dma_addr, skb->len, DMA_TO_DEVICE);
return -ENOMEM;
}
if (!ts_info)
goto err_unmap;
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
rdev->ts_tag++;
@ -1558,6 +1554,14 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
gq->cur = rswitch_next_queue_index(gq, true, 1);
rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32));
return ret;
err_unmap:
dma_unmap_single(ndev->dev.parent, dma_addr, skb->len, DMA_TO_DEVICE);
err_kfree:
dev_kfree_skb_any(skb);
return ret;
}

View File

@ -177,8 +177,10 @@
#define MMC_XGMAC_RX_DISCARD_OCT_GB 0x1b4
#define MMC_XGMAC_RX_ALIGN_ERR_PKT 0x1bc
#define MMC_XGMAC_TX_FPE_INTR_MASK 0x204
#define MMC_XGMAC_TX_FPE_FRAG 0x208
#define MMC_XGMAC_TX_HOLD_REQ 0x20c
#define MMC_XGMAC_RX_FPE_INTR_MASK 0x224
#define MMC_XGMAC_RX_PKT_ASSEMBLY_ERR 0x228
#define MMC_XGMAC_RX_PKT_SMD_ERR 0x22c
#define MMC_XGMAC_RX_PKT_ASSEMBLY_OK 0x230
@ -352,6 +354,8 @@ static void dwxgmac_mmc_intr_all_mask(void __iomem *mmcaddr)
{
writel(0x0, mmcaddr + MMC_RX_INTR_MASK);
writel(0x0, mmcaddr + MMC_TX_INTR_MASK);
writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_TX_FPE_INTR_MASK);
writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_RX_FPE_INTR_MASK);
writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_RX_IPC_INTR_MASK);
}

View File

@ -1972,11 +1972,11 @@ void wx_reset_interrupt_capability(struct wx *wx)
if (!pdev->msi_enabled && !pdev->msix_enabled)
return;
pci_free_irq_vectors(wx->pdev);
if (pdev->msix_enabled) {
kfree(wx->msix_entries);
wx->msix_entries = NULL;
}
pci_free_irq_vectors(wx->pdev);
}
EXPORT_SYMBOL(wx_reset_interrupt_capability);

View File

@ -93,7 +93,7 @@ static void nsim_prog_set_loaded(struct bpf_prog *prog, bool loaded)
{
struct nsim_bpf_bound_prog *state;
if (!prog || !prog->aux->offload)
if (!prog || !bpf_prog_is_offloaded(prog->aux))
return;
state = prog->aux->offload->dev_priv;
@ -311,7 +311,7 @@ nsim_setup_prog_hw_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
if (!bpf->prog)
return 0;
if (!bpf->prog->aux->offload) {
if (!bpf_prog_is_offloaded(bpf->prog->aux)) {
NSIM_EA(bpf->extack, "xdpoffload of non-bound program");
return -EINVAL;
}

View File

@ -851,6 +851,12 @@ static int netkit_change_link(struct net_device *dev, struct nlattr *tb[],
return -EACCES;
}
if (data[IFLA_NETKIT_PEER_INFO]) {
NL_SET_ERR_MSG_ATTR(extack, data[IFLA_NETKIT_PEER_INFO],
"netkit peer info cannot be changed after device creation");
return -EINVAL;
}
if (data[IFLA_NETKIT_POLICY]) {
attr = data[IFLA_NETKIT_POLICY];
policy = nla_get_u32(attr);

View File

@ -57,8 +57,7 @@ config ATH9K_AHB
config ATH9K_DEBUGFS
bool "Atheros ath9k debugging"
depends on ATH9K && DEBUG_FS
select MAC80211_DEBUGFS
depends on ATH9K && DEBUG_FS && MAC80211_DEBUGFS
select ATH9K_COMMON_DEBUG
help
Say Y, if you need access to ath9k's statistics for
@ -70,7 +69,6 @@ config ATH9K_DEBUGFS
config ATH9K_STATION_STATISTICS
bool "Detailed station statistics"
depends on ATH9K && ATH9K_DEBUGFS && DEBUG_FS
select MAC80211_DEBUGFS
default n
help
This option enables detailed statistics for association stations.

View File

@ -707,8 +707,10 @@ int iwl_mvm_mld_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
rcu_dereference_protected(mvm_sta->link[link_id],
lockdep_is_held(&mvm->mutex));
if (WARN_ON(!link_conf || !mvm_link_sta))
if (WARN_ON(!link_conf || !mvm_link_sta)) {
ret = -EINVAL;
goto err;
}
ret = iwl_mvm_mld_cfg_sta(mvm, sta, vif, link_sta, link_conf,
mvm_link_sta);

View File

@ -375,6 +375,7 @@ static int mt7921_load_clc(struct mt792x_dev *dev, const char *fw_name)
int ret, i, len, offset = 0;
u8 *clc_base = NULL, hw_encap = 0;
dev->phy.clc_chan_conf = 0xff;
if (mt7921_disable_clc ||
mt76_is_usb(&dev->mt76))
return 0;

View File

@ -14,7 +14,7 @@
static void
mt7925_init_he_caps(struct mt792x_phy *phy, enum nl80211_band band,
struct ieee80211_sband_iftype_data *data,
enum nl80211_iftype iftype)
enum nl80211_iftype iftype)
{
struct ieee80211_sta_he_cap *he_cap = &data->he_cap;
struct ieee80211_he_cap_elem *he_cap_elem = &he_cap->he_cap_elem;
@ -53,7 +53,7 @@ mt7925_init_he_caps(struct mt792x_phy *phy, enum nl80211_band band,
IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO;
switch (i) {
switch (iftype) {
case NL80211_IFTYPE_AP:
he_cap_elem->mac_cap_info[2] |=
IEEE80211_HE_MAC_CAP2_BSR;

View File

@ -1192,8 +1192,16 @@ static unsigned long nvme_keep_alive_work_period(struct nvme_ctrl *ctrl)
static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
{
queue_delayed_work(nvme_wq, &ctrl->ka_work,
nvme_keep_alive_work_period(ctrl));
unsigned long now = jiffies;
unsigned long delay = nvme_keep_alive_work_period(ctrl);
unsigned long ka_next_check_tm = ctrl->ka_last_check_time + delay;
if (time_after(now, ka_next_check_tm))
delay = 0;
else
delay = ka_next_check_tm - now;
queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
}
static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
@ -1479,7 +1487,8 @@ static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl,
if (id->ncap == 0) {
/* namespace not allocated or attached */
info->is_removed = true;
return -ENODEV;
ret = -ENODEV;
goto error;
}
info->anagrpid = id->anagrpid;
@ -1497,8 +1506,10 @@ static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl,
!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
memcpy(ids->nguid, id->nguid, sizeof(ids->nguid));
}
error:
kfree(id);
return 0;
return ret;
}
static int nvme_ns_info_from_id_cs_indep(struct nvme_ctrl *ctrl,
@ -1890,9 +1901,10 @@ static void nvme_update_disk_info(struct gendisk *disk,
/*
* The block layer can't support LBA sizes larger than the page size
* yet, so catch this early and don't allow block I/O.
* or smaller than a sector size yet, so catch this early and don't
* allow block I/O.
*/
if (ns->lba_shift > PAGE_SHIFT) {
if (ns->lba_shift > PAGE_SHIFT || ns->lba_shift < SECTOR_SHIFT) {
capacity = 0;
bs = (1 << 9);
}
@ -2029,6 +2041,13 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
if (ret)
return ret;
if (id->ncap == 0) {
/* namespace not allocated or attached */
info->is_removed = true;
ret = -ENODEV;
goto error;
}
blk_mq_freeze_queue(ns->disk->queue);
lbaf = nvme_lbaf_index(id->flbas);
ns->lba_shift = id->lbaf[lbaf].ds;
@ -2090,6 +2109,8 @@ out:
set_bit(NVME_NS_READY, &ns->flags);
ret = 0;
}
error:
kfree(id);
return ret;
}
@ -4471,6 +4492,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
INIT_DELAYED_WORK(&ctrl->failfast_work, nvme_failfast_work);
memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
ctrl->ka_last_check_time = jiffies;
BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) >
PAGE_SIZE);

View File

@ -12,7 +12,8 @@ config PINCTRL_CS42L43
config PINCTRL_LOCHNAGAR
tristate "Cirrus Logic Lochnagar pinctrl driver"
depends on MFD_LOCHNAGAR
# Avoid clash caused by MIPS defining RST, which is used in the driver
depends on MFD_LOCHNAGAR && !MIPS
select GPIOLIB
select PINMUX
select PINCONF

View File

@ -1262,17 +1262,17 @@ static void pinctrl_link_add(struct pinctrl_dev *pctldev,
static int pinctrl_commit_state(struct pinctrl *p, struct pinctrl_state *state)
{
struct pinctrl_setting *setting, *setting2;
struct pinctrl_state *old_state = p->state;
struct pinctrl_state *old_state = READ_ONCE(p->state);
int ret;
if (p->state) {
if (old_state) {
/*
* For each pinmux setting in the old state, forget SW's record
* of mux owner for that pingroup. Any pingroups which are
* still owned by the new state will be re-acquired by the call
* to pinmux_enable_setting() in the loop below.
*/
list_for_each_entry(setting, &p->state->settings, node) {
list_for_each_entry(setting, &old_state->settings, node) {
if (setting->type != PIN_MAP_TYPE_MUX_GROUP)
continue;
pinmux_disable_setting(setting);

View File

@ -843,8 +843,8 @@ static int s32_pinctrl_probe_dt(struct platform_device *pdev,
if (!np)
return -ENODEV;
if (mem_regions == 0) {
dev_err(&pdev->dev, "mem_regions is 0\n");
if (mem_regions == 0 || mem_regions >= 10000) {
dev_err(&pdev->dev, "mem_regions is invalid: %u\n", mem_regions);
return -EINVAL;
}

View File

@ -143,6 +143,7 @@ static const struct dmi_system_id cy8c95x0_dmi_acpi_irq_info[] = {
* @pinctrl_desc: pin controller description
* @name: Chip controller name
* @tpin: Total number of pins
* @gpio_reset: GPIO line handler that can reset the IC
*/
struct cy8c95x0_pinctrl {
struct regmap *regmap;

View File

@ -146,7 +146,7 @@ static int rtd_pinctrl_get_function_groups(struct pinctrl_dev *pcdev,
static const struct rtd_pin_desc *rtd_pinctrl_find_mux(struct rtd_pinctrl *data, unsigned int pin)
{
if (!data->info->muxes[pin].name)
if (data->info->muxes[pin].name)
return &data->info->muxes[pin];
return NULL;
@ -249,7 +249,7 @@ static const struct pinctrl_pin_desc
static const struct rtd_pin_config_desc
*rtd_pinctrl_find_config(struct rtd_pinctrl *data, unsigned int pin)
{
if (!data->info->configs[pin].name)
if (data->info->configs[pin].name)
return &data->info->configs[pin];
return NULL;

View File

@ -1273,9 +1273,11 @@ static struct stm32_desc_pin *stm32_pctrl_get_desc_pin_from_gpio(struct stm32_pi
int i;
/* With few exceptions (e.g. bank 'Z'), pin number matches with pin index in array */
pin_desc = pctl->pins + stm32_pin_nb;
if (pin_desc->pin.number == stm32_pin_nb)
return pin_desc;
if (stm32_pin_nb < pctl->npins) {
pin_desc = pctl->pins + stm32_pin_nb;
if (pin_desc->pin.number == stm32_pin_nb)
return pin_desc;
}
/* Otherwise, loop all array to find the pin with the right number */
for (i = 0; i < pctl->npins; i++) {
@ -1368,6 +1370,11 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl, struct fwnode
}
names = devm_kcalloc(dev, npins, sizeof(char *), GFP_KERNEL);
if (!names) {
err = -ENOMEM;
goto err_clk;
}
for (i = 0; i < npins; i++) {
stm32_pin = stm32_pctrl_get_desc_pin_from_gpio(pctl, bank, i);
if (stm32_pin && stm32_pin->pin.name)

View File

@ -35,7 +35,7 @@ scmi_pd_set_perf_state(struct generic_pm_domain *genpd, unsigned int state)
if (!state)
return -EINVAL;
ret = pd->perf_ops->level_set(pd->ph, pd->domain_id, state, true);
ret = pd->perf_ops->level_set(pd->ph, pd->domain_id, state, false);
if (ret)
dev_warn(&genpd->dev, "Failed with %d when trying to set %d perf level",
ret, state);

View File

@ -1044,6 +1044,7 @@ static int rpmpd_probe(struct platform_device *pdev)
rpmpds[i]->pd.power_off = rpmpd_power_off;
rpmpds[i]->pd.power_on = rpmpd_power_on;
rpmpds[i]->pd.set_performance_state = rpmpd_set_performance;
rpmpds[i]->pd.flags = GENPD_FLAG_ACTIVE_WAKEUP;
pm_genpd_init(&rpmpds[i]->pd, NULL, true);
data->domains[i] = &rpmpds[i]->pd;

View File

@ -24,7 +24,6 @@
#include <linux/of.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/units.h>
struct dtpm_cpu {
struct dtpm dtpm;
@ -104,8 +103,7 @@ static u64 get_pd_power_uw(struct dtpm *dtpm)
if (pd->table[i].frequency < freq)
continue;
return scale_pd_power_uw(pd_mask, pd->table[i].power *
MICROWATT_PER_MILLIWATT);
return scale_pd_power_uw(pd_mask, pd->table[i].power);
}
return 0;
@ -122,11 +120,9 @@ static int update_pd_power_uw(struct dtpm *dtpm)
nr_cpus = cpumask_weight(&cpus);
dtpm->power_min = em->table[0].power;
dtpm->power_min *= MICROWATT_PER_MILLIWATT;
dtpm->power_min *= nr_cpus;
dtpm->power_max = em->table[em->nr_perf_states - 1].power;
dtpm->power_max *= MICROWATT_PER_MILLIWATT;
dtpm->power_max *= nr_cpus;
return 0;

View File

@ -39,10 +39,8 @@ static int update_pd_power_uw(struct dtpm *dtpm)
struct em_perf_domain *pd = em_pd_get(dev);
dtpm->power_min = pd->table[0].power;
dtpm->power_min *= MICROWATT_PER_MILLIWATT;
dtpm->power_max = pd->table[pd->nr_perf_states - 1].power;
dtpm->power_max *= MICROWATT_PER_MILLIWATT;
return 0;
}
@ -54,13 +52,10 @@ static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit)
struct device *dev = devfreq->dev.parent;
struct em_perf_domain *pd = em_pd_get(dev);
unsigned long freq;
u64 power;
int i;
for (i = 0; i < pd->nr_perf_states; i++) {
power = pd->table[i].power * MICROWATT_PER_MILLIWATT;
if (power > power_limit)
if (pd->table[i].power > power_limit)
break;
}
@ -68,7 +63,7 @@ static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit)
dev_pm_qos_update_request(&dtpm_devfreq->qos_req, freq);
power_limit = pd->table[i - 1].power * MICROWATT_PER_MILLIWATT;
power_limit = pd->table[i - 1].power;
return power_limit;
}
@ -110,7 +105,7 @@ static u64 get_pd_power_uw(struct dtpm *dtpm)
if (pd->table[i].frequency < freq)
continue;
power = pd->table[i].power * MICROWATT_PER_MILLIWATT;
power = pd->table[i].power;
power *= status.busy_time;
power >>= 10;

View File

@ -3949,8 +3949,15 @@ static int sd_resume(struct device *dev, bool runtime)
static int sd_resume_system(struct device *dev)
{
if (pm_runtime_suspended(dev))
if (pm_runtime_suspended(dev)) {
struct scsi_disk *sdkp = dev_get_drvdata(dev);
struct scsi_device *sdp = sdkp ? sdkp->device : NULL;
if (sdp && sdp->force_runtime_start_on_system_start)
pm_request_resume(dev);
return 0;
}
return sd_resume(dev, false);
}

View File

@ -6444,11 +6444,24 @@ static bool ufshcd_abort_one(struct request *rq, void *priv)
struct scsi_device *sdev = cmd->device;
struct Scsi_Host *shost = sdev->host;
struct ufs_hba *hba = shost_priv(shost);
struct ufshcd_lrb *lrbp = &hba->lrb[tag];
struct ufs_hw_queue *hwq;
unsigned long flags;
*ret = ufshcd_try_to_abort_task(hba, tag);
dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
*ret ? "failed" : "succeeded");
/* Release cmd in MCQ mode if abort succeeds */
if (is_mcq_enabled(hba) && (*ret == 0)) {
hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
spin_lock_irqsave(&hwq->cq_lock, flags);
if (ufshcd_cmd_inflight(lrbp->cmd))
ufshcd_release_scsi_cmd(hba, lrbp);
spin_unlock_irqrestore(&hwq->cq_lock, flags);
}
return *ret == 0;
}

Some files were not shown because too many files have changed in this diff.