Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Conflicts:

tools/testing/selftests/net/mptcp/mptcp_join.sh
  34aa6e3bcc ("selftests: mptcp: add ip mptcp wrappers")
  857898eb4b ("selftests: mptcp: add missing join check")
  6ef84b1517 ("selftests: mptcp: more robust signal race test")
  https://lore.kernel.org/all/20220221131842.468893-1-broonie@kernel.org/

drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
  fb7e76ea3f ("net/mlx5e: TC, Skip redundant ct clear actions")
  c63741b426 ("net/mlx5e: Fix MPLSoUDP encap to use MPLS action information")
  09bf979232 ("net/mlx5e: TC, Move pedit_headers_action to parse_attr")
  84ba8062e3 ("net/mlx5e: Test CT and SAMPLE on flow attr")
  efe6f961cd ("net/mlx5e: CT, Don't set flow flag CT for ct clear flow")
  3b49a7edec ("net/mlx5e: TC, Reject rules with multiple CT actions")

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

commit aaa25a2fa7
@@ -468,6 +468,7 @@ Description:
auto: Charge normally, respect thresholds
inhibit-charge: Do not charge while AC is attached
force-discharge: Force discharge while AC is attached
================ ====================================

What: /sys/class/power_supply/<supply_name>/technology
Date: May 2007

@@ -7,7 +7,6 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: SiFive GPIO controller

maintainers:
- Yash Shah <yash.shah@sifive.com>
- Paul Walmsley <paul.walmsley@sifive.com>

properties:

@@ -20,7 +20,7 @@ description: |

maintainers:
- Kishon Vijay Abraham I <kishon@ti.com>
- Roger Quadros <rogerq@ti.com>
- Roger Quadros <rogerq@kernel.org>

properties:
compatible:

@@ -8,7 +8,7 @@ title: OMAP USB2 PHY

maintainers:
- Kishon Vijay Abraham I <kishon@ti.com>
- Roger Quadros <rogerq@ti.com>
- Roger Quadros <rogerq@kernel.org>

properties:
compatible:

@@ -8,7 +8,6 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: SiFive PWM controller

maintainers:
- Yash Shah <yash.shah@sifive.com>
- Sagar Kadam <sagar.kadam@sifive.com>
- Paul Walmsley <paul.walmsley@sifive.com>

@@ -9,7 +9,6 @@ title: SiFive L2 Cache Controller

maintainers:
- Sagar Kadam <sagar.kadam@sifive.com>
- Yash Shah <yash.shah@sifive.com>
- Paul Walmsley <paul.walmsley@sifive.com>

description:

@@ -8,6 +8,7 @@ title: Audio codec controlled by ChromeOS EC

maintainers:
- Cheng-Yi Chiang <cychiang@chromium.org>
- Tzung-Bi Shih <tzungbi@google.com>

description: |
Google's ChromeOS EC codec is a digital mic codec provided by the

@@ -7,7 +7,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: Bindings for the TI wrapper module for the Cadence USBSS-DRD controller

maintainers:
- Roger Quadros <rogerq@ti.com>
- Roger Quadros <rogerq@kernel.org>

properties:
compatible:

@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: TI Keystone Soc USB Controller

maintainers:
- Roger Quadros <rogerq@ti.com>
- Roger Quadros <rogerq@kernel.org>

properties:
compatible:
MAINTAINERS | 29

@@ -4549,6 +4549,7 @@ F: drivers/platform/chrome/

CHROMEOS EC CODEC DRIVER
M: Cheng-Yi Chiang <cychiang@chromium.org>
M: Tzung-Bi Shih <tzungbi@google.com>
R: Guenter Roeck <groeck@chromium.org>
S: Maintained
F: Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml

@@ -7012,12 +7013,6 @@ L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/sb_edac.c

EDAC-SIFIVE
M: Yash Shah <yash.shah@sifive.com>
L: linux-edac@vger.kernel.org
S: Supported
F: drivers/edac/sifive_edac.c

EDAC-SKYLAKE
M: Tony Luck <tony.luck@intel.com>
L: linux-edac@vger.kernel.org

@@ -9264,6 +9259,15 @@ S: Maintained
W: https://github.com/o2genum/ideapad-slidebar
F: drivers/input/misc/ideapad_slidebar.c

IDMAPPED MOUNTS
M: Christian Brauner <brauner@kernel.org>
L: linux-fsdevel@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/brauner/linux.git
F: Documentation/filesystems/idmappings.rst
F: tools/testing/selftests/mount_setattr/
F: include/linux/mnt_idmapping.h

IDT VersaClock 5 CLOCK DRIVER
M: Luca Ceresoli <luca@lucaceresoli.net>
S: Maintained

@@ -16000,14 +16004,6 @@ F: Documentation/devicetree/bindings/misc/qcom,fastrpc.txt
F: drivers/misc/fastrpc.c
F: include/uapi/misc/fastrpc.h

QUALCOMM GENERIC INTERFACE I2C DRIVER
M: Akash Asthana <akashast@codeaurora.org>
M: Mukesh Savaliya <msavaliy@codeaurora.org>
L: linux-i2c@vger.kernel.org
L: linux-arm-msm@vger.kernel.org
S: Supported
F: drivers/i2c/busses/i2c-qcom-geni.c

QUALCOMM HEXAGON ARCHITECTURE
M: Brian Cain <bcain@codeaurora.org>
L: linux-hexagon@vger.kernel.org

@@ -16079,8 +16075,8 @@ F: Documentation/devicetree/bindings/mtd/qcom,nandc.yaml
F: drivers/mtd/nand/raw/qcom_nandc.c

QUALCOMM RMNET DRIVER
M: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
M: Sean Tranchetti <stranche@codeaurora.org>
M: Subash Abhinov Kasiviswanathan <quic_subashab@quicinc.com>
M: Sean Tranchetti <quic_stranche@quicinc.com>
L: netdev@vger.kernel.org
S: Maintained
F: Documentation/networking/device_drivers/cellular/qualcomm/rmnet.rst

@@ -16372,6 +16368,7 @@ F: drivers/watchdog/realtek_otto_wdt.c

REALTEK RTL83xx SMI DSA ROUTER CHIPS
M: Linus Walleij <linus.walleij@linaro.org>
M: Alvin Šipraga <alsi@bang-olufsen.dk>
S: Maintained
F: Documentation/devicetree/bindings/net/dsa/realtek-smi.txt
F: drivers/net/dsa/realtek/*
Makefile | 2

@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 17
SUBLEVEL = 0
EXTRAVERSION = -rc4
EXTRAVERSION = -rc5
NAME = Superb Owl

# *DOCUMENTATION*
@@ -106,7 +106,7 @@
msr_s SYS_ICC_SRE_EL2, x0
isb // Make sure SRE is now set
mrs_s x0, SYS_ICC_SRE_EL2 // Read SRE back,
tbz x0, #0, 1f // and check that it sticks
tbz x0, #0, .Lskip_gicv3_\@ // and check that it sticks
msr_s SYS_ICH_HCR_EL2, xzr // Reset ICC_HCR_EL2 to defaults
.Lskip_gicv3_\@:
.endm
@@ -340,7 +340,7 @@ static int emulate_stw(struct pt_regs *regs, int frreg, int flop)
: "r" (val), "r" (regs->ior), "r" (regs->isr)
: "r19", "r20", "r21", "r22", "r1", FIXUP_BRANCH_CLOBBER );

return 0;
return ret;
}
static int emulate_std(struct pt_regs *regs, int frreg, int flop)
{

@@ -397,7 +397,7 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop)
__asm__ __volatile__ (
" mtsp %4, %%sr1\n"
" zdep %2, 29, 2, %%r19\n"
" dep %%r0, 31, 2, %2\n"
" dep %%r0, 31, 2, %3\n"
" mtsar %%r19\n"
" zvdepi -2, 32, %%r19\n"
"1: ldw 0(%%sr1,%3),%%r20\n"

@@ -409,7 +409,7 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop)
" andcm %%r21, %%r19, %%r21\n"
" or %1, %%r20, %1\n"
" or %2, %%r21, %2\n"
"3: stw %1,0(%%sr1,%1)\n"
"3: stw %1,0(%%sr1,%3)\n"
"4: stw %%r1,4(%%sr1,%3)\n"
"5: stw %2,8(%%sr1,%3)\n"
" copy %%r0, %0\n"

@@ -596,7 +596,6 @@ void handle_unaligned(struct pt_regs *regs)
ret = ERR_NOTHANDLED; /* "undefined", but lets kill them. */
break;
}
#ifdef CONFIG_PA20
switch (regs->iir & OPCODE2_MASK)
{
case OPCODE_FLDD_L:

@@ -607,22 +606,23 @@ void handle_unaligned(struct pt_regs *regs)
flop=1;
ret = emulate_std(regs, R2(regs->iir),1);
break;
#ifdef CONFIG_PA20
case OPCODE_LDD_L:
ret = emulate_ldd(regs, R2(regs->iir),0);
break;
case OPCODE_STD_L:
ret = emulate_std(regs, R2(regs->iir),0);
break;
}
#endif
}
switch (regs->iir & OPCODE3_MASK)
{
case OPCODE_FLDW_L:
flop=1;
ret = emulate_ldw(regs, R2(regs->iir),0);
ret = emulate_ldw(regs, R2(regs->iir), 1);
break;
case OPCODE_LDW_M:
ret = emulate_ldw(regs, R2(regs->iir),1);
ret = emulate_ldw(regs, R2(regs->iir), 0);
break;

case OPCODE_FSTW_L:
@@ -421,14 +421,14 @@ InstructionTLBMiss:
*/
/* Get PTE (linux-style) and check access */
mfspr r3,SPRN_IMISS
#ifdef CONFIG_MODULES
#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
lis r1, TASK_SIZE@h /* check if kernel address */
cmplw 0,r1,r3
#endif
mfspr r2, SPRN_SDR1
li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC | _PAGE_USER
rlwinm r2, r2, 28, 0xfffff000
#ifdef CONFIG_MODULES
#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
bgt- 112f
lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC

@@ -3264,12 +3264,14 @@ void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
case BARRIER_EIEIO:
eieio();
break;
#ifdef CONFIG_PPC64
case BARRIER_LWSYNC:
asm volatile("lwsync" : : : "memory");
break;
case BARRIER_PTESYNC:
asm volatile("ptesync" : : : "memory");
break;
#endif
}
break;
@@ -5,6 +5,7 @@
* Copyright (c) 2020 Western Digital Corporation or its affiliates.
*/

#include <linux/bits.h>
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/reboot.h>

@@ -85,7 +86,7 @@ static unsigned long __sbi_v01_cpumask_to_hartmask(const struct cpumask *cpu_mask
pr_warn("Unable to send any request to hartid > BITS_PER_LONG for SBI v0.1\n");
break;
}
hmask |= 1 << hartid;
hmask |= BIT(hartid);
}

return hmask;

@@ -160,7 +161,7 @@ static int __sbi_send_ipi_v01(const struct cpumask *cpu_mask)
{
unsigned long hart_mask;

if (!cpu_mask)
if (!cpu_mask || cpumask_empty(cpu_mask))
cpu_mask = cpu_online_mask;
hart_mask = __sbi_v01_cpumask_to_hartmask(cpu_mask);

@@ -176,7 +177,7 @@ static int __sbi_rfence_v01(int fid, const struct cpumask *cpu_mask,
int result = 0;
unsigned long hart_mask;

if (!cpu_mask)
if (!cpu_mask || cpumask_empty(cpu_mask))
cpu_mask = cpu_online_mask;
hart_mask = __sbi_v01_cpumask_to_hartmask(cpu_mask);

@@ -249,26 +250,37 @@ static void __sbi_set_timer_v02(uint64_t stime_value)

static int __sbi_send_ipi_v02(const struct cpumask *cpu_mask)
{
unsigned long hartid, cpuid, hmask = 0, hbase = 0;
unsigned long hartid, cpuid, hmask = 0, hbase = 0, htop = 0;
struct sbiret ret = {0};
int result;

if (!cpu_mask)
if (!cpu_mask || cpumask_empty(cpu_mask))
cpu_mask = cpu_online_mask;

for_each_cpu(cpuid, cpu_mask) {
hartid = cpuid_to_hartid_map(cpuid);
if (hmask && ((hbase + BITS_PER_LONG) <= hartid)) {
ret = sbi_ecall(SBI_EXT_IPI, SBI_EXT_IPI_SEND_IPI,
hmask, hbase, 0, 0, 0, 0);
if (ret.error)
goto ecall_failed;
hmask = 0;
hbase = 0;
if (hmask) {
if (hartid + BITS_PER_LONG <= htop ||
hbase + BITS_PER_LONG <= hartid) {
ret = sbi_ecall(SBI_EXT_IPI,
SBI_EXT_IPI_SEND_IPI, hmask,
hbase, 0, 0, 0, 0);
if (ret.error)
goto ecall_failed;
hmask = 0;
} else if (hartid < hbase) {
/* shift the mask to fit lower hartid */
hmask <<= hbase - hartid;
hbase = hartid;
}
}
if (!hmask)
if (!hmask) {
hbase = hartid;
hmask |= 1UL << (hartid - hbase);
htop = hartid;
} else if (hartid > htop) {
htop = hartid;
}
hmask |= BIT(hartid - hbase);
}

if (hmask) {

@@ -344,25 +356,35 @@ static int __sbi_rfence_v02(int fid, const struct cpumask *cpu_mask,
unsigned long start, unsigned long size,
unsigned long arg4, unsigned long arg5)
{
unsigned long hartid, cpuid, hmask = 0, hbase = 0;
unsigned long hartid, cpuid, hmask = 0, hbase = 0, htop = 0;
int result;

if (!cpu_mask)
if (!cpu_mask || cpumask_empty(cpu_mask))
cpu_mask = cpu_online_mask;

for_each_cpu(cpuid, cpu_mask) {
hartid = cpuid_to_hartid_map(cpuid);
if (hmask && ((hbase + BITS_PER_LONG) <= hartid)) {
result = __sbi_rfence_v02_call(fid, hmask, hbase,
start, size, arg4, arg5);
if (result)
return result;
hmask = 0;
hbase = 0;
if (hmask) {
if (hartid + BITS_PER_LONG <= htop ||
hbase + BITS_PER_LONG <= hartid) {
result = __sbi_rfence_v02_call(fid, hmask,
hbase, start, size, arg4, arg5);
if (result)
return result;
hmask = 0;
} else if (hartid < hbase) {
/* shift the mask to fit lower hartid */
hmask <<= hbase - hartid;
hbase = hartid;
}
}
if (!hmask)
if (!hmask) {
hbase = hartid;
hmask |= 1UL << (hartid - hbase);
htop = hartid;
} else if (hartid > htop) {
htop = hartid;
}
hmask |= BIT(hartid - hbase);
}

if (hmask) {
@@ -344,10 +344,8 @@ static void sgx_reclaim_pages(void)
{
struct sgx_epc_page *chunk[SGX_NR_TO_SCAN];
struct sgx_backing backing[SGX_NR_TO_SCAN];
struct sgx_epc_section *section;
struct sgx_encl_page *encl_page;
struct sgx_epc_page *epc_page;
struct sgx_numa_node *node;
pgoff_t page_index;
int cnt = 0;
int ret;

@@ -418,13 +416,7 @@ skip:
kref_put(&encl_page->encl->refcount, sgx_encl_release);
epc_page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED;

section = &sgx_epc_sections[epc_page->section];
node = section->node;

spin_lock(&node->lock);
list_add_tail(&epc_page->list, &node->free_page_list);
spin_unlock(&node->lock);
atomic_long_inc(&sgx_nr_free_pages);
sgx_free_epc_page(epc_page);
}
}
@@ -91,11 +91,9 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
const void *kbuf, const void __user *ubuf)
{
struct fpu *fpu = &target->thread.fpu;
struct user32_fxsr_struct newstate;
struct fxregs_state newstate;
int ret;

BUILD_BUG_ON(sizeof(newstate) != sizeof(struct fxregs_state));

if (!cpu_feature_enabled(X86_FEATURE_FXSR))
return -ENODEV;

@@ -116,9 +114,10 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
/* Copy the state */
memcpy(&fpu->fpstate->regs.fxsave, &newstate, sizeof(newstate));

/* Clear xmm8..15 */
/* Clear xmm8..15 for 32-bit callers */
BUILD_BUG_ON(sizeof(fpu->__fpstate.regs.fxsave.xmm_space) != 16 * 16);
memset(&fpu->fpstate->regs.fxsave.xmm_space[8], 0, 8 * 16);
if (in_ia32_syscall())
memset(&fpu->fpstate->regs.fxsave.xmm_space[8*4], 0, 8 * 16);

/* Mark FP and SSE as in use when XSAVE is enabled */
if (use_xsave())

@@ -1224,7 +1224,7 @@ static struct user_regset x86_64_regsets[] __ro_after_init = {
},
[REGSET_FP] = {
.core_note_type = NT_PRFPREG,
.n = sizeof(struct user_i387_struct) / sizeof(long),
.n = sizeof(struct fxregs_state) / sizeof(long),
.size = sizeof(long), .align = sizeof(long),
.active = regset_xregset_fpregs_active, .regset_get = xfpregs_get, .set = xfpregs_set
},

@@ -1271,7 +1271,7 @@ static struct user_regset x86_32_regsets[] __ro_after_init = {
},
[REGSET_XFP] = {
.core_note_type = NT_PRXFPREG,
.n = sizeof(struct user32_fxsr_struct) / sizeof(u32),
.n = sizeof(struct fxregs_state) / sizeof(u32),
.size = sizeof(u32), .align = sizeof(u32),
.active = regset_xregset_fpregs_active, .regset_get = xfpregs_get, .set = xfpregs_set
},
@@ -7018,6 +7018,8 @@ static void bfq_exit_queue(struct elevator_queue *e)
spin_unlock_irq(&bfqd->lock);
#endif

wbt_enable_default(bfqd->queue);

kfree(bfqd);
}

@@ -284,13 +284,6 @@ void blk_queue_start_drain(struct request_queue *q)
wake_up_all(&q->mq_freeze_wq);
}

void blk_set_queue_dying(struct request_queue *q)
{
blk_queue_flag_set(QUEUE_FLAG_DYING, q);
blk_queue_start_drain(q);
}
EXPORT_SYMBOL_GPL(blk_set_queue_dying);

/**
* blk_cleanup_queue - shutdown a request queue
* @q: request queue to shutdown

@@ -308,7 +301,8 @@ void blk_cleanup_queue(struct request_queue *q)
WARN_ON_ONCE(blk_queue_registered(q));

/* mark @q DYING, no new request or merges will be allowed afterwards */
blk_set_queue_dying(q);
blk_queue_flag_set(QUEUE_FLAG_DYING, q);
blk_queue_start_drain(q);

blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

@@ -446,7 +446,7 @@ static struct bio *bio_copy_kern(struct request_queue *q, void *data,
if (bytes > len)
bytes = len;

page = alloc_page(GFP_NOIO | gfp_mask);
page = alloc_page(GFP_NOIO | __GFP_ZERO | gfp_mask);
if (!page)
goto cleanup;

@@ -736,6 +736,10 @@ static void blk_complete_request(struct request *req)

/* Completion has already been traced */
bio_clear_flag(bio, BIO_TRACE_COMPLETION);

if (req_op(req) == REQ_OP_ZONE_APPEND)
bio->bi_iter.bi_sector = req->__sector;

if (!is_flush)
bio_endio(bio);
bio = next;

@@ -525,8 +525,6 @@ void elv_unregister_queue(struct request_queue *q)
kobject_del(&e->kobj);

e->registered = 0;
/* Re-enable throttling in case elevator disabled it */
wbt_enable_default(q);
}
}

@@ -289,6 +289,8 @@ static void blkdev_bio_end_io_async(struct bio *bio)
struct kiocb *iocb = dio->iocb;
ssize_t ret;

WRITE_ONCE(iocb->private, NULL);

if (likely(!bio->bi_status)) {
ret = dio->size;
iocb->ki_pos += ret;

@@ -548,6 +548,20 @@ out_free_ext_minor:
}
EXPORT_SYMBOL(device_add_disk);

/**
* blk_mark_disk_dead - mark a disk as dead
* @disk: disk to mark as dead
*
* Mark as disk as dead (e.g. surprise removed) and don't accept any new I/O
* to this disk.
*/
void blk_mark_disk_dead(struct gendisk *disk)
{
set_bit(GD_DEAD, &disk->state);
blk_queue_start_drain(disk->queue);
}
EXPORT_SYMBOL_GPL(blk_mark_disk_dead);

/**
* del_gendisk - remove the gendisk
* @disk: the struct gendisk to remove
@@ -96,6 +96,11 @@ static const struct dmi_system_id processor_power_dmi_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
(void *)1},
/* T40 can not handle C3 idle state */
{ set_max_cstate, "IBM ThinkPad T40", {
DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
DMI_MATCH(DMI_PRODUCT_NAME, "23737CU")},
(void *)2},
{},
};

@@ -400,7 +400,7 @@ int __init_or_acpilib acpi_table_parse_entries_array(

acpi_get_table(id, instance, &table_header);
if (!table_header) {
pr_warn("%4.4s not present\n", id);
pr_debug("%4.4s not present\n", id);
return -ENODEV;
}
@@ -79,6 +79,7 @@
#include <linux/ioprio.h>
#include <linux/blk-cgroup.h>
#include <linux/sched/mm.h>
#include <linux/statfs.h>

#include "loop.h"

@@ -774,8 +775,13 @@ static void loop_config_discard(struct loop_device *lo)
granularity = 0;

} else {
struct kstatfs sbuf;

max_discard_sectors = UINT_MAX >> 9;
granularity = inode->i_sb->s_blocksize;
if (!vfs_statfs(&file->f_path, &sbuf))
granularity = sbuf.f_bsize;
else
max_discard_sectors = 0;
}

if (max_discard_sectors) {

@@ -4112,7 +4112,7 @@ static void mtip_pci_remove(struct pci_dev *pdev)
"Completion workers still active!\n");
}

blk_set_queue_dying(dd->queue);
blk_mark_disk_dead(dd->disk);
set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);

/* Clean up the block layer. */

@@ -7185,7 +7185,7 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
* IO to complete/fail.
*/
blk_mq_freeze_queue(rbd_dev->disk->queue);
blk_set_queue_dying(rbd_dev->disk->queue);
blk_mark_disk_dead(rbd_dev->disk);
}

del_gendisk(rbd_dev->disk);

@@ -2126,7 +2126,7 @@ static void blkfront_closing(struct blkfront_info *info)

/* No more blkif_request(). */
blk_mq_stop_hw_queues(info->rq);
blk_set_queue_dying(info->rq);
blk_mark_disk_dead(info->gd);
set_capacity(info->gd, 0);

for_each_rinfo(info, rinfo, i) {
@@ -1681,8 +1681,10 @@ static void at_xdmac_tasklet(struct tasklet_struct *t)
__func__, atchan->irq_status);

if (!(atchan->irq_status & AT_XDMAC_CIS_LIS) &&
!(atchan->irq_status & error_mask))
!(atchan->irq_status & error_mask)) {
spin_unlock_irq(&atchan->lock);
return;
}

if (atchan->irq_status & error_mask)
at_xdmac_handle_error(atchan);

@@ -207,7 +207,7 @@ int pt_core_init(struct pt_device *pt)
if (!cmd_q->qbase) {
dev_err(dev, "unable to allocate command queue\n");
ret = -ENOMEM;
goto e_dma_alloc;
goto e_destroy_pool;
}

cmd_q->qidx = 0;

@@ -229,8 +229,10 @@ int pt_core_init(struct pt_device *pt)

/* Request an irq */
ret = request_irq(pt->pt_irq, pt_core_irq_handler, 0, dev_name(pt->dev), pt);
if (ret)
goto e_pool;
if (ret) {
dev_err(dev, "unable to allocate an IRQ\n");
goto e_free_dma;
}

/* Update the device registers with queue information. */
cmd_q->qcontrol &= ~CMD_Q_SIZE;

@@ -250,21 +252,20 @@ int pt_core_init(struct pt_device *pt)
/* Register the DMA engine support */
ret = pt_dmaengine_register(pt);
if (ret)
goto e_dmaengine;
goto e_free_irq;

/* Set up debugfs entries */
ptdma_debugfs_setup(pt);

return 0;

e_dmaengine:
e_free_irq:
free_irq(pt->pt_irq, pt);

e_dma_alloc:
e_free_dma:
dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase, cmd_q->qbase_dma);

e_pool:
dev_err(dev, "unable to allocate an IRQ\n");
e_destroy_pool:
dma_pool_destroy(pt->cmd_q.dma_pool);

return ret;

@@ -1868,8 +1868,13 @@ static int rcar_dmac_probe(struct platform_device *pdev)

dmac->dev = &pdev->dev;
platform_set_drvdata(pdev, dmac);
dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
ret = dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
if (ret)
return ret;

ret = dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
if (ret)
return ret;

ret = rcar_dmac_parse_of(&pdev->dev, dmac);
if (ret < 0)

@@ -115,8 +115,10 @@ static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
ret = pm_runtime_get(schan->dev);

spin_unlock_irq(&schan->chan_lock);
if (ret < 0)
if (ret < 0) {
dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);
pm_runtime_put(schan->dev);
}

pm_runtime_barrier(schan->dev);

@@ -292,10 +292,12 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
ret = of_dma_router_register(node, stm32_dmamux_route_allocate,
&stm32_dmamux->dmarouter);
if (ret)
goto err_clk;
goto pm_disable;

return 0;

pm_disable:
pm_runtime_disable(&pdev->dev);
err_clk:
clk_disable_unprepare(stm32_dmamux->clk);
@@ -215,7 +215,7 @@ void *edac_align_ptr(void **p, unsigned int size, int n_elems)
else
return (char *)ptr;

r = (unsigned long)p % align;
r = (unsigned long)ptr % align;

if (r == 0)
return (char *)ptr;
@@ -2057,6 +2057,10 @@ static int sdma_v4_0_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;

/* SMU saves SDMA state for us */
if (adev->in_s0ix)
return 0;

return sdma_v4_0_hw_fini(adev);
}

@@ -2064,6 +2068,10 @@ static int sdma_v4_0_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;

/* SMU restores SDMA state for us */
if (adev->in_s0ix)
return 0;

return sdma_v4_0_hw_init(adev);
}

@@ -1238,21 +1238,37 @@ static int sienna_cichlid_populate_umd_state_clk(struct smu_context *smu)
&dpm_context->dpm_tables.soc_table;
struct smu_umd_pstate_table *pstate_table =
&smu->pstate_table;
struct amdgpu_device *adev = smu->adev;

pstate_table->gfxclk_pstate.min = gfx_table->min;
pstate_table->gfxclk_pstate.peak = gfx_table->max;
if (gfx_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK)
pstate_table->gfxclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK;

pstate_table->uclk_pstate.min = mem_table->min;
pstate_table->uclk_pstate.peak = mem_table->max;
if (mem_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK)
pstate_table->uclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK;

pstate_table->socclk_pstate.min = soc_table->min;
pstate_table->socclk_pstate.peak = soc_table->max;
if (soc_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK)

switch (adev->asic_type) {
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
pstate_table->gfxclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK;
pstate_table->uclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK;
pstate_table->socclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK;
break;
case CHIP_DIMGREY_CAVEFISH:
pstate_table->gfxclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_GFXCLK;
pstate_table->uclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_MEMCLK;
pstate_table->socclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_SOCCLK;
break;
case CHIP_BEIGE_GOBY:
pstate_table->gfxclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_GFXCLK;
pstate_table->uclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_MEMCLK;
pstate_table->socclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_SOCCLK;
break;
default:
break;
}

return 0;
}

@@ -33,6 +33,14 @@ typedef enum {
#define SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK 960
#define SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK 1000

#define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_GFXCLK 1950
#define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_SOCCLK 960
#define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_MEMCLK 676

#define BEIGE_GOBY_UMD_PSTATE_PROFILING_GFXCLK 2200
#define BEIGE_GOBY_UMD_PSTATE_PROFILING_SOCCLK 960
#define BEIGE_GOBY_UMD_PSTATE_PROFILING_MEMCLK 1000

extern void sienna_cichlid_set_ppt_funcs(struct smu_context *smu);

#endif

@@ -282,14 +282,9 @@ static int yellow_carp_post_smu_init(struct smu_context *smu)

static int yellow_carp_mode_reset(struct smu_context *smu, int type)
{
int ret = 0, index = 0;
int ret = 0;

index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
SMU_MSG_GfxDeviceDriverReset);
if (index < 0)
return index == -EACCES ? 0 : index;

ret = smu_cmn_send_smc_msg_with_param(smu, (uint16_t)index, type, NULL);
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, type, NULL);
if (ret)
dev_err(smu->adev->dev, "Failed to mode reset!\n");
@@ -76,15 +76,17 @@ int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
state->mode_blob = NULL;

if (mode) {
struct drm_property_blob *blob;

drm_mode_convert_to_umode(&umode, mode);
state->mode_blob =
drm_property_create_blob(state->crtc->dev,
sizeof(umode),
&umode);
if (IS_ERR(state->mode_blob))
return PTR_ERR(state->mode_blob);
blob = drm_property_create_blob(crtc->dev,
sizeof(umode), &umode);
if (IS_ERR(blob))
return PTR_ERR(blob);

drm_mode_copy(&state->mode, mode);

state->mode_blob = blob;
state->enable = true;
drm_dbg_atomic(crtc->dev,
"Set [MODE:%s] for [CRTC:%d:%s] state %p\n",

@@ -512,6 +512,7 @@ int drm_gem_cma_mmap(struct drm_gem_cma_object *cma_obj, struct vm_area_struct *
*/
vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_flags |= VM_DONTEXPAND;

if (cma_obj->map_noncoherent) {
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

@@ -101,6 +101,7 @@ config DRM_I915_USERPTR

config DRM_I915_GVT
bool "Enable Intel GVT-g graphics virtualization host support"
depends on DRM_I915
depends on X86
depends on 64BIT
default n
help
@@ -1115,7 +1115,8 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,

/* Wa_22010751166: icl, ehl, tgl, dg1, rkl */
if (DISPLAY_VER(i915) >= 11 &&
(plane_state->view.color_plane[0].y + drm_rect_height(&plane_state->uapi.src)) & 3) {
(plane_state->view.color_plane[0].y +
(drm_rect_height(&plane_state->uapi.src) >> 16)) & 3) {
plane_state->no_fbc_reason = "plane end Y offset misaligned";
return false;
}

@@ -360,6 +360,21 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
port++;
}

/*
* The port numbering and mapping here is bizarre. The now-obsolete
* swsci spec supports ports numbered [0..4]. Port E is handled as a
* special case, but port F and beyond are not. The functionality is
* supposed to be obsolete for new platforms. Just bail out if the port
* number is out of bounds after mapping.
*/
if (port > 4) {
drm_dbg_kms(&dev_priv->drm,
"[ENCODER:%d:%s] port %c (index %u) out of bounds for display power state notification\n",
intel_encoder->base.base.id, intel_encoder->base.name,
port_name(intel_encoder->port), port);
return -EINVAL;
}

if (!enable)
parm |= 4 << 8;

@@ -842,11 +842,9 @@ void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
} else if (obj->mm.madv != I915_MADV_WILLNEED) {
bo->priority = I915_TTM_PRIO_PURGE;
} else if (!i915_gem_object_has_pages(obj)) {
if (bo->priority < I915_TTM_PRIO_HAS_PAGES)
bo->priority = I915_TTM_PRIO_HAS_PAGES;
bo->priority = I915_TTM_PRIO_NO_PAGES;
} else {
if (bo->priority > I915_TTM_PRIO_NO_PAGES)
bo->priority = I915_TTM_PRIO_NO_PAGES;
bo->priority = I915_TTM_PRIO_HAS_PAGES;
}

ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);

@@ -1148,7 +1148,7 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
ops->set_pfn(se, s->shadow_page.mfn);
}

/**
/*
* Check if can do 2M page
* @vgpu: target vgpu
* @entry: target pfn's gtt entry

@@ -2193,7 +2193,7 @@ static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
}

/**
* intel_vgpu_emulate_gtt_mmio_read - emulate GTT MMIO register read
* intel_vgpu_emulate_ggtt_mmio_read - emulate GTT MMIO register read
* @vgpu: a vGPU
* @off: register offset
* @p_data: data will be returned to guest

@@ -4853,7 +4853,7 @@ static bool check_mbus_joined(u8 active_pipes,
{
int i;

for (i = 0; i < dbuf_slices[i].active_pipes; i++) {
for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
if (dbuf_slices[i].active_pipes == active_pipes)
return dbuf_slices[i].join_mbus;
}

@@ -4870,7 +4870,7 @@ static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus,
{
int i;

for (i = 0; i < dbuf_slices[i].active_pipes; i++) {
for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
if (dbuf_slices[i].active_pipes == active_pipes &&
dbuf_slices[i].join_mbus == join_mbus)
return dbuf_slices[i].dbuf_mask[pipe];
@@ -786,15 +786,98 @@ void mtk_dsi_ddp_stop(struct device *dev)
mtk_dsi_poweroff(dsi);
}

static int mtk_dsi_encoder_init(struct drm_device *drm, struct mtk_dsi *dsi)
{
int ret;

ret = drm_simple_encoder_init(drm, &dsi->encoder,
DRM_MODE_ENCODER_DSI);
if (ret) {
DRM_ERROR("Failed to encoder init to drm\n");
return ret;
}

dsi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm, dsi->host.dev);

ret = drm_bridge_attach(&dsi->encoder, &dsi->bridge, NULL,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret)
goto err_cleanup_encoder;

dsi->connector = drm_bridge_connector_init(drm, &dsi->encoder);
if (IS_ERR(dsi->connector)) {
DRM_ERROR("Unable to create bridge connector\n");
ret = PTR_ERR(dsi->connector);
goto err_cleanup_encoder;
}
drm_connector_attach_encoder(dsi->connector, &dsi->encoder);

return 0;

err_cleanup_encoder:
drm_encoder_cleanup(&dsi->encoder);
return ret;
}

static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
{
int ret;
struct drm_device *drm = data;
struct mtk_dsi *dsi = dev_get_drvdata(dev);

ret = mtk_dsi_encoder_init(drm, dsi);
if (ret)
return ret;

return device_reset_optional(dev);
}

static void mtk_dsi_unbind(struct device *dev, struct device *master,
void *data)
{
struct mtk_dsi *dsi = dev_get_drvdata(dev);

drm_encoder_cleanup(&dsi->encoder);
}

static const struct component_ops mtk_dsi_component_ops = {
.bind = mtk_dsi_bind,
.unbind = mtk_dsi_unbind,
};

static int mtk_dsi_host_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct mtk_dsi *dsi = host_to_dsi(host);
struct device *dev = host->dev;
int ret;

dsi->lanes = device->lanes;
dsi->format = device->format;
dsi->mode_flags = device->mode_flags;
dsi->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
if (IS_ERR(dsi->next_bridge))
return PTR_ERR(dsi->next_bridge);

drm_bridge_add(&dsi->bridge);

ret = component_add(host->dev, &mtk_dsi_component_ops);
if (ret) {
DRM_ERROR("failed to add dsi_host component: %d\n", ret);
drm_bridge_remove(&dsi->bridge);
return ret;
}

return 0;
}

static int mtk_dsi_host_detach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct mtk_dsi *dsi = host_to_dsi(host);

component_del(host->dev, &mtk_dsi_component_ops);
drm_bridge_remove(&dsi->bridge);
return 0;
}

@@ -938,73 +1021,14 @@ static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host,

static const struct mipi_dsi_host_ops mtk_dsi_ops = {
.attach = mtk_dsi_host_attach,
.detach = mtk_dsi_host_detach,
.transfer = mtk_dsi_host_transfer,
};

static int mtk_dsi_encoder_init(struct drm_device *drm, struct mtk_dsi *dsi)
{
int ret;

ret = drm_simple_encoder_init(drm, &dsi->encoder,
DRM_MODE_ENCODER_DSI);
if (ret) {
DRM_ERROR("Failed to encoder init to drm\n");
return ret;
}

dsi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm, dsi->host.dev);

ret = drm_bridge_attach(&dsi->encoder, &dsi->bridge, NULL,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret)
goto err_cleanup_encoder;

dsi->connector = drm_bridge_connector_init(drm, &dsi->encoder);
if (IS_ERR(dsi->connector)) {
DRM_ERROR("Unable to create bridge connector\n");
ret = PTR_ERR(dsi->connector);
goto err_cleanup_encoder;
}
drm_connector_attach_encoder(dsi->connector, &dsi->encoder);

return 0;

err_cleanup_encoder:
drm_encoder_cleanup(&dsi->encoder);
return ret;
}

static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
{
int ret;
struct drm_device *drm = data;
struct mtk_dsi *dsi = dev_get_drvdata(dev);

ret = mtk_dsi_encoder_init(drm, dsi);
if (ret)
return ret;

return device_reset_optional(dev);
}

static void mtk_dsi_unbind(struct device *dev, struct device *master,
void *data)
{
struct mtk_dsi *dsi = dev_get_drvdata(dev);

drm_encoder_cleanup(&dsi->encoder);
}

static const struct component_ops mtk_dsi_component_ops = {
.bind = mtk_dsi_bind,
.unbind = mtk_dsi_unbind,
};

static int mtk_dsi_probe(struct platform_device *pdev)
{
struct mtk_dsi *dsi;
struct device *dev = &pdev->dev;
struct drm_panel *panel;
struct resource *regs;
int irq_num;
int ret;

@@ -1021,19 +1045,6 @@ static int mtk_dsi_probe(struct platform_device *pdev)
return ret;
}

ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
&panel, &dsi->next_bridge);
if (ret)
goto err_unregister_host;

if (panel) {
dsi->next_bridge = devm_drm_panel_bridge_add(dev, panel);
if (IS_ERR(dsi->next_bridge)) {
ret = PTR_ERR(dsi->next_bridge);
goto err_unregister_host;
}
}

dsi->driver_data = of_device_get_match_data(dev);

dsi->engine_clk = devm_clk_get(dev, "engine");

@@ -1098,14 +1109,6 @@ static int mtk_dsi_probe(struct platform_device *pdev)
dsi->bridge.of_node = dev->of_node;
dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;

drm_bridge_add(&dsi->bridge);

ret = component_add(&pdev->dev, &mtk_dsi_component_ops);
if (ret) {
dev_err(&pdev->dev, "failed to add component: %d\n", ret);
goto err_unregister_host;
}

return 0;

err_unregister_host:

@@ -1118,8 +1121,6 @@ static int mtk_dsi_remove(struct platform_device *pdev)
struct mtk_dsi *dsi = platform_get_drvdata(pdev);

mtk_output_dsi_disable(dsi);
drm_bridge_remove(&dsi->bridge);
component_del(&pdev->dev, &mtk_dsi_component_ops);
mipi_dsi_host_unregister(&dsi->host);

return 0;
@@ -198,7 +198,8 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
* so don't register a backlight device
*/
if ((rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
(rdev->pdev->device == 0x6741))
(rdev->pdev->device == 0x6741) &&
!dmi_match(DMI_PRODUCT_NAME, "iMac12,1"))
return;

if (!radeon_encoder->enc_priv)
@@ -214,12 +214,14 @@ static int hwmon_thermal_add_sensor(struct device *dev, int index)

tzd = devm_thermal_zone_of_sensor_register(dev, index, tdata,
&hwmon_thermal_ops);
/*
* If CONFIG_THERMAL_OF is disabled, this returns -ENODEV,
* so ignore that error but forward any other error.
*/
if (IS_ERR(tzd) && (PTR_ERR(tzd) != -ENODEV))
return PTR_ERR(tzd);
if (IS_ERR(tzd)) {
if (PTR_ERR(tzd) != -ENODEV)
return PTR_ERR(tzd);
dev_info(dev, "temp%d_input not attached to any thermal zone\n",
index + 1);
devm_kfree(dev, tdata);
return 0;
}

err = devm_add_action(dev, hwmon_thermal_remove_sensor, &tdata->node);
if (err)

@@ -59,7 +59,7 @@ static const struct platform_device_id ntc_thermistor_id[] = {
[NTC_NCP15XH103] = { "ncp15xh103", TYPE_NCPXXXH103 },
[NTC_NCP18WB473] = { "ncp18wb473", TYPE_NCPXXWB473 },
[NTC_NCP21WB473] = { "ncp21wb473", TYPE_NCPXXWB473 },
[NTC_SSG1404001221] = { "ssg1404-001221", TYPE_NCPXXWB473 },
[NTC_SSG1404001221] = { "ssg1404_001221", TYPE_NCPXXWB473 },
[NTC_LAST] = { },
};
@@ -911,6 +911,11 @@ static int pmbus_get_boolean(struct i2c_client *client, struct pmbus_boolean *b,
pmbus_update_sensor_data(client, s2);

regval = status & mask;
if (regval) {
ret = pmbus_write_byte_data(client, page, reg, regval);
if (ret)
goto unlock;
}
if (s1 && s2) {
s64 v1, v2;

@@ -488,7 +488,7 @@ config I2C_BRCMSTB

config I2C_CADENCE
tristate "Cadence I2C Controller"
depends on ARCH_ZYNQ || ARM64 || XTENSA
depends on ARCH_ZYNQ || ARM64 || XTENSA || COMPILE_TEST
help
Say yes here to select Cadence I2C Host Controller. This controller is
e.g. used by Xilinx Zynq.

@@ -680,7 +680,7 @@ config I2C_IMG

config I2C_IMX
tristate "IMX I2C interface"
depends on ARCH_MXC || ARCH_LAYERSCAPE || COLDFIRE
depends on ARCH_MXC || ARCH_LAYERSCAPE || COLDFIRE || COMPILE_TEST
select I2C_SLAVE
help
Say Y here if you want to use the IIC bus controller on

@@ -935,7 +935,7 @@ config I2C_QCOM_GENI

config I2C_QUP
tristate "Qualcomm QUP based I2C controller"
depends on ARCH_QCOM
depends on ARCH_QCOM || COMPILE_TEST
help
If you say yes to this option, support will be included for the
built-in I2C interface on the Qualcomm SoCs.
@@ -23,6 +23,11 @@
#define BCM2835_I2C_FIFO 0x10
#define BCM2835_I2C_DIV 0x14
#define BCM2835_I2C_DEL 0x18
/*
* 16-bit field for the number of SCL cycles to wait after rising SCL
* before deciding the slave is not responding. 0 disables the
* timeout detection.
*/
#define BCM2835_I2C_CLKT 0x1c

#define BCM2835_I2C_C_READ BIT(0)

@@ -474,6 +479,12 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
adap->dev.of_node = pdev->dev.of_node;
adap->quirks = of_device_get_match_data(&pdev->dev);

/*
* Disable the hardware clock stretching timeout. SMBUS
* specifies a limit for how long the device can stretch the
* clock, but core I2C doesn't.
*/
bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_CLKT, 0);
bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, 0);

ret = i2c_add_adapter(adap);

@@ -673,7 +673,7 @@ static int brcmstb_i2c_probe(struct platform_device *pdev)

/* set the data in/out register size for compatible SoCs */
if (of_device_is_compatible(dev->device->of_node,
"brcmstb,brcmper-i2c"))
"brcm,brcmper-i2c"))
dev->data_regsz = sizeof(u8);
else
dev->data_regsz = sizeof(u32);

@@ -558,7 +558,7 @@ static int cci_probe(struct platform_device *pdev)
cci->master[idx].adap.quirks = &cci->data->quirks;
cci->master[idx].adap.algo = &cci_algo;
cci->master[idx].adap.dev.parent = dev;
cci->master[idx].adap.dev.of_node = child;
cci->master[idx].adap.dev.of_node = of_node_get(child);
cci->master[idx].master = idx;
cci->master[idx].cci = cci;

@@ -643,8 +643,10 @@ static int cci_probe(struct platform_device *pdev)
continue;

ret = i2c_add_adapter(&cci->master[i].adap);
if (ret < 0)
if (ret < 0) {
of_node_put(cci->master[i].adap.dev.of_node);
goto error_i2c;
}
}

pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);

@@ -655,9 +657,11 @@ static int cci_probe(struct platform_device *pdev)
return 0;

error_i2c:
for (; i >= 0; i--) {
if (cci->master[i].cci)
for (--i ; i >= 0; i--) {
if (cci->master[i].cci) {
i2c_del_adapter(&cci->master[i].adap);
of_node_put(cci->master[i].adap.dev.of_node);
}
}
error:
disable_irq(cci->irq);

@@ -673,8 +677,10 @@ static int cci_remove(struct platform_device *pdev)
int i;

for (i = 0; i < cci->data->num_masters; i++) {
if (cci->master[i].cci)
if (cci->master[i].cci) {
i2c_del_adapter(&cci->master[i].adap);
of_node_put(cci->master[i].adap.dev.of_node);
}
cci_halt(cci, i);
}
@@ -2285,6 +2285,12 @@ int input_register_device(struct input_dev *dev)
/* KEY_RESERVED is not supposed to be transmitted to userspace. */
__clear_bit(KEY_RESERVED, dev->keybit);

/* Buttonpads should not map BTN_RIGHT and/or BTN_MIDDLE. */
if (test_bit(INPUT_PROP_BUTTONPAD, dev->propbit)) {
__clear_bit(BTN_RIGHT, dev->keybit);
__clear_bit(BTN_MIDDLE, dev->keybit);
}

/* Make sure that bitmasks not mentioned in dev->evbit are clean. */
input_cleanse_bitmasks(dev);

@@ -75,6 +75,8 @@ static void psmouse_smbus_detach_i2c_client(struct i2c_client *client)
"Marking SMBus companion %s as gone\n",
dev_name(&smbdev->client->dev));
smbdev->dead = true;
device_link_remove(&smbdev->client->dev,
&smbdev->psmouse->ps2dev.serio->dev);
serio_rescan(smbdev->psmouse->ps2dev.serio);
} else {
list_del(&smbdev->node);

@@ -174,6 +176,8 @@ static void psmouse_smbus_disconnect(struct psmouse *psmouse)
kfree(smbdev);
} else {
smbdev->dead = true;
device_link_remove(&smbdev->client->dev,
&psmouse->ps2dev.serio->dev);
psmouse_dbg(smbdev->psmouse,
"posting removal request for SMBus companion %s\n",
dev_name(&smbdev->client->dev));

@@ -270,6 +274,12 @@ int psmouse_smbus_init(struct psmouse *psmouse,

if (smbdev->client) {
/* We have our companion device */
if (!device_link_add(&smbdev->client->dev,
&psmouse->ps2dev.serio->dev,
DL_FLAG_STATELESS))
psmouse_warn(psmouse,
"failed to set up link with iSMBus companion %s\n",
dev_name(&smbdev->client->dev));
return 0;
}

@@ -571,8 +571,20 @@ static SIMPLE_DEV_PM_OPS(zinitix_pm_ops, zinitix_suspend, zinitix_resume);

#ifdef CONFIG_OF
static const struct of_device_id zinitix_of_match[] = {
{ .compatible = "zinitix,bt402" },
{ .compatible = "zinitix,bt403" },
{ .compatible = "zinitix,bt404" },
{ .compatible = "zinitix,bt412" },
{ .compatible = "zinitix,bt413" },
{ .compatible = "zinitix,bt431" },
{ .compatible = "zinitix,bt432" },
{ .compatible = "zinitix,bt531" },
{ .compatible = "zinitix,bt532" },
{ .compatible = "zinitix,bt538" },
{ .compatible = "zinitix,bt541" },
{ .compatible = "zinitix,bt548" },
{ .compatible = "zinitix,bt554" },
{ .compatible = "zinitix,at100" },
{ }
};
MODULE_DEVICE_TABLE(of, zinitix_of_match);
@@ -2077,7 +2077,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
set_bit(DMF_FREEING, &md->flags);
spin_unlock(&_minor_lock);

blk_set_queue_dying(md->queue);
blk_mark_disk_dead(md->disk);

/*
* Take suspend_lock so that presuspend and postsuspend methods
@@ -264,16 +264,20 @@ static int phram_setup(const char *val)
}
}

if (erasesize)
div_u64_rem(len, (uint32_t)erasesize, &rem);

if (len == 0 || erasesize == 0 || erasesize > len
|| erasesize > UINT_MAX || rem) {
|| erasesize > UINT_MAX) {
parse_err("illegal erasesize or len\n");
ret = -EINVAL;
goto error;
}

div_u64_rem(len, (uint32_t)erasesize, &rem);
if (rem) {
parse_err("len is not multiple of erasesize\n");
ret = -EINVAL;
goto error;
}

ret = register_device(name, start, len, (uint32_t)erasesize);
if (ret)
goto error;

@@ -42,7 +42,8 @@ config MTD_NAND_OMAP2
tristate "OMAP2, OMAP3, OMAP4 and Keystone NAND controller"
depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
depends on HAS_IOMEM
select OMAP_GPMC if ARCH_K3
select MEMORY
select OMAP_GPMC
help
Support for NAND flash on Texas Instruments OMAP2, OMAP3, OMAP4
and Keystone platforms.
@@ -2106,7 +2106,7 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
mtd->oobsize / trans,
host->hwcfg.sector_size_1k);

if (!ret) {
if (ret != -EBADMSG) {
*err_addr = brcmnand_get_uncorrecc_addr(ctrl);

if (*err_addr)

@@ -2285,7 +2285,7 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip,
this->hw.must_apply_timings = false;
ret = gpmi_nfc_apply_timings(this);
if (ret)
return ret;
goto out_pm;
}

dev_dbg(this->dev, "%s: %d instructions\n", __func__, op->ninstrs);

@@ -2414,6 +2414,7 @@ unmap:

this->bch = false;

out_pm:
pm_runtime_mark_last_busy(this->dev);
pm_runtime_put_autosuspend(this->dev);

@@ -68,9 +68,14 @@ static struct ingenic_ecc *ingenic_ecc_get(struct device_node *np)
struct ingenic_ecc *ecc;

pdev = of_find_device_by_node(np);
if (!pdev || !platform_get_drvdata(pdev))
if (!pdev)
return ERR_PTR(-EPROBE_DEFER);

if (!platform_get_drvdata(pdev)) {
put_device(&pdev->dev);
return ERR_PTR(-EPROBE_DEFER);
}

ecc = platform_get_drvdata(pdev);
clk_prepare_enable(ecc->clk);

@@ -2,7 +2,6 @@
/*
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
*/

#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/bitops.h>

@@ -3073,10 +3072,6 @@ static int qcom_nandc_probe(struct platform_device *pdev)
if (dma_mapping_error(dev, nandc->base_dma))
return -ENXIO;

ret = qcom_nandc_alloc(nandc);
if (ret)
goto err_nandc_alloc;

ret = clk_prepare_enable(nandc->core_clk);
if (ret)
goto err_core_clk;

@@ -3085,6 +3080,10 @@ static int qcom_nandc_probe(struct platform_device *pdev)
if (ret)
goto err_aon_clk;

ret = qcom_nandc_alloc(nandc);
if (ret)
goto err_nandc_alloc;

ret = qcom_nandc_setup(nandc);
if (ret)
goto err_setup;

@@ -3096,15 +3095,14 @@ static int qcom_nandc_probe(struct platform_device *pdev)
return 0;

err_setup:
qcom_nandc_unalloc(nandc);
err_nandc_alloc:
clk_disable_unprepare(nandc->aon_clk);
err_aon_clk:
clk_disable_unprepare(nandc->core_clk);
err_core_clk:
qcom_nandc_unalloc(nandc);
err_nandc_alloc:
dma_unmap_resource(dev, res->start, resource_size(res),
DMA_BIDIRECTIONAL, 0);

return ret;
}
@@ -58,11 +58,11 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
                const struct mtd_partition **pparts,
                struct mtd_part_parser_data *data)
{
    size_t len = SMEM_FLASH_PTABLE_HDR_LEN;
    int ret, i, j, tmpparts, numparts = 0;
    struct smem_flash_pentry *pentry;
    struct smem_flash_ptable *ptable;
    size_t len = SMEM_FLASH_PTABLE_HDR_LEN;
    struct mtd_partition *parts;
    int ret, i, numparts;
    char *name, *c;

    if (IS_ENABLED(CONFIG_MTD_SPI_NOR_USE_4K_SECTORS)
@@ -75,7 +75,8 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
    pr_debug("Parsing partition table info from SMEM\n");
    ptable = qcom_smem_get(SMEM_APPS, SMEM_AARM_PARTITION_TABLE, &len);
    if (IS_ERR(ptable)) {
        pr_err("Error reading partition table header\n");
        if (PTR_ERR(ptable) != -EPROBE_DEFER)
            pr_err("Error reading partition table header\n");
        return PTR_ERR(ptable);
    }

@@ -87,8 +88,8 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
    }

    /* Ensure that # of partitions is less than the max we have allocated */
    numparts = le32_to_cpu(ptable->numparts);
    if (numparts > SMEM_FLASH_PTABLE_MAX_PARTS_V4) {
    tmpparts = le32_to_cpu(ptable->numparts);
    if (tmpparts > SMEM_FLASH_PTABLE_MAX_PARTS_V4) {
        pr_err("Partition numbers exceed the max limit\n");
        return -EINVAL;
    }
@@ -116,11 +117,17 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
        return PTR_ERR(ptable);
    }

    for (i = 0; i < tmpparts; i++) {
        pentry = &ptable->pentry[i];
        if (pentry->name[0] != '\0')
            numparts++;
    }

    parts = kcalloc(numparts, sizeof(*parts), GFP_KERNEL);
    if (!parts)
        return -ENOMEM;

    for (i = 0; i < numparts; i++) {
    for (i = 0, j = 0; i < tmpparts; i++) {
        pentry = &ptable->pentry[i];
        if (pentry->name[0] == '\0')
            continue;
@@ -135,24 +142,25 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
        for (c = name; *c != '\0'; c++)
            *c = tolower(*c);

        parts[i].name = name;
        parts[i].offset = le32_to_cpu(pentry->offset) * mtd->erasesize;
        parts[i].mask_flags = pentry->attr;
        parts[i].size = le32_to_cpu(pentry->length) * mtd->erasesize;
        parts[j].name = name;
        parts[j].offset = le32_to_cpu(pentry->offset) * mtd->erasesize;
        parts[j].mask_flags = pentry->attr;
        parts[j].size = le32_to_cpu(pentry->length) * mtd->erasesize;
        pr_debug("%d: %s offs=0x%08x size=0x%08x attr:0x%08x\n",
                 i, pentry->name, le32_to_cpu(pentry->offset),
                 le32_to_cpu(pentry->length), pentry->attr);
        j++;
    }

    pr_debug("SMEM partition table found: ver: %d len: %d\n",
             le32_to_cpu(ptable->version), numparts);
             le32_to_cpu(ptable->version), tmpparts);
    *pparts = parts;

    return numparts;

out_free_parts:
    while (--i >= 0)
        kfree(parts[i].name);
    while (--j >= 0)
        kfree(parts[j].name);
    kfree(parts);
    *pparts = NULL;

@@ -166,6 +174,8 @@ static void parse_qcomsmem_cleanup(const struct mtd_partition *pparts,

    for (i = 0; i < nr_parts; i++)
        kfree(pparts[i].name);

    kfree(pparts);
}

static const struct of_device_id qcomsmem_of_match_table[] = {
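The qcomsmempart change above avoids indexing the output array with the raw table index: non-empty entries are counted first, then packed with a separate write index so a skipped hole never leaves an uninitialized slot in a dense array. A minimal user-space sketch of that count-then-pack pattern; the entry/part types and pack_parts() helper here are hypothetical stand-ins, not the kernel structures:

#include <stdio.h>
#include <stdlib.h>

struct entry { char name[16]; };     /* stand-in for the SMEM pentry */
struct part  { const char *name; };  /* stand-in for struct mtd_partition */

static int pack_parts(const struct entry *tbl, int tmpparts, struct part **out)
{
    int i, j, numparts = 0;
    struct part *parts;

    /* Pass 1: count only the populated entries. */
    for (i = 0; i < tmpparts; i++)
        if (tbl[i].name[0] != '\0')
            numparts++;

    parts = calloc(numparts, sizeof(*parts));
    if (!parts)
        return -1;

    /* Pass 2: fill with a separate write index so holes are skipped
     * and the output array stays dense. */
    for (i = 0, j = 0; i < tmpparts; i++) {
        if (tbl[i].name[0] == '\0')
            continue;
        parts[j].name = tbl[i].name;
        j++;
    }

    *out = parts;
    return numparts;
}

int main(void)
{
    struct entry tbl[4] = { {"boot"}, {""}, {"rootfs"}, {"data"} };
    struct part *parts;
    int i, n = pack_parts(tbl, 4, &parts);

    for (i = 0; i < n; i++)
        printf("%d: %s\n", i, parts[i].name);
    free(parts);
    return 0;
}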
@@ -26,7 +26,7 @@ void ksz_update_port_member(struct ksz_device *dev, int port)
    struct dsa_switch *ds = dev->ds;
    u8 port_member = 0, cpu_port;
    const struct dsa_port *dp;
    int i;
    int i, j;

    if (!dsa_is_user_port(ds, port))
        return;
@@ -45,13 +45,33 @@ void ksz_update_port_member(struct ksz_device *dev, int port)
            continue;
        if (!dsa_port_bridge_same(dp, other_dp))
            continue;
        if (other_p->stp_state != BR_STATE_FORWARDING)
            continue;

        if (other_p->stp_state == BR_STATE_FORWARDING &&
            p->stp_state == BR_STATE_FORWARDING) {
        if (p->stp_state == BR_STATE_FORWARDING) {
            val |= BIT(port);
            port_member |= BIT(i);
        }

        /* Retain port [i]'s relationship to other ports than [port] */
        for (j = 0; j < ds->num_ports; j++) {
            const struct dsa_port *third_dp;
            struct ksz_port *third_p;

            if (j == i)
                continue;
            if (j == port)
                continue;
            if (!dsa_is_user_port(ds, j))
                continue;
            third_p = &dev->ports[j];
            if (third_p->stp_state != BR_STATE_FORWARDING)
                continue;
            third_dp = dsa_to_port(ds, j);
            if (dsa_port_bridge_same(other_dp, third_dp))
                val |= BIT(j);
        }

        dev->dev_ops->cfg_port_member(dev, i, val | cpu_port);
    }
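The ksz hunk above stops wiping a port's existing relationships when only one pair changes: for each port it re-derives the full membership mask over every other forwarding port in the same bridge. A toy model of that recomputation; the arrays and FORWARDING constant are invented for illustration, where the driver uses struct ksz_port and BR_STATE_FORWARDING:

#include <stdio.h>

#define NPORTS 5
#define FORWARDING 1

/* toy stand-ins for the driver's per-port state */
static int bridge_of[NPORTS] = { 1, 1, 1, 2, 2 };
static int stp[NPORTS]       = { 1, 1, 1, 1, 0 };

/* Membership for port i: every other forwarding port in the same bridge. */
static unsigned int member_mask(int i)
{
    unsigned int val = 0;

    if (stp[i] != FORWARDING)
        return 0;
    for (int j = 0; j < NPORTS; j++) {
        if (j == i || bridge_of[j] != bridge_of[i])
            continue;
        if (stp[j] == FORWARDING)
            val |= 1u << j;
    }
    return val;
}

int main(void)
{
    for (int i = 0; i < NPORTS; i++)
        printf("port %d member mask: 0x%x\n", i, member_mask(i));
    return 0;
}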
@@ -100,6 +100,9 @@ MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);
MODULE_FIRMWARE(FW_FILE_NAME_E1_V15);
MODULE_FIRMWARE(FW_FILE_NAME_E1H_V15);
MODULE_FIRMWARE(FW_FILE_NAME_E2_V15);

int bnx2x_num_queues;
module_param_named(num_queues, bnx2x_num_queues, int, 0444);
@@ -4776,8 +4776,10 @@ static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
        return rc;

    req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
    req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
    req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
    if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
        req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
        req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
    }
    req->mask = cpu_to_le32(vnic->rx_mask);
    return hwrm_req_send_silent(bp, req);
}
@@ -7820,6 +7822,19 @@ static int bnxt_map_fw_health_regs(struct bnxt *bp)
    return 0;
}

static void bnxt_remap_fw_health_regs(struct bnxt *bp)
{
    if (!bp->fw_health)
        return;

    if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) {
        bp->fw_health->status_reliable = true;
        bp->fw_health->resets_reliable = true;
    } else {
        bnxt_try_map_fw_health_reg(bp);
    }
}

static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
{
    struct bnxt_fw_health *fw_health = bp->fw_health;
@@ -8672,6 +8687,9 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
    vnic->uc_filter_count = 1;

    vnic->rx_mask = 0;
    if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state))
        goto skip_rx_mask;

    if (bp->dev->flags & IFF_BROADCAST)
        vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;

@@ -8681,7 +8699,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
    if (bp->dev->flags & IFF_ALLMULTI) {
        vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
        vnic->mc_list_count = 0;
    } else {
    } else if (bp->dev->flags & IFF_MULTICAST) {
        u32 mask = 0;

        bnxt_mc_list_updated(bp, &mask);
@@ -8692,6 +8710,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
    if (rc)
        goto err_out;

skip_rx_mask:
    rc = bnxt_hwrm_set_coal(bp);
    if (rc)
        netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
@@ -9883,8 +9902,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
        resc_reinit = true;
    if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
        fw_reset = true;
    else if (bp->fw_health && !bp->fw_health->status_reliable)
        bnxt_try_map_fw_health_reg(bp);
    else
        bnxt_remap_fw_health_regs(bp);

    if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
        netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
@@ -10364,13 +10383,15 @@ int bnxt_half_open_nic(struct bnxt *bp)
        goto half_open_err;
    }

    rc = bnxt_alloc_mem(bp, false);
    rc = bnxt_alloc_mem(bp, true);
    if (rc) {
        netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
        goto half_open_err;
    }
    rc = bnxt_init_nic(bp, false);
    set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
    rc = bnxt_init_nic(bp, true);
    if (rc) {
        clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
        netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
        goto half_open_err;
    }
@@ -10378,7 +10399,7 @@ int bnxt_half_open_nic(struct bnxt *bp)

half_open_err:
    bnxt_free_skbs(bp);
    bnxt_free_mem(bp, false);
    bnxt_free_mem(bp, true);
    dev_close(bp->dev);
    return rc;
}
@@ -10388,9 +10409,10 @@ half_open_err:
 */
void bnxt_half_close_nic(struct bnxt *bp)
{
    bnxt_hwrm_resource_free(bp, false, false);
    bnxt_hwrm_resource_free(bp, false, true);
    bnxt_free_skbs(bp);
    bnxt_free_mem(bp, false);
    bnxt_free_mem(bp, true);
    clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
}

void bnxt_reenable_sriov(struct bnxt *bp)
@@ -10806,7 +10828,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
    if (dev->flags & IFF_ALLMULTI) {
        mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
        vnic->mc_list_count = 0;
    } else {
    } else if (dev->flags & IFF_MULTICAST) {
        mc_update = bnxt_mc_list_updated(bp, &mask);
    }

@@ -10883,9 +10905,10 @@ skip_uc:
        !bnxt_promisc_ok(bp))
        vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
    rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
    if (rc && vnic->mc_list_count) {
    if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
        netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
                    rc);
        vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
        vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
        vnic->mc_list_count = 0;
        rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
@@ -1921,6 +1921,7 @@ struct bnxt {
#define BNXT_STATE_RECOVER              12
#define BNXT_STATE_FW_NON_FATAL_COND    13
#define BNXT_STATE_FW_ACTIVATE_RESET    14
#define BNXT_STATE_HALF_OPEN            15  /* For offline ethtool tests */

#define BNXT_NO_FW_ACCESS(bp) \
    (test_bit(BNXT_STATE_FW_FATAL_COND, &(bp)->state) || \
@@ -367,6 +367,16 @@ bnxt_dl_livepatch_report_err(struct bnxt *bp, struct netlink_ext_ack *extack,
    }
}

/* Live patch status in NVM */
#define BNXT_LIVEPATCH_NOT_INSTALLED    0
#define BNXT_LIVEPATCH_INSTALLED    FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL
#define BNXT_LIVEPATCH_REMOVED      FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE
#define BNXT_LIVEPATCH_MASK         (FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL | \
                                     FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE)
#define BNXT_LIVEPATCH_ACTIVATED    BNXT_LIVEPATCH_MASK

#define BNXT_LIVEPATCH_STATE(flags) ((flags) & BNXT_LIVEPATCH_MASK)

static int
bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
{
@@ -374,8 +384,9 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
    struct hwrm_fw_livepatch_query_input *query_req;
    struct hwrm_fw_livepatch_output *patch_resp;
    struct hwrm_fw_livepatch_input *patch_req;
    u16 flags, live_patch_state;
    bool activated = false;
    u32 installed = 0;
    u16 flags;
    u8 target;
    int rc;

@@ -394,7 +405,6 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
        hwrm_req_drop(bp, query_req);
        return rc;
    }
    patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_ACTIVATE;
    patch_req->loadtype = FW_LIVEPATCH_REQ_LOADTYPE_NVM_INSTALL;
    patch_resp = hwrm_req_hold(bp, patch_req);

@@ -407,12 +417,20 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
        }

        flags = le16_to_cpu(query_resp->status_flags);
        if (~flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL)
        live_patch_state = BNXT_LIVEPATCH_STATE(flags);

        if (live_patch_state == BNXT_LIVEPATCH_NOT_INSTALLED)
            continue;
        if ((flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE) &&
            !strncmp(query_resp->active_ver, query_resp->install_ver,
                     sizeof(query_resp->active_ver)))

        if (live_patch_state == BNXT_LIVEPATCH_ACTIVATED) {
            activated = true;
            continue;
        }

        if (live_patch_state == BNXT_LIVEPATCH_INSTALLED)
            patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_ACTIVATE;
        else if (live_patch_state == BNXT_LIVEPATCH_REMOVED)
            patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE;

        patch_req->fw_target = target;
        rc = hwrm_req_send(bp, patch_req);
@@ -424,8 +442,13 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
    }

    if (!rc && !installed) {
        NL_SET_ERR_MSG_MOD(extack, "No live patches found");
        rc = -ENOENT;
        if (activated) {
            NL_SET_ERR_MSG_MOD(extack, "Live patch already activated");
            rc = -EEXIST;
        } else {
            NL_SET_ERR_MSG_MOD(extack, "No live patches found");
            rc = -ENOENT;
        }
    }
    hwrm_req_drop(bp, query_req);
    hwrm_req_drop(bp, patch_req);
@@ -26,6 +26,7 @@
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
#include "bnxt_xdp.h"
#include "bnxt_ptp.h"
#include "bnxt_ethtool.h"
@@ -1972,6 +1973,9 @@ static int bnxt_get_fecparam(struct net_device *dev,
    case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
        fec->active_fec |= ETHTOOL_FEC_LLRS;
        break;
    case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
        fec->active_fec |= ETHTOOL_FEC_OFF;
        break;
    }
    return 0;
}
@@ -3457,7 +3461,7 @@ static int bnxt_run_loopback(struct bnxt *bp)
    if (!skb)
        return -ENOMEM;
    data = skb_put(skb, pkt_size);
    eth_broadcast_addr(data);
    ether_addr_copy(&data[i], bp->dev->dev_addr);
    i += ETH_ALEN;
    ether_addr_copy(&data[i], bp->dev->dev_addr);
    i += ETH_ALEN;
@@ -3551,9 +3555,12 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
    if (!offline) {
        bnxt_run_fw_tests(bp, test_mask, &test_results);
    } else {
        rc = bnxt_close_nic(bp, false, false);
        if (rc)
        bnxt_ulp_stop(bp);
        rc = bnxt_close_nic(bp, true, false);
        if (rc) {
            bnxt_ulp_start(bp, rc);
            return;
        }
        bnxt_run_fw_tests(bp, test_mask, &test_results);

        buf[BNXT_MACLPBK_TEST_IDX] = 1;
@@ -3563,6 +3570,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
        if (rc) {
            bnxt_hwrm_mac_loopback(bp, false);
            etest->flags |= ETH_TEST_FL_FAILED;
            bnxt_ulp_start(bp, rc);
            return;
        }
        if (bnxt_run_loopback(bp))
@@ -3588,7 +3596,8 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
        }
        bnxt_hwrm_phy_loopback(bp, false, false);
        bnxt_half_close_nic(bp);
        rc = bnxt_open_nic(bp, false, true);
        rc = bnxt_open_nic(bp, true, true);
        bnxt_ulp_start(bp, rc);
    }
    if (rc || bnxt_test_irq(bp)) {
        buf[BNXT_IRQ_TEST_IDX] = 1;
@@ -644,17 +644,23 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)

        /* Last byte of resp contains valid bit */
        valid = ((u8 *)ctx->resp) + len - 1;
        for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
        for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; ) {
            /* make sure we read from updated DMA memory */
            dma_rmb();
            if (*valid)
                break;
            usleep_range(1, 5);
            if (j < 10) {
                udelay(1);
                j++;
            } else {
                usleep_range(20, 30);
                j += 20;
            }
        }

        if (j >= HWRM_VALID_BIT_DELAY_USEC) {
            hwrm_err(bp, ctx, "Error (timeout: %u) msg {0x%x 0x%x} len:%d v:%d\n",
                     hwrm_total_timeout(i), req_type,
                     hwrm_total_timeout(i) + j, req_type,
                     le16_to_cpu(ctx->req->seq_id), len, *valid);
            goto exit;
        }
@@ -90,7 +90,7 @@ static inline unsigned int hwrm_total_timeout(unsigned int n)
}


#define HWRM_VALID_BIT_DELAY_USEC   150
#define HWRM_VALID_BIT_DELAY_USEC   50000

static inline bool bnxt_cfa_hwrm_message(u16 req_type)
{
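The __hwrm_send change above polls the valid bit with a two-stage wait: 1 us busy-waits for the first 10 us, then coarser 20-30 us sleeps, charging every wait against one shared microsecond budget so HWRM_VALID_BIT_DELAY_USEC keeps meaning "total microseconds". A compilable user-space sketch of the same loop shape; usleep() stands in for the kernel's udelay()/usleep_range(), and resp_valid for the DMA'd valid byte:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define VALID_BIT_DELAY_USEC 50000

/* stand-in for the DMA'd valid byte the firmware eventually flips */
static volatile bool resp_valid;

static bool wait_valid(void)
{
    unsigned int j;

    for (j = 0; j < VALID_BIT_DELAY_USEC; ) {
        if (resp_valid)
            return true;
        if (j < 10) {
            usleep(1);      /* cheap 1 us polls first */
            j++;
        } else {
            usleep(25);     /* then coarser 20-30 us waits */
            j += 20;
        }
    }
    return false;           /* budget exhausted: timeout */
}

int main(void)
{
    /* no producer here, so this demonstrates the timeout path (~50 ms) */
    resp_valid = false;
    printf("valid: %d\n", wait_valid());
    return 0;
}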
@@ -989,117 +989,6 @@ static int ftgmac100_alloc_rx_buffers(struct ftgmac100 *priv)
    return 0;
}

static void ftgmac100_adjust_link(struct net_device *netdev)
{
    struct ftgmac100 *priv = netdev_priv(netdev);
    struct phy_device *phydev = netdev->phydev;
    bool tx_pause, rx_pause;
    int new_speed;

    /* We store "no link" as speed 0 */
    if (!phydev->link)
        new_speed = 0;
    else
        new_speed = phydev->speed;

    /* Grab pause settings from PHY if configured to do so */
    if (priv->aneg_pause) {
        rx_pause = tx_pause = phydev->pause;
        if (phydev->asym_pause)
            tx_pause = !rx_pause;
    } else {
        rx_pause = priv->rx_pause;
        tx_pause = priv->tx_pause;
    }

    /* Link hasn't changed, do nothing */
    if (phydev->speed == priv->cur_speed &&
        phydev->duplex == priv->cur_duplex &&
        rx_pause == priv->rx_pause &&
        tx_pause == priv->tx_pause)
        return;

    /* Print status if we have a link or we had one and just lost it,
     * don't print otherwise.
     */
    if (new_speed || priv->cur_speed)
        phy_print_status(phydev);

    priv->cur_speed = new_speed;
    priv->cur_duplex = phydev->duplex;
    priv->rx_pause = rx_pause;
    priv->tx_pause = tx_pause;

    /* Link is down, do nothing else */
    if (!new_speed)
        return;

    /* Disable all interrupts */
    iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);

    /* Reset the adapter asynchronously */
    schedule_work(&priv->reset_task);
}

static int ftgmac100_mii_probe(struct net_device *netdev)
{
    struct ftgmac100 *priv = netdev_priv(netdev);
    struct platform_device *pdev = to_platform_device(priv->dev);
    struct device_node *np = pdev->dev.of_node;
    struct phy_device *phydev;
    phy_interface_t phy_intf;
    int err;

    /* Default to RGMII. It's a gigabit part after all */
    err = of_get_phy_mode(np, &phy_intf);
    if (err)
        phy_intf = PHY_INTERFACE_MODE_RGMII;

    /* Aspeed only supports these. I don't know about other IP
     * block vendors so I'm going to just let them through for
     * now. Note that this is only a warning if for some obscure
     * reason the DT really means to lie about it or it's a newer
     * part we don't know about.
     *
     * On the Aspeed SoC there are additionally straps and SCU
     * control bits that could tell us what the interface is
     * (or allow us to configure it while the IP block is held
     * in reset). For now I chose to keep this driver away from
     * those SoC specific bits and assume the device-tree is
     * right and the SCU has been configured properly by pinmux
     * or the firmware.
     */
    if (priv->is_aspeed && !(phy_interface_mode_is_rgmii(phy_intf))) {
        netdev_warn(netdev,
                    "Unsupported PHY mode %s !\n",
                    phy_modes(phy_intf));
    }

    phydev = phy_find_first(priv->mii_bus);
    if (!phydev) {
        netdev_info(netdev, "%s: no PHY found\n", netdev->name);
        return -ENODEV;
    }

    phydev = phy_connect(netdev, phydev_name(phydev),
                         &ftgmac100_adjust_link, phy_intf);

    if (IS_ERR(phydev)) {
        netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name);
        return PTR_ERR(phydev);
    }

    /* Indicate that we support PAUSE frames (see comment in
     * Documentation/networking/phy.rst)
     */
    phy_support_asym_pause(phydev);

    /* Display what we found */
    phy_attached_info(phydev);

    return 0;
}

static int ftgmac100_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
{
    struct net_device *netdev = bus->priv;
@@ -1410,10 +1299,8 @@ static int ftgmac100_init_all(struct ftgmac100 *priv, bool ignore_alloc_err)
    return err;
}

static void ftgmac100_reset_task(struct work_struct *work)
static void ftgmac100_reset(struct ftgmac100 *priv)
{
    struct ftgmac100 *priv = container_of(work, struct ftgmac100,
                                          reset_task);
    struct net_device *netdev = priv->netdev;
    int err;

@@ -1459,6 +1346,134 @@ static void ftgmac100_reset_task(struct work_struct *work)
    rtnl_unlock();
}

static void ftgmac100_reset_task(struct work_struct *work)
{
    struct ftgmac100 *priv = container_of(work, struct ftgmac100,
                                          reset_task);

    ftgmac100_reset(priv);
}

static void ftgmac100_adjust_link(struct net_device *netdev)
{
    struct ftgmac100 *priv = netdev_priv(netdev);
    struct phy_device *phydev = netdev->phydev;
    bool tx_pause, rx_pause;
    int new_speed;

    /* We store "no link" as speed 0 */
    if (!phydev->link)
        new_speed = 0;
    else
        new_speed = phydev->speed;

    /* Grab pause settings from PHY if configured to do so */
    if (priv->aneg_pause) {
        rx_pause = tx_pause = phydev->pause;
        if (phydev->asym_pause)
            tx_pause = !rx_pause;
    } else {
        rx_pause = priv->rx_pause;
        tx_pause = priv->tx_pause;
    }

    /* Link hasn't changed, do nothing */
    if (phydev->speed == priv->cur_speed &&
        phydev->duplex == priv->cur_duplex &&
        rx_pause == priv->rx_pause &&
        tx_pause == priv->tx_pause)
        return;

    /* Print status if we have a link or we had one and just lost it,
     * don't print otherwise.
     */
    if (new_speed || priv->cur_speed)
        phy_print_status(phydev);

    priv->cur_speed = new_speed;
    priv->cur_duplex = phydev->duplex;
    priv->rx_pause = rx_pause;
    priv->tx_pause = tx_pause;

    /* Link is down, do nothing else */
    if (!new_speed)
        return;

    /* Disable all interrupts */
    iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);

    /* Release phy lock to allow ftgmac100_reset to acquire it, keeping lock
     * order consistent to prevent deadlock.
     */
    if (netdev->phydev)
        mutex_unlock(&netdev->phydev->lock);

    ftgmac100_reset(priv);

    if (netdev->phydev)
        mutex_lock(&netdev->phydev->lock);

}

static int ftgmac100_mii_probe(struct net_device *netdev)
{
    struct ftgmac100 *priv = netdev_priv(netdev);
    struct platform_device *pdev = to_platform_device(priv->dev);
    struct device_node *np = pdev->dev.of_node;
    struct phy_device *phydev;
    phy_interface_t phy_intf;
    int err;

    /* Default to RGMII. It's a gigabit part after all */
    err = of_get_phy_mode(np, &phy_intf);
    if (err)
        phy_intf = PHY_INTERFACE_MODE_RGMII;

    /* Aspeed only supports these. I don't know about other IP
     * block vendors so I'm going to just let them through for
     * now. Note that this is only a warning if for some obscure
     * reason the DT really means to lie about it or it's a newer
     * part we don't know about.
     *
     * On the Aspeed SoC there are additionally straps and SCU
     * control bits that could tell us what the interface is
     * (or allow us to configure it while the IP block is held
     * in reset). For now I chose to keep this driver away from
     * those SoC specific bits and assume the device-tree is
     * right and the SCU has been configured properly by pinmux
     * or the firmware.
     */
    if (priv->is_aspeed && !(phy_interface_mode_is_rgmii(phy_intf))) {
        netdev_warn(netdev,
                    "Unsupported PHY mode %s !\n",
                    phy_modes(phy_intf));
    }

    phydev = phy_find_first(priv->mii_bus);
    if (!phydev) {
        netdev_info(netdev, "%s: no PHY found\n", netdev->name);
        return -ENODEV;
    }

    phydev = phy_connect(netdev, phydev_name(phydev),
                         &ftgmac100_adjust_link, phy_intf);

    if (IS_ERR(phydev)) {
        netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name);
        return PTR_ERR(phydev);
    }

    /* Indicate that we support PAUSE frames (see comment in
     * Documentation/networking/phy.rst)
     */
    phy_support_asym_pause(phydev);

    /* Display what we found */
    phy_attached_info(phydev);

    return 0;
}

static int ftgmac100_open(struct net_device *netdev)
{
    struct ftgmac100 *priv = netdev_priv(netdev);
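The ftgmac100 rework above exists to keep one lock order everywhere: the reset path takes rtnl and then the PHY mutex, so adjust_link, which is entered with the PHY mutex already held, must drop it before calling ftgmac100_reset() synchronously and retake it afterwards. A minimal pthread sketch of that drop-reacquire discipline; the rtnl/phy mutexes and function names here are illustrative, not the kernel's locks:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t phy  = PTHREAD_MUTEX_INITIALIZER;

/* The reset path always takes rtnl, then phy. */
static void do_reset(void)
{
    pthread_mutex_lock(&rtnl);
    pthread_mutex_lock(&phy);
    puts("reset under rtnl -> phy");
    pthread_mutex_unlock(&phy);
    pthread_mutex_unlock(&rtnl);
}

/* Entered with phy held (like adjust_link): release it first so the
 * rtnl -> phy order is preserved, then re-acquire after the reset. */
static void link_change_with_phy_held(void)
{
    pthread_mutex_unlock(&phy);
    do_reset();
    pthread_mutex_lock(&phy);
}

int main(void)
{
    pthread_mutex_lock(&phy);
    link_change_with_phy_held();
    pthread_mutex_unlock(&phy);
    return 0;
}

Taking phy while still holding it across do_reset() would invert the order against the reset path and can deadlock; releasing first keeps both paths ordered rtnl -> phy.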
@@ -5934,10 +5934,14 @@ static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
                   be64_to_cpu(session_token));
    rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
                            H_SESSION_ERR_DETECTED, session_token, 0, 0);
    if (rc)
    if (rc) {
        netdev_err(netdev,
                   "H_VIOCTL initiated failover failed, rc %ld\n",
                   rc);
        goto last_resort;
    }

    return count;

last_resort:
    netdev_dbg(netdev, "Trying to send CRQ_CMD, the last resort\n");
@@ -5386,15 +5386,7 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
    /* There is no need to reset BW when mqprio mode is on. */
    if (pf->flags & I40E_FLAG_TC_MQPRIO)
        return 0;

    if (!vsi->mqprio_qopt.qopt.hw) {
        if (pf->flags & I40E_FLAG_DCB_ENABLED)
            goto skip_reset;

        if (IS_ENABLED(CONFIG_I40E_DCB) &&
            i40e_dcb_hw_get_num_tc(&pf->hw) == 1)
            goto skip_reset;

    if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
        ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
        if (ret)
            dev_info(&pf->pdev->dev,
@@ -5402,8 +5394,6 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
                     vsi->seid);
        return ret;
    }

skip_reset:
    memset(&bw_data, 0, sizeof(bw_data));
    bw_data.tc_valid_bits = enabled_tc;
    for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
@@ -281,7 +281,6 @@ enum ice_pf_state {
    ICE_VFLR_EVENT_PENDING,
    ICE_FLTR_OVERFLOW_PROMISC,
    ICE_VF_DIS,
    ICE_VF_DEINIT_IN_PROGRESS,
    ICE_CFG_BUSY,
    ICE_SERVICE_SCHED,
    ICE_SERVICE_DIS,
@@ -3379,7 +3379,7 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,

    if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) &&
        !ice_fw_supports_report_dflt_cfg(hw)) {
        struct ice_link_default_override_tlv tlv;
        struct ice_link_default_override_tlv tlv = { 0 };

        status = ice_get_link_default_override(&tlv, pi);
        if (status)
@@ -44,6 +44,7 @@ ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf, const u8 *mac)
                        ctrl_vsi->rxq_map[vf->vf_id];
    rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE;
    rule_info.flags_info.act_valid = true;
    rule_info.tun_type = ICE_SW_TUN_AND_NON_TUN;

    err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info,
                           vf->repr->mac_rule);
@@ -1802,7 +1802,9 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
             * reset, so print the event prior to reset.
             */
            ice_print_vf_rx_mdd_event(vf);
            mutex_lock(&pf->vf[i].cfg_lock);
            ice_reset_vf(&pf->vf[i], false);
            mutex_unlock(&pf->vf[i].cfg_lock);
        }
    }
}
@@ -47,6 +47,7 @@ enum ice_protocol_type {

enum ice_sw_tunnel_type {
    ICE_NON_TUN = 0,
    ICE_SW_TUN_AND_NON_TUN,
    ICE_SW_TUN_VXLAN,
    ICE_SW_TUN_GENEVE,
    ICE_SW_TUN_NVGRE,
@@ -1533,9 +1533,12 @@ exit:
static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta)
{
    struct timespec64 now, then;
    int ret;

    then = ns_to_timespec64(delta);
    ice_ptp_gettimex64(info, &now, NULL);
    ret = ice_ptp_gettimex64(info, &now, NULL);
    if (ret)
        return ret;
    now = timespec64_add(now, then);

    return ice_ptp_settime64(info, (const struct timespec64 *)&now);
@@ -4617,6 +4617,7 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
    case ICE_SW_TUN_NVGRE:
        prof_type = ICE_PROF_TUN_GRE;
        break;
    case ICE_SW_TUN_AND_NON_TUN:
    default:
        prof_type = ICE_PROF_ALL;
        break;
@@ -5385,7 +5386,8 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
    if (status)
        goto err_ice_add_adv_rule;

    if (rinfo->tun_type != ICE_NON_TUN) {
    if (rinfo->tun_type != ICE_NON_TUN &&
        rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
        status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
                                         s_rule->pdata.lkup_tx_rx.hdr,
                                         pkt_offsets);
@@ -709,7 +709,7 @@ ice_tc_set_port(struct flow_match_ports match,
            fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT;
        else
            fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT;
        fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT;

        headers->l4_key.dst_port = match.key->dst;
        headers->l4_mask.dst_port = match.mask->dst;
    }
@@ -718,7 +718,7 @@ ice_tc_set_port(struct flow_match_ports match,
            fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT;
        else
            fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT;
        fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT;

        headers->l4_key.src_port = match.key->src;
        headers->l4_mask.src_port = match.mask->src;
    }
@@ -502,8 +502,6 @@ void ice_free_vfs(struct ice_pf *pf)
    struct ice_hw *hw = &pf->hw;
    unsigned int tmp, i;

    set_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state);

    if (!pf->vf)
        return;

@@ -521,22 +519,26 @@ void ice_free_vfs(struct ice_pf *pf)
    else
        dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");

    /* Avoid wait time by stopping all VFs at the same time */
    ice_for_each_vf(pf, i)
        ice_dis_vf_qs(&pf->vf[i]);

    tmp = pf->num_alloc_vfs;
    pf->num_qps_per_vf = 0;
    pf->num_alloc_vfs = 0;
    for (i = 0; i < tmp; i++) {
        if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
        struct ice_vf *vf = &pf->vf[i];

        mutex_lock(&vf->cfg_lock);

        ice_dis_vf_qs(vf);

        if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
            /* disable VF qp mappings and set VF disable state */
            ice_dis_vf_mappings(&pf->vf[i]);
            set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states);
            ice_free_vf_res(&pf->vf[i]);
            ice_dis_vf_mappings(vf);
            set_bit(ICE_VF_STATE_DIS, vf->vf_states);
            ice_free_vf_res(vf);
        }

        mutex_destroy(&pf->vf[i].cfg_lock);
        mutex_unlock(&vf->cfg_lock);

        mutex_destroy(&vf->cfg_lock);
    }

    if (ice_sriov_free_msix_res(pf))
@@ -572,7 +574,6 @@ void ice_free_vfs(struct ice_pf *pf)
                 i);

    clear_bit(ICE_VF_DIS, pf->state);
    clear_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state);
    clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
}

@@ -1564,6 +1565,8 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
    ice_for_each_vf(pf, v) {
        vf = &pf->vf[v];

        mutex_lock(&vf->cfg_lock);

        vf->driver_caps = 0;
        ice_vc_set_default_allowlist(vf);

@@ -1578,6 +1581,8 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
        ice_vf_pre_vsi_rebuild(vf);
        ice_vf_rebuild_vsi(vf);
        ice_vf_post_vsi_rebuild(vf);

        mutex_unlock(&vf->cfg_lock);
    }

    if (ice_is_eswitch_mode_switchdev(pf))
@@ -1628,6 +1633,8 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
    u32 reg;
    int i;

    lockdep_assert_held(&vf->cfg_lock);

    dev = ice_pf_to_dev(pf);

    if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
@@ -2143,9 +2150,12 @@ void ice_process_vflr_event(struct ice_pf *pf)
        bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
        /* read GLGEN_VFLRSTAT register to find out the flr VFs */
        reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
        if (reg & BIT(bit_idx))
        if (reg & BIT(bit_idx)) {
            /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
            mutex_lock(&vf->cfg_lock);
            ice_reset_vf(vf, true);
            mutex_unlock(&vf->cfg_lock);
        }
    }
}

@@ -2222,7 +2232,9 @@ ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
    if (!vf)
        return;

    mutex_lock(&vf->cfg_lock);
    ice_vc_reset_vf(vf);
    mutex_unlock(&vf->cfg_lock);
}

/**
@@ -5759,10 +5771,6 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
    struct device *dev;
    int err = 0;

    /* if de-init is underway, don't process messages from VF */
    if (test_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state))
        return;

    dev = ice_pf_to_dev(pf);
    if (ice_validate_vf_id(pf, vf_id)) {
        err = -EINVAL;
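The ice changes above route every VF reset through the per-VF cfg_lock, so a reset can no longer interleave with a concurrent configuration change, and ice_reset_vf() now asserts the lock is held. A small sketch of that per-object guard; the struct vf here is hypothetical, where the kernel uses struct ice_vf with mutex_lock()/lockdep_assert_held():

#include <pthread.h>
#include <stdio.h>

/* Toy per-VF object: every config/reset path must hold cfg_lock. */
struct vf {
    int id;
    pthread_mutex_t cfg_lock;
};

static void reset_vf(struct vf *vf)
{
    /* the kernel version asserts the lock here (lockdep_assert_held) */
    printf("resetting VF %d\n", vf->id);
}

static void handle_event(struct vf *vf)
{
    pthread_mutex_lock(&vf->cfg_lock);
    reset_vf(vf);               /* serialized against other config paths */
    pthread_mutex_unlock(&vf->cfg_lock);
}

int main(void)
{
    struct vf vf = { .id = 0, .cfg_lock = PTHREAD_MUTEX_INITIALIZER };
    handle_event(&vf);
    return 0;
}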
@@ -2704,6 +2704,16 @@ MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids);

static struct platform_device *port_platdev[3];

static void mv643xx_eth_shared_of_remove(void)
{
    int n;

    for (n = 0; n < 3; n++) {
        platform_device_del(port_platdev[n]);
        port_platdev[n] = NULL;
    }
}

static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
                                          struct device_node *pnp)
{
@@ -2740,7 +2750,9 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
        return -EINVAL;
    }

    of_get_mac_address(pnp, ppd.mac_addr);
    ret = of_get_mac_address(pnp, ppd.mac_addr);
    if (ret)
        return ret;

    mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
    mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr);
@@ -2804,21 +2816,13 @@ static int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
        ret = mv643xx_eth_shared_of_add_port(pdev, pnp);
        if (ret) {
            of_node_put(pnp);
            mv643xx_eth_shared_of_remove();
            return ret;
        }
    }
    return 0;
}

static void mv643xx_eth_shared_of_remove(void)
{
    int n;

    for (n = 0; n < 3; n++) {
        platform_device_del(port_platdev[n]);
        port_platdev[n] = NULL;
    }
}
#else
static inline int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
{
@@ -6870,6 +6870,9 @@ static int mvpp2_port_probe(struct platform_device *pdev,
    dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
    dev->dev.of_node = port_node;

    port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops;
    port->pcs_xlg.ops = &mvpp2_phylink_xlg_pcs_ops;

    if (!mvpp2_use_acpi_compat_mode(port_fwnode)) {
        port->phylink_config.dev = &dev->dev;
        port->phylink_config.type = PHYLINK_NETDEV;
@@ -6940,9 +6943,6 @@ static int mvpp2_port_probe(struct platform_device *pdev,
                  port->phylink_config.supported_interfaces);
    }

    port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops;
    port->pcs_xlg.ops = &mvpp2_phylink_xlg_pcs_ops;

    phylink = phylink_create(&port->phylink_config, port_fwnode,
                             phy_mode, &mvpp2_phylink_ops);
    if (IS_ERR(phylink)) {
@@ -18,11 +18,13 @@ struct mlx5e_tc_act_parse_state {
    struct netlink_ext_ack *extack;
    u32 actions;
    bool ct;
    bool ct_clear;
    bool encap;
    bool decap;
    bool mpls_push;
    bool ptype_host;
    const struct ip_tunnel_info *tun_info;
    struct mlx5e_mpls_info mpls_info;
    int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
    int if_count;
    struct mlx5_tc_ct_priv *ct_priv;
@@ -31,6 +31,10 @@ tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state,
    bool clear_action = act->ct.action & TCA_CT_ACT_CLEAR;
    int err;

    /* It's redundant to do ct clear more than once. */
    if (clear_action && parse_state->ct_clear)
        return 0;

    err = mlx5_tc_ct_parse_action(parse_state->ct_priv, attr,
                                  &attr->parse_attr->mod_hdr_acts,
                                  act, parse_state->extack);
@@ -46,6 +50,7 @@ tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state,
        flow_flag_set(parse_state->flow, CT);
        parse_state->ct = true;
    }
    parse_state->ct_clear = clear_action;

    return 0;
}
@@ -178,6 +178,12 @@ parse_mirred_encap(struct mlx5e_tc_act_parse_state *parse_state,
        return -ENOMEM;

    parse_state->encap = false;

    if (parse_state->mpls_push) {
        memcpy(&parse_attr->mpls_info[esw_attr->out_count],
               &parse_state->mpls_info, sizeof(parse_state->mpls_info));
        parse_state->mpls_push = false;
    }
    esw_attr->dests[esw_attr->out_count].flags |= MLX5_ESW_DEST_ENCAP;
    esw_attr->out_count++;
    /* attr->dests[].rep is resolved when we handle encap */
@@ -23,6 +23,16 @@ tc_act_can_offload_mpls_push(struct mlx5e_tc_act_parse_state *parse_state,
    return true;
}

static void
copy_mpls_info(struct mlx5e_mpls_info *mpls_info,
               const struct flow_action_entry *act)
{
    mpls_info->label = act->mpls_push.label;
    mpls_info->tc = act->mpls_push.tc;
    mpls_info->bos = act->mpls_push.bos;
    mpls_info->ttl = act->mpls_push.ttl;
}

static int
tc_act_parse_mpls_push(struct mlx5e_tc_act_parse_state *parse_state,
                       const struct flow_action_entry *act,
@@ -30,6 +40,7 @@ tc_act_parse_mpls_push(struct mlx5e_tc_act_parse_state *parse_state,
                       struct mlx5_flow_attr *attr)
{
    parse_state->mpls_push = true;
    copy_mpls_info(&parse_state->mpls_info, act);

    return 0;
}
@@ -35,6 +35,7 @@ enum {

struct mlx5e_tc_flow_parse_attr {
    const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
    struct mlx5e_mpls_info mpls_info[MLX5_MAX_FLOW_FWD_VPORTS];
    struct net_device *filter_dev;
    struct mlx5_flow_spec spec;
    struct pedit_headers_action hdrs[__PEDIT_CMD_MAX];
@@ -768,6 +768,7 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
    struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
    struct mlx5e_tc_flow_parse_attr *parse_attr;
    const struct ip_tunnel_info *tun_info;
    const struct mlx5e_mpls_info *mpls_info;
    unsigned long tbl_time_before = 0;
    struct mlx5e_encap_entry *e;
    struct mlx5e_encap_key key;
@@ -778,6 +779,7 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,

    parse_attr = attr->parse_attr;
    tun_info = parse_attr->tun_info[out_index];
    mpls_info = &parse_attr->mpls_info[out_index];
    family = ip_tunnel_info_af(tun_info);
    key.ip_tun_key = &tun_info->key;
    key.tc_tunnel = mlx5e_get_tc_tun(mirred_dev);
@@ -828,6 +830,7 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
        goto out_err_init;
    }
    e->tun_info = tun_info;
    memcpy(&e->mpls_info, mpls_info, sizeof(*mpls_info));
    err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
    if (err)
        goto out_err_init;
@@ -30,16 +30,15 @@ static int generate_ip_tun_hdr(char buf[],
                               struct mlx5e_encap_entry *r)
{
    const struct ip_tunnel_key *tun_key = &r->tun_info->key;
    const struct mlx5e_mpls_info *mpls_info = &r->mpls_info;
    struct udphdr *udp = (struct udphdr *)(buf);
    struct mpls_shim_hdr *mpls;
    u32 tun_id;

    tun_id = be32_to_cpu(tunnel_id_to_key32(tun_key->tun_id));
    mpls = (struct mpls_shim_hdr *)(udp + 1);
    *ip_proto = IPPROTO_UDP;

    udp->dest = tun_key->tp_dst;
    *mpls = mpls_entry_encode(tun_id, tun_key->ttl, tun_key->tos, true);
    *mpls = mpls_entry_encode(mpls_info->label, mpls_info->ttl, mpls_info->tc, mpls_info->bos);

    return 0;
}
@@ -60,37 +59,31 @@ static int parse_tunnel(struct mlx5e_priv *priv,
                        void *headers_v)
{
    struct flow_rule *rule = flow_cls_offload_flow_rule(f);
    struct flow_match_enc_keyid enc_keyid;
    struct flow_match_mpls match;
    void *misc2_c;
    void *misc2_v;

    misc2_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                           misc_parameters_2);
    misc2_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                           misc_parameters_2);

    if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS))
        return 0;

    if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID))
        return 0;

    flow_rule_match_enc_keyid(rule, &enc_keyid);

    if (!enc_keyid.mask->keyid)
        return 0;

    if (!MLX5_CAP_ETH(priv->mdev, tunnel_stateless_mpls_over_udp) &&
        !(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) & MLX5_FLEX_PROTO_CW_MPLS_UDP))
        return -EOPNOTSUPP;

    if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID))
        return -EOPNOTSUPP;

    if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS))
        return 0;

    flow_rule_match_mpls(rule, &match);

    /* Only support matching the first LSE */
    if (match.mask->used_lses != 1)
        return -EOPNOTSUPP;

    misc2_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                           misc_parameters_2);
    misc2_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                           misc_parameters_2);

    MLX5_SET(fte_match_set_misc2, misc2_c,
             outer_first_mpls_over_udp.mpls_label,
             match.mask->ls[0].mpls_label);
@@ -1792,7 +1792,7 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev,
        if (size_read < 0) {
            netdev_err(priv->netdev, "%s: mlx5_query_eeprom failed:0x%x\n",
                       __func__, size_read);
            return 0;
            return size_read;
        }

        i += size_read;
Some files were not shown because too many files have changed in this diff.