commit 675f176b4d

Merge ra.kernel.org:/pub/scm/linux/kernel/git/netdev/net

Some of the devlink bits were tricky, but I think I got it right.

Signed-off-by: David S. Miller <davem@davemloft.net>
.mailmap (2 changes)

@@ -25,6 +25,8 @@ Aleksey Gorelov <aleksey_gorelov@phoenix.com>
 Alexander Lobakin <alobakin@pm.me> <alobakin@dlink.ru>
 Alexander Lobakin <alobakin@pm.me> <alobakin@marvell.com>
 Alexander Lobakin <alobakin@pm.me> <bloodyreaper@yandex.ru>
+Alexander Mikhalitsyn <alexander@mihalicyn.com> <alexander.mikhalitsyn@virtuozzo.com>
+Alexander Mikhalitsyn <alexander@mihalicyn.com> <aleksandr.mikhalitsyn@canonical.com>
 Alexandre Belloni <alexandre.belloni@bootlin.com> <alexandre.belloni@free-electrons.com>
 Alexei Starovoitov <ast@kernel.org> <alexei.starovoitov@gmail.com>
 Alexei Starovoitov <ast@kernel.org> <ast@fb.com>
Documentation/admin-guide/hw-vuln/cross-thread-rsb.rst (new file, 92 lines)

.. SPDX-License-Identifier: GPL-2.0

Cross-Thread Return Address Predictions
=======================================

Certain AMD and Hygon processors are subject to a cross-thread return address
predictions vulnerability. When running in SMT mode and one sibling thread
transitions out of C0 state, the other sibling thread could use return target
predictions from the sibling thread that transitioned out of C0.

The Spectre v2 mitigations protect the Linux kernel, as it fills the return
address prediction entries with safe targets when context switching to the idle
thread. However, KVM does allow a VMM to prevent exiting guest mode when
transitioning out of C0. This could result in a guest-controlled return target
being consumed by the sibling thread.

Affected processors
-------------------

The following CPUs are vulnerable:

- AMD Family 17h processors
- Hygon Family 18h processors

Related CVEs
------------

The following CVE entry is related to this issue:

   ============== =======================================
   CVE-2022-27672 Cross-Thread Return Address Predictions
   ============== =======================================

Problem
-------

Affected SMT-capable processors support 1T and 2T modes of execution when SMT
is enabled. In 2T mode, both threads in a core are executing code. For the
processor core to enter 1T mode, it is required that one of the threads
requests to transition out of the C0 state. This can be communicated with the
HLT instruction or with an MWAIT instruction that requests non-C0.
When the thread re-enters the C0 state, the processor transitions back
to 2T mode, assuming the other thread is also still in C0 state.

In affected processors, the return address predictor (RAP) is partitioned
depending on the SMT mode. For instance, in 2T mode each thread uses a private
16-entry RAP, but in 1T mode, the active thread uses a 32-entry RAP. Upon
transition between 1T/2T mode, the RAP contents are not modified but the RAP
pointers (which control the next return target to use for predictions) may
change. This behavior may result in return targets from one SMT thread being
used by RET predictions in the sibling thread following a 1T/2T switch. In
particular, a RET instruction executed immediately after a transition to 1T may
use a return target from the thread that just became idle. In theory, this
could lead to information disclosure if the return targets used do not come
from trustworthy code.

Attack scenarios
----------------

An attack can be mounted on affected processors by performing a series of CALL
instructions with targeted return locations and then transitioning out of C0
state.

Mitigation mechanism
--------------------

Before entering idle state, the kernel context switches to the idle thread. The
context switch fills the RAP entries (referred to as the RSB in Linux) with safe
targets by performing a sequence of CALL instructions.
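The CALL-sequence idea can be sketched as follows (an illustrative sketch
only, assuming x86-64, a 16-entry buffer, and GCC inline assembly; it is not
the kernel's actual __FILL_RETURN_BUFFER macro from <asm/nospec-branch.h>)::

	/*
	 * Each CALL plants a safe return target in the return-stack
	 * predictor; the PAUSE/LFENCE loop after the CALL is a trap for
	 * any stale prediction that fires anyway.
	 */
	static inline void fill_rsb_sketch(void)
	{
		asm volatile(".rept 16\n\t"
			     "call 1f\n"
			     "2:\tpause; lfence; jmp 2b\n"
			     "1:\n\t"
			     ".endr\n\t"
			     "add $(16 * 8), %%rsp" /* drop the 16 pushed return addresses */
			     ::: "memory", "cc");
	}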
Prevent a guest VM from directly putting the processor into an idle state by
intercepting HLT and MWAIT instructions.

Both mitigations are required to fully address this issue.

Mitigation control on the kernel command line
---------------------------------------------

Use existing Spectre v2 mitigations that will fill the RSB on context switch.

Mitigation control for KVM - module parameter
---------------------------------------------

By default, the KVM hypervisor mitigates this issue by intercepting guest
attempts to transition out of C0. A VMM can use the KVM_CAP_X86_DISABLE_EXITS
capability to override those interceptions, but since this is not common, the
mitigation that covers this path is not enabled by default.

The mitigation for the KVM_CAP_X86_DISABLE_EXITS capability can be turned on
using the boolean module parameter mitigate_smt_rsb, e.g.::

	kvm.mitigate_smt_rsb=1
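For reference, a rough sketch of the VMM-side call that this parameter
guards (assumptions: vm_fd is an open KVM VM file descriptor, error handling
is elided)::

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	static int disable_idle_exits(int vm_fd)
	{
		struct kvm_enable_cap cap = {
			.cap = KVM_CAP_X86_DISABLE_EXITS,
			.args = { KVM_X86_DISABLE_EXITS_HLT |
				  KVM_X86_DISABLE_EXITS_MWAIT },
		};

		/* With kvm.mitigate_smt_rsb=1, KVM no longer honors these bits. */
		return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
	}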
@@ -18,3 +18,4 @@ are configurable at compile, boot or run time.
    core-scheduling.rst
    l1d_flush.rst
    processor_mmio_stale_data.rst
+   cross-thread-rsb.rst
MAINTAINERS (18 changes)

@@ -16135,7 +16135,7 @@ F: drivers/pci/controller/pci-v3-semi.c

 PCI ENDPOINT SUBSYSTEM
 M: Lorenzo Pieralisi <lpieralisi@kernel.org>
-R: Krzysztof Wilczyński <kw@linux.com>
+M: Krzysztof Wilczyński <kw@linux.com>
 R: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
 R: Kishon Vijay Abraham I <kishon@kernel.org>
 L: linux-pci@vger.kernel.org
@@ -16143,7 +16143,7 @@ S: Supported
 Q: https://patchwork.kernel.org/project/linux-pci/list/
 B: https://bugzilla.kernel.org
 C: irc://irc.oftc.net/linux-pci
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/lpieralisi/pci.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/pci/pci.git
 F: Documentation/PCI/endpoint/*
 F: Documentation/misc-devices/pci-endpoint-test.rst
 F: drivers/misc/pci_endpoint_test.c
@@ -16178,7 +16178,7 @@ S: Supported
 Q: https://patchwork.kernel.org/project/linux-pci/list/
 B: https://bugzilla.kernel.org
 C: irc://irc.oftc.net/linux-pci
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/pci/pci.git
 F: Documentation/driver-api/pci/p2pdma.rst
 F: drivers/pci/p2pdma.c
 F: include/linux/pci-p2pdma.h
@@ -16200,14 +16200,14 @@ F: drivers/pci/controller/pci-xgene-msi.c

 PCI NATIVE HOST BRIDGE AND ENDPOINT DRIVERS
 M: Lorenzo Pieralisi <lpieralisi@kernel.org>
+M: Krzysztof Wilczyński <kw@linux.com>
 R: Rob Herring <robh@kernel.org>
-R: Krzysztof Wilczyński <kw@linux.com>
 L: linux-pci@vger.kernel.org
 S: Supported
 Q: https://patchwork.kernel.org/project/linux-pci/list/
 B: https://bugzilla.kernel.org
 C: irc://irc.oftc.net/linux-pci
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/lpieralisi/pci.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/pci/pci.git
 F: Documentation/devicetree/bindings/pci/
 F: drivers/pci/controller/
 F: drivers/pci/pci-bridge-emul.c
@@ -16220,7 +16220,7 @@ S: Supported
 Q: https://patchwork.kernel.org/project/linux-pci/list/
 B: https://bugzilla.kernel.org
 C: irc://irc.oftc.net/linux-pci
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/pci/pci.git
 F: Documentation/PCI/
 F: Documentation/devicetree/bindings/pci/
 F: arch/x86/kernel/early-quirks.c
@@ -20120,6 +20120,7 @@ F: drivers/watchdog/sunplus_wdt.c

 SUPERH
 M: Yoshinori Sato <ysato@users.sourceforge.jp>
 M: Rich Felker <dalias@libc.org>
+M: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
 L: linux-sh@vger.kernel.org
 S: Maintained
 Q: http://patchwork.kernel.org/project/linux-sh/list/
@@ -20352,8 +20353,7 @@ S: Maintained
 F: drivers/platform/x86/system76_acpi.c

 SYSV FILESYSTEM
-M: Christoph Hellwig <hch@infradead.org>
-S: Maintained
+S: Orphan
 F: Documentation/filesystems/sysv-fs.rst
 F: fs/sysv/
 F: include/linux/sysv_fs.h
@@ -21848,11 +21848,9 @@ W: http://en.wikipedia.org/wiki/Util-linux
 T: git git://git.kernel.org/pub/scm/utils/util-linux/util-linux.git

 UUID HELPERS
-M: Christoph Hellwig <hch@lst.de>
 R: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 L: linux-kernel@vger.kernel.org
 S: Maintained
-T: git git://git.infradead.org/users/hch/uuid.git
 F: include/linux/uuid.h
 F: include/uapi/linux/uuid.h
 F: lib/test_uuid.c
Makefile (2 changes)

@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 2
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION = -rc8
 NAME = Hurr durr I'ma ninja sloth

 # *DOCUMENTATION*
@@ -1181,6 +1181,7 @@
 clock-names = "dp", "pclk";
 phys = <&edp_phy>;
 phy-names = "dp";
+power-domains = <&power RK3288_PD_VIO>;
 resets = <&cru SRST_EDP>;
 reset-names = "dp";
 rockchip,grf = <&grf>;
@@ -178,7 +178,7 @@
 tsin-num = <0>;
 serial-not-parallel;
 i2c-bus = <&ssc2>;
-reset-gpios = <&pio15 4 GPIO_ACTIVE_HIGH>;
+reset-gpios = <&pio15 4 GPIO_ACTIVE_LOW>;
 dvb-card = <STV0367_TDA18212_NIMA_1>;
 };
 };
@@ -1886,7 +1886,7 @@
 sd_emmc_b: sd@5000 {
 compatible = "amlogic,meson-axg-mmc";
 reg = <0x0 0x5000 0x0 0x800>;
-interrupts = <GIC_SPI 217 IRQ_TYPE_EDGE_RISING>;
+interrupts = <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>;
 status = "disabled";
 clocks = <&clkc CLKID_SD_EMMC_B>,
 <&clkc CLKID_SD_EMMC_B_CLK0>,
@@ -1898,7 +1898,7 @@
 sd_emmc_c: mmc@7000 {
 compatible = "amlogic,meson-axg-mmc";
 reg = <0x0 0x7000 0x0 0x800>;
-interrupts = <GIC_SPI 218 IRQ_TYPE_EDGE_RISING>;
+interrupts = <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>;
 status = "disabled";
 clocks = <&clkc CLKID_SD_EMMC_C>,
 <&clkc CLKID_SD_EMMC_C_CLK0>,

@@ -2324,7 +2324,7 @@
 sd_emmc_a: sd@ffe03000 {
 compatible = "amlogic,meson-axg-mmc";
 reg = <0x0 0xffe03000 0x0 0x800>;
-interrupts = <GIC_SPI 189 IRQ_TYPE_EDGE_RISING>;
+interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
 status = "disabled";
 clocks = <&clkc CLKID_SD_EMMC_A>,
 <&clkc CLKID_SD_EMMC_A_CLK0>,
@@ -2336,7 +2336,7 @@
 sd_emmc_b: sd@ffe05000 {
 compatible = "amlogic,meson-axg-mmc";
 reg = <0x0 0xffe05000 0x0 0x800>;
-interrupts = <GIC_SPI 190 IRQ_TYPE_EDGE_RISING>;
+interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
 status = "disabled";
 clocks = <&clkc CLKID_SD_EMMC_B>,
 <&clkc CLKID_SD_EMMC_B_CLK0>,
@@ -2348,7 +2348,7 @@
 sd_emmc_c: mmc@ffe07000 {
 compatible = "amlogic,meson-axg-mmc";
 reg = <0x0 0xffe07000 0x0 0x800>;
-interrupts = <GIC_SPI 191 IRQ_TYPE_EDGE_RISING>;
+interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
 status = "disabled";
 clocks = <&clkc CLKID_SD_EMMC_C>,
 <&clkc CLKID_SD_EMMC_C_CLK0>,
@@ -603,21 +603,21 @@
 sd_emmc_a: mmc@70000 {
 compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc";
 reg = <0x0 0x70000 0x0 0x800>;
-interrupts = <GIC_SPI 216 IRQ_TYPE_EDGE_RISING>;
+interrupts = <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>;
 status = "disabled";
 };

 sd_emmc_b: mmc@72000 {
 compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc";
 reg = <0x0 0x72000 0x0 0x800>;
-interrupts = <GIC_SPI 217 IRQ_TYPE_EDGE_RISING>;
+interrupts = <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>;
 status = "disabled";
 };

 sd_emmc_c: mmc@74000 {
 compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc";
 reg = <0x0 0x74000 0x0 0x800>;
-interrupts = <GIC_SPI 218 IRQ_TYPE_EDGE_RISING>;
+interrupts = <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>;
 status = "disabled";
 };
 };
@@ -2146,7 +2146,7 @@
 };

 vdosys0: syscon@1c01a000 {
-compatible = "mediatek,mt8195-mmsys", "syscon";
+compatible = "mediatek,mt8195-vdosys0", "mediatek,mt8195-mmsys", "syscon";
 reg = <0 0x1c01a000 0 0x1000>;
 mboxes = <&gce0 0 CMDQ_THR_PRIO_4>;
 #clock-cells = <1>;
@@ -2292,7 +2292,7 @@
 };

 vdosys1: syscon@1c100000 {
-compatible = "mediatek,mt8195-mmsys", "syscon";
+compatible = "mediatek,mt8195-vdosys1", "syscon";
 reg = <0 0x1c100000 0 0x1000>;
 #clock-cells = <1>;
 };
@@ -96,7 +96,6 @@
 linux,default-trigger = "heartbeat";
 gpios = <&rk805 1 GPIO_ACTIVE_LOW>;
 default-state = "on";
-mode = <0x23>;
 };

 user_led: led-1 {
@@ -104,7 +103,6 @@
 linux,default-trigger = "mmc1";
 gpios = <&rk805 0 GPIO_ACTIVE_LOW>;
 default-state = "off";
-mode = <0x05>;
 };
 };
 };
@@ -111,7 +111,7 @@
 };
 };

-dmc_opp_table: dmc_opp_table {
+dmc_opp_table: opp-table-3 {
 compatible = "operating-points-v2";

 opp00 {
@@ -104,6 +104,13 @@
 };
 };

+&cpu_alert0 {
+temperature = <65000>;
+};
+
+&cpu_alert1 {
+temperature = <68000>;
+};
+
 &cpu_l0 {
 cpu-supply = <&vdd_cpu_l>;
 };
@@ -589,7 +589,7 @@
 clocks = <&cru HCLK_M_CRYPTO0>, <&cru HCLK_S_CRYPTO0>, <&cru SCLK_CRYPTO0>;
 clock-names = "hclk_master", "hclk_slave", "sclk";
 resets = <&cru SRST_CRYPTO0>, <&cru SRST_CRYPTO0_S>, <&cru SRST_CRYPTO0_M>;
-reset-names = "master", "lave", "crypto";
+reset-names = "master", "slave", "crypto-rst";
 };

 crypto1: crypto@ff8b8000 {
@@ -599,7 +599,7 @@
 clocks = <&cru HCLK_M_CRYPTO1>, <&cru HCLK_S_CRYPTO1>, <&cru SCLK_CRYPTO1>;
 clock-names = "hclk_master", "hclk_slave", "sclk";
 resets = <&cru SRST_CRYPTO1>, <&cru SRST_CRYPTO1_S>, <&cru SRST_CRYPTO1_M>;
-reset-names = "master", "slave", "crypto";
+reset-names = "master", "slave", "crypto-rst";
 };

 i2c1: i2c@ff110000 {
|
||||
pcfg_input_pull_up: pcfg-input-pull-up {
|
||||
input-enable;
|
||||
bias-pull-up;
|
||||
drive-strength = <2>;
|
||||
};
|
||||
|
||||
pcfg_input_pull_down: pcfg-input-pull-down {
|
||||
input-enable;
|
||||
bias-pull-down;
|
||||
drive-strength = <2>;
|
||||
};
|
||||
|
||||
clock {
|
||||
|
@@ -353,6 +353,17 @@
 };
 };

+&pmu_io_domains {
+pmuio2-supply = <&vcc_3v3>;
+vccio1-supply = <&vcc_3v3>;
+vccio3-supply = <&vcc_3v3>;
+vccio4-supply = <&vcca_1v8>;
+vccio5-supply = <&vcc_3v3>;
+vccio6-supply = <&vcca_1v8>;
+vccio7-supply = <&vcc_3v3>;
+status = "okay";
+};
+
 &pwm0 {
 status = "okay";
 };
@@ -571,6 +571,8 @@
 };

 &i2s1_8ch {
+pinctrl-names = "default";
+pinctrl-0 = <&i2s1m0_sclktx &i2s1m0_lrcktx &i2s1m0_sdi0 &i2s1m0_sdo0>;
 rockchip,trcm-sync-tx-only;
 status = "okay";
 };
@@ -730,14 +732,13 @@
 disable-wp;
 pinctrl-names = "default";
 pinctrl-0 = <&sdmmc0_bus4 &sdmmc0_clk &sdmmc0_cmd &sdmmc0_det>;
-sd-uhs-sdr104;
+sd-uhs-sdr50;
 vmmc-supply = <&vcc3v3_sd>;
 vqmmc-supply = <&vccio_sd>;
 status = "okay";
 };

 &sdmmc2 {
-supports-sdio;
 bus-width = <4>;
 disable-wp;
 cap-sd-highspeed;
@@ -966,6 +966,7 @@
 clock-names = "aclk_mst", "aclk_slv",
 "aclk_dbi", "pclk", "aux";
 device_type = "pci";
+#interrupt-cells = <1>;
 interrupt-map-mask = <0 0 0 7>;
 interrupt-map = <0 0 0 1 &pcie_intc 0>,
 <0 0 0 2 &pcie_intc 1>,
@@ -163,7 +163,6 @@ config PPC
 select ARCH_WANT_IRQS_OFF_ACTIVATE_MM
 select ARCH_WANT_LD_ORPHAN_WARN
-select ARCH_WANTS_MODULES_DATA_IN_VMALLOC if PPC_BOOK3S_32 || PPC_8xx
 select ARCH_WANTS_NO_INSTR
 select ARCH_WEAK_RELEASE_ACQUIRE
 select BINFMT_ELF
 select BUILDTIME_TABLE_SORT
@@ -50,16 +50,18 @@ static inline bool exit_must_hard_disable(void)
 */
 static notrace __always_inline bool prep_irq_for_enabled_exit(bool restartable)
 {
+bool must_hard_disable = (exit_must_hard_disable() || !restartable);
+
 /* This must be done with RI=1 because tracing may touch vmaps */
 trace_hardirqs_on();

-if (exit_must_hard_disable() || !restartable)
+if (must_hard_disable)
 __hard_EE_RI_disable();

 #ifdef CONFIG_PPC64
 /* This pattern matches prep_irq_for_idle */
 if (unlikely(lazy_irq_pending_nocheck())) {
-if (exit_must_hard_disable() || !restartable) {
+if (must_hard_disable) {
 local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
 __hard_RI_enable();
 }
@@ -26,6 +26,7 @@
 #include <asm/firmware.h>
 #include <asm/kexec_ranges.h>
 #include <asm/crashdump-ppc64.h>
+#include <asm/mmzone.h>
 #include <asm/prom.h>

 struct umem_info {
@@ -721,6 +721,10 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
 page_table_check_pmd_set(vma->vm_mm, address, pmdp, pmd);
 return __pmd(atomic_long_xchg((atomic_long_t *)pmdp, pmd_val(pmd)));
 }
+
+#define pmdp_collapse_flush pmdp_collapse_flush
+extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
+unsigned long address, pmd_t *pmdp);
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */

 /*
@@ -65,16 +65,18 @@ static bool __kprobes arch_check_kprobe(struct kprobe *p)

 int __kprobes arch_prepare_kprobe(struct kprobe *p)
 {
-unsigned long probe_addr = (unsigned long)p->addr;
+u16 *insn = (u16 *)p->addr;

-if (probe_addr & 0x1)
+if ((unsigned long)insn & 0x1)
 return -EILSEQ;

 if (!arch_check_kprobe(p))
 return -EILSEQ;

 /* copy instruction */
-p->opcode = *p->addr;
+p->opcode = (kprobe_opcode_t)(*insn++);
+if (GET_INSN_LENGTH(p->opcode) == 4)
+p->opcode |= (kprobe_opcode_t)(*insn) << 16;

 /* decode instruction */
 switch (riscv_probe_decode_insn(p->addr, &p->ainsn.api)) {
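The hunk above copies the probed instruction as two aligned 16-bit halves. A
rough standalone sketch of the idea (a hypothetical helper, not kernel code;
it assumes the RISC-V C-extension encoding rule that an opcode with both low
bits set is a full 32-bit instruction, which may sit on a 2-byte boundary):

	/* Hypothetical helper: misalignment-safe instruction fetch. */
	static u32 fetch_insn_sketch(const u16 *insn)
	{
		u32 opcode = insn[0];

		if ((opcode & 0x3) == 0x3)	/* 32-bit instruction */
			opcode |= (u32)insn[1] << 16;

		return opcode;
	}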
@@ -32,6 +32,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
 fp = (unsigned long)__builtin_frame_address(0);
 sp = current_stack_pointer;
 pc = (unsigned long)walk_stackframe;
+level = -1;
 } else {
 /* task blocked in __switch_to */
 fp = task->thread.s[0];
@@ -43,7 +44,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
 unsigned long low, high;
 struct stackframe *frame;

-if (unlikely(!__kernel_text_address(pc) || (level++ >= 1 && !fn(arg, pc))))
+if (unlikely(!__kernel_text_address(pc) || (level++ >= 0 && !fn(arg, pc))))
 break;

 /* Validate frame pointer */
@@ -90,8 +90,10 @@ void flush_icache_pte(pte_t pte)
 if (PageHuge(page))
 page = compound_head(page);

-if (!test_and_set_bit(PG_dcache_clean, &page->flags))
+if (!test_bit(PG_dcache_clean, &page->flags)) {
 flush_icache_all();
+set_bit(PG_dcache_clean, &page->flags);
+}
 }
 #endif /* CONFIG_MMU */
||||
|
@ -81,3 +81,23 @@ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
|
||||
}
|
||||
|
||||
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
|
||||
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
|
||||
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
|
||||
unsigned long address, pmd_t *pmdp)
|
||||
{
|
||||
pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
|
||||
|
||||
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
|
||||
VM_BUG_ON(pmd_trans_huge(*pmdp));
|
||||
/*
|
||||
* When leaf PTE entries (regular pages) are collapsed into a leaf
|
||||
* PMD entry (huge page), a valid non-leaf PTE is converted into a
|
||||
* valid leaf PTE at the level 1 page table. Since the sfence.vma
|
||||
* forms that specify an address only apply to leaf PTEs, we need a
|
||||
* global flush here. collapse_huge_page() assumes these flushes are
|
||||
* eager, so just do the fence here.
|
||||
*/
|
||||
flush_tlb_mm(vma->vm_mm);
|
||||
return pmd;
|
||||
}
|
||||
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
|
||||
|
@@ -466,5 +466,6 @@
 #define X86_BUG_MMIO_UNKNOWN X86_BUG(26) /* CPU is too old and its MMIO Stale Data status is unknown */
 #define X86_BUG_RETBLEED X86_BUG(27) /* CPU is affected by RETBleed */
 #define X86_BUG_EIBRS_PBRSB X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
+#define X86_BUG_SMT_RSB X86_BUG(29) /* CPU is vulnerable to Cross-Thread Return Address Predictions */

 #endif /* _ASM_X86_CPUFEATURES_H */
@@ -123,6 +123,8 @@
 #define INTEL_FAM6_METEORLAKE 0xAC
 #define INTEL_FAM6_METEORLAKE_L 0xAA

+#define INTEL_FAM6_LUNARLAKE_M 0xBD
+
 /* "Small Core" Processors (Atom/E-Core) */

 #define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */
@@ -1256,6 +1256,8 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
 #define MMIO_SBDS BIT(2)
 /* CPU is affected by RETbleed, speculating where you would not expect it */
 #define RETBLEED BIT(3)
+/* CPU is affected by SMT (cross-thread) return predictions */
+#define SMT_RSB BIT(4)

 static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
 VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS),
@@ -1287,8 +1289,8 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {

 VULNBL_AMD(0x15, RETBLEED),
 VULNBL_AMD(0x16, RETBLEED),
-VULNBL_AMD(0x17, RETBLEED),
-VULNBL_HYGON(0x18, RETBLEED),
+VULNBL_AMD(0x17, RETBLEED | SMT_RSB),
+VULNBL_HYGON(0x18, RETBLEED | SMT_RSB),
 {}
 };
@@ -1406,6 +1408,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 !(ia32_cap & ARCH_CAP_PBRSB_NO))
 setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);

+if (cpu_matches(cpu_vuln_blacklist, SMT_RSB))
+setup_force_cpu_bug(X86_BUG_SMT_RSB);
+
 if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
 return;
@@ -625,7 +625,7 @@ static int prepare_emulation(struct kprobe *p, struct insn *insn)
 /* 1 byte conditional jump */
 p->ainsn.emulate_op = kprobe_emulate_jcc;
 p->ainsn.jcc.type = opcode & 0xf;
-p->ainsn.rel32 = *(char *)insn->immediate.bytes;
+p->ainsn.rel32 = insn->immediate.value;
 break;
 case 0x0f:
 opcode = insn->opcode.bytes[1];
@@ -191,6 +191,10 @@ module_param(enable_pmu, bool, 0444);
 bool __read_mostly eager_page_split = true;
 module_param(eager_page_split, bool, 0644);

+/* Enable/disable SMT_RSB bug mitigation */
+bool __read_mostly mitigate_smt_rsb;
+module_param(mitigate_smt_rsb, bool, 0444);
+
 /*
 * Restoring the host value for MSRs that are only consumed when running in
 * usermode, e.g. SYSCALL MSRs and TSC_AUX, can be deferred until the CPU
@@ -4448,10 +4452,15 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 r = KVM_CLOCK_VALID_FLAGS;
 break;
 case KVM_CAP_X86_DISABLE_EXITS:
-r |= KVM_X86_DISABLE_EXITS_HLT | KVM_X86_DISABLE_EXITS_PAUSE |
-KVM_X86_DISABLE_EXITS_CSTATE;
-if(kvm_can_mwait_in_guest())
-r |= KVM_X86_DISABLE_EXITS_MWAIT;
+r = KVM_X86_DISABLE_EXITS_PAUSE;
+
+if (!mitigate_smt_rsb) {
+r |= KVM_X86_DISABLE_EXITS_HLT |
+KVM_X86_DISABLE_EXITS_CSTATE;
+
+if (kvm_can_mwait_in_guest())
+r |= KVM_X86_DISABLE_EXITS_MWAIT;
+}
 break;
 case KVM_CAP_X86_SMM:
 if (!IS_ENABLED(CONFIG_KVM_SMM))
@@ -6227,15 +6236,26 @@ split_irqchip_unlock:
 if (cap->args[0] & ~KVM_X86_DISABLE_VALID_EXITS)
 break;

-if ((cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT) &&
-kvm_can_mwait_in_guest())
-kvm->arch.mwait_in_guest = true;
-if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT)
-kvm->arch.hlt_in_guest = true;
 if (cap->args[0] & KVM_X86_DISABLE_EXITS_PAUSE)
 kvm->arch.pause_in_guest = true;
-if (cap->args[0] & KVM_X86_DISABLE_EXITS_CSTATE)
-kvm->arch.cstate_in_guest = true;

+#define SMT_RSB_MSG "This processor is affected by the Cross-Thread Return Predictions vulnerability. " \
+"KVM_CAP_X86_DISABLE_EXITS should only be used with SMT disabled or trusted guests."
+
+if (!mitigate_smt_rsb) {
+if (boot_cpu_has_bug(X86_BUG_SMT_RSB) && cpu_smt_possible() &&
+(cap->args[0] & ~KVM_X86_DISABLE_EXITS_PAUSE))
+pr_warn_once(SMT_RSB_MSG);
+
+if ((cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT) &&
+kvm_can_mwait_in_guest())
+kvm->arch.mwait_in_guest = true;
+if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT)
+kvm->arch.hlt_in_guest = true;
+if (cap->args[0] & KVM_X86_DISABLE_EXITS_CSTATE)
+kvm->arch.cstate_in_guest = true;
+}
+
 r = 0;
 break;
 case KVM_CAP_MSR_PLATFORM_INFO:
@@ -13456,6 +13476,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_exit);
 static int __init kvm_x86_init(void)
 {
 kvm_mmu_x86_module_init();
+mitigate_smt_rsb &= boot_cpu_has_bug(X86_BUG_SMT_RSB) && cpu_smt_possible();
 return 0;
 }
 module_init(kvm_x86_init);
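A rough VMM-side sketch of how the adjusted capability check above is meant
to be consumed (assumption: vm_fd is an open KVM VM file descriptor):

	/* With mitigate_smt_rsb=1, only the PAUSE bit is advertised. */
	int allowed = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_X86_DISABLE_EXITS);

	if (allowed & KVM_X86_DISABLE_EXITS_HLT) {
		/* HLT interception may safely be turned off. */
	}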
@@ -3297,8 +3297,8 @@ void acpi_nfit_shutdown(void *data)

 mutex_lock(&acpi_desc->init_mutex);
 set_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
-cancel_delayed_work_sync(&acpi_desc->dwork);
 mutex_unlock(&acpi_desc->init_mutex);
+cancel_delayed_work_sync(&acpi_desc->dwork);

 /*
 * Bounce the nvdimm bus lock to make sure any in-flight
@@ -58,7 +58,7 @@ jz4760_cgu_calc_m_n_od(const struct ingenic_cgu_pll_info *pll_info,
 unsigned long rate, unsigned long parent_rate,
 unsigned int *pm, unsigned int *pn, unsigned int *pod)
 {
-unsigned int m, n, od, m_max = (1 << pll_info->m_bits) - 2;
+unsigned int m, n, od, m_max = (1 << pll_info->m_bits) - 1;

 /* The frequency after the N divider must be between 1 and 50 MHz. */
 n = parent_rate / (1 * MHZ);
@@ -66,19 +66,17 @@ jz4760_cgu_calc_m_n_od(const struct ingenic_cgu_pll_info *pll_info,
 /* The N divider must be >= 2. */
 n = clamp_val(n, 2, 1 << pll_info->n_bits);

-for (;; n >>= 1) {
-od = (unsigned int)-1;
+rate /= MHZ;
+parent_rate /= MHZ;

-do {
-m = (rate / MHZ) * (1 << ++od) * n / (parent_rate / MHZ);
-} while ((m > m_max || m & 1) && (od < 4));
-
-if (od < 4 && m >= 4 && m <= m_max)
-break;
+for (m = m_max; m >= m_max && n >= 2; n--) {
+m = rate * n / parent_rate;
+od = m & 1;
+m <<= od;
 }

 *pm = m;
-*pn = n;
+*pn = n + 1;
 *pod = 1 << od;
 }
@@ -164,12 +164,11 @@ static int mpfs_ccc_register_outputs(struct device *dev, struct mpfs_ccc_out_hw_

 for (unsigned int i = 0; i < num_clks; i++) {
 struct mpfs_ccc_out_hw_clock *out_hw = &out_hws[i];
-char *name = devm_kzalloc(dev, 23, GFP_KERNEL);
+char *name = devm_kasprintf(dev, GFP_KERNEL, "%s_out%u", parent->name, i);

 if (!name)
 return -ENOMEM;

-snprintf(name, 23, "%s_out%u", parent->name, i);
 out_hw->divider.hw.init = CLK_HW_INIT_HW(name, &parent->hw, &clk_divider_ops, 0);
 out_hw->divider.reg = data->pll_base[i / MPFS_CCC_OUTPUTS_PER_PLL] +
 out_hw->reg_offset;
@@ -201,14 +200,13 @@ static int mpfs_ccc_register_plls(struct device *dev, struct mpfs_ccc_pll_hw_clo

 for (unsigned int i = 0; i < num_clks; i++) {
 struct mpfs_ccc_pll_hw_clock *pll_hw = &pll_hws[i];
-char *name = devm_kzalloc(dev, 18, GFP_KERNEL);

-if (!name)
+pll_hw->name = devm_kasprintf(dev, GFP_KERNEL, "ccc%s_pll%u",
+strchrnul(dev->of_node->full_name, '@'), i);
+if (!pll_hw->name)
 return -ENOMEM;

 pll_hw->base = data->pll_base[i];
-snprintf(name, 18, "ccc%s_pll%u", strchrnul(dev->of_node->full_name, '@'), i);
-pll_hw->name = (const char *)name;
 pll_hw->hw.init = CLK_HW_INIT_PARENTS_DATA_FIXED_SIZE(pll_hw->name,
 pll_hw->parents,
 &mpfs_ccc_pll_ops, 0);
@@ -143,21 +143,6 @@ static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
 return lval * xo_rate;
 }

-/* Get the current frequency of the CPU (after throttling) */
-static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
-{
-struct qcom_cpufreq_data *data;
-struct cpufreq_policy *policy;
-
-policy = cpufreq_cpu_get_raw(cpu);
-if (!policy)
-return 0;
-
-data = policy->driver_data;
-
-return qcom_lmh_get_throttle_freq(data) / HZ_PER_KHZ;
-}
-
 /* Get the frequency requested by the cpufreq core for the CPU */
 static unsigned int qcom_cpufreq_get_freq(unsigned int cpu)
 {
@@ -179,6 +164,23 @@ static unsigned int qcom_cpufreq_get_freq(unsigned int cpu)
 return policy->freq_table[index].frequency;
 }

+static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
+{
+struct qcom_cpufreq_data *data;
+struct cpufreq_policy *policy;
+
+policy = cpufreq_cpu_get_raw(cpu);
+if (!policy)
+return 0;
+
+data = policy->driver_data;
+
+if (data->throttle_irq >= 0)
+return qcom_lmh_get_throttle_freq(data) / HZ_PER_KHZ;
+
+return qcom_cpufreq_get_freq(cpu);
+}
+
 static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
 unsigned int target_freq)
 {
@@ -704,6 +706,8 @@ static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
 return -ENOMEM;

 qcom_cpufreq.soc_data = of_device_get_match_data(dev);
+if (!qcom_cpufreq.soc_data)
+return -ENODEV;

 clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, num_domains), GFP_KERNEL);
 if (!clk_data)
@@ -131,7 +131,7 @@ static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
 struct cxl_port *iter = cxled_to_port(cxled);
 struct cxl_ep *ep;
-int rc;
+int rc = 0;

 while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
 iter = to_cxl_port(iter->dev.parent);
@@ -143,7 +143,8 @@ static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)

 cxl_rr = cxl_rr_load(iter, cxlr);
 cxld = cxl_rr->decoder;
-rc = cxld->reset(cxld);
+if (cxld->reset)
+rc = cxld->reset(cxld);
 if (rc)
 return rc;
 }
@@ -186,7 +187,8 @@ static int cxl_region_decode_commit(struct cxl_region *cxlr)
 iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
 cxl_rr = cxl_rr_load(iter, cxlr);
 cxld = cxl_rr->decoder;
-cxld->reset(cxld);
+if (cxld->reset)
+cxld->reset(cxld);
 }

 cxled->cxld.reset(&cxled->cxld);
@@ -991,10 +993,10 @@ static int cxl_port_setup_targets(struct cxl_port *port,
 int i, distance;

 /*
-* Passthrough ports impose no distance requirements between
+* Passthrough decoders impose no distance requirements between
 * peers
 */
-if (port->nr_dports == 1)
+if (cxl_rr->nr_targets == 1)
 distance = 0;
 else
 distance = p->nr_targets / cxl_rr->nr_targets;
@@ -475,7 +475,7 @@ EXPORT_SYMBOL_GPL(put_dax);
 /**
 * dax_holder() - obtain the holder of a dax device
 * @dax_dev: a dax_device instance
-
+ *
 * Return: the holder's data which represents the holder if registered,
 * otherwize NULL.
 */
@@ -19,10 +19,13 @@ static bool system_needs_vamap(void)
 const u8 *type1_family = efi_get_smbios_string(1, family);

 /*
-* Ampere Altra machines crash in SetTime() if SetVirtualAddressMap()
-* has not been called prior.
+* Ampere eMAG, Altra, and Altra Max machines crash in SetTime() if
+* SetVirtualAddressMap() has not been called prior.
 */
-if (!type1_family || strcmp(type1_family, "Altra"))
+if (!type1_family || (
+strcmp(type1_family, "eMAG") &&
+strcmp(type1_family, "Altra") &&
+strcmp(type1_family, "Altra Max")))
 return false;

 efi_warn("Working around broken SetVirtualAddressMap()\n")
@@ -1531,6 +1531,7 @@ config GPIO_MLXBF2
 tristate "Mellanox BlueField 2 SoC GPIO"
 depends on (MELLANOX_PLATFORM && ARM64 && ACPI) || (64BIT && COMPILE_TEST)
 select GPIO_GENERIC
+select GPIOLIB_IRQCHIP
 help
 Say Y here if you want GPIO support on Mellanox BlueField 2 SoC.
|
@ -30,7 +30,6 @@ struct fsl_gpio_soc_data {
|
||||
|
||||
struct vf610_gpio_port {
|
||||
struct gpio_chip gc;
|
||||
struct irq_chip ic;
|
||||
void __iomem *base;
|
||||
void __iomem *gpio_base;
|
||||
const struct fsl_gpio_soc_data *sdata;
|
||||
@ -207,20 +206,24 @@ static int vf610_gpio_irq_set_type(struct irq_data *d, u32 type)
|
||||
|
||||
static void vf610_gpio_irq_mask(struct irq_data *d)
|
||||
{
|
||||
struct vf610_gpio_port *port =
|
||||
gpiochip_get_data(irq_data_get_irq_chip_data(d));
|
||||
void __iomem *pcr_base = port->base + PORT_PCR(d->hwirq);
|
||||
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
|
||||
struct vf610_gpio_port *port = gpiochip_get_data(gc);
|
||||
irq_hw_number_t gpio_num = irqd_to_hwirq(d);
|
||||
void __iomem *pcr_base = port->base + PORT_PCR(gpio_num);
|
||||
|
||||
vf610_gpio_writel(0, pcr_base);
|
||||
gpiochip_disable_irq(gc, gpio_num);
|
||||
}
|
||||
|
||||
static void vf610_gpio_irq_unmask(struct irq_data *d)
|
||||
{
|
||||
struct vf610_gpio_port *port =
|
||||
gpiochip_get_data(irq_data_get_irq_chip_data(d));
|
||||
void __iomem *pcr_base = port->base + PORT_PCR(d->hwirq);
|
||||
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
|
||||
struct vf610_gpio_port *port = gpiochip_get_data(gc);
|
||||
irq_hw_number_t gpio_num = irqd_to_hwirq(d);
|
||||
void __iomem *pcr_base = port->base + PORT_PCR(gpio_num);
|
||||
|
||||
vf610_gpio_writel(port->irqc[d->hwirq] << PORT_PCR_IRQC_OFFSET,
|
||||
gpiochip_enable_irq(gc, gpio_num);
|
||||
vf610_gpio_writel(port->irqc[gpio_num] << PORT_PCR_IRQC_OFFSET,
|
||||
pcr_base);
|
||||
}
|
||||
|
||||
@ -237,6 +240,17 @@ static int vf610_gpio_irq_set_wake(struct irq_data *d, u32 enable)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct irq_chip vf610_irqchip = {
|
||||
.name = "gpio-vf610",
|
||||
.irq_ack = vf610_gpio_irq_ack,
|
||||
.irq_mask = vf610_gpio_irq_mask,
|
||||
.irq_unmask = vf610_gpio_irq_unmask,
|
||||
.irq_set_type = vf610_gpio_irq_set_type,
|
||||
.irq_set_wake = vf610_gpio_irq_set_wake,
|
||||
.flags = IRQCHIP_IMMUTABLE,
|
||||
GPIOCHIP_IRQ_RESOURCE_HELPERS,
|
||||
};
|
||||
|
||||
static void vf610_gpio_disable_clk(void *data)
|
||||
{
|
||||
clk_disable_unprepare(data);
|
||||
@ -249,7 +263,6 @@ static int vf610_gpio_probe(struct platform_device *pdev)
|
||||
struct vf610_gpio_port *port;
|
||||
struct gpio_chip *gc;
|
||||
struct gpio_irq_chip *girq;
|
||||
struct irq_chip *ic;
|
||||
int i;
|
||||
int ret;
|
||||
|
||||
@ -315,14 +328,6 @@ static int vf610_gpio_probe(struct platform_device *pdev)
|
||||
gc->direction_output = vf610_gpio_direction_output;
|
||||
gc->set = vf610_gpio_set;
|
||||
|
||||
ic = &port->ic;
|
||||
ic->name = "gpio-vf610";
|
||||
ic->irq_ack = vf610_gpio_irq_ack;
|
||||
ic->irq_mask = vf610_gpio_irq_mask;
|
||||
ic->irq_unmask = vf610_gpio_irq_unmask;
|
||||
ic->irq_set_type = vf610_gpio_irq_set_type;
|
||||
ic->irq_set_wake = vf610_gpio_irq_set_wake;
|
||||
|
||||
/* Mask all GPIO interrupts */
|
||||
for (i = 0; i < gc->ngpio; i++)
|
||||
vf610_gpio_writel(0, port->base + PORT_PCR(i));
|
||||
@ -331,7 +336,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
|
||||
vf610_gpio_writel(~0, port->base + PORT_ISFR);
|
||||
|
||||
girq = &gc->irq;
|
||||
girq->chip = ic;
|
||||
gpio_irq_chip_set_chip(girq, &vf610_irqchip);
|
||||
girq->parent_handler = vf610_gpio_irq_handler;
|
||||
girq->num_parents = 1;
|
||||
girq->parents = devm_kcalloc(&pdev->dev, 1,
|
||||
|
@@ -1637,6 +1637,18 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
 .ignore_wake = "ELAN0415:00@9",
 },
 },
+{
+/*
+ * Spurious wakeups from TP_ATTN# pin
+ * Found in BIOS 1.7.7
+ */
+.matches = {
+DMI_MATCH(DMI_BOARD_NAME, "NH5xAx"),
+},
+.driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+.ignore_wake = "SYNA1202:00@16",
+},
+},
 {} /* Terminating entry */
 };
@@ -14,7 +14,6 @@

 #include <linux/gpio/consumer.h>

-struct acpi_device;
 struct device;
 struct fwnode_handle;
@@ -53,7 +53,8 @@ config DRM_DEBUG_MM

 config DRM_USE_DYNAMIC_DEBUG
 bool "use dynamic debug to implement drm.debug"
-default y
+default n
+depends on BROKEN
 depends on DRM
 depends on DYNAMIC_DEBUG || DYNAMIC_DEBUG_CORE
 depends on JUMP_LABEL
@@ -243,6 +243,7 @@ extern int amdgpu_num_kcq;

 #define AMDGPU_VCNFW_LOG_SIZE (32 * 1024)
 extern int amdgpu_vcnfw_log;
+extern int amdgpu_sg_display;

 #define AMDGPU_VM_MAX_NUM_CTX 4096
 #define AMDGPU_SG_THRESHOLD (256*1024*1024)
@@ -1220,10 +1220,13 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
 * next job actually sees the results from the previous one
 * before we start executing on the same scheduler ring.
 */
-if (!s_fence || s_fence->sched != sched)
+if (!s_fence || s_fence->sched != sched) {
+dma_fence_put(fence);
 continue;
+}

 r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence);
+dma_fence_put(fence);
 if (r)
 return r;
 }
@@ -4268,6 +4268,9 @@ exit:
 }
 adev->in_suspend = false;

+if (adev->enable_mes)
+amdgpu_mes_self_test(adev);
+
 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
 DRM_WARN("smart shift update failed\n");
|
@ -186,6 +186,7 @@ int amdgpu_num_kcq = -1;
|
||||
int amdgpu_smartshift_bias;
|
||||
int amdgpu_use_xgmi_p2p = 1;
|
||||
int amdgpu_vcnfw_log;
|
||||
int amdgpu_sg_display = -1; /* auto */
|
||||
|
||||
static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);
|
||||
|
||||
@ -931,6 +932,16 @@ module_param_named(num_kcq, amdgpu_num_kcq, int, 0444);
|
||||
MODULE_PARM_DESC(vcnfw_log, "Enable vcnfw log(0 = disable (default value), 1 = enable)");
|
||||
module_param_named(vcnfw_log, amdgpu_vcnfw_log, int, 0444);
|
||||
|
||||
/**
|
||||
* DOC: sg_display (int)
|
||||
* Disable S/G (scatter/gather) display (i.e., display from system memory).
|
||||
* This option is only relevant on APUs. Set this option to 0 to disable
|
||||
* S/G display if you experience flickering or other issues under memory
|
||||
* pressure and report the issue.
|
||||
*/
|
||||
MODULE_PARM_DESC(sg_display, "S/G Display (-1 = auto (default), 0 = disable)");
|
||||
module_param_named(sg_display, amdgpu_sg_display, int, 0444);
|
||||
|
||||
/**
|
||||
* DOC: smu_pptable_id (int)
|
||||
* Used to override pptable id. id = 0 use VBIOS pptable.
|
||||
|
@@ -618,7 +618,13 @@ void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev)
 if (!ring || !ring->fence_drv.initialized)
 continue;

-if (!ring->no_scheduler)
+/*
+ * Notice we check for sched.ops since there's some
+ * override on the meaning of sched.ready by amdgpu.
+ * The natural check would be sched.ready, which is
+ * set as drm_sched_init() finishes...
+ */
+if (ring->sched.ops)
 drm_sched_fini(&ring->sched);

 for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
@@ -295,7 +295,7 @@ struct amdgpu_ring {
 #define amdgpu_ring_parse_cs(r, p, job, ib) ((r)->funcs->parse_cs((p), (job), (ib)))
 #define amdgpu_ring_patch_cs_in_place(r, p, job, ib) ((r)->funcs->patch_cs_in_place((p), (job), (ib)))
 #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
-#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
+#define amdgpu_ring_test_ib(r, t) ((r)->funcs->test_ib ? (r)->funcs->test_ib((r), (t)) : 0)
 #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
 #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
 #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
@@ -974,7 +974,7 @@ int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
 trace_amdgpu_vm_update_ptes(params, frag_start, upd_end,
 min(nptes, 32u), dst, incr,
 upd_flags,
-vm->task_info.pid,
+vm->task_info.tgid,
 vm->immediate.fence_context);
 amdgpu_vm_pte_update_flags(params, to_amdgpu_bo_vm(pt),
 cursor.level, pe_start, dst,
@@ -6877,7 +6877,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
 .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
 .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
 .test_ring = gfx_v9_0_ring_test_ring,
-.test_ib = gfx_v9_0_ring_test_ib,
 .insert_nop = amdgpu_ring_insert_nop,
 .pad_ib = amdgpu_ring_generic_pad_ib,
 .emit_switch_buffer = gfx_v9_ring_emit_sb,
@@ -1344,7 +1344,7 @@ static int mes_v11_0_late_init(void *handle)
 struct amdgpu_device *adev = (struct amdgpu_device *)handle;

 /* it's only intended for use in mes_self_test case, not for s0ix and reset */
-if (!amdgpu_in_reset(adev) && !adev->in_s0ix &&
+if (!amdgpu_in_reset(adev) && !adev->in_s0ix && !adev->in_suspend &&
 (adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3)))
 amdgpu_mes_self_test(adev);
@@ -641,7 +641,9 @@ static int soc21_common_early_init(void *handle)
 AMD_CG_SUPPORT_GFX_CGLS |
 AMD_CG_SUPPORT_REPEATER_FGCG |
 AMD_CG_SUPPORT_GFX_MGCG |
-AMD_CG_SUPPORT_HDP_SD;
+AMD_CG_SUPPORT_HDP_SD |
+AMD_CG_SUPPORT_ATHUB_MGCG |
+AMD_CG_SUPPORT_ATHUB_LS;
 adev->pg_flags = AMD_PG_SUPPORT_VCN |
 AMD_PG_SUPPORT_VCN_DPG |
 AMD_PG_SUPPORT_JPEG;
@@ -1184,24 +1184,38 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_

 memset(pa_config, 0, sizeof(*pa_config));

-logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
-pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
-
-if (adev->apu_flags & AMD_APU_IS_RAVEN2)
-/*
- * Raven2 has a HW issue that it is unable to use the vram which
- * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
- * workaround that increase system aperture high address (add 1)
- * to get rid of the VM fault and hardware hang.
- */
-logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
-else
-logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
-
 agp_base = 0;
 agp_bot = adev->gmc.agp_start >> 24;
 agp_top = adev->gmc.agp_end >> 24;

+/* AGP aperture is disabled */
+if (agp_bot == agp_top) {
+logical_addr_low = adev->gmc.vram_start >> 18;
+if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+/*
+ * Raven2 has a HW issue that it is unable to use the vram which
+ * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
+ * workaround that increase system aperture high address (add 1)
+ * to get rid of the VM fault and hardware hang.
+ */
+logical_addr_high = (adev->gmc.fb_end >> 18) + 0x1;
+else
+logical_addr_high = adev->gmc.vram_end >> 18;
+} else {
+logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
+if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+/*
+ * Raven2 has a HW issue that it is unable to use the vram which
+ * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
+ * workaround that increase system aperture high address (add 1)
+ * to get rid of the VM fault and hardware hang.
+ */
+logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
+else
+logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
+}
+
+pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+
 page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
 page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
@@ -1503,6 +1517,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 case IP_VERSION(3, 0, 1):
 case IP_VERSION(3, 1, 2):
 case IP_VERSION(3, 1, 3):
+case IP_VERSION(3, 1, 4):
+case IP_VERSION(3, 1, 5):
 case IP_VERSION(3, 1, 6):
 init_data.flags.gpu_vm_support = true;
 break;
@@ -1511,6 +1527,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 }
 break;
 }
+if (init_data.flags.gpu_vm_support &&
+(amdgpu_sg_display == 0))
+init_data.flags.gpu_vm_support = false;

 if (init_data.flags.gpu_vm_support)
 adev->mode_info.gpu_vm_support = true;
@@ -9639,7 +9658,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in
 * atomic state, so call drm helper to normalize zpos.
 */
-drm_atomic_normalize_zpos(dev, state);
+ret = drm_atomic_normalize_zpos(dev, state);
+if (ret) {
+drm_dbg(dev, "drm_atomic_normalize_zpos() failed\n");
+goto fail;
+}

 /* Remove exiting planes if they are modified */
 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
@@ -3626,7 +3626,7 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
 (int)hubp->curs_attr.width || pos_cpy.x
 <= (int)hubp->curs_attr.width +
 pipe_ctx->plane_state->src_rect.x) {
-pos_cpy.x = temp_x + viewport_width;
+pos_cpy.x = 2 * viewport_width - temp_x;
 }
 }
 } else {
@@ -1991,6 +1991,8 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
 case IP_VERSION(9, 4, 2):
 case IP_VERSION(10, 3, 0):
 case IP_VERSION(11, 0, 0):
+case IP_VERSION(11, 0, 1):
+case IP_VERSION(11, 0, 2):
 *states = ATTR_STATE_SUPPORTED;
 break;
 default:
@@ -123,7 +123,8 @@
 (1 << FEATURE_DS_FCLK_BIT) | \
 (1 << FEATURE_DS_LCLK_BIT) | \
 (1 << FEATURE_DS_DCFCLK_BIT) | \
-(1 << FEATURE_DS_UCLK_BIT))
+(1 << FEATURE_DS_UCLK_BIT) | \
+(1ULL << FEATURE_DS_VCN_BIT))

 //For use with feature control messages
 typedef enum {
@@ -522,9 +523,9 @@
 TEMP_HOTSPOT_M,
 TEMP_MEM,
 TEMP_VR_GFX,
-TEMP_VR_SOC,
 TEMP_VR_MEM0,
 TEMP_VR_MEM1,
+TEMP_VR_SOC,
 TEMP_VR_U,
 TEMP_LIQUID0,
 TEMP_LIQUID1,
@@ -113,20 +113,21 @@
 #define NUM_FEATURES 64

 #define ALLOWED_FEATURE_CTRL_DEFAULT 0xFFFFFFFFFFFFFFFFULL
-#define ALLOWED_FEATURE_CTRL_SCPM (1 << FEATURE_DPM_GFXCLK_BIT) | \
-(1 << FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT) | \
-(1 << FEATURE_DPM_UCLK_BIT) | \
-(1 << FEATURE_DPM_FCLK_BIT) | \
-(1 << FEATURE_DPM_SOCCLK_BIT) | \
-(1 << FEATURE_DPM_MP0CLK_BIT) | \
-(1 << FEATURE_DPM_LINK_BIT) | \
-(1 << FEATURE_DPM_DCN_BIT) | \
-(1 << FEATURE_DS_GFXCLK_BIT) | \
-(1 << FEATURE_DS_SOCCLK_BIT) | \
-(1 << FEATURE_DS_FCLK_BIT) | \
-(1 << FEATURE_DS_LCLK_BIT) | \
-(1 << FEATURE_DS_DCFCLK_BIT) | \
-(1 << FEATURE_DS_UCLK_BIT)
+#define ALLOWED_FEATURE_CTRL_SCPM ((1 << FEATURE_DPM_GFXCLK_BIT) | \
+(1 << FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT) | \
+(1 << FEATURE_DPM_UCLK_BIT) | \
+(1 << FEATURE_DPM_FCLK_BIT) | \
+(1 << FEATURE_DPM_SOCCLK_BIT) | \
+(1 << FEATURE_DPM_MP0CLK_BIT) | \
+(1 << FEATURE_DPM_LINK_BIT) | \
+(1 << FEATURE_DPM_DCN_BIT) | \
+(1 << FEATURE_DS_GFXCLK_BIT) | \
+(1 << FEATURE_DS_SOCCLK_BIT) | \
+(1 << FEATURE_DS_FCLK_BIT) | \
+(1 << FEATURE_DS_LCLK_BIT) | \
+(1 << FEATURE_DS_DCFCLK_BIT) | \
+(1 << FEATURE_DS_UCLK_BIT) | \
+(1ULL << FEATURE_DS_VCN_BIT))

 //For use with feature control messages
 typedef enum {
@@ -28,11 +28,11 @@
 #define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF
 #define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04
 #define SMU13_DRIVER_IF_VERSION_ALDE 0x08
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0 0x34
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0 0x37
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x07
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10 0x32
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x35
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x37
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_10 0x1D

 #define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms
@@ -407,6 +407,9 @@ static int smu_v13_0_0_setup_pptable(struct smu_context *smu)
 struct amdgpu_device *adev = smu->adev;
 int ret = 0;

+if (amdgpu_sriov_vf(smu->adev))
+return 0;
+
 ret = smu_v13_0_0_get_pptable_from_pmfw(smu,
 &smu_table->power_play_table,
 &smu_table->power_play_table_size);
@@ -1257,6 +1260,9 @@ static int smu_v13_0_0_get_thermal_temperature_range(struct smu_context *smu,
 table_context->power_play_table;
 PPTable_t *pptable = smu->smu_table.driver_pptable;

+if (amdgpu_sriov_vf(smu->adev))
+return 0;
+
 if (!range)
 return -EINVAL;
@@ -124,6 +124,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] =
 MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
 MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0),
 MSG_MAP(AllowGpo, PPSMC_MSG_SetGpoAllow, 0),
+MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0),
 };

 static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
@@ -714,7 +714,7 @@ static int ast_primary_plane_init(struct ast_private *ast)
 struct ast_plane *ast_primary_plane = &ast->primary_plane;
 struct drm_plane *primary_plane = &ast_primary_plane->base;
 void __iomem *vaddr = ast->vram;
-u64 offset = ast->vram_base;
+u64 offset = 0; /* with shmem, the primary plane is always at offset 0 */
 unsigned long cursor_size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
 unsigned long size = ast->vram_fb_available - cursor_size;
 int ret;
@@ -972,7 +972,7 @@ static int ast_cursor_plane_init(struct ast_private *ast)
 return -ENOMEM;

 vaddr = ast->vram + ast->vram_fb_available - size;
-offset = ast->vram_base + ast->vram_fb_available - size;
+offset = ast->vram_fb_available - size;

 ret = ast_plane_init(dev, ast_cursor_plane, vaddr, offset, size,
 0x01, &ast_cursor_plane_funcs,
@@ -233,21 +233,17 @@ void drm_client_dev_restore(struct drm_device *dev)

 static void drm_client_buffer_delete(struct drm_client_buffer *buffer)
 {
-struct drm_device *dev = buffer->client->dev;
-
 if (buffer->gem) {
 drm_gem_vunmap_unlocked(buffer->gem, &buffer->map);
 drm_gem_object_put(buffer->gem);
 }

-if (buffer->handle)
-drm_mode_destroy_dumb(dev, buffer->handle, buffer->client->file);
-
 kfree(buffer);
 }

 static struct drm_client_buffer *
-drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format)
+drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height,
+u32 format, u32 *handle)
 {
 const struct drm_format_info *info = drm_format_info(format);
 struct drm_mode_create_dumb dumb_args = { };
@@ -269,16 +265,15 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u
 if (ret)
 goto err_delete;

-buffer->handle = dumb_args.handle;
-buffer->pitch = dumb_args.pitch;
-
 obj = drm_gem_object_lookup(client->file, dumb_args.handle);
 if (!obj) {
 ret = -ENOENT;
 goto err_delete;
 }

+buffer->pitch = dumb_args.pitch;
 buffer->gem = obj;
+*handle = dumb_args.handle;

 return buffer;

@@ -365,7 +360,8 @@ static void drm_client_buffer_rmfb(struct drm_client_buffer *buffer)
 }

 static int drm_client_buffer_addfb(struct drm_client_buffer *buffer,
-u32 width, u32 height, u32 format)
+u32 width, u32 height, u32 format,
+u32 handle)
 {
 struct drm_client_dev *client = buffer->client;
 struct drm_mode_fb_cmd fb_req = { };
@@ -377,7 +373,7 @@ static int drm_client_buffer_addfb(struct drm_client_buffer *buffer,
 fb_req.depth = info->depth;
 fb_req.width = width;
 fb_req.height = height;
-fb_req.handle = buffer->handle;
+fb_req.handle = handle;
 fb_req.pitch = buffer->pitch;

 ret = drm_mode_addfb(client->dev, &fb_req, client->file);
@@ -414,13 +410,24 @@ struct drm_client_buffer *
 drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format)
 {
 struct drm_client_buffer *buffer;
+u32 handle;
 int ret;

-buffer = drm_client_buffer_create(client, width, height, format);
+buffer = drm_client_buffer_create(client, width, height, format,
+&handle);
 if (IS_ERR(buffer))
 return buffer;

-ret = drm_client_buffer_addfb(buffer, width, height, format);
+ret = drm_client_buffer_addfb(buffer, width, height, format, handle);
+
+/*
+ * The handle is only needed for creating the framebuffer, destroy it
+ * again to solve a circular dependency should anybody export the GEM
+ * object as DMA-buf. The framebuffer and our buffer structure are still
+ * holding references to the GEM object to prevent its destruction.
+ */
+drm_mode_destroy_dumb(client->dev, handle, client->file);
+
 if (ret) {
 drm_client_buffer_delete(buffer);
 return ERR_PTR(ret);
@@ -2466,6 +2466,22 @@ static enum port dvo_port_to_port(struct drm_i915_private *i915,
 dvo_port);
 }

+static enum port
+dsi_dvo_port_to_port(struct drm_i915_private *i915, u8 dvo_port)
+{
+switch (dvo_port) {
+case DVO_PORT_MIPIA:
+return PORT_A;
+case DVO_PORT_MIPIC:
+if (DISPLAY_VER(i915) >= 11)
+return PORT_B;
+else
+return PORT_C;
+default:
+return PORT_NONE;
+}
+}
+
 static int parse_bdb_230_dp_max_link_rate(const int vbt_max_link_rate)
 {
 switch (vbt_max_link_rate) {
@@ -3414,19 +3430,16 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *i915,

 dvo_port = child->dvo_port;

-if (dvo_port == DVO_PORT_MIPIA ||
-(dvo_port == DVO_PORT_MIPIB && DISPLAY_VER(i915) >= 11) ||
-(dvo_port == DVO_PORT_MIPIC && DISPLAY_VER(i915) < 11)) {
-if (port)
-*port = dvo_port - DVO_PORT_MIPIA;
-return true;
-} else if (dvo_port == DVO_PORT_MIPIB ||
-dvo_port == DVO_PORT_MIPIC ||
-dvo_port == DVO_PORT_MIPID) {
+if (dsi_dvo_port_to_port(i915, dvo_port) == PORT_NONE) {
 drm_dbg_kms(&i915->drm,
 "VBT has unsupported DSI port %c\n",
 port_name(dvo_port - DVO_PORT_MIPIA));
 continue;
 }

+if (port)
+*port = dsi_dvo_port_to_port(i915, dvo_port);
 return true;
 }

 return false;
@@ -3511,7 +3524,7 @@ bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
 if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
 continue;

-if (child->dvo_port - DVO_PORT_MIPIA == encoder->port) {
+if (dsi_dvo_port_to_port(i915, child->dvo_port) == encoder->port) {
 if (!devdata->dsc)
 return false;
@ -328,8 +328,20 @@ out_unlock:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int intelfb_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip)
|
||||
{
|
||||
if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
|
||||
return 0;
|
||||
|
||||
if (helper->fb->funcs->dirty)
|
||||
return helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
|
||||
.fb_probe = intelfb_create,
|
||||
.fb_dirty = intelfb_dirty,
|
||||
};
|
||||
|
||||
static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
|
||||
|
@ -1587,7 +1587,8 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
|
||||
skl_check_wm_level(&wm->wm[level], ddb);
|
||||
|
||||
if (icl_need_wm1_wa(i915, plane_id) &&
|
||||
level == 1 && wm->wm[0].enable) {
|
||||
level == 1 && !wm->wm[level].enable &&
|
||||
wm->wm[0].enable) {
|
||||
wm->wm[level].blocks = wm->wm[0].blocks;
|
||||
wm->wm[level].lines = wm->wm[0].lines;
|
||||
wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
|
||||
|
@ -3483,6 +3483,13 @@ err_request:
|
||||
eb.composite_fence :
|
||||
&eb.requests[0]->fence);
|
||||
|
||||
if (unlikely(eb.gem_context->syncobj)) {
|
||||
drm_syncobj_replace_fence(eb.gem_context->syncobj,
|
||||
eb.composite_fence ?
|
||||
eb.composite_fence :
|
||||
&eb.requests[0]->fence);
|
||||
}
|
||||
|
||||
if (out_fence) {
|
||||
if (err == 0) {
|
||||
fd_install(out_fence_fd, out_fence->file);
|
||||
@ -3494,13 +3501,6 @@ err_request:
|
||||
}
|
||||
}
|
||||
|
||||
if (unlikely(eb.gem_context->syncobj)) {
|
||||
drm_syncobj_replace_fence(eb.gem_context->syncobj,
|
||||
eb.composite_fence ?
|
||||
eb.composite_fence :
|
||||
&eb.requests[0]->fence);
|
||||
}
|
||||
|
||||
if (!out_fence && eb.composite_fence)
|
||||
dma_fence_put(eb.composite_fence);
|
||||
|
||||
|
@ -579,7 +579,7 @@ static int shmem_object_init(struct intel_memory_region *mem,
|
||||
mapping_set_gfp_mask(mapping, mask);
|
||||
GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
|
||||
|
||||
i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, 0);
|
||||
i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, flags);
|
||||
obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
|
||||
obj->write_domain = I915_GEM_DOMAIN_CPU;
|
||||
obj->read_domains = I915_GEM_DOMAIN_CPU;
|
||||
|
@ -1355,6 +1355,13 @@ icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
|
||||
GAMT_CHKN_BIT_REG,
|
||||
GAMT_CHKN_DISABLE_L3_COH_PIPE);
|
||||
|
||||
/*
|
||||
* Wa_1408615072:icl,ehl (vsunit)
|
||||
* Wa_1407596294:icl,ehl (hsunit)
|
||||
*/
|
||||
wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
|
||||
VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
|
||||
|
||||
/* Wa_1407352427:icl,ehl */
|
||||
wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
|
||||
PSDUNIT_CLKGATE_DIS);
|
||||
@ -2539,13 +2546,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
|
||||
wa_masked_en(wal, GEN9_CSFE_CHICKEN1_RCS,
|
||||
GEN11_ENABLE_32_PLANE_MODE);
|
||||
|
||||
/*
|
||||
* Wa_1408615072:icl,ehl (vsunit)
|
||||
* Wa_1407596294:icl,ehl (hsunit)
|
||||
*/
|
||||
wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
|
||||
VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
|
||||
|
||||
/*
|
||||
* Wa_1408767742:icl[a2..forever],ehl[all]
|
||||
* Wa_1605460711:icl[a0..c0]
|
||||
|
@ -711,7 +711,7 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
 		struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
 
 		if (vc4_encoder->type == VC4_ENCODER_TYPE_HDMI0) {
-			vc4_state->hvs_load = max(mode->clock * mode->hdisplay / mode->htotal + 1000,
+			vc4_state->hvs_load = max(mode->clock * mode->hdisplay / mode->htotal + 8000,
 						  mode->clock * 9 / 10) * 1000;
 		} else {
 			vc4_state->hvs_load = mode->clock * 1000;

@ -97,6 +97,10 @@
 #define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_SHIFT	8
 #define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_MASK	VC4_MASK(15, 8)
 
+#define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_0_MASK	VC4_MASK(7, 0)
+#define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_0_SET_AVMUTE	BIT(0)
+#define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_0_CLEAR_AVMUTE	BIT(4)
+
 # define VC4_HD_M_SW_RST	BIT(2)
 # define VC4_HD_M_ENABLE	BIT(0)
 

@ -1306,7 +1310,6 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
 			     VC4_HDMI_VERTB_VBP));
 	unsigned long flags;
 	unsigned char gcp;
-	bool gcp_en;
 	u32 reg;
 	int idx;
 

@ -1341,16 +1344,13 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
 	switch (vc4_state->output_bpc) {
 	case 12:
 		gcp = 6;
-		gcp_en = true;
 		break;
 	case 10:
 		gcp = 5;
-		gcp_en = true;
 		break;
 	case 8:
 	default:
-		gcp = 4;
-		gcp_en = false;
+		gcp = 0;
 		break;
 	}
 

@ -1359,8 +1359,7 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
 	 * doesn't signal in GCP.
 	 */
 	if (vc4_state->output_format == VC4_HDMI_OUTPUT_YUV422) {
-		gcp = 4;
-		gcp_en = false;
+		gcp = 0;
 	}
 
 	reg = HDMI_READ(HDMI_DEEP_COLOR_CONFIG_1);

@ -1373,11 +1372,12 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
 	reg = HDMI_READ(HDMI_GCP_WORD_1);
 	reg &= ~VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_MASK;
 	reg |= VC4_SET_FIELD(gcp, VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1);
+	reg &= ~VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_0_MASK;
+	reg |= VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_0_CLEAR_AVMUTE;
 	HDMI_WRITE(HDMI_GCP_WORD_1, reg);
 
 	reg = HDMI_READ(HDMI_GCP_CONFIG);
 	reg &= ~VC5_HDMI_GCP_CONFIG_GCP_ENABLE;
-	reg |= gcp_en ? VC5_HDMI_GCP_CONFIG_GCP_ENABLE : 0;
+	reg |= VC5_HDMI_GCP_CONFIG_GCP_ENABLE;
 	HDMI_WRITE(HDMI_GCP_CONFIG, reg);
 
 	reg = HDMI_READ(HDMI_MISC_CONTROL);

@ -340,7 +340,7 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
 {
 	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
 	struct drm_framebuffer *fb = state->fb;
-	struct drm_gem_dma_object *bo = drm_fb_dma_get_gem_obj(fb, 0);
+	struct drm_gem_dma_object *bo;
 	int num_planes = fb->format->num_planes;
 	struct drm_crtc_state *crtc_state;
 	u32 h_subsample = fb->format->hsub;

@ -359,8 +359,10 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
 	if (ret)
 		return ret;
 
-	for (i = 0; i < num_planes; i++)
+	for (i = 0; i < num_planes; i++) {
+		bo = drm_fb_dma_get_gem_obj(fb, i);
 		vc4_state->offsets[i] = bo->dma_addr + fb->offsets[i];
+	}
 
 	/*
 	 * We don't support subpixel source positioning for scaling,

@ -126,7 +126,6 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 	void __user *user_bo_handles = NULL;
 	struct virtio_gpu_object_array *buflist = NULL;
 	struct sync_file *sync_file;
-	int in_fence_fd = exbuf->fence_fd;
 	int out_fence_fd = -1;
 	void *buf;
 	uint64_t fence_ctx;

@ -152,13 +151,11 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 		ring_idx = exbuf->ring_idx;
 	}
 
-	exbuf->fence_fd = -1;
-
 	virtio_gpu_create_context(dev, file);
 	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
 		struct dma_fence *in_fence;
 
-		in_fence = sync_file_get_fence(in_fence_fd);
+		in_fence = sync_file_get_fence(exbuf->fence_fd);
 
 		if (!in_fence)
 			return -EINVAL;
@ -462,6 +462,9 @@ int vmw_bo_create(struct vmw_private *vmw,
 		return -ENOMEM;
 	}
 
+	/*
+	 * vmw_bo_init will delete the *p_bo object if it fails
+	 */
 	ret = vmw_bo_init(vmw, *p_bo, size,
 			  placement, interruptible, pin,
 			  bo_free);

@ -470,7 +473,6 @@ int vmw_bo_create(struct vmw_private *vmw,
 
 	return ret;
 out_error:
-	kfree(*p_bo);
 	*p_bo = NULL;
 	return ret;
 }

@ -596,6 +598,7 @@ static int vmw_user_bo_synccpu_release(struct drm_file *filp,
 			ttm_bo_put(&vmw_bo->base);
 	}
 
+	drm_gem_object_put(&vmw_bo->base.base);
 	return ret;
 }
 

@ -636,6 +639,7 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
 
 		ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
 		vmw_bo_unreference(&vbo);
+		drm_gem_object_put(&vbo->base.base);
 		if (unlikely(ret != 0)) {
 			if (ret == -ERESTARTSYS || ret == -EBUSY)
 				return -EBUSY;

@ -693,7 +697,7 @@ int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
  * struct vmw_buffer_object should be placed.
  * Return: Zero on success, Negative error code on error.
  *
- * The vmw buffer object pointer will be refcounted.
+ * The vmw buffer object pointer will be refcounted (both ttm and gem)
  */
 int vmw_user_bo_lookup(struct drm_file *filp,
 		       uint32_t handle,

@ -710,7 +714,6 @@ int vmw_user_bo_lookup(struct drm_file *filp,
 
 	*out = gem_to_vmw_bo(gobj);
 	ttm_bo_get(&(*out)->base);
-	drm_gem_object_put(gobj);
 
 	return 0;
 }

@ -791,7 +794,8 @@ int vmw_dumb_create(struct drm_file *file_priv,
 	ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
 						args->size, &args->handle,
 						&vbo);
-
+	/* drop reference from allocate - handle holds it now */
+	drm_gem_object_put(&vbo->base.base);
 	return ret;
 }
 

@ -1160,6 +1160,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 	}
 	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
 	ttm_bo_put(&vmw_bo->base);
+	drm_gem_object_put(&vmw_bo->base.base);
 	if (unlikely(ret != 0))
 		return ret;
 

@ -1214,6 +1215,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 	}
 	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
 	ttm_bo_put(&vmw_bo->base);
+	drm_gem_object_put(&vmw_bo->base.base);
 	if (unlikely(ret != 0))
 		return ret;
 

@ -146,14 +146,12 @@ int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
 			      &vmw_sys_placement :
 			      &vmw_vram_sys_placement,
 			      true, false, &vmw_gem_destroy, p_vbo);
-
-	(*p_vbo)->base.base.funcs = &vmw_gem_object_funcs;
 	if (ret != 0)
 		goto out_no_bo;
 
+	(*p_vbo)->base.base.funcs = &vmw_gem_object_funcs;
+
 	ret = drm_gem_handle_create(filp, &(*p_vbo)->base.base, handle);
-	/* drop reference from allocate - handle holds it now */
-	drm_gem_object_put(&(*p_vbo)->base.base);
 out_no_bo:
 	return ret;
 }

@ -180,6 +178,8 @@ int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
 	rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
 	rep->cur_gmr_id = handle;
 	rep->cur_gmr_offset = 0;
+	/* drop reference from allocate - handle holds it now */
+	drm_gem_object_put(&vbo->base.base);
 out_no_bo:
 	return ret;
 }

@ -1815,8 +1815,10 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
 
 err_out:
 	/* vmw_user_lookup_handle takes one ref so does new_fb */
-	if (bo)
+	if (bo) {
 		vmw_bo_unreference(&bo);
+		drm_gem_object_put(&bo->base.base);
+	}
 	if (surface)
 		vmw_surface_unreference(&surface);
 

@ -458,6 +458,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
 	ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
 
 	vmw_bo_unreference(&buf);
+	drm_gem_object_put(&buf->base.base);
 
 out_unlock:
 	mutex_unlock(&overlay->mutex);

@ -807,6 +807,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
 			     num_output_sig, tfile, shader_handle);
 out_bad_arg:
 	vmw_bo_unreference(&buffer);
+	drm_gem_object_put(&buffer->base.base);
 	return ret;
 }
 

@ -683,7 +683,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
 	    container_of(base, struct vmw_user_surface, prime.base);
 	struct vmw_resource *res = &user_srf->srf.res;
 
-	if (base->shareable && res && res->backup)
+	if (res && res->backup)
 		drm_gem_object_put(&res->backup->base.base);
 
 	*p_base = NULL;

@ -864,7 +864,11 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 			goto out_unlock;
 		}
 		vmw_bo_reference(res->backup);
-		drm_gem_object_get(&res->backup->base.base);
+		/*
+		 * We don't expose the handle to the userspace and surface
+		 * already holds a gem reference
+		 */
+		drm_gem_handle_delete(file_priv, backup_handle);
 	}
 
 	tmp = vmw_resource_reference(&srf->res);

@ -1568,8 +1572,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
 			drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
 		rep->buffer_size = res->backup->base.base.size;
 		rep->buffer_handle = backup_handle;
-		if (user_srf->prime.base.shareable)
-			drm_gem_object_get(&res->backup->base.base);
 	} else {
 		rep->buffer_map_handle = 0;
 		rep->buffer_size = 0;
@ -26,8 +26,8 @@ int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
 	if (umem_dmabuf->sgt)
 		goto wait_fence;
 
-	sgt = dma_buf_map_attachment_unlocked(umem_dmabuf->attach,
-					      DMA_BIDIRECTIONAL);
+	sgt = dma_buf_map_attachment(umem_dmabuf->attach,
+				     DMA_BIDIRECTIONAL);
 	if (IS_ERR(sgt))
 		return PTR_ERR(sgt);
 

@ -103,8 +103,8 @@ void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf)
 		umem_dmabuf->last_sg_trim = 0;
 	}
 
-	dma_buf_unmap_attachment_unlocked(umem_dmabuf->attach, umem_dmabuf->sgt,
-					  DMA_BIDIRECTIONAL);
+	dma_buf_unmap_attachment(umem_dmabuf->attach, umem_dmabuf->sgt,
+				 DMA_BIDIRECTIONAL);
 
 	umem_dmabuf->sgt = NULL;
 }
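
Both hunks swap the unlocked dma-buf wrappers for the locked ones because callers of these helpers already hold the dma-buf reservation lock; the _unlocked variants take that lock internally and would deadlock. A minimal sketch of the rule, assuming a caller that takes the lock itself (illustrative only, not the RDMA call path):

static int sketch_map_locked(struct ib_umem_dmabuf *umem_dmabuf)
{
	struct sg_table *sgt;
	int ret;

	ret = dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL);
	if (ret)
		return ret;

	/* dma_buf_map_attachment() expects the reservation lock held;
	 * dma_buf_map_attachment_unlocked() would try to take it again
	 * here and deadlock. */
	sgt = dma_buf_map_attachment(umem_dmabuf->attach, DMA_BIDIRECTIONAL);

	dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
	return IS_ERR(sgt) ? PTR_ERR(sgt) : 0;
}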
@ -1318,12 +1318,15 @@ static int user_exp_rcv_setup(struct hfi1_filedata *fd, unsigned long arg,
 		addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
 		if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
 				 sizeof(tinfo.tidcnt)))
-			return -EFAULT;
+			ret = -EFAULT;
 
 		addr = arg + offsetof(struct hfi1_tid_info, length);
-		if (copy_to_user((void __user *)addr, &tinfo.length,
+		if (!ret && copy_to_user((void __user *)addr, &tinfo.length,
 				 sizeof(tinfo.length)))
 			ret = -EFAULT;
+
+		if (ret)
+			hfi1_user_exp_rcv_invalid(fd, &tinfo);
 	}
 
 	return ret;
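
The point of this hunk is that an early return on the first failed copy_to_user() skipped the unwinding of TID entries that had already been programmed. The fix latches the error and funnels everything through one cleanup point. The pattern, reduced to its shape (illustrative sketch; user_tidcnt/user_length stand in for the real user-space destinations):

static int sketch_copy_out(struct hfi1_filedata *fd,
			   struct hfi1_tid_info *tinfo,
			   void __user *user_tidcnt,
			   void __user *user_length)
{
	int ret = 0;

	/* latch -EFAULT instead of returning immediately */
	if (copy_to_user(user_tidcnt, &tinfo->tidcnt, sizeof(tinfo->tidcnt)))
		ret = -EFAULT;
	if (!ret && copy_to_user(user_length, &tinfo->length,
				 sizeof(tinfo->length)))
		ret = -EFAULT;

	/* single cleanup point undoes the partially published state */
	if (ret)
		hfi1_user_exp_rcv_invalid(fd, tinfo);
	return ret;
}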
@ -160,16 +160,11 @@ static void unpin_rcv_pages(struct hfi1_filedata *fd,
 static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
 {
 	int pinned;
-	unsigned int npages;
+	unsigned int npages = tidbuf->npages;
 	unsigned long vaddr = tidbuf->vaddr;
 	struct page **pages = NULL;
 	struct hfi1_devdata *dd = fd->uctxt->dd;
 
-	/* Get the number of pages the user buffer spans */
-	npages = num_user_pages(vaddr, tidbuf->length);
-	if (!npages)
-		return -EINVAL;
-
 	if (npages > fd->uctxt->expected_count) {
 		dd_dev_err(dd, "Expected buffer too big\n");
 		return -EINVAL;

@ -196,7 +191,6 @@ static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
 		return pinned;
 	}
 	tidbuf->pages = pages;
-	tidbuf->npages = npages;
 	fd->tid_n_pinned += pinned;
 	return pinned;
 }

@ -274,6 +268,7 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
 	mutex_init(&tidbuf->cover_mutex);
 	tidbuf->vaddr = tinfo->vaddr;
 	tidbuf->length = tinfo->length;
+	tidbuf->npages = num_user_pages(tidbuf->vaddr, tidbuf->length);
 	tidbuf->psets = kcalloc(uctxt->expected_count, sizeof(*tidbuf->psets),
 				GFP_KERNEL);
 	if (!tidbuf->psets) {

@ -1722,6 +1722,9 @@ static int irdma_add_mqh_4(struct irdma_device *iwdev,
 			continue;
 
 		idev = in_dev_get(ip_dev);
+		if (!idev)
+			continue;
+
 		in_dev_for_each_ifa_rtnl(ifa, idev) {
 			ibdev_dbg(&iwdev->ibdev,
 				  "CM: Allocating child CM Listener forIP=%pI4, vlan_id=%d, MAC=%pM\n",

@ -289,7 +289,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
 
 	/* IB ports start with 1, MANA Ethernet ports start with 0 */
 	port = ucmd.port;
-	if (ucmd.port > mc->num_ports)
+	if (port < 1 || port > mc->num_ports)
 		return -EINVAL;
 
 	if (attr->cap.max_send_wr > MAX_SEND_BUFFERS_PER_QUEUE) {

@ -276,8 +276,8 @@ iter_chunk:
 			size = pa_end - pa_start + PAGE_SIZE;
 			usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x",
 				  va_start, &pa_start, size, flags);
-			err = iommu_map(pd->domain, va_start, pa_start,
-					size, flags);
+			err = iommu_map_atomic(pd->domain, va_start,
+					       pa_start, size, flags);
 			if (err) {
 				usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
 					  va_start, &pa_start, size, err);

@ -293,8 +293,8 @@ iter_chunk:
 			size = pa - pa_start + PAGE_SIZE;
 			usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
 				  va_start, &pa_start, size, flags);
-			err = iommu_map(pd->domain, va_start, pa_start,
-					size, flags);
+			err = iommu_map_atomic(pd->domain, va_start,
+					       pa_start, size, flags);
 			if (err) {
 				usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
 					  va_start, &pa_start, size, err);

@ -2200,6 +2200,14 @@ int ipoib_intf_init(struct ib_device *hca, u32 port, const char *name,
 		rn->attach_mcast = ipoib_mcast_attach;
 		rn->detach_mcast = ipoib_mcast_detach;
 		rn->hca = hca;
+
+		rc = netif_set_real_num_tx_queues(dev, 1);
+		if (rc)
+			goto out;
+
+		rc = netif_set_real_num_rx_queues(dev, 1);
+		if (rc)
+			goto out;
 	}
 
 	priv->rn_ops = dev->netdev_ops;

@ -312,9 +312,8 @@ void rtrs_srv_destroy_path_files(struct rtrs_srv_path *srv_path)
 
 	if (srv_path->kobj.state_in_sysfs) {
 		sysfs_remove_group(&srv_path->kobj, &rtrs_srv_path_attr_group);
+		kobject_del(&srv_path->kobj);
 		kobject_put(&srv_path->kobj);
-		rtrs_srv_destroy_once_sysfs_root_folders(srv_path);
 	}
 
+	rtrs_srv_destroy_once_sysfs_root_folders(srv_path);
 }

@ -240,12 +240,12 @@ static int bgmac_probe(struct bcma_device *core)
 		bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
 		bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1;
 		bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY;
-		if (ci->pkg == BCMA_PKG_ID_BCM47188 ||
-		    ci->pkg == BCMA_PKG_ID_BCM47186) {
+		if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
+		    (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188)) {
 			bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII;
 			bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
 		}
-		if (ci->pkg == BCMA_PKG_ID_BCM5358)
+		if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM5358)
 			bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_EPHYRMII;
 		break;
 	case BCMA_CHIP_ID_BCM53573:

@ -9273,10 +9273,14 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
 		netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
 		return rc;
 	}
-	if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
+	if (tcs && (bp->tx_nr_rings_per_tc * tcs !=
+		    bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
 		netdev_err(bp->dev, "tx ring reservation failure\n");
 		netdev_reset_tc(bp->dev);
-		bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+		if (bp->tx_nr_rings_xdp)
+			bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
+		else
+			bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
 		return -ENOMEM;
 	}
 	return 0;

@ -2921,7 +2921,7 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
 	struct i40e_pf *pf = vsi->back;
 
 	if (i40e_enabled_xdp_vsi(vsi)) {
-		int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+		int frame_size = new_mtu + I40E_PACKET_HDR_PAD;
 
 		if (frame_size > i40e_max_xdp_frame_size(vsi))
 			return -EINVAL;

@ -13167,6 +13167,8 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
 	}
 
 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+	if (!br_spec)
+		return -EINVAL;
 
 	nla_for_each_nested(attr, br_spec, rem) {
 		__u16 mode;

@ -927,7 +927,7 @@ static int ice_set_object_tx_priority(struct ice_port_info *pi, struct ice_sched
 {
 	int status;
 
-	if (node->tx_priority >= 8) {
+	if (priority >= 8) {
 		NL_SET_ERR_MSG_MOD(extack, "Priority should be less than 8");
 		return -EINVAL;
 	}

@ -957,7 +957,7 @@ static int ice_set_object_tx_weight(struct ice_port_info *pi, struct ice_sched_n
 {
 	int status;
 
-	if (node->tx_weight > 200 || node->tx_weight < 1) {
+	if (weight > 200 || weight < 1) {
 		NL_SET_ERR_MSG_MOD(extack, "Weight must be between 1 and 200");
 		return -EINVAL;
 	}
@ -275,6 +275,8 @@ static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
 	if (status && status != -EEXIST)
 		return status;
 
+	netdev_dbg(vsi->netdev, "set promisc filter bits for VSI %i: 0x%x\n",
+		   vsi->vsi_num, promisc_m);
 	return 0;
 }
 

@ -300,6 +302,8 @@ static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
 					    promisc_m, 0);
 	}
 
+	netdev_dbg(vsi->netdev, "clear promisc filter bits for VSI %i: 0x%x\n",
+		   vsi->vsi_num, promisc_m);
 	return status;
 }
 

@ -414,6 +418,16 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
 			}
 			err = 0;
 			vlan_ops->dis_rx_filtering(vsi);
+
+			/* promiscuous mode implies allmulticast so
+			 * that VSIs that are in promiscuous mode are
+			 * subscribed to multicast packets coming to
+			 * the port
+			 */
+			err = ice_set_promisc(vsi,
+					      ICE_MCAST_PROMISC_BITS);
+			if (err)
+				goto out_promisc;
 		}
 	} else {
 		/* Clear Rx filter to remove traffic from wire */

@ -430,6 +444,18 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
 			    NETIF_F_HW_VLAN_CTAG_FILTER)
 				vlan_ops->ena_rx_filtering(vsi);
 		}
+
+		/* disable allmulti here, but only if allmulti is not
+		 * still enabled for the netdev
+		 */
+		if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
+			err = ice_clear_promisc(vsi,
+						ICE_MCAST_PROMISC_BITS);
+			if (err) {
+				netdev_err(netdev, "Error %d clearing multicast promiscuous on VSI %i\n",
+					   err, vsi->vsi_num);
+			}
+		}
 		}
 	}
 	goto exit;
@ -598,7 +598,22 @@ ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
 }
 
 /**
- * ice_clean_xdp_irq_zc - AF_XDP ZC specific Tx cleaning routine
+ * ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer
+ * @xdp_ring: XDP Tx ring
+ * @tx_buf: Tx buffer to clean
+ */
+static void
+ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf)
+{
+	page_frag_free(tx_buf->raw_buf);
+	xdp_ring->xdp_tx_active--;
+	dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
+			 dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
+	dma_unmap_len_set(tx_buf, len, 0);
+}
+
+/**
+ * ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ
  * @xdp_ring: XDP Tx ring
  */
 static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)

@ -607,44 +622,47 @@ static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
 	struct ice_tx_desc *tx_desc;
 	u16 cnt = xdp_ring->count;
 	struct ice_tx_buf *tx_buf;
+	u16 completed_frames = 0;
 	u16 xsk_frames = 0;
 	u16 last_rs;
 	int i;
 
 	last_rs = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : cnt - 1;
 	tx_desc = ICE_TX_DESC(xdp_ring, last_rs);
-	if (tx_desc->cmd_type_offset_bsz &
-	    cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)) {
+	if ((tx_desc->cmd_type_offset_bsz &
+	    cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
 		if (last_rs >= ntc)
-			xsk_frames = last_rs - ntc + 1;
+			completed_frames = last_rs - ntc + 1;
 		else
-			xsk_frames = last_rs + cnt - ntc + 1;
+			completed_frames = last_rs + cnt - ntc + 1;
 	}
 
-	if (!xsk_frames)
+	if (!completed_frames)
 		return;
 
-	if (likely(!xdp_ring->xdp_tx_active))
+	if (likely(!xdp_ring->xdp_tx_active)) {
+		xsk_frames = completed_frames;
 		goto skip;
+	}
 
 	ntc = xdp_ring->next_to_clean;
-	for (i = 0; i < xsk_frames; i++) {
+	for (i = 0; i < completed_frames; i++) {
 		tx_buf = &xdp_ring->tx_buf[ntc];
 
-		if (tx_buf->xdp) {
-			xsk_buff_free(tx_buf->xdp);
-			xdp_ring->xdp_tx_active--;
+		if (tx_buf->raw_buf) {
+			ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
+			tx_buf->raw_buf = NULL;
 		} else {
 			xsk_frames++;
 		}
 
 		ntc++;
-		if (ntc == cnt)
+		if (ntc >= xdp_ring->count)
 			ntc = 0;
 	}
 skip:
 	tx_desc->cmd_type_offset_bsz = 0;
-	xdp_ring->next_to_clean += xsk_frames;
+	xdp_ring->next_to_clean += completed_frames;
 	if (xdp_ring->next_to_clean >= cnt)
 		xdp_ring->next_to_clean -= cnt;
 	if (xsk_frames)
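
The rework separates two counts that the old code conflated: completed_frames (descriptors the hardware finished, which always advance next_to_clean) and xsk_frames (buffers actually owned by the XSK pool and therefore reported to the pool); XDP_TX buffers are instead unmapped and freed via ice_clean_xdp_tx_buf(). The wrap-around arithmetic, pulled out as a stand-alone helper with worked numbers (illustrative sketch):

/* Example: cnt = 512, ntc = 500, last_rs = 3 wraps past the end of
 * the ring and gives 3 + 512 - 500 + 1 = 16 completed descriptors. */
static u16 sketch_completed_frames(u16 last_rs, u16 ntc, u16 cnt)
{
	if (last_rs >= ntc)
		return last_rs - ntc + 1;
	return last_rs + cnt - ntc + 1;
}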
@ -2256,6 +2256,30 @@ static void igb_enable_mas(struct igb_adapter *adapter)
 	}
 }
 
+#ifdef CONFIG_IGB_HWMON
+/**
+ *  igb_set_i2c_bb - Init I2C interface
+ *  @hw: pointer to hardware structure
+ **/
+static void igb_set_i2c_bb(struct e1000_hw *hw)
+{
+	u32 ctrl_ext;
+	s32 i2cctl;
+
+	ctrl_ext = rd32(E1000_CTRL_EXT);
+	ctrl_ext |= E1000_CTRL_I2C_ENA;
+	wr32(E1000_CTRL_EXT, ctrl_ext);
+	wrfl();
+
+	i2cctl = rd32(E1000_I2CPARAMS);
+	i2cctl |= E1000_I2CBB_EN
+		| E1000_I2C_CLK_OE_N
+		| E1000_I2C_DATA_OE_N;
+	wr32(E1000_I2CPARAMS, i2cctl);
+	wrfl();
+}
+#endif
+
 void igb_reset(struct igb_adapter *adapter)
 {
 	struct pci_dev *pdev = adapter->pdev;

@ -2400,7 +2424,8 @@ void igb_reset(struct igb_adapter *adapter)
 			 * interface.
 			 */
 			if (adapter->ets)
-				mac->ops.init_thermal_sensor_thresh(hw);
+				igb_set_i2c_bb(hw);
+			mac->ops.init_thermal_sensor_thresh(hw);
 		}
 	}
 #endif

@ -3141,21 +3166,12 @@ static void igb_init_mas(struct igb_adapter *adapter)
 **/
 static s32 igb_init_i2c(struct igb_adapter *adapter)
 {
-	struct e1000_hw *hw = &adapter->hw;
 	s32 status = 0;
-	s32 i2cctl;
 
 	/* I2C interface supported on i350 devices */
 	if (adapter->hw.mac.type != e1000_i350)
 		return 0;
 
-	i2cctl = rd32(E1000_I2CPARAMS);
-	i2cctl |= E1000_I2CBB_EN
-		| E1000_I2C_CLK_OUT | E1000_I2C_CLK_OE_N
-		| E1000_I2C_DATA_OUT | E1000_I2C_DATA_OE_N;
-	wr32(E1000_I2CPARAMS, i2cctl);
-	wrfl();
-
 	/* Initialize the i2c bus which is controlled by the registers.
 	 * This bus will use the i2c_algo_bit structure that implements
 	 * the protocol through toggling of the 4 bits in the register.

@ -3544,6 +3560,12 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 			adapter->ets = true;
 		else
 			adapter->ets = false;
+		/* Only enable I2C bit banging if an external thermal
+		 * sensor is supported.
+		 */
+		if (adapter->ets)
+			igb_set_i2c_bb(hw);
+		hw->mac.ops.init_thermal_sensor_thresh(hw);
 		if (igb_sysfs_init(adapter))
 			dev_err(&pdev->dev,
 				"failed to allocate sysfs resources\n");

@ -6814,7 +6836,7 @@ static void igb_perout(struct igb_adapter *adapter, int tsintr_tt)
 	struct timespec64 ts;
 	u32 tsauxc;
 
-	if (pin < 0 || pin >= IGB_N_PEROUT)
+	if (pin < 0 || pin >= IGB_N_SDP)
 		return;
 
 	spin_lock(&adapter->tmreg_lock);

@ -6822,7 +6844,7 @@ static void igb_perout(struct igb_adapter *adapter, int tsintr_tt)
 	if (hw->mac.type == e1000_82580 ||
 	    hw->mac.type == e1000_i354 ||
 	    hw->mac.type == e1000_i350) {
-		s64 ns = timespec64_to_ns(&adapter->perout[pin].period);
+		s64 ns = timespec64_to_ns(&adapter->perout[tsintr_tt].period);
 		u32 systiml, systimh, level_mask, level, rem;
 		u64 systim, now;
 

@ -6870,8 +6892,8 @@ static void igb_perout(struct igb_adapter *adapter, int tsintr_tt)
 		ts.tv_nsec = (u32)systim;
 		ts.tv_sec = ((u32)(systim >> 32)) & 0xFF;
 	} else {
-		ts = timespec64_add(adapter->perout[pin].start,
-				    adapter->perout[pin].period);
+		ts = timespec64_add(adapter->perout[tsintr_tt].start,
+				    adapter->perout[tsintr_tt].period);
 	}
 
 	/* u32 conversion of tv_sec is safe until y2106 */

@ -6880,7 +6902,7 @@ static void igb_perout(struct igb_adapter *adapter, int tsintr_tt)
 	tsauxc = rd32(E1000_TSAUXC);
 	tsauxc |= TSAUXC_EN_TT0;
 	wr32(E1000_TSAUXC, tsauxc);
-	adapter->perout[pin].start = ts;
+	adapter->perout[tsintr_tt].start = ts;
 
 	spin_unlock(&adapter->tmreg_lock);
 }

@ -6894,7 +6916,7 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)
 	struct ptp_clock_event event;
 	struct timespec64 ts;
 
-	if (pin < 0 || pin >= IGB_N_EXTTS)
+	if (pin < 0 || pin >= IGB_N_SDP)
 		return;
 
 	if (hw->mac.type == e1000_82580 ||
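
The igb_perout()/igb_extts() hunks all fix the same index confusion: tsintr_tt numbers the target-time timer and is the correct index into adapter->perout[], while pin is the SDP pin resolved for that timer and is only meaningful for a bounds check against IGB_N_SDP. The distilled shape of the fix (an illustrative sketch under the assumption that pin comes from ptp_find_pin(), as in the driver; not code from the patch):

static void sketch_perout(struct igb_adapter *adapter, int tsintr_tt)
{
	int pin = ptp_find_pin(adapter->ptp_clock, PTP_PF_PEROUT,
			       tsintr_tt);

	/* validate in pin space ... */
	if (pin < 0 || pin >= IGB_N_SDP)
		return;

	/* ... but index per-timer state in timer space */
	adapter->perout[tsintr_tt].start =
		timespec64_add(adapter->perout[tsintr_tt].start,
			       adapter->perout[tsintr_tt].period);
}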
@ -73,6 +73,8 @@
 #define IXGBE_RXBUFFER_4K	4096
 #define IXGBE_MAX_RXBUFFER	16384  /* largest size for a single descriptor */
 
+#define IXGBE_PKT_HDR_PAD	(ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
+
 /* Attempt to maximize the headroom available for incoming frames. We
  * use a 2K buffer for receives and need 1536/1534 to store the data for
  * the frame. This leaves us with 512 bytes of room. From that we need

@ -6777,6 +6777,18 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
 		ixgbe_free_rx_resources(adapter->rx_ring[i]);
 }
 
+/**
+ * ixgbe_max_xdp_frame_size - returns the maximum allowed frame size for XDP
+ * @adapter: device handle, pointer to adapter
+ */
+static int ixgbe_max_xdp_frame_size(struct ixgbe_adapter *adapter)
+{
+	if (PAGE_SIZE >= 8192 || adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
+		return IXGBE_RXBUFFER_2K;
+	else
+		return IXGBE_RXBUFFER_3K;
+}
+
 /**
  * ixgbe_change_mtu - Change the Maximum Transfer Unit
  * @netdev: network interface device structure

@ -6788,18 +6800,12 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
-	if (adapter->xdp_prog) {
-		int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN +
-				     VLAN_HLEN;
-		int i;
+	if (ixgbe_enabled_xdp_adapter(adapter)) {
+		int new_frame_size = new_mtu + IXGBE_PKT_HDR_PAD;
 
-		for (i = 0; i < adapter->num_rx_queues; i++) {
-			struct ixgbe_ring *ring = adapter->rx_ring[i];
-
-			if (new_frame_size > ixgbe_rx_bufsz(ring)) {
-				e_warn(probe, "Requested MTU size is not supported with XDP\n");
-				return -EINVAL;
-			}
+		if (new_frame_size > ixgbe_max_xdp_frame_size(adapter)) {
+			e_warn(probe, "Requested MTU size is not supported with XDP\n");
+			return -EINVAL;
 		}
 	}
 
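
The MTU check now compares against a fixed per-adapter maximum rather than walking per-ring buffer sizes. Worked numbers, as an illustrative helper: IXGBE_PKT_HDR_PAD is 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 8 (two VLAN tags) = 26 bytes, so with 4K pages and legacy RX disabled the limit of IXGBE_RXBUFFER_3K (3072) permits an MTU of up to 3072 - 26 = 3046.

/* Illustrative restatement of the new check (not part of the patch). */
static bool sketch_xdp_mtu_ok(struct ixgbe_adapter *adapter, int new_mtu)
{
	return new_mtu + IXGBE_PKT_HDR_PAD <=
	       ixgbe_max_xdp_frame_size(adapter);
}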
@ -130,26 +130,31 @@ struct nfp_ipsec_cfg_mssg {
 	};
 };
 
-static int nfp_ipsec_cfg_cmd_issue(struct nfp_net *nn, int type, int saidx,
-				   struct nfp_ipsec_cfg_mssg *msg)
+static int nfp_net_ipsec_cfg(struct nfp_net *nn, struct nfp_mbox_amsg_entry *entry)
 {
+	unsigned int offset = nn->tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL;
+	struct nfp_ipsec_cfg_mssg *msg = (struct nfp_ipsec_cfg_mssg *)entry->msg;
 	int i, msg_size, ret;
 
-	msg->cmd = type;
-	msg->sa_idx = saidx;
-	msg->rsp = 0;
-	msg_size = ARRAY_SIZE(msg->raw);
-
-	for (i = 0; i < msg_size; i++)
-		nn_writel(nn, NFP_NET_CFG_MBOX_VAL + 4 * i, msg->raw[i]);
-
-	ret = nfp_net_mbox_reconfig(nn, NFP_NET_CFG_MBOX_CMD_IPSEC);
-	if (ret < 0)
+	ret = nfp_net_mbox_lock(nn, sizeof(*msg));
+	if (ret)
 		return ret;
 
+	msg_size = ARRAY_SIZE(msg->raw);
+	for (i = 0; i < msg_size; i++)
+		nn_writel(nn, offset + 4 * i, msg->raw[i]);
+
+	ret = nfp_net_mbox_reconfig(nn, entry->cmd);
+	if (ret < 0) {
+		nn_ctrl_bar_unlock(nn);
+		return ret;
+	}
+
 	/* For now we always read the whole message response back */
 	for (i = 0; i < msg_size; i++)
-		msg->raw[i] = nn_readl(nn, NFP_NET_CFG_MBOX_VAL + 4 * i);
+		msg->raw[i] = nn_readl(nn, offset + 4 * i);
+
+	nn_ctrl_bar_unlock(nn);
 
 	switch (msg->rsp) {
 	case NFP_IPSEC_CFG_MSSG_OK:

@ -487,7 +492,10 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x,
 	}
 
 	/* Allocate saidx and commit the SA */
-	err = nfp_ipsec_cfg_cmd_issue(nn, NFP_IPSEC_CFG_MSSG_ADD_SA, saidx, &msg);
+	msg.cmd = NFP_IPSEC_CFG_MSSG_ADD_SA;
+	msg.sa_idx = saidx;
+	err = nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_IPSEC, &msg,
+					   sizeof(msg), nfp_net_ipsec_cfg);
 	if (err) {
 		xa_erase(&nn->xa_ipsec, saidx);
 		NL_SET_ERR_MSG_MOD(extack, "Failed to issue IPsec command");

@ -501,14 +509,17 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x,
 
 static void nfp_net_xfrm_del_state(struct xfrm_state *x)
 {
+	struct nfp_ipsec_cfg_mssg msg = {
+		.cmd = NFP_IPSEC_CFG_MSSG_INV_SA,
+		.sa_idx = x->xso.offload_handle - 1,
+	};
 	struct net_device *netdev = x->xso.dev;
-	struct nfp_ipsec_cfg_mssg msg;
 	struct nfp_net *nn;
 	int err;
 
 	nn = netdev_priv(netdev);
-	err = nfp_ipsec_cfg_cmd_issue(nn, NFP_IPSEC_CFG_MSSG_INV_SA,
-				      x->xso.offload_handle - 1, &msg);
+	err = nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_IPSEC, &msg,
+					   sizeof(msg), nfp_net_ipsec_cfg);
 	if (err)
 		nn_warn(nn, "Failed to invalidate SA in hardware\n");
 

@ -617,9 +617,10 @@ struct nfp_net_dp {
  * @vnic_no_name:	For non-port PF vNIC make ndo_get_phys_port_name return
  *			-EOPNOTSUPP to keep backwards compatibility (set by app)
  * @port:		Pointer to nfp_port structure if vNIC is a port
- * @mc_lock:		Protect mc_addrs list
- * @mc_addrs:		List of mc addrs to add/del to HW
- * @mc_work:		Work to update mc addrs
+ * @mbox_amsg:		Asynchronously processed message via mailbox
+ * @mbox_amsg.lock:	Protect message list
+ * @mbox_amsg.list:	List of message to process
+ * @mbox_amsg.work:	Work to process message asynchronously
  * @app_priv:		APP private data for this vNIC
  */
 struct nfp_net {

@ -721,13 +722,25 @@ struct nfp_net {
 
 	struct nfp_port *port;
 
-	spinlock_t mc_lock;
-	struct list_head mc_addrs;
-	struct work_struct mc_work;
+	struct {
+		spinlock_t lock;
+		struct list_head list;
+		struct work_struct work;
+	} mbox_amsg;
 
 	void *app_priv;
 };
 
+struct nfp_mbox_amsg_entry {
+	struct list_head list;
+	int (*cfg)(struct nfp_net *nn, struct nfp_mbox_amsg_entry *entry);
+	u32 cmd;
+	char msg[];
+};
+
+int nfp_net_sched_mbox_amsg_work(struct nfp_net *nn, u32 cmd, const void *data, size_t len,
+				 int (*cb)(struct nfp_net *, struct nfp_mbox_amsg_entry *));
+
 /* Functions to read/write from/to a BAR
  * Performs any endian conversion necessary.
  */
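
The struct nfp_mbox_amsg_entry / nfp_net_sched_mbox_amsg_work() pair generalizes the old multicast-only deferral: a caller packs its message, names the mailbox command, and supplies the function that will run later from the work item. Shape of a user of the API (hypothetical sketch_ names; the real callers are nfp_net_mc_cfg() and nfp_net_ipsec_cfg() below):

/* Runs later from nfp_net_mbox_amsg_work(): this is where the mailbox
 * is locked, entry->msg written out, entry->cmd reconfigured, and the
 * response read back. */
static int sketch_cfg(struct nfp_net *nn, struct nfp_mbox_amsg_entry *entry)
{
	return 0;
}

/* The submit side only copies the message and kicks the work item, so
 * it is safe from atomic context (the entry is GFP_ATOMIC). */
static int sketch_queue(struct nfp_net *nn, const void *msg, size_t len,
			u32 cmd)
{
	return nfp_net_sched_mbox_amsg_work(nn, cmd, msg, len, sketch_cfg);
}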
@ -1334,14 +1334,54 @@ err_unlock:
 	return err;
 }
 
-struct nfp_mc_addr_entry {
-	u8 addr[ETH_ALEN];
-	u32 cmd;
-	struct list_head list;
-};
-
-static int nfp_net_mc_cfg(struct nfp_net *nn, const unsigned char *addr, const u32 cmd)
+int nfp_net_sched_mbox_amsg_work(struct nfp_net *nn, u32 cmd, const void *data, size_t len,
+				 int (*cb)(struct nfp_net *, struct nfp_mbox_amsg_entry *))
 {
+	struct nfp_mbox_amsg_entry *entry;
+
+	entry = kmalloc(sizeof(*entry) + len, GFP_ATOMIC);
+	if (!entry)
+		return -ENOMEM;
+
+	memcpy(entry->msg, data, len);
+	entry->cmd = cmd;
+	entry->cfg = cb;
+
+	spin_lock_bh(&nn->mbox_amsg.lock);
+	list_add_tail(&entry->list, &nn->mbox_amsg.list);
+	spin_unlock_bh(&nn->mbox_amsg.lock);
+
+	schedule_work(&nn->mbox_amsg.work);
+
+	return 0;
+}
+
+static void nfp_net_mbox_amsg_work(struct work_struct *work)
+{
+	struct nfp_net *nn = container_of(work, struct nfp_net, mbox_amsg.work);
+	struct nfp_mbox_amsg_entry *entry, *tmp;
+	struct list_head tmp_list;
+
+	INIT_LIST_HEAD(&tmp_list);
+
+	spin_lock_bh(&nn->mbox_amsg.lock);
+	list_splice_init(&nn->mbox_amsg.list, &tmp_list);
+	spin_unlock_bh(&nn->mbox_amsg.lock);
+
+	list_for_each_entry_safe(entry, tmp, &tmp_list, list) {
+		int err = entry->cfg(nn, entry);
+
+		if (err)
+			nn_err(nn, "Config cmd %d to HW failed %d.\n", entry->cmd, err);
+
+		list_del(&entry->list);
+		kfree(entry);
+	}
+}
+
+static int nfp_net_mc_cfg(struct nfp_net *nn, struct nfp_mbox_amsg_entry *entry)
+{
+	unsigned char *addr = entry->msg;
 	int ret;
 
 	ret = nfp_net_mbox_lock(nn, NFP_NET_CFG_MULTICAST_SZ);

@ -1353,26 +1393,7 @@ static int nfp_net_mc_cfg(struct nfp_net *nn, const unsigned char *addr, const u
 	nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_MULTICAST_MAC_LO,
 		  get_unaligned_be16(addr + 4));
 
-	return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
-}
-
-static int nfp_net_mc_prep(struct nfp_net *nn, const unsigned char *addr, const u32 cmd)
-{
-	struct nfp_mc_addr_entry *entry;
-
-	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
-	if (!entry)
-		return -ENOMEM;
-
-	ether_addr_copy(entry->addr, addr);
-	entry->cmd = cmd;
-	spin_lock_bh(&nn->mc_lock);
-	list_add_tail(&entry->list, &nn->mc_addrs);
-	spin_unlock_bh(&nn->mc_lock);
-
-	schedule_work(&nn->mc_work);
-
-	return 0;
+	return nfp_net_mbox_reconfig_and_unlock(nn, entry->cmd);
 }
 
 static int nfp_net_mc_sync(struct net_device *netdev, const unsigned char *addr)

@ -1385,35 +1406,16 @@ static int nfp_net_mc_sync(struct net_device *netdev, const unsigned char *addr)
 		return -EINVAL;
 	}
 
-	return nfp_net_mc_prep(nn, addr, NFP_NET_CFG_MBOX_CMD_MULTICAST_ADD);
+	return nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_MULTICAST_ADD, addr,
+					    NFP_NET_CFG_MULTICAST_SZ, nfp_net_mc_cfg);
 }
 
 static int nfp_net_mc_unsync(struct net_device *netdev, const unsigned char *addr)
 {
 	struct nfp_net *nn = netdev_priv(netdev);
 
-	return nfp_net_mc_prep(nn, addr, NFP_NET_CFG_MBOX_CMD_MULTICAST_DEL);
-}
-
-static void nfp_net_mc_addr_config(struct work_struct *work)
-{
-	struct nfp_net *nn = container_of(work, struct nfp_net, mc_work);
-	struct nfp_mc_addr_entry *entry, *tmp;
-	struct list_head tmp_list;
-
-	INIT_LIST_HEAD(&tmp_list);
-
-	spin_lock_bh(&nn->mc_lock);
-	list_splice_init(&nn->mc_addrs, &tmp_list);
-	spin_unlock_bh(&nn->mc_lock);
-
-	list_for_each_entry_safe(entry, tmp, &tmp_list, list) {
-		if (nfp_net_mc_cfg(nn, entry->addr, entry->cmd))
-			nn_err(nn, "Config mc address to HW failed.\n");
-
-		list_del(&entry->list);
-		kfree(entry);
-	}
+	return nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_MULTICAST_DEL, addr,
+					    NFP_NET_CFG_MULTICAST_SZ, nfp_net_mc_cfg);
 }
 
 static void nfp_net_set_rx_mode(struct net_device *netdev)

@ -2686,9 +2688,9 @@ int nfp_net_init(struct nfp_net *nn)
 	if (!nn->dp.netdev)
 		return 0;
 
-	spin_lock_init(&nn->mc_lock);
-	INIT_LIST_HEAD(&nn->mc_addrs);
-	INIT_WORK(&nn->mc_work, nfp_net_mc_addr_config);
+	spin_lock_init(&nn->mbox_amsg.lock);
+	INIT_LIST_HEAD(&nn->mbox_amsg.list);
+	INIT_WORK(&nn->mbox_amsg.work, nfp_net_mbox_amsg_work);
 
 	return register_netdev(nn->dp.netdev);

@ -2709,6 +2711,6 @@ void nfp_net_clean(struct nfp_net *nn)
 	unregister_netdev(nn->dp.netdev);
 	nfp_net_ipsec_clean(nn);
 	nfp_ccm_mbox_clean(nn);
-	flush_work(&nn->mc_work);
+	flush_work(&nn->mbox_amsg.work);
 	nfp_net_reconfig_wait_posted(nn);
 }