Merge 6.8-rc3 into char-misc-next

We need the char-misc fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Greg Kroah-Hartman 2024-02-04 06:18:21 -08:00
commit e21817acb2
448 changed files with 5486 additions and 3529 deletions

CREDITS
View File

@ -2161,6 +2161,19 @@ N: Mike Kravetz
E: mike.kravetz@oracle.com
D: Maintenance and development of the hugetlb subsystem
N: Seth Jennings
E: sjenning@redhat.com
D: Creation and maintenance of zswap
N: Dan Streetman
E: ddstreet@ieee.org
D: Maintenance and development of zswap
D: Creation and maintenance of the zpool API
N: Vitaly Wool
E: vitaly.wool@konsulko.com
D: Maintenance and development of zswap
N: Andreas S. Krebs
E: akrebs@altavista.net
D: CYPRESS CY82C693 chipset IDE, Digital's PC-Alpha 164SX boards

View File

@ -1,4 +1,4 @@
What: /sys/class/<iface>/queues/rx-<queue>/rps_cpus
What: /sys/class/net/<iface>/queues/rx-<queue>/rps_cpus
Date: March 2010
KernelVersion: 2.6.35
Contact: netdev@vger.kernel.org
@ -8,7 +8,7 @@ Description:
network device queue. Possible values depend on the number
of available CPU(s) in the system.
What: /sys/class/<iface>/queues/rx-<queue>/rps_flow_cnt
What: /sys/class/net/<iface>/queues/rx-<queue>/rps_flow_cnt
Date: April 2010
KernelVersion: 2.6.35
Contact: netdev@vger.kernel.org
@ -16,7 +16,7 @@ Description:
Number of Receive Packet Steering flows being currently
processed by this particular network device receive queue.
What: /sys/class/<iface>/queues/tx-<queue>/tx_timeout
What: /sys/class/net/<iface>/queues/tx-<queue>/tx_timeout
Date: November 2011
KernelVersion: 3.3
Contact: netdev@vger.kernel.org
@ -24,7 +24,7 @@ Description:
Indicates the number of transmit timeout events seen by this
network interface transmit queue.
What: /sys/class/<iface>/queues/tx-<queue>/tx_maxrate
What: /sys/class/net/<iface>/queues/tx-<queue>/tx_maxrate
Date: March 2015
KernelVersion: 4.1
Contact: netdev@vger.kernel.org
@ -32,7 +32,7 @@ Description:
A Mbps max-rate set for the queue, a value of zero means disabled,
default is disabled.
What: /sys/class/<iface>/queues/tx-<queue>/xps_cpus
What: /sys/class/net/<iface>/queues/tx-<queue>/xps_cpus
Date: November 2010
KernelVersion: 2.6.38
Contact: netdev@vger.kernel.org
@ -42,7 +42,7 @@ Description:
network device transmit queue. Possible values depend on the
number of available CPU(s) in the system.
What: /sys/class/<iface>/queues/tx-<queue>/xps_rxqs
What: /sys/class/net/<iface>/queues/tx-<queue>/xps_rxqs
Date: June 2018
KernelVersion: 4.18.0
Contact: netdev@vger.kernel.org
@ -53,7 +53,7 @@ Description:
number of available receive queue(s) in the network device.
Default is disabled.
What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/hold_time
What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/hold_time
Date: November 2011
KernelVersion: 3.3
Contact: netdev@vger.kernel.org
@ -62,7 +62,7 @@ Description:
of this particular network device transmit queue.
Default value is 1000.
What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/inflight
What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/inflight
Date: November 2011
KernelVersion: 3.3
Contact: netdev@vger.kernel.org
@ -70,7 +70,7 @@ Description:
Indicates the number of bytes (objects) in flight on this
network device transmit queue.
What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/limit
What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/limit
Date: November 2011
KernelVersion: 3.3
Contact: netdev@vger.kernel.org
@ -79,7 +79,7 @@ Description:
on this network device transmit queue. This value is clamped
to be within the bounds defined by limit_max and limit_min.
What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/limit_max
What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/limit_max
Date: November 2011
KernelVersion: 3.3
Contact: netdev@vger.kernel.org
@ -88,7 +88,7 @@ Description:
queued on this network device transmit queue. See
include/linux/dynamic_queue_limits.h for the default value.
What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/limit_min
What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/limit_min
Date: November 2011
KernelVersion: 3.3
Contact: netdev@vger.kernel.org
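All of these attributes are plain text files under sysfs; a minimal C sketch
of enabling RPS on one receive queue (interface name, queue number, and CPU
mask are illustrative, not taken from the document):

	#include <stdio.h>

	int main(void)
	{
		/* Steer eth0's rx-0 queue onto CPUs 0-3 (bitmask 0xf). */
		FILE *f = fopen("/sys/class/net/eth0/queues/rx-0/rps_cpus", "w");

		if (!f)
			return 1;
		fprintf(f, "f\n");
		fclose(f);
		return 0;
	}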

View File

@ -1,4 +1,4 @@
What: /sys/devices/.../hwmon/hwmon<i>/in0_input
What: /sys/bus/pci/drivers/i915/.../hwmon/hwmon<i>/in0_input
Date: February 2023
KernelVersion: 6.2
Contact: intel-gfx@lists.freedesktop.org
@ -6,7 +6,7 @@ Description: RO. Current Voltage in millivolt.
Only supported for particular Intel i915 graphics platforms.
What: /sys/devices/.../hwmon/hwmon<i>/power1_max
What: /sys/bus/pci/drivers/i915/.../hwmon/hwmon<i>/power1_max
Date: February 2023
KernelVersion: 6.2
Contact: intel-gfx@lists.freedesktop.org
@ -20,7 +20,7 @@ Description: RW. Card reactive sustained (PL1/Tau) power limit in microwatts.
Only supported for particular Intel i915 graphics platforms.
What: /sys/devices/.../hwmon/hwmon<i>/power1_rated_max
What: /sys/bus/pci/drivers/i915/.../hwmon/hwmon<i>/power1_rated_max
Date: February 2023
KernelVersion: 6.2
Contact: intel-gfx@lists.freedesktop.org
@ -28,7 +28,7 @@ Description: RO. Card default power limit (default TDP setting).
Only supported for particular Intel i915 graphics platforms.
What: /sys/devices/.../hwmon/hwmon<i>/power1_max_interval
What: /sys/bus/pci/drivers/i915/.../hwmon/hwmon<i>/power1_max_interval
Date: February 2023
KernelVersion: 6.2
Contact: intel-gfx@lists.freedesktop.org
@ -37,7 +37,7 @@ Description: RW. Sustained power limit interval (Tau in PL1/Tau) in
Only supported for particular Intel i915 graphics platforms.
What: /sys/devices/.../hwmon/hwmon<i>/power1_crit
What: /sys/bus/pci/drivers/i915/.../hwmon/hwmon<i>/power1_crit
Date: February 2023
KernelVersion: 6.2
Contact: intel-gfx@lists.freedesktop.org
@ -50,7 +50,7 @@ Description: RW. Card reactive critical (I1) power limit in microwatts.
Only supported for particular Intel i915 graphics platforms.
What: /sys/devices/.../hwmon/hwmon<i>/curr1_crit
What: /sys/bus/pci/drivers/i915/.../hwmon/hwmon<i>/curr1_crit
Date: February 2023
KernelVersion: 6.2
Contact: intel-gfx@lists.freedesktop.org
@ -63,7 +63,7 @@ Description: RW. Card reactive critical (I1) power limit in milliamperes.
Only supported for particular Intel i915 graphics platforms.
What: /sys/devices/.../hwmon/hwmon<i>/energy1_input
What: /sys/bus/pci/drivers/i915/.../hwmon/hwmon<i>/energy1_input
Date: February 2023
KernelVersion: 6.2
Contact: intel-gfx@lists.freedesktop.org
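A minimal C sketch of reading one of these values from userspace (the PCI
address and hwmon index vary per system and are illustrative):

	#include <stdio.h>

	int main(void)
	{
		unsigned long uj;
		FILE *f = fopen("/sys/bus/pci/drivers/i915/0000:00:02.0/hwmon/hwmon2/energy1_input", "r");

		if (!f)
			return 1;
		if (fscanf(f, "%lu", &uj) == 1)
			printf("energy used: %lu microjoules\n", uj);
		fclose(f);
		return 0;
	}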

View File

@ -1,4 +1,4 @@
What: /sys/devices/.../hwmon/hwmon<i>/power1_max
What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power1_max
Date: September 2023
KernelVersion: 6.5
Contact: intel-xe@lists.freedesktop.org
@ -12,7 +12,7 @@ Description: RW. Card reactive sustained (PL1) power limit in microwatts.
Only supported for particular Intel xe graphics platforms.
What: /sys/devices/.../hwmon/hwmon<i>/power1_rated_max
What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power1_rated_max
Date: September 2023
KernelVersion: 6.5
Contact: intel-xe@lists.freedesktop.org
@ -20,7 +20,7 @@ Description: RO. Card default power limit (default TDP setting).
Only supported for particular Intel xe graphics platforms.
What: /sys/devices/.../hwmon/hwmon<i>/power1_crit
What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power1_crit
Date: September 2023
KernelVersion: 6.5
Contact: intel-xe@lists.freedesktop.org
@ -33,7 +33,7 @@ Description: RW. Card reactive critical (I1) power limit in microwatts.
Only supported for particular Intel xe graphics platforms.
What: /sys/devices/.../hwmon/hwmon<i>/curr1_crit
What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/curr1_crit
Date: September 2023
KernelVersion: 6.5
Contact: intel-xe@lists.freedesktop.org
@ -44,7 +44,7 @@ Description: RW. Card reactive critical (I1) power limit in milliamperes.
the operating frequency if the power averaged over a window
exceeds this limit.
What: /sys/devices/.../hwmon/hwmon<i>/in0_input
What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/in0_input
Date: September 2023
KernelVersion: 6.5
Contact: intel-xe@lists.freedesktop.org
@ -52,7 +52,7 @@ Description: RO. Current Voltage in millivolt.
Only supported for particular Intel xe graphics platforms.
What: /sys/devices/.../hwmon/hwmon<i>/energy1_input
What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/energy1_input
Date: September 2023
KernelVersion: 6.5
Contact: intel-xe@lists.freedesktop.org
@ -60,7 +60,7 @@ Description: RO. Energy input of device in microjoules.
Only supported for particular Intel xe graphics platforms.
What: /sys/devices/.../hwmon/hwmon<i>/power1_max_interval
What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power1_max_interval
Date: October 2023
KernelVersion: 6.6
Contact: intel-xe@lists.freedesktop.org

View File

@ -671,8 +671,23 @@ Testing Static Functions
------------------------
If we do not want to expose functions or variables for testing, one option is to
conditionally ``#include`` the test file at the end of your .c file. For
example:
conditionally export the used symbol. For example:
.. code-block:: c
/* In my_file.c */
VISIBLE_IF_KUNIT int do_interesting_thing();
EXPORT_SYMBOL_IF_KUNIT(do_interesting_thing);
/* In my_file.h */
#if IS_ENABLED(CONFIG_KUNIT)
int do_interesting_thing(void);
#endif
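A test compiled with ``CONFIG_KUNIT`` can then call the function directly; a
minimal sketch, assuming a hypothetical my_file_test.c:
.. code-block:: c
	/* In my_file_test.c -- hypothetical test, built only with CONFIG_KUNIT */
	#include <kunit/test.h>
	#include "my_file.h"
	static void do_interesting_thing_test(struct kunit *test)
	{
		KUNIT_EXPECT_EQ(test, 0, do_interesting_thing());
	}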
Alternatively, you could conditionally ``#include`` the test file at the end of
your .c file. For example:
.. code-block:: c

View File

@ -22,6 +22,7 @@ properties:
- const: allwinner,sun6i-a31-spdif
- const: allwinner,sun8i-h3-spdif
- const: allwinner,sun50i-h6-spdif
- const: allwinner,sun50i-h616-spdif
- items:
- const: allwinner,sun8i-a83t-spdif
- const: allwinner,sun8i-h3-spdif
@ -62,6 +63,8 @@ allOf:
enum:
- allwinner,sun6i-a31-spdif
- allwinner,sun8i-h3-spdif
- allwinner,sun50i-h6-spdif
- allwinner,sun50i-h616-spdif
then:
required:
@ -73,7 +76,7 @@ allOf:
contains:
enum:
- allwinner,sun8i-h3-spdif
- allwinner,sun50i-h6-spdif
- allwinner,sun50i-h616-spdif
then:
properties:

View File

@ -942,6 +942,10 @@ attribute-sets:
-
name: gro-ipv4-max-size
type: u32
-
name: dpll-pin
type: nest
nested-attributes: link-dpll-pin-attrs
-
name: af-spec-attrs
attributes:
@ -1627,6 +1631,12 @@ attribute-sets:
-
name: used
type: u8
-
name: link-dpll-pin-attrs
attributes:
-
name: id
type: u32
sub-messages:
-

View File

@ -448,17 +448,17 @@ Function-specific configfs interface
The function name to use when creating the function directory is "ncm".
The NCM function provides these attributes in its function directory:
=============== ==================================================
ifname network device interface name associated with this
function instance
qmult queue length multiplier for high and super speed
host_addr MAC address of host's end of this
Ethernet over USB link
dev_addr MAC address of device's end of this
Ethernet over USB link
max_segment_size Segment size required for P2P connections. This
will set MTU to (max_segment_size - 14 bytes)
=============== ==================================================
======================= ==================================================
ifname network device interface name associated with this
function instance
qmult queue length multiplier for high and super speed
host_addr MAC address of host's end of this
Ethernet over USB link
dev_addr MAC address of device's end of this
Ethernet over USB link
max_segment_size Segment size required for P2P connections. This
will set MTU to 14 bytes less than this value
======================= ==================================================
and after creating the functions/ncm.<instance name> they contain default
values: qmult is 5, dev_addr and host_addr are randomly selected.
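A minimal C sketch of adjusting one of these attributes from userspace (the
configfs gadget path and instance name are illustrative, not part of the
document):

	#include <stdio.h>

	int main(void)
	{
		/* Illustrative path; depends on how the gadget was created. */
		FILE *f = fopen("/sys/kernel/config/usb_gadget/g1/functions/ncm.usb0/qmult", "w");

		if (!f)
			return 1;
		fprintf(f, "10\n");	/* raise the multiplier from its default of 5 */
		fclose(f);
		return 0;
	}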

View File

@ -10091,7 +10091,7 @@ L: linux-i2c@vger.kernel.org
S: Maintained
W: https://i2c.wiki.kernel.org/
Q: https://patchwork.ozlabs.org/project/linux-i2c/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/wsa/linux.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/andi.shyti/linux.git
F: Documentation/devicetree/bindings/i2c/
F: drivers/i2c/algos/
F: drivers/i2c/busses/
@ -10283,7 +10283,7 @@ F: drivers/scsi/ibmvscsi/ibmvscsi*
F: include/scsi/viosrp.h
IBM Power Virtual SCSI Device Target Driver
M: Michael Cyr <mikecyr@linux.ibm.com>
M: Tyrel Datwyler <tyreld@linux.ibm.com>
L: linux-scsi@vger.kernel.org
L: target-devel@vger.kernel.org
S: Supported
@ -11725,6 +11725,7 @@ F: fs/smb/server/
KERNEL UNIT TESTING FRAMEWORK (KUnit)
M: Brendan Higgins <brendanhiggins@google.com>
M: David Gow <davidgow@google.com>
R: Rae Moar <rmoar@google.com>
L: linux-kselftest@vger.kernel.org
L: kunit-dev@googlegroups.com
S: Maintained
@ -12903,6 +12904,8 @@ M: Alejandro Colomar <alx@kernel.org>
L: linux-man@vger.kernel.org
S: Maintained
W: http://www.kernel.org/doc/man-pages
T: git git://git.kernel.org/pub/scm/docs/man-pages/man-pages.git
T: git git://www.alejandro-colomar.es/src/alx/linux/man-pages/man-pages.git
MANAGEMENT COMPONENT TRANSPORT PROTOCOL (MCTP)
M: Jeremy Kerr <jk@codeconstruct.com.au>
@ -15178,6 +15181,7 @@ F: Documentation/networking/net_cachelines/net_device.rst
F: drivers/connector/
F: drivers/net/
F: include/dt-bindings/net/
F: include/linux/cn_proc.h
F: include/linux/etherdevice.h
F: include/linux/fcdevice.h
F: include/linux/fddidevice.h
@ -15185,6 +15189,7 @@ F: include/linux/hippidevice.h
F: include/linux/if_*
F: include/linux/inetdevice.h
F: include/linux/netdevice.h
F: include/uapi/linux/cn_proc.h
F: include/uapi/linux/if_*
F: include/uapi/linux/netdevice.h
X: drivers/net/wireless/
@ -16857,9 +16862,8 @@ F: Documentation/devicetree/bindings/pci/xilinx-versal-cpm.yaml
F: drivers/pci/controller/pcie-xilinx-cpm.c
PCI ENDPOINT SUBSYSTEM
M: Lorenzo Pieralisi <lpieralisi@kernel.org>
M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
M: Krzysztof Wilczyński <kw@linux.com>
R: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
R: Kishon Vijay Abraham I <kishon@kernel.org>
L: linux-pci@vger.kernel.org
S: Supported
@ -18082,7 +18086,6 @@ F: drivers/net/ethernet/qualcomm/emac/
QUALCOMM ETHQOS ETHERNET DRIVER
M: Vinod Koul <vkoul@kernel.org>
R: Bhupesh Sharma <bhupesh.sharma@linaro.org>
L: netdev@vger.kernel.org
L: linux-arm-msm@vger.kernel.org
S: Maintained
@ -24341,13 +24344,6 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/dlemoal/zonefs.git
F: Documentation/filesystems/zonefs.rst
F: fs/zonefs/
ZPOOL COMPRESSED PAGE STORAGE API
M: Dan Streetman <ddstreet@ieee.org>
L: linux-mm@kvack.org
S: Maintained
F: include/linux/zpool.h
F: mm/zpool.c
ZR36067 VIDEO FOR LINUX DRIVER
M: Corentin Labbe <clabbe@baylibre.com>
L: mjpeg-users@lists.sourceforge.net
@ -24399,7 +24395,9 @@ M: Nhat Pham <nphamcs@gmail.com>
L: linux-mm@kvack.org
S: Maintained
F: Documentation/admin-guide/mm/zswap.rst
F: include/linux/zpool.h
F: include/linux/zswap.h
F: mm/zpool.c
F: mm/zswap.c
THE REST

View File

@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 8
SUBLEVEL = 0
EXTRAVERSION = -rc2
EXTRAVERSION = -rc3
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*
@ -294,15 +294,15 @@ may-sync-config := 1
single-build :=
ifneq ($(filter $(no-dot-config-targets), $(MAKECMDGOALS)),)
ifeq ($(filter-out $(no-dot-config-targets), $(MAKECMDGOALS)),)
ifeq ($(filter-out $(no-dot-config-targets), $(MAKECMDGOALS)),)
need-config :=
endif
endif
endif
ifneq ($(filter $(no-sync-config-targets), $(MAKECMDGOALS)),)
ifeq ($(filter-out $(no-sync-config-targets), $(MAKECMDGOALS)),)
ifeq ($(filter-out $(no-sync-config-targets), $(MAKECMDGOALS)),)
may-sync-config :=
endif
endif
endif
need-compiler := $(may-sync-config)
@ -323,9 +323,9 @@ endif
# We cannot build single targets and the others at the same time
ifneq ($(filter $(single-targets), $(MAKECMDGOALS)),)
single-build := 1
ifneq ($(filter-out $(single-targets), $(MAKECMDGOALS)),)
ifneq ($(filter-out $(single-targets), $(MAKECMDGOALS)),)
mixed-build := 1
endif
endif
endif
# For "make -j clean all", "make -j mrproper defconfig all", etc.
@ -1666,7 +1666,7 @@ help:
@echo ' (sparse by default)'
@echo ' make C=2 [targets] Force check of all c source with $$CHECK'
@echo ' make RECORDMCOUNT_WARN=1 [targets] Warn about ignored mcount sections'
@echo ' make W=n [targets] Enable extra build checks, n=1,2,3 where'
@echo ' make W=n [targets] Enable extra build checks, n=1,2,3,c,e where'
@echo ' 1: warnings which may be relevant and do not occur too often'
@echo ' 2: warnings which occur quite often but may still be relevant'
@echo ' 3: more obscure warnings, can most likely be ignored'

View File

@ -673,6 +673,7 @@ config SHADOW_CALL_STACK
bool "Shadow Call Stack"
depends on ARCH_SUPPORTS_SHADOW_CALL_STACK
depends on DYNAMIC_FTRACE_WITH_ARGS || DYNAMIC_FTRACE_WITH_REGS || !FUNCTION_GRAPH_TRACER
depends on MMU
help
This option enables the compiler's Shadow Call Stack, which
uses a shadow stack to protect function return addresses from

View File

@ -195,7 +195,7 @@ vdso_prepare: prepare0
include/generated/vdso-offsets.h arch/arm64/kernel/vdso/vdso.so
ifdef CONFIG_COMPAT_VDSO
$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 \
include/generated/vdso32-offsets.h arch/arm64/kernel/vdso32/vdso.so
arch/arm64/kernel/vdso32/vdso.so
endif
endif

View File

@ -17,9 +17,6 @@
#ifndef __ASSEMBLY__
#include <generated/vdso-offsets.h>
#ifdef CONFIG_COMPAT_VDSO
#include <generated/vdso32-offsets.h>
#endif
#define VDSO_SYMBOL(base, name) \
({ \

View File

@ -77,9 +77,9 @@ obj-$(CONFIG_UNWIND_PATCH_PAC_INTO_SCS) += patch-scs.o
# We need to prevent the SCS patching code from patching itself. Using
# -mbranch-protection=none here to avoid the patchable PAC opcodes from being
# generated triggers an issue with full LTO on Clang, which stops emitting PAC
# instructions altogether. So instead, omit the unwind tables used by the
# patching code, so it will not be able to locate its own PAC instructions.
CFLAGS_patch-scs.o += -fno-asynchronous-unwind-tables -fno-unwind-tables
# instructions altogether. So disable LTO as well for the compilation unit.
CFLAGS_patch-scs.o += -mbranch-protection=none
CFLAGS_REMOVE_patch-scs.o += $(CC_FLAGS_LTO)
# Force dependency (vdso*-wrap.S includes vdso.so through incbin)
$(obj)/vdso-wrap.o: $(obj)/vdso/vdso.so

View File

@ -127,9 +127,6 @@ obj-vdso := $(c-obj-vdso) $(c-obj-vdso-gettimeofday) $(asm-obj-vdso)
targets += vdso.lds
CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
include/generated/vdso32-offsets.h: $(obj)/vdso32.so.dbg FORCE
$(call if_changed,vdsosym)
# Strip rule for vdso.so
$(obj)/vdso.so: OBJCOPYFLAGS := -S
$(obj)/vdso.so: $(obj)/vdso32.so.dbg FORCE
@ -166,9 +163,3 @@ quiet_cmd_vdsoas = AS32 $@
quiet_cmd_vdsomunge = MUNGE $@
cmd_vdsomunge = $(obj)/$(munge) $< $@
# Generate vDSO offsets using helper script (borrowed from the 64-bit vDSO)
gen-vdsosym := $(srctree)/$(src)/../vdso/gen_vdso_offsets.sh
quiet_cmd_vdsosym = VDSOSYM $@
# The AArch64 nm should be able to read an AArch32 binary
cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@

View File

@ -15,10 +15,10 @@
KBUILD_DEFCONFIG := multi_defconfig
ifdef cross_compiling
ifeq ($(CROSS_COMPILE),)
ifeq ($(CROSS_COMPILE),)
CROSS_COMPILE := $(call cc-cross-prefix, \
m68k-linux-gnu- m68k-linux- m68k-unknown-linux-gnu-)
endif
endif
endif
#

View File

@ -25,7 +25,6 @@ config PARISC
select RTC_DRV_GENERIC
select INIT_ALL_POSSIBLE
select BUG
select BUILDTIME_TABLE_SORT
select HAVE_KERNEL_UNCOMPRESSED
select HAVE_PCI
select HAVE_PERF_EVENTS

View File

@ -50,12 +50,12 @@ export CROSS32CC
# Set default cross compiler for kernel build
ifdef cross_compiling
ifeq ($(CROSS_COMPILE),)
ifeq ($(CROSS_COMPILE),)
CC_SUFFIXES = linux linux-gnu unknown-linux-gnu suse-linux
CROSS_COMPILE := $(call cc-cross-prefix, \
$(foreach a,$(CC_ARCHES), \
$(foreach s,$(CC_SUFFIXES),$(a)-$(s)-)))
endif
endif
endif
ifdef CONFIG_DYNAMIC_FTRACE

View File

@ -576,6 +576,7 @@
.section __ex_table,"aw" ! \
.align 4 ! \
.word (fault_addr - .), (except_addr - .) ! \
or %r0,%r0,%r0 ! \
.previous

View File

@ -0,0 +1,64 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PARISC_EXTABLE_H
#define __PARISC_EXTABLE_H
#include <asm/ptrace.h>
#include <linux/compiler.h>
/*
* The exception table consists of three addresses:
*
* - A relative address to the instruction that is allowed to fault.
* - A relative address at which the program should continue (fixup routine)
* - An asm statement which specifies which CPU register will
* receive -EFAULT when an exception happens if the lowest bit in
* the fixup address is set.
*
* Note: The register specified in the err_opcode instruction will be
* modified at runtime if a fault happens. Register %r0 will be ignored.
*
* Since relative addresses are used, 32bit values are sufficient even on
* 64bit kernel.
*/
struct pt_regs;
int fixup_exception(struct pt_regs *regs);
#define ARCH_HAS_RELATIVE_EXTABLE
struct exception_table_entry {
int insn; /* relative address of insn that is allowed to fault. */
int fixup; /* relative address of fixup routine */
int err_opcode; /* sample opcode with register which holds error code */
};
#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr, opcode )\
".section __ex_table,\"aw\"\n" \
".align 4\n" \
".word (" #fault_addr " - .), (" #except_addr " - .)\n" \
opcode "\n" \
".previous\n"
/*
* ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
* (with lowest bit set) for which the fault handler in fixup_exception() will
* load -EFAULT on fault into the register specified by the err_opcode instruction,
* and zeroes the target register in case of a read fault in get_user().
*/
#define ASM_EXCEPTIONTABLE_VAR(__err_var) \
int __err_var = 0
#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr, register )\
ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1, "or %%r0,%%r0," register)
static inline void swap_ex_entry_fixup(struct exception_table_entry *a,
struct exception_table_entry *b,
struct exception_table_entry tmp,
int delta)
{
a->fixup = b->fixup + delta;
b->fixup = tmp.fixup - delta;
a->err_opcode = b->err_opcode;
b->err_opcode = tmp.err_opcode;
}
#define swap_ex_entry_fixup swap_ex_entry_fixup
#endif

View File

@ -8,7 +8,8 @@
"copy %%r0,%0\n" \
"8:\tlpa %%r0(%1),%0\n" \
"9:\n" \
ASM_EXCEPTIONTABLE_ENTRY(8b, 9b) \
ASM_EXCEPTIONTABLE_ENTRY(8b, 9b, \
"or %%r0,%%r0,%%r0") \
: "=&r" (pa) \
: "r" (va) \
: "memory" \
@ -22,7 +23,8 @@
"copy %%r0,%0\n" \
"8:\tlpa %%r0(%%sr3,%1),%0\n" \
"9:\n" \
ASM_EXCEPTIONTABLE_ENTRY(8b, 9b) \
ASM_EXCEPTIONTABLE_ENTRY(8b, 9b, \
"or %%r0,%%r0,%%r0") \
: "=&r" (pa) \
: "r" (va) \
: "memory" \

View File

@ -7,6 +7,7 @@
*/
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/extable.h>
#include <linux/bug.h>
#include <linux/string.h>
@ -26,37 +27,6 @@
#define STD_USER(sr, x, ptr) __put_user_asm(sr, "std", x, ptr)
#endif
/*
* The exception table contains two values: the first is the relative offset to
* the address of the instruction that is allowed to fault, and the second is
* the relative offset to the address of the fixup routine. Since relative
* addresses are used, 32bit values are sufficient even on 64bit kernel.
*/
#define ARCH_HAS_RELATIVE_EXTABLE
struct exception_table_entry {
int insn; /* relative address of insn that is allowed to fault. */
int fixup; /* relative address of fixup routine */
};
#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
".section __ex_table,\"aw\"\n" \
".align 4\n" \
".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
".previous\n"
/*
* ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
* (with lowest bit set) for which the fault handler in fixup_exception() will
* load -EFAULT into %r29 for a read or write fault, and zeroes the target
* register in case of a read fault in get_user().
*/
#define ASM_EXCEPTIONTABLE_REG 29
#define ASM_EXCEPTIONTABLE_VAR(__variable) \
register long __variable __asm__ ("r29") = 0
#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)
#define __get_user_internal(sr, val, ptr) \
({ \
ASM_EXCEPTIONTABLE_VAR(__gu_err); \
@ -83,7 +53,7 @@ struct exception_table_entry {
\
__asm__("1: " ldx " 0(%%sr%2,%3),%0\n" \
"9:\n" \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b, "%1") \
: "=r"(__gu_val), "+r"(__gu_err) \
: "i"(sr), "r"(ptr)); \
\
@ -115,8 +85,8 @@ struct exception_table_entry {
"1: ldw 0(%%sr%2,%3),%0\n" \
"2: ldw 4(%%sr%2,%3),%R0\n" \
"9:\n" \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b, "%1") \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b, "%1") \
: "=&r"(__gu_tmp.l), "+r"(__gu_err) \
: "i"(sr), "r"(ptr)); \
\
@ -174,7 +144,7 @@ struct exception_table_entry {
__asm__ __volatile__ ( \
"1: " stx " %1,0(%%sr%2,%3)\n" \
"9:\n" \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b, "%0") \
: "+r"(__pu_err) \
: "r"(x), "i"(sr), "r"(ptr))
@ -186,15 +156,14 @@ struct exception_table_entry {
"1: stw %1,0(%%sr%2,%3)\n" \
"2: stw %R1,4(%%sr%2,%3)\n" \
"9:\n" \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b, "%0") \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b, "%0") \
: "+r"(__pu_err) \
: "r"(__val), "i"(sr), "r"(ptr)); \
} while (0)
#endif /* !defined(CONFIG_64BIT) */
/*
* Complex access routines -- external declarations
*/
@ -216,7 +185,4 @@ unsigned long __must_check raw_copy_from_user(void *dst, const void __user *src,
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER
struct pt_regs;
int fixup_exception(struct pt_regs *regs);
#endif /* __PARISC_UACCESS_H */

View File

@ -58,7 +58,7 @@ int pa_serialize_tlb_flushes __ro_after_init;
struct pdc_cache_info cache_info __ro_after_init;
#ifndef CONFIG_PA20
struct pdc_btlb_info btlb_info __ro_after_init;
struct pdc_btlb_info btlb_info;
#endif
DEFINE_STATIC_KEY_TRUE(parisc_has_cache);
@ -264,6 +264,10 @@ parisc_cache_init(void)
icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE
/* stride needs to be non-zero, otherwise cache flushes will not work */
WARN_ON(cache_info.dc_size && dcache_stride == 0);
WARN_ON(cache_info.ic_size && icache_stride == 0);
if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
PDC_MODEL_NVA_UNSUPPORTED) {
printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
@ -850,7 +854,7 @@ SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
#endif
" fic,m %3(%4,%0)\n"
"2: sync\n"
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b)
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b, "%1")
: "+r" (start), "+r" (error)
: "r" (end), "r" (dcache_stride), "i" (SR_USER));
}
@ -865,7 +869,7 @@ SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
#endif
" fdc,m %3(%4,%0)\n"
"2: sync\n"
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b)
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b, "%1")
: "+r" (start), "+r" (error)
: "r" (end), "r" (icache_stride), "i" (SR_USER));
}

View File

@ -742,7 +742,7 @@ parse_tree_node(struct device *parent, int index, struct hardware_path *modpath)
};
if (device_for_each_child(parent, &recurse_data, descend_children))
{ /* nothing */ };
{ /* nothing */ }
return d.dev;
}
@ -1004,6 +1004,9 @@ static __init int qemu_print_iodc_data(struct device *lin_dev, void *data)
pr_info("\n");
/* Prevent hung task messages when printing on serial console */
cond_resched();
pr_info("#define HPA_%08lx_DESCRIPTION \"%s\"\n",
hpa, parisc_hardware_description(&dev->id));

View File

@ -120,8 +120,8 @@ static int emulate_ldh(struct pt_regs *regs, int toreg)
"2: ldbs 1(%%sr1,%3), %0\n"
" depw %2, 23, 24, %0\n"
"3: \n"
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b)
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b)
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%1")
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%1")
: "+r" (val), "+r" (ret), "=&r" (temp1)
: "r" (saddr), "r" (regs->isr) );
@ -152,8 +152,8 @@ static int emulate_ldw(struct pt_regs *regs, int toreg, int flop)
" mtctl %2,11\n"
" vshd %0,%3,%0\n"
"3: \n"
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b)
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b)
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%1")
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%1")
: "+r" (val), "+r" (ret), "=&r" (temp1), "=&r" (temp2)
: "r" (saddr), "r" (regs->isr) );
@ -189,8 +189,8 @@ static int emulate_ldd(struct pt_regs *regs, int toreg, int flop)
" mtsar %%r19\n"
" shrpd %0,%%r20,%%sar,%0\n"
"3: \n"
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b)
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b)
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%1")
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%1")
: "=r" (val), "+r" (ret)
: "0" (val), "r" (saddr), "r" (regs->isr)
: "r19", "r20" );
@ -209,9 +209,9 @@ static int emulate_ldd(struct pt_regs *regs, int toreg, int flop)
" vshd %0,%R0,%0\n"
" vshd %R0,%4,%R0\n"
"4: \n"
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 4b)
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 4b)
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 4b)
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 4b, "%1")
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 4b, "%1")
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 4b, "%1")
: "+r" (val), "+r" (ret), "+r" (saddr), "=&r" (shift), "=&r" (temp1)
: "r" (regs->isr) );
}
@ -244,8 +244,8 @@ static int emulate_sth(struct pt_regs *regs, int frreg)
"1: stb %1, 0(%%sr1, %3)\n"
"2: stb %2, 1(%%sr1, %3)\n"
"3: \n"
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b)
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b)
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%0")
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%0")
: "+r" (ret), "=&r" (temp1)
: "r" (val), "r" (regs->ior), "r" (regs->isr) );
@ -285,8 +285,8 @@ static int emulate_stw(struct pt_regs *regs, int frreg, int flop)
" stw %%r20,0(%%sr1,%2)\n"
" stw %%r21,4(%%sr1,%2)\n"
"3: \n"
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b)
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b)
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%0")
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%0")
: "+r" (ret)
: "r" (val), "r" (regs->ior), "r" (regs->isr)
: "r19", "r20", "r21", "r22", "r1" );
@ -329,10 +329,10 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop)
"3: std %%r20,0(%%sr1,%2)\n"
"4: std %%r21,8(%%sr1,%2)\n"
"5: \n"
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 5b)
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 5b)
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 5b)
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(4b, 5b)
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 5b, "%0")
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 5b, "%0")
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 5b, "%0")
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(4b, 5b, "%0")
: "+r" (ret)
: "r" (val), "r" (regs->ior), "r" (regs->isr)
: "r19", "r20", "r21", "r22", "r1" );
@ -357,11 +357,11 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop)
"4: stw %%r1,4(%%sr1,%2)\n"
"5: stw %R1,8(%%sr1,%2)\n"
"6: \n"
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 6b)
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 6b)
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 6b)
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(4b, 6b)
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(5b, 6b)
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 6b, "%0")
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 6b, "%0")
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 6b, "%0")
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(4b, 6b, "%0")
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(5b, 6b, "%0")
: "+r" (ret)
: "r" (val), "r" (regs->ior), "r" (regs->isr)
: "r19", "r20", "r21", "r1" );

View File

@ -127,7 +127,7 @@ SECTIONS
}
#endif
RO_DATA(8)
RO_DATA(PAGE_SIZE)
/* unwind info */
. = ALIGN(4);

View File

@ -150,11 +150,16 @@ int fixup_exception(struct pt_regs *regs)
* Fix up get_user() and put_user().
* ASM_EXCEPTIONTABLE_ENTRY_EFAULT() sets the least-significant
* bit in the relative address of the fixup routine to indicate
* that gr[ASM_EXCEPTIONTABLE_REG] should be loaded with
* -EFAULT to report a userspace access error.
* that the register encoded in the "or %r0,%r0,register"
* opcode should be loaded with -EFAULT to report a userspace
* access error.
*/
if (fix->fixup & 1) {
regs->gr[ASM_EXCEPTIONTABLE_REG] = -EFAULT;
int fault_error_reg = fix->err_opcode & 0x1f;
if (!WARN_ON(!fault_error_reg))
regs->gr[fault_error_reg] = -EFAULT;
pr_debug("Unalignment fixup of register %d at %pS\n",
fault_error_reg, (void*)regs->iaoq[0]);
/* zero target register for get_user() */
if (parisc_acctyp(0, regs->iir) == VM_READ) {

View File

@ -1287,20 +1287,20 @@ spapr_tce_platform_iommu_attach_dev(struct iommu_domain *platform_domain,
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
struct iommu_group *grp = iommu_group_get(dev);
struct iommu_table_group *table_group;
int ret = -EINVAL;
/* At first attach the ownership is already set */
if (!domain)
return 0;
if (!grp)
return -ENODEV;
table_group = iommu_group_get_iommudata(grp);
ret = table_group->ops->take_ownership(table_group);
/*
* The domain being set to PLATFORM from earlier
* BLOCKED. The table_group ownership has to be released.
*/
table_group->ops->release_ownership(table_group);
iommu_group_put(grp);
return ret;
return 0;
}
static const struct iommu_domain_ops spapr_tce_platform_domain_ops = {
@ -1312,13 +1312,32 @@ static struct iommu_domain spapr_tce_platform_domain = {
.ops = &spapr_tce_platform_domain_ops,
};
static struct iommu_domain spapr_tce_blocked_domain = {
.type = IOMMU_DOMAIN_BLOCKED,
static int
spapr_tce_blocked_iommu_attach_dev(struct iommu_domain *platform_domain,
struct device *dev)
{
struct iommu_group *grp = iommu_group_get(dev);
struct iommu_table_group *table_group;
int ret = -EINVAL;
/*
* FIXME: SPAPR mixes blocked and platform behaviors, the blocked domain
* also sets the dma_api ops
*/
.ops = &spapr_tce_platform_domain_ops,
table_group = iommu_group_get_iommudata(grp);
ret = table_group->ops->take_ownership(table_group);
iommu_group_put(grp);
return ret;
}
static const struct iommu_domain_ops spapr_tce_blocked_domain_ops = {
.attach_dev = spapr_tce_blocked_iommu_attach_dev,
};
static struct iommu_domain spapr_tce_blocked_domain = {
.type = IOMMU_DOMAIN_BLOCKED,
.ops = &spapr_tce_blocked_domain_ops,
};
static bool spapr_tce_iommu_capable(struct device *dev, enum iommu_cap cap)

View File

@ -115,7 +115,9 @@ archprepare:
$(Q)$(MAKE) $(build)=$(HOST_DIR)/um include/generated/user_constants.h
LINK-$(CONFIG_LD_SCRIPT_STATIC) += -static
LINK-$(CONFIG_LD_SCRIPT_DYN) += $(call cc-option, -no-pie)
ifdef CONFIG_LD_SCRIPT_DYN
LINK-$(call gcc-min-version, 60100)$(CONFIG_CC_IS_CLANG) += -no-pie
endif
LINK-$(CONFIG_LD_SCRIPT_DYN_RPATH) += -Wl,-rpath,/lib
CFLAGS_NO_HARDENING := $(call cc-option, -fno-PIC,) $(call cc-option, -fno-pic,) \

View File

@ -112,13 +112,13 @@ ifeq ($(CONFIG_X86_32),y)
# temporary until string.h is fixed
KBUILD_CFLAGS += -ffreestanding
ifeq ($(CONFIG_STACKPROTECTOR),y)
ifeq ($(CONFIG_SMP),y)
ifeq ($(CONFIG_STACKPROTECTOR),y)
ifeq ($(CONFIG_SMP),y)
KBUILD_CFLAGS += -mstack-protector-guard-reg=fs -mstack-protector-guard-symbol=__stack_chk_guard
else
else
KBUILD_CFLAGS += -mstack-protector-guard=global
endif
endif
endif
endif
else
BITS := 64
UTS_MACHINE := x86_64

View File

@ -64,6 +64,7 @@ static inline bool kmsan_virt_addr_valid(void *addr)
{
unsigned long x = (unsigned long)addr;
unsigned long y = x - __START_KERNEL_map;
bool ret;
/* use the carry flag to determine if x was < __START_KERNEL_map */
if (unlikely(x > y)) {
@ -79,7 +80,21 @@ static inline bool kmsan_virt_addr_valid(void *addr)
return false;
}
return pfn_valid(x >> PAGE_SHIFT);
/*
* pfn_valid() relies on RCU, and may call into the scheduler on exiting
* the critical section. However, this would result in recursion with
* KMSAN. Therefore, disable preemption here, and re-enable preemption
* below while suppressing reschedules to avoid recursion.
*
* Note, this sacrifices occasionally breaking scheduling guarantees.
* Although, a kernel compiled with KMSAN has already given up on any
* performance guarantees due to being heavily instrumented.
*/
preempt_disable();
ret = pfn_valid(x >> PAGE_SHIFT);
preempt_enable_no_resched();
return ret;
}
#endif /* !MODULE */

View File

@ -49,6 +49,7 @@
#include "blk-pm.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"
#include "blk-ioprio.h"
struct dentry *blk_debugfs_root;
@ -833,6 +834,14 @@ end_io:
}
EXPORT_SYMBOL(submit_bio_noacct);
static void bio_set_ioprio(struct bio *bio)
{
/* Nobody set ioprio so far? Initialize it based on task's nice value */
if (IOPRIO_PRIO_CLASS(bio->bi_ioprio) == IOPRIO_CLASS_NONE)
bio->bi_ioprio = get_current_ioprio();
blkcg_set_ioprio(bio);
}
/**
* submit_bio - submit a bio to the block device layer for I/O
* @bio: The &struct bio which describes the I/O
@ -855,6 +864,7 @@ void submit_bio(struct bio *bio)
count_vm_events(PGPGOUT, bio_sectors(bio));
}
bio_set_ioprio(bio);
submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio);
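The class that bio_set_ioprio() preserves can be set per-thread from userspace
with the ioprio_set() syscall; a minimal sketch (illustrative):

	#include <sys/syscall.h>
	#include <unistd.h>
	#include <linux/ioprio.h>

	int main(void)
	{
		/* Mark the calling thread best-effort, level 4. Bios submitted on
		 * its behalf then carry this class, so bio_set_ioprio() keeps it
		 * rather than deriving one from the task's nice value. */
		return syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0,
			       IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4));
	}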

View File

@ -40,7 +40,6 @@
#include "blk-stat.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#include "blk-ioprio.h"
static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
static DEFINE_PER_CPU(call_single_data_t, blk_cpu_csd);
@ -2944,14 +2943,6 @@ static bool blk_mq_use_cached_rq(struct request *rq, struct blk_plug *plug,
return true;
}
static void bio_set_ioprio(struct bio *bio)
{
/* Nobody set ioprio so far? Initialize it based on task's nice value */
if (IOPRIO_PRIO_CLASS(bio->bi_ioprio) == IOPRIO_CLASS_NONE)
bio->bi_ioprio = get_current_ioprio();
blkcg_set_ioprio(bio);
}
/**
* blk_mq_submit_bio - Create and send a request to block device.
* @bio: Bio pointer.
@ -2976,7 +2967,6 @@ void blk_mq_submit_bio(struct bio *bio)
blk_status_t ret;
bio = blk_queue_bounce(bio, q);
bio_set_ioprio(bio);
if (plug) {
rq = rq_list_peek(&plug->cached_rq);

View File

@ -478,6 +478,16 @@ binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
{
WARN_ON(!list_empty(&thread->waiting_thread_node));
binder_enqueue_work_ilocked(work, &thread->todo);
/* (e)poll-based threads require an explicit wakeup signal when
* queuing their own work; they rely on these events to consume
messages without blocking on I/O. Without it, threads risk waiting
* indefinitely without handling the work.
*/
if (thread->looper & BINDER_LOOPER_STATE_POLL &&
thread->pid == current->pid && !thread->process_todo)
wake_up_interruptible_sync(&thread->wait);
thread->process_todo = true;
}
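The case being fixed is a thread that waits for its own binder work with
(e)poll instead of blocking in the driver; a minimal userspace sketch of such
a wait (illustrative, error paths trimmed):

	#include <sys/epoll.h>

	/* Wait for work on an already-open /dev/binder fd. Without the explicit
	 * wakeup added above, a poll-based looper could sleep here even though
	 * it had just queued work for itself. */
	int wait_for_binder_work(int binder_fd)
	{
		struct epoll_event ev = { .events = EPOLLIN };
		int ep = epoll_create1(0);

		if (ep < 0 || epoll_ctl(ep, EPOLL_CTL_ADD, binder_fd, &ev) < 0)
			return -1;
		return epoll_wait(ep, &ev, 1, -1);
	}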

View File

@ -606,13 +606,13 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(PROMISE, 0x3781), board_ahci }, /* FastTrak TX8660 ahci-mode */
/* ASMedia */
{ PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci }, /* ASM1060 */
{ PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci }, /* ASM1060 */
{ PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci_43bit_dma }, /* ASM1060 */
{ PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci_43bit_dma }, /* ASM1060 */
{ PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci_43bit_dma }, /* ASM1061 */
{ PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci_43bit_dma }, /* ASM1061/1062 */
{ PCI_VDEVICE(ASMEDIA, 0x0621), board_ahci }, /* ASM1061R */
{ PCI_VDEVICE(ASMEDIA, 0x0622), board_ahci }, /* ASM1062R */
{ PCI_VDEVICE(ASMEDIA, 0x0624), board_ahci }, /* ASM1062+JMB575 */
{ PCI_VDEVICE(ASMEDIA, 0x0621), board_ahci_43bit_dma }, /* ASM1061R */
{ PCI_VDEVICE(ASMEDIA, 0x0622), board_ahci_43bit_dma }, /* ASM1062R */
{ PCI_VDEVICE(ASMEDIA, 0x0624), board_ahci_43bit_dma }, /* ASM1062+JMB575 */
{ PCI_VDEVICE(ASMEDIA, 0x1062), board_ahci }, /* ASM1062A */
{ PCI_VDEVICE(ASMEDIA, 0x1064), board_ahci }, /* ASM1064 */
{ PCI_VDEVICE(ASMEDIA, 0x1164), board_ahci }, /* ASM1164 */

View File

@ -4545,6 +4545,7 @@ struct caam_hash_alg {
struct list_head entry;
struct device *dev;
int alg_type;
bool is_hmac;
struct ahash_alg ahash_alg;
};
@ -4571,7 +4572,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
ctx->dev = caam_hash->dev;
if (alg->setkey) {
if (caam_hash->is_hmac) {
ctx->adata.key_dma = dma_map_single_attrs(ctx->dev, ctx->key,
ARRAY_SIZE(ctx->key),
DMA_TO_DEVICE,
@ -4611,7 +4612,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
* For keyed hash algorithms shared descriptors
* will be created later in setkey() callback
*/
return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
return caam_hash->is_hmac ? 0 : ahash_set_sh_desc(ahash);
}
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
@ -4646,12 +4647,14 @@ static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
template->hmac_name);
snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
template->hmac_driver_name);
t_alg->is_hmac = true;
} else {
snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
template->name);
snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
template->driver_name);
t_alg->ahash_alg.setkey = NULL;
t_alg->is_hmac = false;
}
alg->cra_module = THIS_MODULE;
alg->cra_init = caam_hash_cra_init;

View File

@ -1753,6 +1753,7 @@ static struct caam_hash_template driver_hash[] = {
struct caam_hash_alg {
struct list_head entry;
int alg_type;
bool is_hmac;
struct ahash_engine_alg ahash_alg;
};
@ -1804,7 +1805,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
} else {
if (priv->era >= 6) {
ctx->dir = DMA_BIDIRECTIONAL;
ctx->key_dir = alg->setkey ? DMA_TO_DEVICE : DMA_NONE;
ctx->key_dir = caam_hash->is_hmac ? DMA_TO_DEVICE : DMA_NONE;
} else {
ctx->dir = DMA_TO_DEVICE;
ctx->key_dir = DMA_NONE;
@ -1862,7 +1863,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
* For keyed hash algorithms shared descriptors
* will be created later in setkey() callback
*/
return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
return caam_hash->is_hmac ? 0 : ahash_set_sh_desc(ahash);
}
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
@ -1915,12 +1916,14 @@ caam_hash_alloc(struct caam_hash_template *template,
template->hmac_name);
snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
template->hmac_driver_name);
t_alg->is_hmac = true;
} else {
snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
template->name);
snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
template->driver_name);
halg->setkey = NULL;
t_alg->is_hmac = false;
}
alg->cra_module = THIS_MODULE;
alg->cra_init = caam_hash_cra_init;

View File

@ -463,6 +463,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->fw_name = ADF_402XX_FW;
hw_data->fw_mmp_name = ADF_402XX_MMP;
hw_data->uof_get_name = uof_get_name_402xx;
hw_data->get_ena_thd_mask = get_ena_thd_mask;
break;
case ADF_401XX_PCI_DEVICE_ID:
hw_data->fw_name = ADF_4XXX_FW;

View File

@ -168,10 +168,7 @@ static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
if (vmf->pgoff > buffer->pagecount)
return VM_FAULT_SIGBUS;
vmf->page = buffer->pages[vmf->pgoff];
get_page(vmf->page);
return 0;
return vmf_insert_pfn(vma, vmf->address, page_to_pfn(buffer->pages[vmf->pgoff]));
}
static const struct vm_operations_struct dma_heap_vm_ops = {
@ -185,6 +182,8 @@ static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
return -EINVAL;
vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_ops = &dma_heap_vm_ops;
vma->vm_private_data = buffer;

View File

@ -222,8 +222,14 @@ struct atdma_sg {
* @vd: pointer to the virtual dma descriptor.
* @atchan: pointer to the atmel dma channel.
* @total_len: total transaction byte count
* @sg_len: number of sg entries.
* @sglen: number of sg entries.
* @sg: array of sgs.
* @boundary: number of transfers to perform before the automatic address increment operation
* @dst_hole: value to add to the destination address when the boundary has been reached
* @src_hole: value to add to the source address when the boundary has been reached
* @memset_buffer: buffer used for the memset operation
* @memset_paddr: physical address of the buffer used for the memset operation
* @memset_vaddr: virtual address of the buffer used for the memset operation
*/
struct at_desc {
struct virt_dma_desc vd;
@ -245,7 +251,10 @@ struct at_desc {
/*-- Channels --------------------------------------------------------*/
/**
* atc_status - information bits stored in channel status flag
* enum atc_status - information bits stored in channel status flag
*
@ATC_IS_PAUSED: If channel is paused
* @ATC_IS_CYCLIC: If channel is cyclic
*
* Manipulated with atomic operations.
*/
@ -282,7 +291,6 @@ struct at_dma_chan {
u32 save_cfg;
u32 save_dscr;
struct dma_slave_config dma_sconfig;
bool cyclic;
struct at_desc *desc;
};
@ -328,12 +336,12 @@ static inline u8 convert_buswidth(enum dma_slave_buswidth addr_width)
/**
* struct at_dma - internal representation of an Atmel HDMA Controller
* @dma_device: dmaengine dma_device object members
* @atdma_devtype: identifier of DMA controller compatibility
* @ch_regs: memory mapped register base
* @regs: memory mapped register base
* @clk: dma controller clock
* @save_imr: interrupt mask register that is saved on suspend/resume cycle
@all_chan_mask: all channels available in a mask
* @lli_pool: hw lli table
* @memset_pool: hw memset pool
* @chan: channels table to store at_dma_chan structures
*/
struct at_dma {
@ -626,6 +634,9 @@ static inline u32 atc_calc_bytes_left(u32 current_len, u32 ctrla)
/**
* atc_get_llis_residue - Get residue for a hardware linked list transfer
* @atchan: pointer to an atmel hdmac channel.
* @desc: pointer to the descriptor for which the residue is calculated.
* @residue: residue to be set to dma_tx_state.
*
* Calculate the residue by removing the length of the Linked List Item (LLI)
* already transferred from the total length. To get the current LLI we can use
@ -661,10 +672,8 @@ static inline u32 atc_calc_bytes_left(u32 current_len, u32 ctrla)
* two DSCR values are different, we read again the CTRLA then the DSCR till two
* consecutive read values from DSCR are equal or till the maximum trials is
reached. This algorithm is very unlikely not to find a stable value for DSCR.
* @atchan: pointer to an atmel hdmac channel.
* @desc: pointer to the descriptor for which the residue is calculated.
* @residue: residue to be set to dma_tx_state.
* Returns 0 on success, -errno otherwise.
*
* Returns: %0 on success, -errno otherwise.
*/
static int atc_get_llis_residue(struct at_dma_chan *atchan,
struct at_desc *desc, u32 *residue)
@ -731,7 +740,8 @@ static int atc_get_llis_residue(struct at_dma_chan *atchan,
* @chan: DMA channel
* @cookie: transaction identifier to check status of
* @residue: residue to be updated.
* Return 0 on success, -errono otherwise.
*
* Return: %0 on success, -errno otherwise.
*/
static int atc_get_residue(struct dma_chan *chan, dma_cookie_t cookie,
u32 *residue)
@ -1710,7 +1720,7 @@ static void atc_issue_pending(struct dma_chan *chan)
* atc_alloc_chan_resources - allocate resources for DMA channel
* @chan: allocate descriptor resources for this channel
*
* return - the number of allocated descriptors
* Return: the number of allocated descriptors
*/
static int atc_alloc_chan_resources(struct dma_chan *chan)
{

View File

@ -38,15 +38,17 @@ static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan)
if (!dpaa2_chan->fd_pool)
goto err;
dpaa2_chan->fl_pool = dma_pool_create("fl_pool", dev,
sizeof(struct dpaa2_fl_entry),
sizeof(struct dpaa2_fl_entry), 0);
dpaa2_chan->fl_pool =
dma_pool_create("fl_pool", dev,
sizeof(struct dpaa2_fl_entry) * 3,
sizeof(struct dpaa2_fl_entry), 0);
if (!dpaa2_chan->fl_pool)
goto err_fd;
dpaa2_chan->sdd_pool =
dma_pool_create("sdd_pool", dev,
sizeof(struct dpaa2_qdma_sd_d),
sizeof(struct dpaa2_qdma_sd_d) * 2,
sizeof(struct dpaa2_qdma_sd_d), 0);
if (!dpaa2_chan->sdd_pool)
goto err_fl;

View File

@ -514,11 +514,11 @@ static struct fsl_qdma_queue
queue_temp = queue_head + i + (j * queue_num);
queue_temp->cq =
dma_alloc_coherent(&pdev->dev,
sizeof(struct fsl_qdma_format) *
queue_size[i],
&queue_temp->bus_addr,
GFP_KERNEL);
dmam_alloc_coherent(&pdev->dev,
sizeof(struct fsl_qdma_format) *
queue_size[i],
&queue_temp->bus_addr,
GFP_KERNEL);
if (!queue_temp->cq)
return NULL;
queue_temp->block_base = fsl_qdma->block_base +
@ -563,15 +563,14 @@ static struct fsl_qdma_queue
/*
* Buffer for queue command
*/
status_head->cq = dma_alloc_coherent(&pdev->dev,
sizeof(struct fsl_qdma_format) *
status_size,
&status_head->bus_addr,
GFP_KERNEL);
if (!status_head->cq) {
devm_kfree(&pdev->dev, status_head);
status_head->cq = dmam_alloc_coherent(&pdev->dev,
sizeof(struct fsl_qdma_format) *
status_size,
&status_head->bus_addr,
GFP_KERNEL);
if (!status_head->cq)
return NULL;
}
status_head->n_cq = status_size;
status_head->virt_head = status_head->cq;
status_head->virt_tail = status_head->cq;
@ -1268,8 +1267,6 @@ static void fsl_qdma_cleanup_vchan(struct dma_device *dmadev)
static void fsl_qdma_remove(struct platform_device *pdev)
{
int i;
struct fsl_qdma_queue *status;
struct device_node *np = pdev->dev.of_node;
struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);
@ -1277,12 +1274,6 @@ static void fsl_qdma_remove(struct platform_device *pdev)
fsl_qdma_cleanup_vchan(&fsl_qdma->dma_dev);
of_dma_controller_free(np);
dma_async_device_unregister(&fsl_qdma->dma_dev);
for (i = 0; i < fsl_qdma->block_number; i++) {
status = fsl_qdma->status[i];
dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_format) *
status->n_cq, status->cq, status->bus_addr);
}
}
static const struct of_device_id fsl_qdma_dt_ids[] = {

View File

@ -2404,6 +2404,11 @@ static int edma_probe(struct platform_device *pdev)
if (irq > 0) {
irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
dev_name(dev));
if (!irq_name) {
ret = -ENOMEM;
goto err_disable_pm;
}
ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
ecc);
if (ret) {
@ -2420,6 +2425,11 @@ static int edma_probe(struct platform_device *pdev)
if (irq > 0) {
irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
dev_name(dev));
if (!irq_name) {
ret = -ENOMEM;
goto err_disable_pm;
}
ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name,
ecc);
if (ret) {

View File

@ -3968,6 +3968,7 @@ static void udma_desc_pre_callback(struct virt_dma_chan *vc,
{
struct udma_chan *uc = to_udma_chan(&vc->chan);
struct udma_desc *d;
u8 status;
if (!vd)
return;
@ -3977,12 +3978,12 @@ static void udma_desc_pre_callback(struct virt_dma_chan *vc,
if (d->metadata_size)
udma_fetch_epib(uc, d);
/* Provide residue information for the client */
if (result) {
void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);
if (cppi5_desc_get_type(desc_vaddr) ==
CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
/* Provide residue information for the client */
result->residue = d->residue -
cppi5_hdesc_get_pktlen(desc_vaddr);
if (result->residue)
@ -3991,7 +3992,12 @@ static void udma_desc_pre_callback(struct virt_dma_chan *vc,
result->result = DMA_TRANS_NOERROR;
} else {
result->residue = 0;
result->result = DMA_TRANS_NOERROR;
/* Propagate TR Response errors to the client */
status = d->hwdesc[0].tr_resp_base->status;
if (status)
result->result = DMA_TRANS_ABORTED;
else
result->result = DMA_TRANS_NOERROR;
}
}
}

View File

@ -118,10 +118,9 @@ static int textual_leaf_to_string(const u32 *block, char *buf, size_t size)
* @buf: where to put the string
* @size: size of @buf, in bytes
*
* The string is taken from a minimal ASCII text descriptor leaf after
* the immediate entry with @key. The string is zero-terminated.
* An overlong string is silently truncated such that it and the
* zero byte fit into @size.
* The string is taken from a minimal ASCII text descriptor leaf just after the entry with the
* @key. The string is zero-terminated. An overlong string is silently truncated such that it
* and the zero byte fit into @size.
*
* Returns strlen(buf) or a negative error code.
*/
@ -368,8 +367,17 @@ static ssize_t show_text_leaf(struct device *dev,
for (i = 0; i < ARRAY_SIZE(directories) && !!directories[i]; ++i) {
int result = fw_csr_string(directories[i], attr->key, buf, bufsize);
// Detected.
if (result >= 0)
if (result >= 0) {
ret = result;
} else if (i == 0 && attr->key == CSR_VENDOR) {
// Sony DVMC-DA1 has configuration ROM such that the descriptor leaf entry
// in the root directory follows the directory entry for vendor ID
// instead of the immediate value for vendor ID.
result = fw_csr_string(directories[i], CSR_DIRECTORY | attr->key, buf,
bufsize);
if (result >= 0)
ret = result;
}
}
if (ret >= 0) {

View File

@ -141,11 +141,31 @@ static void amdgpu_amdkfd_reset_work(struct work_struct *work)
static const struct drm_client_funcs kfd_client_funcs = {
.unregister = drm_client_release,
};
int amdgpu_amdkfd_drm_client_create(struct amdgpu_device *adev)
{
int ret;
if (!adev->kfd.init_complete)
return 0;
ret = drm_client_init(&adev->ddev, &adev->kfd.client, "kfd",
&kfd_client_funcs);
if (ret) {
dev_err(adev->dev, "Failed to init DRM client: %d\n",
ret);
return ret;
}
drm_client_register(&adev->kfd.client);
return 0;
}
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
int i;
int last_valid_bit;
int ret;
amdgpu_amdkfd_gpuvm_init_mem_limits();
@ -164,12 +184,6 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
.enable_mes = adev->enable_mes,
};
ret = drm_client_init(&adev->ddev, &adev->kfd.client, "kfd", &kfd_client_funcs);
if (ret) {
dev_err(adev->dev, "Failed to init DRM client: %d\n", ret);
return;
}
/* this is going to have a few of the MSBs set that we need to
* clear
*/
@ -208,10 +222,6 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
adev->kfd.init_complete = kgd2kfd_device_init(adev->kfd.dev,
&gpu_resources);
if (adev->kfd.init_complete)
drm_client_register(&adev->kfd.client);
else
drm_client_release(&adev->kfd.client);
amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;

View File

@ -182,6 +182,8 @@ int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
struct mm_struct *mm,
struct svm_range_bo *svm_bo);
int amdgpu_amdkfd_drm_client_create(struct amdgpu_device *adev);
#if defined(CONFIG_DEBUG_FS)
int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data);
#endif
@ -301,7 +303,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(struct amdgpu_device *adev,
struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv);
void amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_sync_memory(
struct amdgpu_device *adev, struct kgd_mem *mem, bool intr);
int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,

View File

@ -290,7 +290,7 @@ static int suspend_resume_compute_scheduler(struct amdgpu_device *adev, bool sus
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
if (!(ring && drm_sched_wqueue_ready(&ring->sched)))
if (!amdgpu_ring_sched_ready(ring))
continue;
/* stop scheduler and drain ring. */

View File

@ -2085,21 +2085,35 @@ out:
return ret;
}
void amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv)
int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv)
{
struct kfd_mem_attachment *entry;
struct amdgpu_vm *vm;
int ret;
vm = drm_priv_to_vm(drm_priv);
mutex_lock(&mem->lock);
ret = amdgpu_bo_reserve(mem->bo, true);
if (ret)
goto out;
list_for_each_entry(entry, &mem->attachments, list) {
if (entry->bo_va->base.vm == vm)
kfd_mem_dmaunmap_attachment(mem, entry);
if (entry->bo_va->base.vm != vm)
continue;
if (entry->bo_va->base.bo->tbo.ttm &&
!entry->bo_va->base.bo->tbo.ttm->sg)
continue;
kfd_mem_dmaunmap_attachment(mem, entry);
}
amdgpu_bo_unreserve(mem->bo);
out:
mutex_unlock(&mem->lock);
return ret;
}
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(

View File

@ -1678,7 +1678,7 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
if (!ring || !drm_sched_wqueue_ready(&ring->sched))
if (!amdgpu_ring_sched_ready(ring))
continue;
drm_sched_wqueue_stop(&ring->sched);
}
@ -1694,7 +1694,7 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
if (!ring || !drm_sched_wqueue_ready(&ring->sched))
if (!amdgpu_ring_sched_ready(ring))
continue;
drm_sched_wqueue_start(&ring->sched);
}
@ -1916,8 +1916,8 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
ring = adev->rings[val];
if (!ring || !ring->funcs->preempt_ib ||
!drm_sched_wqueue_ready(&ring->sched))
if (!amdgpu_ring_sched_ready(ring) ||
!ring->funcs->preempt_ib)
return -EINVAL;
/* the last preemption failed */

View File

@ -4121,23 +4121,13 @@ int amdgpu_device_init(struct amdgpu_device *adev,
}
}
} else {
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(13, 0, 0):
case IP_VERSION(13, 0, 7):
case IP_VERSION(13, 0, 10):
r = psp_gpu_reset(adev);
break;
default:
tmp = amdgpu_reset_method;
/* It should do a default reset when loading or reloading the driver,
* regardless of the module parameter reset_method.
*/
amdgpu_reset_method = AMD_RESET_METHOD_NONE;
r = amdgpu_asic_reset(adev);
amdgpu_reset_method = tmp;
break;
}
tmp = amdgpu_reset_method;
/* It should do a default reset when loading or reloading the driver,
* regardless of the module parameter reset_method.
*/
amdgpu_reset_method = AMD_RESET_METHOD_NONE;
r = amdgpu_asic_reset(adev);
amdgpu_reset_method = tmp;
if (r) {
dev_err(adev->dev, "asic reset on init failed\n");
goto failed;
@ -5031,7 +5021,7 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
if (!ring || !drm_sched_wqueue_ready(&ring->sched))
if (!amdgpu_ring_sched_ready(ring))
continue;
spin_lock(&ring->sched.job_list_lock);
@ -5170,7 +5160,7 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
if (!ring || !drm_sched_wqueue_ready(&ring->sched))
if (!amdgpu_ring_sched_ready(ring))
continue;
/* Clear job fence from fence drv to avoid force_completion
@ -5637,7 +5627,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = tmp_adev->rings[i];
if (!ring || !drm_sched_wqueue_ready(&ring->sched))
if (!amdgpu_ring_sched_ready(ring))
continue;
drm_sched_stop(&ring->sched, job ? &job->base : NULL);
@ -5706,7 +5696,7 @@ skip_hw_reset:
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = tmp_adev->rings[i];
if (!ring || !drm_sched_wqueue_ready(&ring->sched))
if (!amdgpu_ring_sched_ready(ring))
continue;
drm_sched_start(&ring->sched, true);
@ -6061,7 +6051,7 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
if (!ring || !drm_sched_wqueue_ready(&ring->sched))
if (!amdgpu_ring_sched_ready(ring))
continue;
drm_sched_stop(&ring->sched, NULL);
@ -6189,7 +6179,7 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
if (!ring || !drm_sched_wqueue_ready(&ring->sched))
if (!amdgpu_ring_sched_ready(ring))
continue;
drm_sched_start(&ring->sched, true);

@ -2255,6 +2255,10 @@ retry_init:
if (ret)
goto err_pci;
ret = amdgpu_amdkfd_drm_client_create(adev);
if (ret)
goto err_pci;
/*
* 1. don't init fbdev on hw without DCE
* 2. don't init fbdev if there are no connectors

@ -635,6 +635,7 @@ int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
ring->name);
ring->sched.ready = !r;
return r;
}
@ -717,3 +718,14 @@ void amdgpu_ring_ib_on_emit_de(struct amdgpu_ring *ring)
if (ring->is_sw_ring)
amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_DE);
}
bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring)
{
if (!ring)
return false;
if (ring->no_scheduler || !drm_sched_wqueue_ready(&ring->sched))
return false;
return true;
}
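
The call sites converted throughout this diff (debugfs, reset, PCI error handling) all follow the same shape; besides folding the NULL and drm_sched_wqueue_ready() tests into one place, the helper also honours ring->no_scheduler, which the open-coded checks did not cover. Typical usage, as in the hunks above and below:

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		/* one predicate: ring present, has a scheduler, wqueue ready */
		if (!amdgpu_ring_sched_ready(ring))
			continue;
		/* ... operate on ring->sched ... */
	}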

@ -450,5 +450,5 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring);
#endif

@ -204,6 +204,12 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev,
tmp = RREG32(mmIH_RB_CNTL);
tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
WREG32(mmIH_RB_CNTL, tmp);
/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
* can be detected.
*/
tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
WREG32(mmIH_RB_CNTL, tmp);
}
return (wptr & ih->ptr_mask);
}
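
The same two-write sequence recurs in the cz_ih, iceland_ih, ih_v6_0, ih_v6_1, navi10_ih, si_ih, tonga_ih, vega10_ih and vega20_ih hunks below: the overflow-clear bit is asserted to acknowledge the current ring-buffer overflow, then de-asserted right away, since leaving it set would keep later overflows from being latched. A minimal standalone sketch of the pattern (the bit position and accessor are illustrative stand-ins, not the real mmIH_RB_CNTL layout):

	#include <stdint.h>

	#define WPTR_OVERFLOW_CLEAR_MASK	(1u << 31)	/* illustrative bit */

	static void ih_ack_and_rearm_overflow(volatile uint32_t *ih_rb_cntl)
	{
		uint32_t tmp = *ih_rb_cntl;

		tmp |= WPTR_OVERFLOW_CLEAR_MASK;	/* acknowledge the overflow */
		*ih_rb_cntl = tmp;

		tmp &= ~WPTR_OVERFLOW_CLEAR_MASK;	/* re-arm so the next       */
		*ih_rb_cntl = tmp;			/* overflow can be detected */
	}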

@ -216,6 +216,11 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32(mmIH_RB_CNTL, tmp);
/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
* can be detected.
*/
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
WREG32(mmIH_RB_CNTL, tmp);
out:
return (wptr & ih->ptr_mask);

@ -4027,8 +4027,6 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
err = 0;
adev->gfx.mec2_fw = NULL;
}
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2);
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2_JT);
gfx_v10_0_check_fw_write_wait(adev);
out:

@ -107,23 +107,6 @@ static const struct soc15_reg_golden golden_settings_gc_11_0_1[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL2, 0xfcffffff, 0x0000000a)
};
static const struct soc15_reg_golden golden_settings_gc_11_5_0[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, regDB_DEBUG5, 0xffffffff, 0x00000800),
SOC15_REG_GOLDEN_VALUE(GC, 0, regGB_ADDR_CONFIG, 0x0c1807ff, 0x00000242),
SOC15_REG_GOLDEN_VALUE(GC, 0, regGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500),
SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xfffffff3),
SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xfffffff3),
SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL, 0xffffffff, 0xf37fff3f),
SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL3, 0xfffffffb, 0x00f40188),
SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL4, 0xf0ffffff, 0x80009007),
SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_CL_ENHANCE, 0xf1ffffff, 0x00880007),
SOC15_REG_GOLDEN_VALUE(GC, 0, regPC_CONFIG_CNTL_1, 0xffffffff, 0x00010000),
SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL_AUX, 0xf7f7ffff, 0x01030000),
SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL2, 0x007f0000, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL2, 0xffcfffff, 0x0000200a),
SOC15_REG_GOLDEN_VALUE(GC, 0, regUTCL1_CTRL_2, 0xffffffff, 0x0000048f)
};
#define DEFAULT_SH_MEM_CONFIG \
((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
(SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
@ -304,11 +287,6 @@ static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_gc_11_0_1,
(const u32)ARRAY_SIZE(golden_settings_gc_11_0_1));
break;
case IP_VERSION(11, 5, 0):
soc15_program_register_sequence(adev,
golden_settings_gc_11_5_0,
(const u32)ARRAY_SIZE(golden_settings_gc_11_5_0));
break;
default:
break;
}

@ -915,8 +915,8 @@ static int gmc_v6_0_hw_init(void *handle)
if (amdgpu_emu_mode == 1)
return amdgpu_gmc_vram_checking(adev);
else
return r;
return 0;
}
static int gmc_v6_0_hw_fini(void *handle)

@ -1099,8 +1099,8 @@ static int gmc_v7_0_hw_init(void *handle)
if (amdgpu_emu_mode == 1)
return amdgpu_gmc_vram_checking(adev);
else
return r;
return 0;
}
static int gmc_v7_0_hw_fini(void *handle)

@ -1219,8 +1219,8 @@ static int gmc_v8_0_hw_init(void *handle)
if (amdgpu_emu_mode == 1)
return amdgpu_gmc_vram_checking(adev);
else
return r;
return 0;
}
static int gmc_v8_0_hw_fini(void *handle)

@ -2341,8 +2341,8 @@ static int gmc_v9_0_hw_init(void *handle)
if (amdgpu_emu_mode == 1)
return amdgpu_gmc_vram_checking(adev);
else
return r;
return 0;
}
/**

@ -215,6 +215,11 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32(mmIH_RB_CNTL, tmp);
/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
* can be detected.
*/
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
WREG32(mmIH_RB_CNTL, tmp);
out:
return (wptr & ih->ptr_mask);

@ -418,6 +418,12 @@ static u32 ih_v6_0_get_wptr(struct amdgpu_device *adev,
tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
* can be detected.
*/
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
out:
return (wptr & ih->ptr_mask);
}

@ -418,6 +418,13 @@ static u32 ih_v6_1_get_wptr(struct amdgpu_device *adev,
tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
* can be detected.
*/
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
out:
return (wptr & ih->ptr_mask);
}

@ -442,6 +442,12 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
* can be detected.
*/
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
out:
return (wptr & ih->ptr_mask);
}

@ -119,6 +119,12 @@ static u32 si_ih_get_wptr(struct amdgpu_device *adev,
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
WREG32(IH_RB_CNTL, tmp);
/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
* can be detected.
*/
tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
WREG32(IH_RB_CNTL, tmp);
}
return (wptr & ih->ptr_mask);
}

@ -219,6 +219,12 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32(mmIH_RB_CNTL, tmp);
/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
* can be detected.
*/
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
WREG32(mmIH_RB_CNTL, tmp);
out:
return (wptr & ih->ptr_mask);
}

@ -2017,22 +2017,6 @@ static int vcn_v4_0_set_powergating_state(void *handle, enum amd_powergating_sta
return ret;
}
/**
* vcn_v4_0_set_interrupt_state - set VCN block interrupt state
*
* @adev: amdgpu_device pointer
* @source: interrupt sources
* @type: interrupt types
* @state: interrupt states
*
* Set VCN block interrupt state
*/
static int vcn_v4_0_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
unsigned type, enum amdgpu_interrupt_state state)
{
return 0;
}
/**
* vcn_v4_0_set_ras_interrupt_state - set VCN block RAS interrupt state
*
@ -2097,7 +2081,6 @@ static int vcn_v4_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_
}
static const struct amdgpu_irq_src_funcs vcn_v4_0_irq_funcs = {
.set = vcn_v4_0_set_interrupt_state,
.process = vcn_v4_0_process_interrupt,
};

@ -269,8 +269,6 @@ static int vcn_v4_0_5_hw_fini(void *handle)
vcn_v4_0_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
}
}
amdgpu_irq_put(adev, &adev->vcn.inst[i].irq, 0);
}
return 0;
@ -1668,22 +1666,6 @@ static int vcn_v4_0_5_set_powergating_state(void *handle, enum amd_powergating_s
return ret;
}
/**
* vcn_v4_0_5_set_interrupt_state - set VCN block interrupt state
*
* @adev: amdgpu_device pointer
* @source: interrupt sources
* @type: interrupt types
* @state: interrupt states
*
* Set VCN block interrupt state
*/
static int vcn_v4_0_5_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
unsigned type, enum amdgpu_interrupt_state state)
{
return 0;
}
/**
* vcn_v4_0_5_process_interrupt - process VCN block interrupt
*
@ -1726,7 +1708,6 @@ static int vcn_v4_0_5_process_interrupt(struct amdgpu_device *adev, struct amdgp
}
static const struct amdgpu_irq_src_funcs vcn_v4_0_5_irq_funcs = {
.set = vcn_v4_0_5_set_interrupt_state,
.process = vcn_v4_0_5_process_interrupt,
};

@ -373,6 +373,12 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
* can be detected.
*/
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
out:
return (wptr & ih->ptr_mask);
}

@ -421,6 +421,12 @@ static u32 vega20_ih_get_wptr(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
* can be detected.
*/
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
out:
return (wptr & ih->ptr_mask);
}

@ -674,7 +674,7 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
0x86ea6a6a, 0x8f6e837a,
0xb96ee0c2, 0xbf800002,
0xb97a0002, 0xbf8a0000,
0xbe801f6c, 0xbf810000,
0xbe801f6c, 0xbf9b0000,
};
static const uint32_t cwsr_trap_nv1x_hex[] = {
@ -1091,7 +1091,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0xb9eef807, 0x876dff6d,
0x0000ffff, 0x87fe7e7e,
0x87ea6a6a, 0xb9faf802,
0xbe80226c, 0xbf810000,
0xbe80226c, 0xbf9b0000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0x00000000,
@ -1574,7 +1574,7 @@ static const uint32_t cwsr_trap_arcturus_hex[] = {
0x86ea6a6a, 0x8f6e837a,
0xb96ee0c2, 0xbf800002,
0xb97a0002, 0xbf8a0000,
0xbe801f6c, 0xbf810000,
0xbe801f6c, 0xbf9b0000,
};
static const uint32_t cwsr_trap_aldebaran_hex[] = {
@ -2065,7 +2065,7 @@ static const uint32_t cwsr_trap_aldebaran_hex[] = {
0x86ea6a6a, 0x8f6e837a,
0xb96ee0c2, 0xbf800002,
0xb97a0002, 0xbf8a0000,
0xbe801f6c, 0xbf810000,
0xbe801f6c, 0xbf9b0000,
};
static const uint32_t cwsr_trap_gfx10_hex[] = {
@ -2500,7 +2500,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0x876dff6d, 0x0000ffff,
0x87fe7e7e, 0x87ea6a6a,
0xb9faf802, 0xbe80226c,
0xbf810000, 0xbf9f0000,
0xbf9b0000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
};
@ -2944,7 +2944,7 @@ static const uint32_t cwsr_trap_gfx11_hex[] = {
0xb8eef802, 0xbf0d866e,
0xbfa20002, 0xb97af802,
0xbe80486c, 0xb97af802,
0xbe804a6c, 0xbfb00000,
0xbe804a6c, 0xbfb10000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0x00000000,
@ -3436,5 +3436,5 @@ static const uint32_t cwsr_trap_gfx9_4_3_hex[] = {
0x86ea6a6a, 0x8f6e837a,
0xb96ee0c2, 0xbf800002,
0xb97a0002, 0xbf8a0000,
0xbe801f6c, 0xbf810000,
0xbe801f6c, 0xbf9b0000,
};

@ -1104,7 +1104,7 @@ L_RETURN_WITHOUT_PRIV:
s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution
L_END_PGM:
s_endpgm
s_endpgm_saved
end
function write_hwreg_to_mem(s, s_rsrc, s_mem_offset)

@ -921,7 +921,7 @@ L_RESTORE:
/* the END */
/**************************************************************************/
L_END_PGM:
s_endpgm
s_endpgm_saved
end

@ -1442,7 +1442,9 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);
/* Remove dma mapping after tlb flush to avoid IO_PAGE_FAULT */
amdgpu_amdkfd_gpuvm_dmaunmap_mem(mem, peer_pdd->drm_priv);
err = amdgpu_amdkfd_gpuvm_dmaunmap_mem(mem, peer_pdd->drm_priv);
if (err)
goto sync_memory_failed;
}
mutex_unlock(&p->mutex);

@ -574,7 +574,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
prange->last);
addr = prange->start << PAGE_SHIFT;
addr = migrate->start;
src = (uint64_t *)(scratch + npages);
dst = scratch;

@ -1488,10 +1488,15 @@ void kfd_dec_compute_active(struct kfd_node *dev);
/* Cgroup Support */
/* Check with device cgroup if @kfd device is accessible */
static inline int kfd_devcgroup_check_permission(struct kfd_node *kfd)
static inline int kfd_devcgroup_check_permission(struct kfd_node *node)
{
#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
struct drm_device *ddev = adev_to_drm(kfd->adev);
struct drm_device *ddev;
if (node->xcp)
ddev = node->xcp->ddev;
else
ddev = adev_to_drm(node->adev);
return devcgroup_check_permission(DEVCG_DEV_CHAR, DRM_MAJOR,
ddev->render->index,

@ -9187,6 +9187,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
* To fix this, DC should permit updating only stream properties.
*/
dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC);
if (!dummy_updates) {
DRM_ERROR("Failed to allocate memory for dummy_updates.\n");
continue;
}
for (j = 0; j < status->plane_count; j++)
dummy_updates[j].surface = status->plane_states[0];

@ -437,32 +437,32 @@ static struct wm_table ddr5_wm_table = {
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 14.0,
.sr_enter_plus_exit_time_us = 16.0,
.sr_exit_time_us = 28.0,
.sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 14.0,
.sr_enter_plus_exit_time_us = 16.0,
.sr_exit_time_us = 28.0,
.sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 14.0,
.sr_enter_plus_exit_time_us = 16.0,
.sr_exit_time_us = 28.0,
.sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 14.0,
.sr_enter_plus_exit_time_us = 16.0,
.sr_exit_time_us = 28.0,
.sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
}
@ -474,32 +474,32 @@ static struct wm_table lpddr5_wm_table = {
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 14.0,
.sr_enter_plus_exit_time_us = 16.0,
.sr_exit_time_us = 28.0,
.sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 14.0,
.sr_enter_plus_exit_time_us = 16.0,
.sr_exit_time_us = 28.0,
.sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 14.0,
.sr_enter_plus_exit_time_us = 16.0,
.sr_exit_time_us = 28.0,
.sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 14.0,
.sr_enter_plus_exit_time_us = 16.0,
.sr_exit_time_us = 28.0,
.sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
}

@ -203,12 +203,12 @@ void dcn32_link_encoder_construct(
enc10->base.hpd_source = init_data->hpd_source;
enc10->base.connector = init_data->connector;
if (enc10->base.connector.id == CONNECTOR_ID_USBC)
enc10->base.features.flags.bits.DP_IS_USB_C = 1;
enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
enc10->base.features = *enc_features;
if (enc10->base.connector.id == CONNECTOR_ID_USBC)
enc10->base.features.flags.bits.DP_IS_USB_C = 1;
enc10->base.transmitter = init_data->transmitter;
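
The reason for the move (mirrored in the dcn35 hunk below): DP_IS_USB_C was being set before enc10->base.features was overwritten wholesale from enc_features, so the flag was lost. The ordering that matters, annotated:

	enc10->base.features = *enc_features;	/* wholesale copy clobbers any
						 * bits set beforehand ...    */
	if (enc10->base.connector.id == CONNECTOR_ID_USBC)
		enc10->base.features.flags.bits.DP_IS_USB_C = 1;	/* ... so patch
									 * bits after */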

@ -184,8 +184,6 @@ void dcn35_link_encoder_construct(
enc10->base.hpd_source = init_data->hpd_source;
enc10->base.connector = init_data->connector;
if (enc10->base.connector.id == CONNECTOR_ID_USBC)
enc10->base.features.flags.bits.DP_IS_USB_C = 1;
enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
@ -240,6 +238,8 @@ void dcn35_link_encoder_construct(
}
enc10->base.features.flags.bits.HDMI_6GB_EN = 1;
if (enc10->base.connector.id == CONNECTOR_ID_USBC)
enc10->base.features.flags.bits.DP_IS_USB_C = 1;
if (bp_funcs->get_connector_speed_cap_info)
result = bp_funcs->get_connector_speed_cap_info(enc10->base.ctx->dc_bios,

@ -1112,7 +1112,7 @@ struct pipe_slice_table {
struct pipe_ctx *pri_pipe;
struct dc_plane_state *plane;
int slice_count;
} mpc_combines[MAX_SURFACES];
} mpc_combines[MAX_PLANES];
int mpc_combine_count;
};
@ -2753,7 +2753,7 @@ static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk
struct _vcs_dpi_voltage_scaling_st entry = {0};
struct clk_limit_table_entry max_clk_data = {0};
unsigned int min_dcfclk_mhz = 199, min_fclk_mhz = 299;
unsigned int min_dcfclk_mhz = 399, min_fclk_mhz = 599;
static const unsigned int num_dcfclk_stas = 5;
unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {199, 615, 906, 1324, 1564};

@ -164,8 +164,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
},
},
.num_states = 5,
.sr_exit_time_us = 14.0,
.sr_enter_plus_exit_time_us = 16.0,
.sr_exit_time_us = 28.0,
.sr_enter_plus_exit_time_us = 30.0,
.sr_exit_z8_time_us = 210.0,
.sr_enter_plus_exit_z8_time_us = 320.0,
.fclk_change_latency_us = 24.0,

@ -341,9 +341,6 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
break;
}
if (dml2->config.bbox_overrides.clks_table.num_states)
p->in_states->num_states = dml2->config.bbox_overrides.clks_table.num_states;
/* Override from passed values, if available */
for (i = 0; i < p->in_states->num_states; i++) {
if (dml2->config.bbox_overrides.sr_exit_latency_us) {
@ -400,6 +397,7 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
}
/* Copy clocks tables entries, if available */
if (dml2->config.bbox_overrides.clks_table.num_states) {
p->in_states->num_states = dml2->config.bbox_overrides.clks_table.num_states;
for (i = 0; i < dml2->config.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels; i++) {
p->in_states->state_array[i].dcfclk_mhz = dml2->config.bbox_overrides.clks_table.clk_entries[i].dcfclk_mhz;
@ -793,35 +791,28 @@ static void populate_dml_surface_cfg_from_plane_state(enum dml_project_id dml2_p
}
}
/* TODO: no support for MPC combine, needs rework - should calculate scaling params based on plane+stream */
static struct scaler_data get_scaler_data_for_plane(const struct dc_plane_state *in, const struct dc_state *context)
static struct scaler_data get_scaler_data_for_plane(const struct dc_plane_state *in, struct dc_state *context)
{
int i;
struct scaler_data data = { 0 };
struct pipe_ctx *temp_pipe = &context->res_ctx.temp_pipe;
memset(temp_pipe, 0, sizeof(struct pipe_ctx));
for (i = 0; i < MAX_PIPES; i++) {
const struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (pipe->plane_state == in && !pipe->prev_odm_pipe) {
const struct pipe_ctx *next_pipe = pipe->next_odm_pipe;
temp_pipe->stream = pipe->stream;
temp_pipe->plane_state = pipe->plane_state;
temp_pipe->plane_res.scl_data.taps = pipe->plane_res.scl_data.taps;
data = context->res_ctx.pipe_ctx[i].plane_res.scl_data;
while (next_pipe) {
data.h_active += next_pipe->plane_res.scl_data.h_active;
data.recout.width += next_pipe->plane_res.scl_data.recout.width;
if (in->rotation == ROTATION_ANGLE_0 || in->rotation == ROTATION_ANGLE_180) {
data.viewport.width += next_pipe->plane_res.scl_data.viewport.width;
} else {
data.viewport.height += next_pipe->plane_res.scl_data.viewport.height;
}
next_pipe = next_pipe->next_odm_pipe;
}
resource_build_scaling_params(temp_pipe);
break;
}
}
ASSERT(i < MAX_PIPES);
return data;
return temp_pipe->plane_res.scl_data;
}
static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned int location, const struct dc_stream_state *in)
@ -866,7 +857,7 @@ static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned
out->ScalerEnabled[location] = false;
}
static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out, unsigned int location, const struct dc_plane_state *in, const struct dc_state *context)
static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out, unsigned int location, const struct dc_plane_state *in, struct dc_state *context)
{
const struct scaler_data scaler_data = get_scaler_data_for_plane(in, context);

@ -1183,9 +1183,9 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
dto_params.timing = &pipe_ctx->stream->timing;
dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
if (dccg) {
dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst);
dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
}
} else if (dccg && dccg->funcs->disable_symclk_se) {
dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst,

@ -2790,18 +2790,17 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
}
if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, dp_hpo_inst);
phyd32clk = get_phyd32clk_src(link);
dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
dto_params.otg_inst = tg->inst;
dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx);
dto_params.timing = &pipe_ctx->stream->timing;
dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);
dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, dp_hpo_inst);
phyd32clk = get_phyd32clk_src(link);
dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
} else {
if (dccg->funcs->enable_symclk_se)
dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst,

@ -469,6 +469,8 @@ struct resource_context {
unsigned int hpo_dp_link_enc_to_link_idx[MAX_HPO_DP2_LINK_ENCODERS];
int hpo_dp_link_enc_ref_cnts[MAX_HPO_DP2_LINK_ENCODERS];
bool is_mpc_3dlut_acquired[MAX_PIPES];
/* solely used to build scaler data in dml2 */
struct pipe_ctx temp_pipe;
};
struct dce_bw_output {

@ -196,7 +196,7 @@ static int get_host_router_total_dp_tunnel_bw(const struct dc *dc, uint8_t hr_in
struct dc_link *link_dpia_primary, *link_dpia_secondary;
int total_bw = 0;
for (uint8_t i = 0; i < MAX_PIPES * 2; ++i) {
for (uint8_t i = 0; i < (MAX_PIPES * 2) - 1; ++i) {
if (!dc->links[i] || dc->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
continue;
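
The declarations above (link_dpia_primary and link_dpia_secondary) suggest each iteration pairs dc->links[i] with dc->links[i + 1], in which case the old bound let the final iteration read one element past the array; stopping at (MAX_PIPES * 2) - 1 keeps the i + 1 access in range. The general rule, as a hedged sketch with stand-in names:

	/* pairwise walk over an array of N elements: any arr[i + 1]
	 * access requires i < N - 1, not i < N */
	for (uint8_t i = 0; i < N - 1; ++i) {
		primary   = arr[i];
		secondary = arr[i + 1];	/* in bounds because i <= N - 2 */
		/* ... */
	}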

@ -734,7 +734,7 @@ static int smu_early_init(void *handle)
smu->adev = adev;
smu->pm_enabled = !!amdgpu_dpm;
smu->is_apu = false;
smu->smu_baco.state = SMU_BACO_STATE_NONE;
smu->smu_baco.state = SMU_BACO_STATE_EXIT;
smu->smu_baco.platform_support = false;
smu->user_dpm_profile.fan_mode = -1;
@ -1954,31 +1954,10 @@ static int smu_smc_hw_cleanup(struct smu_context *smu)
return 0;
}
static int smu_reset_mp1_state(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
int ret = 0;
if ((!adev->in_runpm) && (!adev->in_suspend) &&
(!amdgpu_in_reset(adev)))
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(13, 0, 0):
case IP_VERSION(13, 0, 7):
case IP_VERSION(13, 0, 10):
ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD);
break;
default:
break;
}
return ret;
}
static int smu_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = adev->powerplay.pp_handle;
int ret;
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
@ -1996,15 +1975,7 @@ static int smu_hw_fini(void *handle)
adev->pm.dpm_enabled = false;
ret = smu_smc_hw_cleanup(smu);
if (ret)
return ret;
ret = smu_reset_mp1_state(smu);
if (ret)
return ret;
return 0;
return smu_smc_hw_cleanup(smu);
}
static void smu_late_fini(void *handle)

@ -424,7 +424,6 @@ enum smu_reset_mode {
enum smu_baco_state {
SMU_BACO_STATE_ENTER = 0,
SMU_BACO_STATE_EXIT,
SMU_BACO_STATE_NONE,
};
struct smu_baco_context {

@ -2748,13 +2748,7 @@ static int smu_v13_0_0_set_mp1_state(struct smu_context *smu,
switch (mp1_state) {
case PP_MP1_STATE_UNLOAD:
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_PrepareMp1ForUnload,
0x55, NULL);
if (!ret && smu->smu_baco.state == SMU_BACO_STATE_EXIT)
ret = smu_v13_0_disable_pmfw_state(smu);
ret = smu_cmn_set_mp1_state(smu, mp1_state);
break;
default:
/* Ignore others */
@ -2950,7 +2944,7 @@ static bool smu_v13_0_0_wbrf_support_check(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
switch (adev->ip_versions[MP1_HWIP][0]) {
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(13, 0, 0):
return smu->smc_fw_version >= 0x004e6300;
case IP_VERSION(13, 0, 10):

@ -2505,13 +2505,7 @@ static int smu_v13_0_7_set_mp1_state(struct smu_context *smu,
switch (mp1_state) {
case PP_MP1_STATE_UNLOAD:
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_PrepareMp1ForUnload,
0x55, NULL);
if (!ret && smu->smu_baco.state == SMU_BACO_STATE_EXIT)
ret = smu_v13_0_disable_pmfw_state(smu);
ret = smu_cmn_set_mp1_state(smu, mp1_state);
break;
default:
/* Ignore others */

@ -103,6 +103,7 @@ nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error)
void
nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
{
cancel_work_sync(&fctx->uevent_work);
nouveau_fence_context_kill(fctx, 0);
nvif_event_dtor(&fctx->event);
fctx->dead = 1;
@ -145,12 +146,13 @@ nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fc
return drop;
}
static int
nouveau_fence_wait_uevent_handler(struct nvif_event *event, void *repv, u32 repc)
static void
nouveau_fence_uevent_work(struct work_struct *work)
{
struct nouveau_fence_chan *fctx = container_of(event, typeof(*fctx), event);
struct nouveau_fence_chan *fctx = container_of(work, struct nouveau_fence_chan,
uevent_work);
unsigned long flags;
int ret = NVIF_EVENT_KEEP;
int drop = 0;
spin_lock_irqsave(&fctx->lock, flags);
if (!list_empty(&fctx->pending)) {
@ -160,11 +162,20 @@ nouveau_fence_wait_uevent_handler(struct nvif_event *event, void *repv, u32 repc
fence = list_entry(fctx->pending.next, typeof(*fence), head);
chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
if (nouveau_fence_update(chan, fctx))
ret = NVIF_EVENT_DROP;
drop = 1;
}
spin_unlock_irqrestore(&fctx->lock, flags);
if (drop)
nvif_event_block(&fctx->event);
return ret;
spin_unlock_irqrestore(&fctx->lock, flags);
}
static int
nouveau_fence_wait_uevent_handler(struct nvif_event *event, void *repv, u32 repc)
{
struct nouveau_fence_chan *fctx = container_of(event, typeof(*fctx), event);
schedule_work(&fctx->uevent_work);
return NVIF_EVENT_KEEP;
}
void
@ -178,6 +189,7 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
} args;
int ret;
INIT_WORK(&fctx->uevent_work, nouveau_fence_uevent_work);
INIT_LIST_HEAD(&fctx->flip);
INIT_LIST_HEAD(&fctx->pending);
spin_lock_init(&fctx->lock);
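
The rework follows the usual deferred-work recipe: the nvif event callback no longer processes fences or blocks the event inline; it only schedules uevent_work, the work item does the processing in ordinary process context (calling nvif_event_block() once the pending list drains), and the cancel_work_sync() added to nouveau_fence_context_del() guarantees no work item is still running at teardown. Reduced to its three touch points, all visible in the hunks above:

	INIT_WORK(&fctx->uevent_work, nouveau_fence_uevent_work);	/* at init     */

	schedule_work(&fctx->uevent_work);				/* in handler  */

	cancel_work_sync(&fctx->uevent_work);				/* at teardown */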

@ -44,6 +44,7 @@ struct nouveau_fence_chan {
u32 context;
char name[32];
struct work_struct uevent_work;
struct nvif_event event;
int notify_ref, dead, killed;
};

@ -1078,7 +1078,6 @@ r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp)
if (IS_ERR(rpc))
return PTR_ERR(rpc);
rpc->size = sizeof(*rpc);
rpc->numEntries = NV_GSP_REG_NUM_ENTRIES;
str_offset = offsetof(typeof(*rpc), entries[NV_GSP_REG_NUM_ENTRIES]);
@ -1094,6 +1093,7 @@ r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp)
strings += name_len;
str_offset += name_len;
}
rpc->size = str_offset;
return nvkm_gsp_rpc_wr(gsp, rpc, false);
}
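
The registry RPC is variable-length: a fixed header and NV_GSP_REG_NUM_ENTRIES entries are followed by the packed NUL-terminated key strings, so the true size is only known after the string loop has run. Setting rpc->size = sizeof(*rpc) up front under-reported the payload; the fix records the accumulated str_offset instead. A generic sketch of sizing such a flexible-payload message (stand-in types, not the GSP structures):

	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	struct msg {
		uint32_t size;			/* total bytes, filled in last      */
		uint32_t num_entries;
		uint32_t entry_off[8];		/* fixed part; strings packed after */
	};

	static void msg_finalize_size(struct msg *m, const char * const *names, int n)
	{
		size_t off = sizeof(*m);	/* end of the fixed part */
		int i;

		for (i = 0; i < n; i++)
			off += strlen(names[i]) + 1;	/* NUL-terminated names */

		m->size = (uint32_t)off;	/* bytes actually sent, not sizeof(*m) */
	}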

@ -960,7 +960,8 @@ int host1x_client_iommu_attach(struct host1x_client *client)
* not the shared IOMMU domain, don't try to attach it to a different
* domain. This allows using the IOMMU-backed DMA API.
*/
if (domain && domain != tegra->domain)
if (domain && domain->type != IOMMU_DOMAIN_IDENTITY &&
domain != tegra->domain)
return 0;
if (tegra->domain) {

@ -94,6 +94,7 @@ static int virtio_gpu_probe(struct virtio_device *vdev)
goto err_free;
}
dma_set_max_seg_size(dev->dev, dma_max_mapping_size(dev->dev) ?: UINT_MAX);
ret = virtio_gpu_init(vdev, dev);
if (ret)
goto err_free;
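
The added dma_set_max_seg_size() line uses the GNU a ?: b extension, common in kernel code: it evaluates to a when a is non-zero and to b otherwise, evaluating a only once. Here that means "use the DMA layer's maximum mapping size, falling back to UINT_MAX when the layer reports 0". Spelled out without the extension:

	size_t seg = dma_max_mapping_size(dev->dev);

	dma_set_max_seg_size(dev->dev, seg ? seg : UINT_MAX);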

@ -50,8 +50,8 @@
#define HOST2GUC_SELF_CFG_REQUEST_MSG_LEN (GUC_HXG_REQUEST_MSG_MIN_LEN + 3u)
#define HOST2GUC_SELF_CFG_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0
#define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY (0xffff << 16)
#define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_LEN (0xffff << 0)
#define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY (0xffffu << 16)
#define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_LEN (0xffffu << 0)
#define HOST2GUC_SELF_CFG_REQUEST_MSG_2_VALUE32 GUC_HXG_REQUEST_MSG_n_DATAn
#define HOST2GUC_SELF_CFG_REQUEST_MSG_3_VALUE64 GUC_HXG_REQUEST_MSG_n_DATAn
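
The same u-suffix treatment is applied to the SLPC event masks in the next hunk. The hazard being fixed: an unsuffixed 0xffff has type int, so 0xffff << 16 shifts a bit into the sign position of a signed int, which is undefined behaviour in strict C and, on typical targets, yields a negative value that sign-extends when widened to u64. The u suffix keeps the constant, and hence the whole mask expression, unsigned. A small illustration (assumes the usual 32-bit int):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t bad  = (uint64_t)(0xffff  << 16);	/* negative int:
								 * sign-extends to
								 * 0xffffffffffff0000 */
		uint64_t good = (uint64_t)(0xffffu << 16);	/* stays 0xffff0000   */

		printf("%016llx %016llx\n",
		       (unsigned long long)bad, (unsigned long long)good);
		return 0;
	}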

@ -242,8 +242,8 @@ struct slpc_shared_data {
(HOST2GUC_PC_SLPC_REQUEST_REQUEST_MSG_MIN_LEN + \
HOST2GUC_PC_SLPC_EVENT_MAX_INPUT_ARGS)
#define HOST2GUC_PC_SLPC_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0
#define HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID (0xff << 8)
#define HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC (0xff << 0)
#define HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID (0xffu << 8)
#define HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC (0xffu << 0)
#define HOST2GUC_PC_SLPC_REQUEST_MSG_N_EVENT_DATA_N GUC_HXG_REQUEST_MSG_n_DATAn
#endif

Some files were not shown because too many files have changed in this diff Show More