Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR.

Conflicts:

net/ipv4/ip_gre.c
  17af420545 ("erspan: make sure erspan_base_hdr is present in skb->head")
  5832c4a77d ("ip_tunnel: convert __be16 tunnel flags to bitmaps")
https://lore.kernel.org/all/20240402103253.3b54a1cf@canb.auug.org.au/

Adjacent changes:

net/ipv6/ip6_fib.c
  d21d40605b ("ipv6: Fix infinite recursion in fib6_dump_done().")
  5fc68320c1 ("ipv6: remove RTNL protection from inet6_dump_fib()")

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

commit cf1ca1f66d
@@ -6599,7 +6599,7 @@
 	To turn off having tracepoints sent to printk,
 	echo 0 > /proc/sys/kernel/tracepoint_printk
 	Note, echoing 1 into this file without the
-	tracepoint_printk kernel cmdline option has no effect.
+	tp_printk kernel cmdline option has no effect.

 	The tp_printk_stop_on_boot (see below) can also be used
 	to stop the printing of events to console at
@@ -155,7 +155,7 @@ Setting this parameter to 100 will disable the hysteresis.

 Some users cannot tolerate the swapping that comes with zswap store failures
 and zswap writebacks. Swapping can be disabled entirely (without disabling
-zswap itself) on a cgroup-basis as follows:
+zswap itself) on a cgroup-basis as follows::

	echo 0 > /sys/fs/cgroup/<cgroup-name>/memory.zswap.writeback

@@ -166,7 +166,7 @@ writeback (because the same pages might be rejected again and again).
 When there is a sizable amount of cold memory residing in the zswap pool, it
 can be advantageous to proactively write these cold pages to swap and reclaim
 the memory for other use cases. By default, the zswap shrinker is disabled.
-User can enable it as follows:
+User can enable it as follows::

	echo Y > /sys/module/zswap/parameters/shrinker_enabled

@@ -574,7 +574,7 @@ Memory b/w domain is L3 cache.
	MB:<cache_id0>=bandwidth0;<cache_id1>=bandwidth1;...

 Memory bandwidth Allocation specified in MiBps
----------------------------------------------
+-----------------------------------------------

 Memory bandwidth domain is L3 cache.
 ::
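For context, the MiBps values described above are applied by writing an MB: line into a resource group's schemata file. A minimal C sketch, assuming resctrl is mounted at /sys/fs/resctrl and using a hypothetical group "p1" with an illustrative 2048 MiBps limit:

    #include <stdio.h>

    int main(void)
    {
            /* One MB: line lists every L3 cache domain id; ids 0 and 1
             * here are placeholders for the domains on a given system.
             */
            FILE *f = fopen("/sys/fs/resctrl/p1/schemata", "w");

            if (!f)
                    return 1;
            fprintf(f, "MB:0=2048;1=2048\n");
            return fclose(f) ? 1 : 0;
    }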
@@ -104,6 +104,8 @@ Some of these tools are listed below:
   KASAN and can be used in production. See Documentation/dev-tools/kfence.rst
 * lockdep is a locking correctness validator. See
   Documentation/locking/lockdep-design.rst
+* Runtime Verification (RV) supports checking specific behaviours for a given
+  subsystem. See Documentation/trace/rv/runtime-verification.rst
 * There are several other pieces of debug instrumentation in the kernel, many
   of which can be found in lib/Kconfig.debug

@@ -94,6 +94,10 @@ properties:

   local-bd-address: true

+  qcom,local-bd-address-broken:
+    type: boolean
+    description:
+      boot firmware is incorrectly passing the address in big-endian order

 required:
   - compatible
@@ -178,7 +178,7 @@ yet. Bug reports are always welcome at the issue tracker below!
     - ``LLVM=1``
   * - s390
     - Maintained
-    - ``CC=clang``
+    - ``LLVM=1`` (LLVM >= 18.1.0), ``CC=clang`` (LLVM < 18.1.0)
   * - um (User Mode)
     - Maintained
     - ``LLVM=1``
Documentation/networking/devlink/devlink-eswitch-attr.rst (new file, 76 lines)
@@ -0,0 +1,76 @@
.. SPDX-License-Identifier: GPL-2.0

==========================
Devlink E-Switch Attribute
==========================

Devlink E-Switch supports two modes of operation: legacy and switchdev.
Legacy mode operates based on traditional MAC/VLAN steering rules. Switching
decisions are made based on MAC addresses, VLANs, etc. There is limited ability
to offload switching rules to hardware.

On the other hand, switchdev mode allows for more advanced offloading
capabilities of the E-Switch to hardware. In switchdev mode, more switching
rules and logic can be offloaded to the hardware switch ASIC. It enables
representor netdevices that represent the slow path of virtual functions (VFs)
or scalable-functions (SFs) of the device. See more information about
:ref:`Documentation/networking/switchdev.rst <switchdev>` and
:ref:`Documentation/networking/representors.rst <representors>`.

In addition, the devlink E-Switch also comes with other attributes listed
in the following section.

Attributes Description
======================

The following is a list of E-Switch attributes.

.. list-table:: E-Switch attributes
   :widths: 8 5 45

   * - Name
     - Type
     - Description
   * - ``mode``
     - enum
     - The mode of the device. The mode can be one of the following:

       * ``legacy`` operates based on traditional MAC/VLAN steering
         rules.
       * ``switchdev`` allows for more advanced offloading capabilities of
         the E-Switch to hardware.
   * - ``inline-mode``
     - enum
     - Some HWs need the VF driver to put part of the packet
       headers on the TX descriptor so the e-switch can do proper
       matching and steering. Supported in both switchdev mode and legacy mode.

       * ``none`` none.
       * ``link`` L2 mode.
       * ``network`` L3 mode.
       * ``transport`` L4 mode.
   * - ``encap-mode``
     - enum
     - The encapsulation mode of the device. Supported in both switchdev mode
       and legacy mode. The mode can be one of the following:

       * ``none`` Disable encapsulation support.
       * ``basic`` Enable encapsulation support.

Example Usage
=============

.. code:: shell

    # enable switchdev mode
    $ devlink dev eswitch set pci/0000:08:00.0 mode switchdev

    # set inline-mode and encap-mode
    $ devlink dev eswitch set pci/0000:08:00.0 inline-mode none encap-mode basic

    # display devlink device eswitch attributes
    $ devlink dev eswitch show pci/0000:08:00.0
    pci/0000:08:00.0: mode switchdev inline-mode none encap-mode basic

    # enable encap-mode with legacy mode
    $ devlink dev eswitch set pci/0000:08:00.0 mode legacy inline-mode none encap-mode basic
@@ -67,6 +67,7 @@ general.
   devlink-selftests
   devlink-trap
   devlink-linecard
+  devlink-eswitch-attr

 Driver-specific documentation
 -----------------------------
@@ -1,4 +1,5 @@
 .. SPDX-License-Identifier: GPL-2.0
+.. _representors:

 =============================
 Network Function Representors
@@ -46,21 +46,16 @@ SEV hardware uses ASIDs to associate a memory encryption key with a VM.
 Hence, the ASID for the SEV-enabled guests must be from 1 to a maximum value
 defined in the CPUID 0x8000001f[ecx] field.

-SEV Key Management
-==================
+The KVM_MEMORY_ENCRYPT_OP ioctl
+===============================

-The SEV guest key management is handled by a separate processor called the AMD
-Secure Processor (AMD-SP). Firmware running inside the AMD-SP provides a secure
-key management interface to perform common hypervisor activities such as
-encrypting bootstrap code, snapshot, migrating and debugging the guest. For more
-information, see the SEV Key Management spec [api-spec]_
-
-The main ioctl to access SEV is KVM_MEMORY_ENCRYPT_OP. If the argument
-to KVM_MEMORY_ENCRYPT_OP is NULL, the ioctl returns 0 if SEV is enabled
-and ``ENOTTY`` if it is disabled (on some older versions of Linux,
-the ioctl runs normally even with a NULL argument, and therefore will
-likely return ``EFAULT``). If non-NULL, the argument to KVM_MEMORY_ENCRYPT_OP
-must be a struct kvm_sev_cmd::
+The main ioctl to access SEV is KVM_MEMORY_ENCRYPT_OP, which operates on
+the VM file descriptor. If the argument to KVM_MEMORY_ENCRYPT_OP is NULL,
+the ioctl returns 0 if SEV is enabled and ``ENOTTY`` if it is disabled
+(on some older versions of Linux, the ioctl tries to run normally even
+with a NULL argument, and therefore will likely return ``EFAULT`` instead
+of zero if SEV is enabled). If non-NULL, the argument to
+KVM_MEMORY_ENCRYPT_OP must be a struct kvm_sev_cmd::

         struct kvm_sev_cmd {
                 __u32 id;
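As an aside, the NULL-argument probe described above is easy to exercise from userspace. A hedged sketch (the open/KVM_CREATE_VM steps are standard KVM boilerplate; error handling is trimmed):

    #include <fcntl.h>
    #include <errno.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
            int kvm = open("/dev/kvm", O_RDWR);
            int vm = ioctl(kvm, KVM_CREATE_VM, 0);

            /* Per the text above: 0 means SEV is enabled, ENOTTY means
             * it is disabled; very old kernels may yield EFAULT instead.
             */
            if (!ioctl(vm, KVM_MEMORY_ENCRYPT_OP, NULL))
                    puts("SEV enabled");
            else if (errno == ENOTTY)
                    puts("SEV disabled");
            else
                    perror("KVM_MEMORY_ENCRYPT_OP");
            return 0;
    }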
@@ -87,10 +82,6 @@ guests, such as launching, running, snapshotting, migrating and decommissioning.
 The KVM_SEV_INIT command is used by the hypervisor to initialize the SEV platform
 context. In a typical workflow, this command should be the first command issued.

-The firmware can be initialized either by using its own non-volatile storage or
-the OS can manage the NV storage for the firmware using the module parameter
-``init_ex_path``. If the file specified by ``init_ex_path`` does not exist or
-is invalid, the OS will create or override the file with output from PSP.
-
 Returns: 0 on success, -negative on error
@@ -434,6 +425,21 @@ issued by the hypervisor to make the guest ready for execution.

 Returns: 0 on success, -negative on error

+Firmware Management
+===================
+
+The SEV guest key management is handled by a separate processor called the AMD
+Secure Processor (AMD-SP). Firmware running inside the AMD-SP provides a secure
+key management interface to perform common hypervisor activities such as
+encrypting bootstrap code, snapshot, migrating and debugging the guest. For more
+information, see the SEV Key Management spec [api-spec]_
+
+The AMD-SP firmware can be initialized either by using its own non-volatile
+storage or the OS can manage the NV storage for the firmware using
+parameter ``init_ex_path`` of the ``ccp`` module. If the file specified
+by ``init_ex_path`` does not exist or is invalid, the OS will create or
+override the file with PSP non-volatile storage.
+
 References
 ==========

@@ -193,8 +193,8 @@ data:
	Asynchronous page fault (APF) control MSR.

	Bits 63-6 hold 64-byte aligned physical address of a 64 byte memory area
-	which must be in guest RAM and must be zeroed. This memory is expected
-	to hold a copy of the following structure::
+	which must be in guest RAM. This memory is expected to hold the
+	following structure::

	struct kvm_vcpu_pv_apf_data {
		/* Used for 'page not present' events delivered via #PF */
@@ -204,7 +204,6 @@ data:
		__u32 token;

		__u8 pad[56];
-		__u32 enabled;
	};

	Bits 5-4 of the MSR are reserved and should be zero. Bit 0 is set to 1
@@ -232,14 +231,14 @@ data:
	as regular page fault, guest must reset 'flags' to '0' before it does
	something that can generate normal page fault.

-	Bytes 5-7 of 64 byte memory location ('token') will be written to by the
+	Bytes 4-7 of 64 byte memory location ('token') will be written to by the
	hypervisor at the time of APF 'page ready' event injection. The content
-	of these bytes is a token which was previously delivered as 'page not
-	present' event. The event indicates the page in now available. Guest is
-	supposed to write '0' to 'token' when it is done handling 'page ready'
-	event and to write 1' to MSR_KVM_ASYNC_PF_ACK after clearing the location;
-	writing to the MSR forces KVM to re-scan its queue and deliver the next
-	pending notification.
+	of these bytes is a token which was previously delivered in CR2 as
+	'page not present' event. The event indicates the page is now available.
+	Guest is supposed to write '0' to 'token' when it is done handling
+	'page ready' event and to write '1' to MSR_KVM_ASYNC_PF_ACK after
+	clearing the location; writing to the MSR forces KVM to re-scan its
+	queue and deliver the next pending notification.

	Note, MSR_KVM_ASYNC_PF_INT MSR specifying the interrupt vector for 'page
	ready' APF delivery needs to be written to before enabling APF mechanism
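To make the MSR layout described above concrete, here is a sketch of how a guest composes the MSR_KVM_ASYNC_PF_EN value; KVM_ASYNC_PF_ENABLED mirrors bit 0 as defined in the kernel's uapi kvm_para.h, and apf_data_gpa is a placeholder:

    #include <stdint.h>

    #define KVM_ASYNC_PF_ENABLED    (1 << 0)    /* bit 0: enable */

    /* Bits 63-6 carry the 64-byte aligned guest-physical address of
     * struct kvm_vcpu_pv_apf_data; bit 0 turns the mechanism on.
     */
    static uint64_t apf_msr_value(uint64_t apf_data_gpa)
    {
            return (apf_data_gpa & ~0x3fULL) | KVM_ASYNC_PF_ENABLED;
    }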
MAINTAINERS (39 lines changed)
@@ -6156,7 +6156,6 @@ DEVICE-MAPPER (LVM)
 M:	Alasdair Kergon <agk@redhat.com>
 M:	Mike Snitzer <snitzer@kernel.org>
 M:	Mikulas Patocka <mpatocka@redhat.com>
-M:	dm-devel@lists.linux.dev
 L:	dm-devel@lists.linux.dev
 S:	Maintained
 Q:	http://patchwork.kernel.org/project/dm-devel/list/
@@ -9652,7 +9651,9 @@ L:	linux-input@vger.kernel.org
 S:	Maintained
 F:	drivers/hid/hid-logitech-hidpp.c

-HIGH-RESOLUTION TIMERS, CLOCKEVENTS
+HIGH-RESOLUTION TIMERS, TIMER WHEEL, CLOCKEVENTS
+M:	Anna-Maria Behnsen <anna-maria@linutronix.de>
+M:	Frederic Weisbecker <frederic@kernel.org>
 M:	Thomas Gleixner <tglx@linutronix.de>
 L:	linux-kernel@vger.kernel.org
 S:	Maintained
@@ -9660,9 +9661,13 @@ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
 F:	Documentation/timers/
 F:	include/linux/clockchips.h
 F:	include/linux/hrtimer.h
+F:	include/linux/timer.h
 F:	kernel/time/clockevents.c
 F:	kernel/time/hrtimer.c
-F:	kernel/time/timer_*.c
+F:	kernel/time/timer.c
+F:	kernel/time/timer_list.c
+F:	kernel/time/timer_migration.*
 F:	tools/testing/selftests/timers/

 HIGH-SPEED SCC DRIVER FOR AX.25
 L:	linux-hams@vger.kernel.org
@@ -14014,6 +14019,7 @@ F:	drivers/net/ethernet/mellanox/mlx4/en_*

 MELLANOX ETHERNET DRIVER (mlx5e)
 M:	Saeed Mahameed <saeedm@nvidia.com>
+M:	Tariq Toukan <tariqt@nvidia.com>
 L:	netdev@vger.kernel.org
 S:	Supported
 W:	http://www.mellanox.com
@@ -14081,6 +14087,7 @@ F:	include/uapi/rdma/mlx4-abi.h
 MELLANOX MLX5 core VPI driver
 M:	Saeed Mahameed <saeedm@nvidia.com>
 M:	Leon Romanovsky <leonro@nvidia.com>
+M:	Tariq Toukan <tariqt@nvidia.com>
 L:	netdev@vger.kernel.org
 L:	linux-rdma@vger.kernel.org
 S:	Supported
@@ -15627,9 +15634,10 @@ F:	drivers/misc/nsm.c
 F:	include/uapi/linux/nsm.h

 NOHZ, DYNTICKS SUPPORT
+M:	Anna-Maria Behnsen <anna-maria@linutronix.de>
 M:	Frederic Weisbecker <frederic@kernel.org>
+M:	Thomas Gleixner <tglx@linutronix.de>
-M:	Ingo Molnar <mingo@kernel.org>
-M:	Thomas Gleixner <tglx@linutronix.de>
 L:	linux-kernel@vger.kernel.org
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/nohz
@@ -17590,15 +17598,20 @@ F:	drivers/pnp/
 F:	include/linux/pnp.h

 POSIX CLOCKS and TIMERS
+M:	Anna-Maria Behnsen <anna-maria@linutronix.de>
+M:	Frederic Weisbecker <frederic@kernel.org>
 M:	Thomas Gleixner <tglx@linutronix.de>
 L:	linux-kernel@vger.kernel.org
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
 F:	fs/timerfd.c
 F:	include/linux/time_namespace.h
-F:	include/linux/timer*
+F:	include/linux/timerfd.h
 F:	include/uapi/linux/time.h
 F:	include/uapi/linux/timerfd.h
+F:	include/trace/events/timer*
-F:	kernel/time/*timer*
+F:	kernel/time/itimer.c
+F:	kernel/time/posix-*
 F:	kernel/time/namespace.c

 POWER MANAGEMENT CORE
@@ -22281,13 +22294,20 @@ S:	Supported
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
 F:	include/linux/clocksource.h
 F:	include/linux/time.h
+F:	include/linux/timekeeper_internal.h
+F:	include/linux/timekeeping.h
 F:	include/linux/timex.h
 F:	include/uapi/linux/time.h
 F:	include/uapi/linux/timex.h
 F:	kernel/time/alarmtimer.c
-F:	kernel/time/clocksource.c
-F:	kernel/time/ntp.c
-F:	kernel/time/time*.c
+F:	kernel/time/clocksource*
+F:	kernel/time/ntp*
+F:	kernel/time/time.c
+F:	kernel/time/timeconst.bc
+F:	kernel/time/timeconv.c
+F:	kernel/time/timecounter.c
+F:	kernel/time/timekeeping*
+F:	kernel/time/time_test.c
 F:	tools/testing/selftests/timers/

 TIPC NETWORK LAYER
@@ -23662,7 +23682,6 @@ F:	drivers/scsi/vmw_pvscsi.c
 F:	drivers/scsi/vmw_pvscsi.h

 VMWARE VIRTUAL PTP CLOCK DRIVER
-M:	Jeff Sipek <jsipek@vmware.com>
 R:	Ajay Kaher <akaher@vmware.com>
 R:	Alexey Makhalov <amakhalov@vmware.com>
 R:	VMware PV-Drivers Reviewers <pv-drivers@vmware.com>
Makefile (2 lines changed)
@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 9
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Hurr durr I'ma ninja sloth

 # *DOCUMENTATION*
@@ -944,6 +944,8 @@ ap_spi_fp: &spi10 {
		vddrf-supply = <&pp1300_l2c>;
		vddch0-supply = <&pp3300_l10c>;
		max-speed = <3200000>;
+
+		qcom,local-bd-address-broken;
	};
 };
@@ -291,6 +291,21 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
	blr	x2
 0:
	mov_q	x0, HCR_HOST_NVHE_FLAGS
+
+	/*
+	 * Compliant CPUs advertise their VHE-onlyness with
+	 * ID_AA64MMFR4_EL1.E2H0 < 0. HCR_EL2.E2H can be
+	 * RES1 in that case. Publish the E2H bit early so that
+	 * it can be picked up by the init_el2_state macro.
+	 *
+	 * Fruity CPUs seem to have HCR_EL2.E2H set to RAO/WI, but
+	 * don't advertise it (they predate this relaxation).
+	 */
+	mrs_s	x1, SYS_ID_AA64MMFR4_EL1
+	tbz	x1, #(ID_AA64MMFR4_EL1_E2H0_SHIFT + ID_AA64MMFR4_EL1_E2H0_WIDTH - 1), 1f
+
+	orr	x0, x0, #HCR_E2H
+1:
	msr	hcr_el2, x0
	isb
@@ -303,22 +318,10 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)

	mov_q	x1, INIT_SCTLR_EL1_MMU_OFF

-	/*
-	 * Compliant CPUs advertise their VHE-onlyness with
-	 * ID_AA64MMFR4_EL1.E2H0 < 0. HCR_EL2.E2H can be
-	 * RES1 in that case.
-	 *
-	 * Fruity CPUs seem to have HCR_EL2.E2H set to RES1, but
-	 * don't advertise it (they predate this relaxation).
-	 */
-	mrs_s	x0, SYS_ID_AA64MMFR4_EL1
-	ubfx	x0, x0, #ID_AA64MMFR4_EL1_E2H0_SHIFT, #ID_AA64MMFR4_EL1_E2H0_WIDTH
-	tbnz	x0, #(ID_AA64MMFR4_EL1_E2H0_SHIFT + ID_AA64MMFR4_EL1_E2H0_WIDTH - 1), 1f
-
	mrs	x0, hcr_el2
	and	x0, x0, #HCR_E2H
	cbz	x0, 2f
-1:

	/* Set a sane SCTLR_EL1, the VHE way */
	pre_disable_mmu_workaround
	msr_s	SYS_SCTLR_EL12, x1
@@ -2597,14 +2597,11 @@ static __init int kvm_arm_init(void)
	if (err)
		goto out_hyp;

-	if (is_protected_kvm_enabled()) {
-		kvm_info("Protected nVHE mode initialized successfully\n");
-	} else if (in_hyp_mode) {
-		kvm_info("VHE mode initialized successfully\n");
-	} else {
-		char mode = cpus_have_final_cap(ARM64_KVM_HVHE) ? 'h' : 'n';
-		kvm_info("Hyp mode (%cVHE) initialized successfully\n", mode);
-	}
+	kvm_info("%s%sVHE mode initialized successfully\n",
+		 in_hyp_mode ? "" : (is_protected_kvm_enabled() ?
+				     "Protected " : "Hyp "),
+		 in_hyp_mode ? "" : (cpus_have_final_cap(ARM64_KVM_HVHE) ?
+				     "h" : "n"));

	/*
	 * FIXME: Do something reasonable if kvm_init() fails after pKVM
@@ -154,7 +154,8 @@ void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt, false);

-	__flush_s2_tlb_range_op(ipas2e1is, start, pages, stride, 0);
+	__flush_s2_tlb_range_op(ipas2e1is, start, pages, stride,
+				TLBI_TTL_UNKNOWN);

	dsb(ish);
	__tlbi(vmalle1is);
@@ -528,7 +528,7 @@ static int hyp_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,

		kvm_clear_pte(ctx->ptep);
		dsb(ishst);
-		__tlbi_level(vae2is, __TLBI_VADDR(ctx->addr, 0), ctx->level);
+		__tlbi_level(vae2is, __TLBI_VADDR(ctx->addr, 0), TLBI_TTL_UNKNOWN);
	} else {
		if (ctx->end - ctx->addr < granule)
			return -EINVAL;
@@ -843,12 +843,15 @@ static bool stage2_try_break_pte(const struct kvm_pgtable_visit_ctx *ctx,
		 * Perform the appropriate TLB invalidation based on the
		 * evicted pte value (if any).
		 */
-		if (kvm_pte_table(ctx->old, ctx->level))
-			kvm_tlb_flush_vmid_range(mmu, ctx->addr,
-						kvm_granule_size(ctx->level));
-		else if (kvm_pte_valid(ctx->old))
+		if (kvm_pte_table(ctx->old, ctx->level)) {
+			u64 size = kvm_granule_size(ctx->level);
+			u64 addr = ALIGN_DOWN(ctx->addr, size);
+
+			kvm_tlb_flush_vmid_range(mmu, addr, size);
+		} else if (kvm_pte_valid(ctx->old)) {
			kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu,
				     ctx->addr, ctx->level);
+		}
	}

	if (stage2_pte_is_counted(ctx->old))
@@ -896,9 +899,13 @@ static void stage2_unmap_put_pte(const struct kvm_pgtable_visit_ctx *ctx,
	if (kvm_pte_valid(ctx->old)) {
		kvm_clear_pte(ctx->ptep);

-		if (!stage2_unmap_defer_tlb_flush(pgt))
-			kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu,
-				     ctx->addr, ctx->level);
+		if (kvm_pte_table(ctx->old, ctx->level)) {
+			kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr,
+				     TLBI_TTL_UNKNOWN);
+		} else if (!stage2_unmap_defer_tlb_flush(pgt)) {
+			kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr,
+				     ctx->level);
+		}
	}

	mm_ops->put_page(ctx->ptep);
@@ -171,7 +171,8 @@ void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

-	__flush_s2_tlb_range_op(ipas2e1is, start, pages, stride, 0);
+	__flush_s2_tlb_range_op(ipas2e1is, start, pages, stride,
+				TLBI_TTL_UNKNOWN);

	dsb(ish);
	__tlbi(vmalle1is);
@@ -1637,7 +1637,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);

-	if (esr_fsc_is_permission_fault(esr)) {
+	if (esr_fsc_is_translation_fault(esr)) {
		/* Beyond sanitised PARange (which is the IPA limit) */
		if (fault_ipa >= BIT_ULL(get_kvm_ipa_limit())) {
			kvm_inject_size_fault(vcpu);
@@ -619,15 +619,6 @@ config MACH_EYEQ5

	bool

-config FIT_IMAGE_FDT_EPM5
-	bool "Include FDT for Mobileye EyeQ5 development platforms"
-	depends on MACH_EYEQ5
-	default n
-	help
-	  Enable this to include the FDT for the EyeQ5 development platforms
-	  from Mobileye in the FIT kernel image.
-	  This requires u-boot on the platform.
-
 config MACH_NINTENDO64
	bool "Nintendo 64 console"
	select CEVT_R4K
@@ -1011,6 +1002,15 @@ config CAVIUM_OCTEON_SOC

 endchoice

+config FIT_IMAGE_FDT_EPM5
+	bool "Include FDT for Mobileye EyeQ5 development platforms"
+	depends on MACH_EYEQ5
+	default n
+	help
+	  Enable this to include the FDT for the EyeQ5 development platforms
+	  from Mobileye in the FIT kernel image.
+	  This requires u-boot on the platform.
+
 source "arch/mips/alchemy/Kconfig"
 source "arch/mips/ath25/Kconfig"
 source "arch/mips/ath79/Kconfig"
@@ -137,11 +137,21 @@ static void aplic_write_pending(struct aplic *aplic, u32 irq, bool pending)
	raw_spin_lock_irqsave(&irqd->lock, flags);

	sm = irqd->sourcecfg & APLIC_SOURCECFG_SM_MASK;
-	if (!pending &&
-	    ((sm == APLIC_SOURCECFG_SM_LEVEL_HIGH) ||
-	     (sm == APLIC_SOURCECFG_SM_LEVEL_LOW)))
+	if (sm == APLIC_SOURCECFG_SM_INACTIVE)
		goto skip_write_pending;

+	if (sm == APLIC_SOURCECFG_SM_LEVEL_HIGH ||
+	    sm == APLIC_SOURCECFG_SM_LEVEL_LOW) {
+		if (!pending)
+			goto skip_write_pending;
+		if ((irqd->state & APLIC_IRQ_STATE_INPUT) &&
+		    sm == APLIC_SOURCECFG_SM_LEVEL_LOW)
+			goto skip_write_pending;
+		if (!(irqd->state & APLIC_IRQ_STATE_INPUT) &&
+		    sm == APLIC_SOURCECFG_SM_LEVEL_HIGH)
+			goto skip_write_pending;
+	}
+
	if (pending)
		irqd->state |= APLIC_IRQ_STATE_PENDING;
	else
@@ -187,16 +197,31 @@ static void aplic_write_enabled(struct aplic *aplic, u32 irq, bool enabled)

 static bool aplic_read_input(struct aplic *aplic, u32 irq)
 {
-	bool ret;
-	unsigned long flags;
+	u32 sourcecfg, sm, raw_input, irq_inverted;
	struct aplic_irq *irqd;
+	unsigned long flags;
+	bool ret = false;

	if (!irq || aplic->nr_irqs <= irq)
		return false;
	irqd = &aplic->irqs[irq];

	raw_spin_lock_irqsave(&irqd->lock, flags);
-	ret = (irqd->state & APLIC_IRQ_STATE_INPUT) ? true : false;
+
+	sourcecfg = irqd->sourcecfg;
+	if (sourcecfg & APLIC_SOURCECFG_D)
+		goto skip;
+
+	sm = sourcecfg & APLIC_SOURCECFG_SM_MASK;
+	if (sm == APLIC_SOURCECFG_SM_INACTIVE)
+		goto skip;
+
+	raw_input = (irqd->state & APLIC_IRQ_STATE_INPUT) ? 1 : 0;
+	irq_inverted = (sm == APLIC_SOURCECFG_SM_LEVEL_LOW ||
+			sm == APLIC_SOURCECFG_SM_EDGE_FALL) ? 1 : 0;
+	ret = !!(raw_input ^ irq_inverted);
+
+skip:
	raw_spin_unlock_irqrestore(&irqd->lock, flags);

	return ret;
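The rewritten aplic_read_input() above boils down to XOR-ing the raw wire state with an "inverted sense" flag for level-low and falling-edge sources. A standalone sketch of just that polarity step (the enum values are illustrative, not the driver's):

    #include <stdbool.h>
    #include <stdio.h>

    enum sm { SM_LEVEL_HIGH, SM_LEVEL_LOW, SM_EDGE_RISE, SM_EDGE_FALL };

    static bool logical_input(bool raw_input, enum sm sm)
    {
            /* Active-low/falling-edge sources invert the wire state. */
            bool inverted = (sm == SM_LEVEL_LOW || sm == SM_EDGE_FALL);

            return raw_input ^ inverted;
    }

    int main(void)
    {
            printf("%d\n", logical_input(false, SM_LEVEL_LOW));   /* 1 */
            printf("%d\n", logical_input(true, SM_LEVEL_HIGH));   /* 1 */
            return 0;
    }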
@@ -986,7 +986,7 @@ static int copy_isa_ext_reg_indices(const struct kvm_vcpu *vcpu,

 static inline unsigned long num_isa_ext_regs(const struct kvm_vcpu *vcpu)
 {
-	return copy_isa_ext_reg_indices(vcpu, NULL);;
+	return copy_isa_ext_reg_indices(vcpu, NULL);
 }

 static int copy_sbi_ext_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
@@ -28,7 +28,7 @@ obj-y += net/

 obj-$(CONFIG_KEXEC_FILE) += purgatory/

-obj-y += virt/svm/
+obj-y += virt/

 # for cleaning
 subdir- += boot tools
@@ -2439,6 +2439,8 @@ config USE_X86_SEG_SUPPORT
	# with named address spaces - see GCC PR sanitizer/111736.
	#
	depends on !KASAN
+	# -fsanitize=thread (KCSAN) is also incompatible.
+	depends on !KCSAN

 config CC_HAS_SLS
	def_bool $(cc-option,-mharden-sls=all)
@@ -251,8 +251,6 @@ archheaders:

 libs-y += arch/x86/lib/

-core-y += arch/x86/virt/
-
 # drivers-y are linked after core-y
 drivers-$(CONFIG_MATH_EMULATION) += arch/x86/math-emu/
 drivers-$(CONFIG_PCI) += arch/x86/pci/
@@ -15,10 +15,12 @@
  */

 #include <linux/linkage.h>
+#include <asm/asm-offsets.h>
 #include <asm/msr.h>
 #include <asm/page_types.h>
 #include <asm/processor-flags.h>
 #include <asm/segment.h>
+#include <asm/setup.h>

	.code64
	.text
@@ -149,6 +151,7 @@ SYM_FUNC_END(__efi64_thunk)
 SYM_FUNC_START(efi32_stub_entry)
	call	1f
 1:	popl	%ecx
+	leal	(efi32_boot_args - 1b)(%ecx), %ebx

	/* Clear BSS */
	xorl	%eax, %eax
@@ -163,6 +166,7 @@ SYM_FUNC_START(efi32_stub_entry)
	popl	%ecx
	popl	%edx
	popl	%esi
+	movl	%esi, 8(%ebx)
	jmp	efi32_entry
 SYM_FUNC_END(efi32_stub_entry)
 #endif
@@ -239,8 +243,6 @@ SYM_FUNC_END(efi_enter32)
  *
  * Arguments:	%ecx	image handle
  *		%edx	EFI system table pointer
- *		%esi	struct bootparams pointer (or NULL when not using
- *			the EFI handover protocol)
  *
  * Since this is the point of no return for ordinary execution, no registers
  * are considered live except for the function parameters. [Note that the EFI
@@ -266,9 +268,18 @@ SYM_FUNC_START_LOCAL(efi32_entry)
	leal	(efi32_boot_args - 1b)(%ebx), %ebx
	movl	%ecx, 0(%ebx)
	movl	%edx, 4(%ebx)
-	movl	%esi, 8(%ebx)
	movb	$0x0, 12(%ebx)		// efi_is64

+	/*
+	 * Allocate some memory for a temporary struct boot_params, which only
+	 * needs the minimal pieces that startup_32() relies on.
+	 */
+	subl	$PARAM_SIZE, %esp
+	movl	%esp, %esi
+	movl	$PAGE_SIZE, BP_kernel_alignment(%esi)
+	movl	$_end - 1b, BP_init_size(%esi)
+	subl	$startup_32 - 1b, BP_init_size(%esi)
+
	/* Disable paging */
	movl	%cr0, %eax
	btrl	$X86_CR0_PG_BIT, %eax
@@ -294,8 +305,7 @@ SYM_FUNC_START(efi32_pe_entry)

	movl	8(%ebp), %ecx		// image_handle
	movl	12(%ebp), %edx		// sys_table
-	xorl	%esi, %esi
-	jmp	efi32_entry		// pass %ecx, %edx, %esi
+	jmp	efi32_entry		// pass %ecx, %edx
					// no other registers remain live

 2:	popl	%edi			// restore callee-save registers
@@ -41,6 +41,7 @@ obj-$(CONFIG_X86_X32_ABI) += vdso-image-x32.o
 obj-$(CONFIG_COMPAT_32) += vdso-image-32.o vdso32-setup.o

 OBJECT_FILES_NON_STANDARD_vdso-image-32.o	:= n
+OBJECT_FILES_NON_STANDARD_vdso-image-x32.o	:= n
 OBJECT_FILES_NON_STANDARD_vdso-image-64.o	:= n
 OBJECT_FILES_NON_STANDARD_vdso32-setup.o	:= n

@@ -250,7 +250,7 @@ static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
 /*
  * AMD Performance Monitor Family 17h and later:
  */
-static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
+static const u64 amd_zen1_perfmon_event_map[PERF_COUNT_HW_MAX] =
 {
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
@@ -262,10 +262,39 @@ static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= 0x0187,
 };

+static const u64 amd_zen2_perfmon_event_map[PERF_COUNT_HW_MAX] =
+{
+	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
+	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
+	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0xff60,
+	[PERF_COUNT_HW_CACHE_MISSES]		= 0x0964,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c2,
+	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c3,
+	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= 0x00a9,
+};
+
+static const u64 amd_zen4_perfmon_event_map[PERF_COUNT_HW_MAX] =
+{
+	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
+	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
+	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0xff60,
+	[PERF_COUNT_HW_CACHE_MISSES]		= 0x0964,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c2,
+	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c3,
+	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= 0x00a9,
+	[PERF_COUNT_HW_REF_CPU_CYCLES]		= 0x100000120,
+};
+
 static u64 amd_pmu_event_map(int hw_event)
 {
-	if (boot_cpu_data.x86 >= 0x17)
-		return amd_f17h_perfmon_event_map[hw_event];
+	if (cpu_feature_enabled(X86_FEATURE_ZEN4) || boot_cpu_data.x86 >= 0x1a)
+		return amd_zen4_perfmon_event_map[hw_event];
+
+	if (cpu_feature_enabled(X86_FEATURE_ZEN2) || boot_cpu_data.x86 >= 0x19)
+		return amd_zen2_perfmon_event_map[hw_event];
+
+	if (cpu_feature_enabled(X86_FEATURE_ZEN1))
+		return amd_zen1_perfmon_event_map[hw_event];

	return amd_perfmon_event_map[hw_event];
 }
@@ -904,8 +933,8 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
	if (!status)
		goto done;

-	/* Read branch records before unfreezing */
-	if (status & GLOBAL_STATUS_LBRS_FROZEN) {
+	/* Read branch records */
+	if (x86_pmu.lbr_nr) {
		amd_pmu_lbr_read();
		status &= ~GLOBAL_STATUS_LBRS_FROZEN;
	}
@@ -402,10 +402,12 @@ void amd_pmu_lbr_enable_all(void)
		wrmsrl(MSR_AMD64_LBR_SELECT, lbr_select);
	}

-	rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
-	rdmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg);
+	if (cpu_feature_enabled(X86_FEATURE_AMD_LBR_PMC_FREEZE)) {
+		rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
+		wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
+	}

-	wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
+	rdmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg);
	wrmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg | DBG_EXTN_CFG_LBRV2EN);
 }

@@ -418,10 +420,12 @@ void amd_pmu_lbr_disable_all(void)
		return;

	rdmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg);
-	rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
-
	wrmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg & ~DBG_EXTN_CFG_LBRV2EN);
-	wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl & ~DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
+
+	if (cpu_feature_enabled(X86_FEATURE_AMD_LBR_PMC_FREEZE)) {
+		rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
+		wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl & ~DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
+	}
 }

 __init int amd_pmu_lbr_init(void)
@@ -117,7 +117,7 @@ extern void callthunks_patch_builtin_calls(void);
 extern void callthunks_patch_module_calls(struct callthunk_sites *sites,
					  struct module *mod);
 extern void *callthunks_translate_call_dest(void *dest);
-extern int x86_call_depth_emit_accounting(u8 **pprog, void *func);
+extern int x86_call_depth_emit_accounting(u8 **pprog, void *func, void *ip);
 #else
 static __always_inline void callthunks_patch_builtin_calls(void) {}
 static __always_inline void
@@ -128,7 +128,7 @@ static __always_inline void *callthunks_translate_call_dest(void *dest)
	return dest;
 }
 static __always_inline int x86_call_depth_emit_accounting(u8 **pprog,
-							   void *func)
+							   void *func, void *ip)
 {
	return 0;
 }
@@ -14,6 +14,7 @@
 #include <asm/asm.h>
 #include <asm/fred.h>
 #include <asm/gsseg.h>
+#include <asm/nospec-branch.h>

 #ifndef CONFIG_X86_CMPXCHG64
 extern void cmpxchg8b_emu(void);
@@ -91,8 +91,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
	 CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 18, feature_bit) ||	\
	 CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 19, feature_bit) ||	\
	 CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 20, feature_bit) ||	\
+	 CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 21, feature_bit) ||	\
	 REQUIRED_MASK_CHECK					  ||	\
-	 BUILD_BUG_ON_ZERO(NCAPINTS != 21))
+	 BUILD_BUG_ON_ZERO(NCAPINTS != 22))

 #define DISABLED_MASK_BIT_SET(feature_bit)				\
	( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK,  0, feature_bit) ||	\
@@ -116,8 +117,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
	 CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 18, feature_bit) ||	\
	 CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 19, feature_bit) ||	\
	 CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 20, feature_bit) ||	\
+	 CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 21, feature_bit) ||	\
	 DISABLED_MASK_CHECK					  ||	\
-	 BUILD_BUG_ON_ZERO(NCAPINTS != 21))
+	 BUILD_BUG_ON_ZERO(NCAPINTS != 22))

 #define cpu_has(c, bit)							\
	(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 :	\
@@ -13,7 +13,7 @@
 /*
  * Defines x86 CPU feature bits
  */
-#define NCAPINTS	21	/* N 32-bit words worth of info */
+#define NCAPINTS	22	/* N 32-bit words worth of info */
 #define NBUGINTS	2	/* N 32-bit bug flags */

 /*
@@ -459,6 +459,14 @@
 #define X86_FEATURE_IBPB_BRTYPE		(20*32+28) /* "" MSR_PRED_CMD[IBPB] flushes all branch type predictions */
 #define X86_FEATURE_SRSO_NO		(20*32+29) /* "" CPU is not affected by SRSO */

+/*
+ * Extended auxiliary flags: Linux defined - for features scattered in various
+ * CPUID levels like 0x80000022, etc.
+ *
+ * Reuse free bits when adding new feature flags!
+ */
+#define X86_FEATURE_AMD_LBR_PMC_FREEZE	(21*32+ 0) /* AMD LBR and PMC Freeze */
+
 /*
  * BUG word(s)
  */
@@ -155,6 +155,7 @@
 #define DISABLED_MASK18	(DISABLE_IBT)
 #define DISABLED_MASK19	(DISABLE_SEV_SNP)
 #define DISABLED_MASK20	0
-#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21)
+#define DISABLED_MASK21	0
+#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 22)

 #endif /* _ASM_X86_DISABLED_FEATURES_H */
@@ -262,11 +262,20 @@
 .Lskip_rsb_\@:
 .endm

+/*
+ * The CALL to srso_alias_untrain_ret() must be patched in directly at
+ * the spot where untraining must be done, ie., srso_alias_untrain_ret()
+ * must be the target of a CALL instruction instead of indirectly
+ * jumping to a wrapper which then calls it. Therefore, this macro is
+ * called outside of __UNTRAIN_RET below, for the time being, before the
+ * kernel can support nested alternatives with arbitrary nesting.
+ */
+.macro CALL_UNTRAIN_RET
 #if defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_MITIGATION_SRSO)
-#define CALL_UNTRAIN_RET	"call entry_untrain_ret"
-#else
-#define CALL_UNTRAIN_RET	""
+	ALTERNATIVE_2 "", "call entry_untrain_ret", X86_FEATURE_UNRET, \
+		      "call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
 #endif
+.endm

 /*
  * Mitigate RETBleed for AMD/Hygon Zen uarch. Requires KERNEL CR3 because the
@@ -282,8 +291,8 @@
 .macro __UNTRAIN_RET ibpb_feature, call_depth_insns
 #if defined(CONFIG_MITIGATION_RETHUNK) || defined(CONFIG_MITIGATION_IBPB_ENTRY)
	VALIDATE_UNRET_END
-	ALTERNATIVE_3 "",						\
-		      CALL_UNTRAIN_RET, X86_FEATURE_UNRET,		\
+	CALL_UNTRAIN_RET
+	ALTERNATIVE_2 "",						\
		      "call entry_ibpb", \ibpb_feature,			\
		      __stringify(\call_depth_insns), X86_FEATURE_CALL_DEPTH
 #endif
@@ -342,6 +351,8 @@ extern void retbleed_return_thunk(void);
 static inline void retbleed_return_thunk(void) {}
 #endif

+extern void srso_alias_untrain_ret(void);
+
 #ifdef CONFIG_MITIGATION_SRSO
 extern void srso_return_thunk(void);
 extern void srso_alias_return_thunk(void);
@@ -99,6 +99,7 @@
 #define REQUIRED_MASK18	0
 #define REQUIRED_MASK19	0
 #define REQUIRED_MASK20	0
-#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21)
+#define REQUIRED_MASK21	0
+#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 22)

 #endif /* _ASM_X86_REQUIRED_FEATURES_H */
@@ -218,12 +218,12 @@ void early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
			unsigned long npages);
 void early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
			unsigned long npages);
-void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op);
 void snp_set_memory_shared(unsigned long vaddr, unsigned long npages);
 void snp_set_memory_private(unsigned long vaddr, unsigned long npages);
 void snp_set_wakeup_secondary_cpu(void);
 bool snp_init(struct boot_params *bp);
 void __noreturn snp_abort(void);
+void snp_dmi_setup(void);
 int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio);
 void snp_accept_memory(phys_addr_t start, phys_addr_t end);
 u64 snp_get_unsupported_features(u64 status);
@@ -244,12 +244,12 @@ static inline void __init
 early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr, unsigned long npages) { }
 static inline void __init
 early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr, unsigned long npages) { }
-static inline void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op) { }
 static inline void snp_set_memory_shared(unsigned long vaddr, unsigned long npages) { }
 static inline void snp_set_memory_private(unsigned long vaddr, unsigned long npages) { }
 static inline void snp_set_wakeup_secondary_cpu(void) { }
 static inline bool snp_init(struct boot_params *bp) { return false; }
 static inline void snp_abort(void) { }
+static inline void snp_dmi_setup(void) { }
 static inline int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio)
 {
	return -ENOTTY;
@@ -30,12 +30,13 @@ struct x86_init_mpparse {
  * @reserve_resources:		reserve the standard resources for the
  *				platform
  * @memory_setup:		platform specific memory setup
- *
+ * @dmi_setup:			platform specific DMI setup
  */
 struct x86_init_resources {
	void (*probe_roms)(void);
	void (*reserve_resources)(void);
	char *(*memory_setup)(void);
+	void (*dmi_setup)(void);
 };

 /**
@@ -694,6 +694,7 @@ enum sev_cmd_id {

 struct kvm_sev_cmd {
	__u32 id;
+	__u32 pad0;
	__u64 data;
	__u32 error;
	__u32 sev_fd;
@@ -704,28 +705,35 @@ struct kvm_sev_launch_start {
	__u32 policy;
	__u64 dh_uaddr;
	__u32 dh_len;
+	__u32 pad0;
	__u64 session_uaddr;
	__u32 session_len;
+	__u32 pad1;
 };

 struct kvm_sev_launch_update_data {
	__u64 uaddr;
	__u32 len;
+	__u32 pad0;
 };


 struct kvm_sev_launch_secret {
	__u64 hdr_uaddr;
	__u32 hdr_len;
+	__u32 pad0;
	__u64 guest_uaddr;
	__u32 guest_len;
+	__u32 pad1;
	__u64 trans_uaddr;
	__u32 trans_len;
+	__u32 pad2;
 };

 struct kvm_sev_launch_measure {
	__u64 uaddr;
	__u32 len;
+	__u32 pad0;
 };

 struct kvm_sev_guest_status {
@@ -738,33 +746,43 @@ struct kvm_sev_dbg {
	__u64 src_uaddr;
	__u64 dst_uaddr;
	__u32 len;
+	__u32 pad0;
 };

 struct kvm_sev_attestation_report {
	__u8 mnonce[16];
	__u64 uaddr;
	__u32 len;
+	__u32 pad0;
 };

 struct kvm_sev_send_start {
	__u32 policy;
+	__u32 pad0;
	__u64 pdh_cert_uaddr;
	__u32 pdh_cert_len;
+	__u32 pad1;
	__u64 plat_certs_uaddr;
	__u32 plat_certs_len;
+	__u32 pad2;
	__u64 amd_certs_uaddr;
	__u32 amd_certs_len;
+	__u32 pad3;
	__u64 session_uaddr;
	__u32 session_len;
+	__u32 pad4;
 };

 struct kvm_sev_send_update_data {
	__u64 hdr_uaddr;
	__u32 hdr_len;
+	__u32 pad0;
	__u64 guest_uaddr;
	__u32 guest_len;
+	__u32 pad1;
	__u64 trans_uaddr;
	__u32 trans_len;
+	__u32 pad2;
 };

 struct kvm_sev_receive_start {
@@ -772,17 +790,22 @@ struct kvm_sev_receive_start {
	__u32 policy;
	__u64 pdh_uaddr;
	__u32 pdh_len;
+	__u32 pad0;
	__u64 session_uaddr;
	__u32 session_len;
+	__u32 pad1;
 };

 struct kvm_sev_receive_update_data {
	__u64 hdr_uaddr;
	__u32 hdr_len;
+	__u32 pad0;
	__u64 guest_uaddr;
	__u32 guest_len;
+	__u32 pad1;
	__u64 trans_uaddr;
	__u32 trans_len;
+	__u32 pad2;
 };

 #define KVM_X2APIC_API_USE_32BIT_IDS	(1ULL << 0)
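The pad fields added above make the compiler's implicit alignment holes explicit, so the uapi layout stays identical with or without them. A quick self-contained check of that claim (struct names are made up; the layout matches common 64-bit ABIs):

    #include <stdint.h>
    #include <stddef.h>
    #include <assert.h>

    struct implicit_hole { uint32_t id; uint64_t data; };
    struct explicit_pad { uint32_t id; uint32_t pad0; uint64_t data; };

    int main(void)
    {
            /* The u64 member lands at offset 8 either way; the explicit
             * pad just documents (and pins down) the hole.
             */
            assert(offsetof(struct implicit_hole, data) ==
                   offsetof(struct explicit_pad, data));
            assert(sizeof(struct implicit_hole) == sizeof(struct explicit_pad));
            return 0;
    }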
@@ -142,7 +142,6 @@ struct kvm_vcpu_pv_apf_data {
	__u32 token;

	__u8 pad[56];
-	__u32 enabled;
 };

 #define KVM_PV_EOI_BIT 0
@@ -314,7 +314,7 @@ static bool is_callthunk(void *addr)
	return !bcmp(pad, insn_buff, tmpl_size);
 }

-int x86_call_depth_emit_accounting(u8 **pprog, void *func)
+int x86_call_depth_emit_accounting(u8 **pprog, void *func, void *ip)
 {
	unsigned int tmpl_size = SKL_TMPL_SIZE;
	u8 insn_buff[MAX_PATCH_LEN];
@@ -327,7 +327,7 @@ int x86_call_depth_emit_accounting(u8 **pprog, void *func)
		return 0;

	memcpy(insn_buff, skl_call_thunk_template, tmpl_size);
-	apply_relocation(insn_buff, tmpl_size, *pprog,
+	apply_relocation(insn_buff, tmpl_size, ip,
			 skl_call_thunk_template, tmpl_size);

	memcpy(*pprog, insn_buff, tmpl_size);
@@ -49,6 +49,7 @@ static const struct cpuid_bit cpuid_bits[] = {
	{ X86_FEATURE_BMEC,		CPUID_EBX,  3, 0x80000020, 0 },
	{ X86_FEATURE_PERFMON_V2,	CPUID_EAX,  0, 0x80000022, 0 },
	{ X86_FEATURE_AMD_LBR_V2,	CPUID_EAX,  1, 0x80000022, 0 },
+	{ X86_FEATURE_AMD_LBR_PMC_FREEZE,	CPUID_EAX,  2, 0x80000022, 0 },
	{ 0, 0, 0, 0, 0 }
 };

@@ -2,6 +2,7 @@
 /*
  * EISA specific code
  */
+#include <linux/cc_platform.h>
 #include <linux/ioport.h>
 #include <linux/eisa.h>
 #include <linux/io.h>
@@ -12,7 +13,7 @@ static __init int eisa_bus_probe(void)
 {
	void __iomem *p;

-	if (xen_pv_domain() && !xen_initial_domain())
+	if ((xen_pv_domain() && !xen_initial_domain()) || cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		return 0;

	p = ioremap(0x0FFFD9, 4);
@@ -65,6 +65,7 @@ static int __init parse_no_stealacc(char *arg)

 early_param("no-steal-acc", parse_no_stealacc);

+static DEFINE_PER_CPU_READ_MOSTLY(bool, async_pf_enabled);
 static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
 DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible;
 static int has_steal_clock = 0;
@@ -244,7 +245,7 @@ noinstr u32 kvm_read_and_reset_apf_flags(void)
 {
	u32 flags = 0;

-	if (__this_cpu_read(apf_reason.enabled)) {
+	if (__this_cpu_read(async_pf_enabled)) {
		flags = __this_cpu_read(apf_reason.flags);
		__this_cpu_write(apf_reason.flags, 0);
	}
@@ -295,7 +296,7 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_asyncpf_interrupt)

	inc_irq_stat(irq_hv_callback_count);

-	if (__this_cpu_read(apf_reason.enabled)) {
+	if (__this_cpu_read(async_pf_enabled)) {
		token = __this_cpu_read(apf_reason.token);
		kvm_async_pf_task_wake(token);
		__this_cpu_write(apf_reason.token, 0);
@@ -362,7 +363,7 @@ static void kvm_guest_cpu_init(void)
			wrmsrl(MSR_KVM_ASYNC_PF_INT, HYPERVISOR_CALLBACK_VECTOR);

		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
-		__this_cpu_write(apf_reason.enabled, 1);
+		__this_cpu_write(async_pf_enabled, true);
		pr_debug("setup async PF for cpu %d\n", smp_processor_id());
	}

@@ -383,11 +384,11 @@ static void kvm_guest_cpu_init(void)

 static void kvm_pv_disable_apf(void)
 {
-	if (!__this_cpu_read(apf_reason.enabled))
+	if (!__this_cpu_read(async_pf_enabled))
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
-	__this_cpu_write(apf_reason.enabled, 0);
+	__this_cpu_write(async_pf_enabled, false);

	pr_debug("disable async PF for cpu %d\n", smp_processor_id());
 }
@@ -580,7 +580,7 @@ EXPORT_SYMBOL_GPL(asm_exc_nmi_kvm_vmx);

 static char *nmi_check_stall_msg[] = {
 /*									*/
-/* +--------- nsp->idt_seq_snap & 0x1: CPU is in NMI handler.		*/
+/* +--------- nmi_seq & 0x1: CPU is currently in NMI handler.		*/
 /* | +------ cpu_is_offline(cpu)					*/
 /* | | +--- nsp->idt_calls_snap != atomic_long_read(&nsp->idt_calls):	*/
 /* | | |     NMI handler has been invoked.				*/
@@ -628,22 +628,26 @@ void nmi_backtrace_stall_check(const struct cpumask *btp)
		nmi_seq = READ_ONCE(nsp->idt_nmi_seq);
		if (nsp->idt_nmi_seq_snap + 1 == nmi_seq && (nmi_seq & 0x1)) {
			msgp = "CPU entered NMI handler function, but has not exited";
-		} else if ((nsp->idt_nmi_seq_snap & 0x1) != (nmi_seq & 0x1)) {
-			msgp = "CPU is handling NMIs";
-		} else {
-			idx = ((nsp->idt_seq_snap & 0x1) << 2) |
+		} else if (nsp->idt_nmi_seq_snap == nmi_seq ||
+			   nsp->idt_nmi_seq_snap + 1 == nmi_seq) {
+			idx = ((nmi_seq & 0x1) << 2) |
			      (cpu_is_offline(cpu) << 1) |
			      (nsp->idt_calls_snap != atomic_long_read(&nsp->idt_calls));
			msgp = nmi_check_stall_msg[idx];
			if (nsp->idt_ignored_snap != READ_ONCE(nsp->idt_ignored) && (idx & 0x1))
				modp = ", but OK because ignore_nmis was set";
-			if (nmi_seq & 0x1)
-				msghp = " (CPU currently in NMI handler function)";
-			else if (nsp->idt_nmi_seq_snap + 1 == nmi_seq)
+			if (nsp->idt_nmi_seq_snap + 1 == nmi_seq)
				msghp = " (CPU exited one NMI handler function)";
+			else if (nmi_seq & 0x1)
+				msghp = " (CPU currently in NMI handler function)";
			else
				msghp = " (CPU was never in an NMI handler function)";
+		} else {
+			msgp = "CPU is handling NMIs";
		}
-		pr_alert("%s: CPU %d: %s%s%s, last activity: %lu jiffies ago.\n",
-			 __func__, cpu, msgp, modp, msghp, j - READ_ONCE(nsp->recv_jiffies));
+		pr_alert("%s: CPU %d: %s%s%s\n", __func__, cpu, msgp, modp, msghp);
+		pr_alert("%s: last activity: %lu jiffies ago.\n",
+			 __func__, j - READ_ONCE(nsp->recv_jiffies));
	}
 }

@@ -203,16 +203,6 @@ void __init probe_roms(void)
	unsigned char c;
	int i;

-	/*
-	 * The ROM memory range is not part of the e820 table and is therefore not
-	 * pre-validated by BIOS. The kernel page table maps the ROM region as encrypted
-	 * memory, and SNP requires encrypted memory to be validated before access.
-	 * Do that here.
-	 */
-	snp_prep_memory(video_rom_resource.start,
-			((system_rom_resource.end + 1) - video_rom_resource.start),
-			SNP_PAGE_STATE_PRIVATE);
-
	/* video rom */
	upper = adapter_rom_resources[0].start;
	for (start = video_rom_resource.start; start < upper; start += 2048) {
@@ -9,7 +9,6 @@
 #include <linux/console.h>
 #include <linux/crash_dump.h>
 #include <linux/dma-map-ops.h>
-#include <linux/dmi.h>
 #include <linux/efi.h>
 #include <linux/ima.h>
 #include <linux/init_ohci1394_dma.h>
@@ -902,7 +901,7 @@ void __init setup_arch(char **cmdline_p)
	efi_init();

	reserve_ibft_region();
-	dmi_setup();
+	x86_init.resources.dmi_setup();

	/*
	 * VMware detection requires dmi to be available, so this
@@ -23,6 +23,7 @@
 #include <linux/platform_device.h>
 #include <linux/io.h>
 #include <linux/psp-sev.h>
+#include <linux/dmi.h>
 #include <uapi/linux/sev-guest.h>

 #include <asm/init.h>
@@ -795,21 +796,6 @@ void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr
	early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_SHARED);
 }

-void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op)
-{
-	unsigned long vaddr, npages;
-
-	vaddr = (unsigned long)__va(paddr);
-	npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
-
-	if (op == SNP_PAGE_STATE_PRIVATE)
-		early_snp_set_memory_private(vaddr, paddr, npages);
-	else if (op == SNP_PAGE_STATE_SHARED)
-		early_snp_set_memory_shared(vaddr, paddr, npages);
-	else
-		WARN(1, "invalid memory op %d\n", op);
-}
-
 static unsigned long __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr,
				       unsigned long vaddr_end, int op)
 {
@@ -2136,6 +2122,17 @@ void __head __noreturn snp_abort(void)
	sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
 }

+/*
+ * SEV-SNP guests should only execute dmi_setup() if EFI_CONFIG_TABLES are
+ * enabled, as the alternative (fallback) logic for DMI probing in the legacy
+ * ROM region can cause a crash since this region is not pre-validated.
+ */
+void __init snp_dmi_setup(void)
+{
+	if (efi_enabled(EFI_CONFIG_TABLES))
+		dmi_setup();
+}
+
 static void dump_cpuid_table(void)
 {
	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
@@ -3,6 +3,7 @@
  *
  *  For licencing details see kernel-base/COPYING
  */
+#include <linux/dmi.h>
 #include <linux/init.h>
 #include <linux/ioport.h>
 #include <linux/export.h>
@@ -66,6 +67,7 @@ struct x86_init_ops x86_init __initdata = {
		.probe_roms		= probe_roms,
		.reserve_resources	= reserve_standard_io_resources,
		.memory_setup		= e820__memory_setup_default,
+		.dmi_setup		= dmi_setup,
	},

	.mpparse = {
@ -189,15 +189,15 @@ static int kvm_cpuid_check_equal(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct kvm_hypervisor_cpuid kvm_get_hypervisor_cpuid(struct kvm_vcpu *vcpu,
|
||||
const char *sig)
|
||||
static struct kvm_hypervisor_cpuid __kvm_get_hypervisor_cpuid(struct kvm_cpuid_entry2 *entries,
|
||||
int nent, const char *sig)
|
||||
{
|
||||
struct kvm_hypervisor_cpuid cpuid = {};
|
||||
struct kvm_cpuid_entry2 *entry;
|
||||
u32 base;
|
||||
|
||||
for_each_possible_hypervisor_cpuid_base(base) {
|
||||
entry = kvm_find_cpuid_entry(vcpu, base);
|
||||
entry = cpuid_entry2_find(entries, nent, base, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
|
||||
|
||||
if (entry) {
|
||||
u32 signature[3];
|
||||
@ -217,22 +217,29 @@ static struct kvm_hypervisor_cpuid kvm_get_hypervisor_cpuid(struct kvm_vcpu *vcp
|
||||
return cpuid;
|
||||
}
|
||||
|
||||
static struct kvm_cpuid_entry2 *__kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu,
|
||||
struct kvm_cpuid_entry2 *entries, int nent)
|
||||
static struct kvm_hypervisor_cpuid kvm_get_hypervisor_cpuid(struct kvm_vcpu *vcpu,
|
||||
const char *sig)
|
||||
{
|
||||
return __kvm_get_hypervisor_cpuid(vcpu->arch.cpuid_entries,
|
||||
vcpu->arch.cpuid_nent, sig);
|
||||
}
|
||||
|
||||
static struct kvm_cpuid_entry2 *__kvm_find_kvm_cpuid_features(struct kvm_cpuid_entry2 *entries,
|
||||
int nent, u32 kvm_cpuid_base)
|
||||
{
|
||||
return cpuid_entry2_find(entries, nent, kvm_cpuid_base | KVM_CPUID_FEATURES,
|
||||
KVM_CPUID_INDEX_NOT_SIGNIFICANT);
|
||||
}
|
||||
|
||||
static struct kvm_cpuid_entry2 *kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
u32 base = vcpu->arch.kvm_cpuid.base;
|
||||
|
||||
if (!base)
|
||||
return NULL;
|
||||
|
||||
return cpuid_entry2_find(entries, nent, base | KVM_CPUID_FEATURES,
|
||||
KVM_CPUID_INDEX_NOT_SIGNIFICANT);
|
||||
}
|
||||
|
||||
static struct kvm_cpuid_entry2 *kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return __kvm_find_kvm_cpuid_features(vcpu, vcpu->arch.cpuid_entries,
|
||||
vcpu->arch.cpuid_nent);
|
||||
return __kvm_find_kvm_cpuid_features(vcpu->arch.cpuid_entries,
|
||||
vcpu->arch.cpuid_nent, base);
|
||||
}
|
||||
|
||||
void kvm_update_pv_runtime(struct kvm_vcpu *vcpu)
|
||||
@ -266,6 +273,7 @@ static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_e
|
||||
int nent)
|
||||
{
|
||||
struct kvm_cpuid_entry2 *best;
|
||||
struct kvm_hypervisor_cpuid kvm_cpuid;
|
||||
|
||||
best = cpuid_entry2_find(entries, nent, 1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
|
||||
if (best) {
|
||||
@ -292,10 +300,12 @@ static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_e
|
||||
cpuid_entry_has(best, X86_FEATURE_XSAVEC)))
|
||||
best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
|
||||
|
||||
best = __kvm_find_kvm_cpuid_features(vcpu, entries, nent);
|
||||
if (kvm_hlt_in_guest(vcpu->kvm) && best &&
|
||||
(best->eax & (1 << KVM_FEATURE_PV_UNHALT)))
|
||||
best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);
|
||||
kvm_cpuid = __kvm_get_hypervisor_cpuid(entries, nent, KVM_SIGNATURE);
|
||||
if (kvm_cpuid.base) {
|
||||
best = __kvm_find_kvm_cpuid_features(entries, nent, kvm_cpuid.base);
|
||||
if (kvm_hlt_in_guest(vcpu->kvm) && best)
|
||||
best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);
|
||||
}
|
||||
|
||||
if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
|
||||
best = cpuid_entry2_find(entries, nent, 0x1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
|
||||
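The hunk above splits the vCPU-based lookup from a raw (entries, nent) array walk so the runtime-update path can operate on a CPUID array that has not been committed to the vCPU yet. As a rough illustration of the array-scan-plus-signature-match idea, here is a compilable userspace sketch; the types, values, and helper names are invented for the example and are not the KVM API.

```c
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct cpuid_entry {
	uint32_t function;
	uint32_t eax, ebx, ecx, edx;
};

/* Works on a raw array, in the spirit of __kvm_get_hypervisor_cpuid(). */
static const struct cpuid_entry *find_entry(const struct cpuid_entry *entries,
					    int nent, uint32_t function)
{
	for (int i = 0; i < nent; i++)
		if (entries[i].function == function)
			return &entries[i];
	return NULL;
}

static int signature_matches(const struct cpuid_entry *e, const char *sig)
{
	uint32_t signature[3];

	if (!e)
		return 0;
	signature[0] = e->ebx;	/* CPUID returns the 12-byte signature */
	signature[1] = e->ecx;	/* split across ebx/ecx/edx */
	signature[2] = e->edx;
	return !memcmp(signature, sig, 12);
}

int main(void)
{
	/* "KVMKVMKVM\0\0\0" encoded little-endian, as CPUID reports it. */
	struct cpuid_entry entries[] = {
		{ .function = 0x40000000,
		  .ebx = 0x4b4d564b, .ecx = 0x564b4d56, .edx = 0x4d },
	};
	const struct cpuid_entry *e = find_entry(entries, 1, 0x40000000);

	printf("KVM signature found: %d\n",
	       signature_matches(e, "KVMKVMKVM\0\0\0"));
	return 0;
}
```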
@@ -84,9 +84,10 @@ struct enc_region {
 };
 
 /* Called with the sev_bitmap_lock held, or on shutdown  */
-static int sev_flush_asids(int min_asid, int max_asid)
+static int sev_flush_asids(unsigned int min_asid, unsigned int max_asid)
 {
-	int ret, asid, error = 0;
+	int ret, error = 0;
+	unsigned int asid;
 
 	/* Check if there are any ASIDs to reclaim before performing a flush */
 	asid = find_next_bit(sev_reclaim_asid_bitmap, nr_asids, min_asid);
@@ -116,7 +117,7 @@ static inline bool is_mirroring_enc_context(struct kvm *kvm)
 }
 
 /* Must be called with the sev_bitmap_lock held */
-static bool __sev_recycle_asids(int min_asid, int max_asid)
+static bool __sev_recycle_asids(unsigned int min_asid, unsigned int max_asid)
 {
 	if (sev_flush_asids(min_asid, max_asid))
 		return false;
@@ -143,8 +144,20 @@ static void sev_misc_cg_uncharge(struct kvm_sev_info *sev)
 
 static int sev_asid_new(struct kvm_sev_info *sev)
 {
-	int asid, min_asid, max_asid, ret;
+	/*
+	 * SEV-enabled guests must use asid from min_sev_asid to max_sev_asid.
+	 * SEV-ES-enabled guest can use from 1 to min_sev_asid - 1.
+	 * Note: min ASID can end up larger than the max if basic SEV support is
+	 * effectively disabled by disallowing use of ASIDs for SEV guests.
+	 */
+	unsigned int min_asid = sev->es_active ? 1 : min_sev_asid;
+	unsigned int max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
+	unsigned int asid;
 	bool retry = true;
+	int ret;
+
+	if (min_asid > max_asid)
+		return -ENOTTY;
 
 	WARN_ON(sev->misc_cg);
 	sev->misc_cg = get_current_misc_cg();
@@ -157,12 +170,6 @@ static int sev_asid_new(struct kvm_sev_info *sev)
 
 	mutex_lock(&sev_bitmap_lock);
 
-	/*
-	 * SEV-enabled guests must use asid from min_sev_asid to max_sev_asid.
-	 * SEV-ES-enabled guest can use from 1 to min_sev_asid - 1.
-	 */
-	min_asid = sev->es_active ? 1 : min_sev_asid;
-	max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
 again:
 	asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid);
 	if (asid > max_asid) {
@@ -179,7 +186,8 @@ again:
 
 	mutex_unlock(&sev_bitmap_lock);
 
-	return asid;
+	sev->asid = asid;
+	return 0;
 e_uncharge:
 	sev_misc_cg_uncharge(sev);
 	put_misc_cg(sev->misc_cg);
@@ -187,7 +195,7 @@ e_uncharge:
 	return ret;
 }
 
-static int sev_get_asid(struct kvm *kvm)
+static unsigned int sev_get_asid(struct kvm *kvm)
 {
 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
 
@@ -247,21 +255,19 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
 {
 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
 	struct sev_platform_init_args init_args = {0};
-	int asid, ret;
+	int ret;
 
 	if (kvm->created_vcpus)
 		return -EINVAL;
 
-	ret = -EBUSY;
 	if (unlikely(sev->active))
-		return ret;
+		return -EINVAL;
 
 	sev->active = true;
 	sev->es_active = argp->id == KVM_SEV_ES_INIT;
-	asid = sev_asid_new(sev);
-	if (asid < 0)
+	ret = sev_asid_new(sev);
+	if (ret)
 		goto e_no_asid;
-	sev->asid = asid;
 
 	init_args.probe = false;
 	ret = sev_platform_init(&init_args);
@@ -287,8 +293,8 @@ e_no_asid:
 
 static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
 {
+	unsigned int asid = sev_get_asid(kvm);
 	struct sev_data_activate activate;
-	int asid = sev_get_asid(kvm);
 	int ret;
 
 	/* activate ASID on the given handle */
@@ -2240,8 +2246,10 @@ void __init sev_hardware_setup(void)
 		goto out;
 	}
 
-	sev_asid_count = max_sev_asid - min_sev_asid + 1;
-	WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count));
+	if (min_sev_asid <= max_sev_asid) {
+		sev_asid_count = max_sev_asid - min_sev_asid + 1;
+		WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count));
+	}
 	sev_supported = true;
 
 	/* SEV-ES support requested? */
@@ -2272,7 +2280,9 @@ void __init sev_hardware_setup(void)
 out:
 	if (boot_cpu_has(X86_FEATURE_SEV))
 		pr_info("SEV %s (ASIDs %u - %u)\n",
-			sev_supported ? "enabled" : "disabled",
+			sev_supported ? min_sev_asid <= max_sev_asid ? "enabled" :
+								       "unusable" :
+								       "disabled",
 			min_sev_asid, max_sev_asid);
 	if (boot_cpu_has(X86_FEATURE_SEV_ES))
 		pr_info("SEV-ES %s (ASIDs %u - %u)\n",
@@ -2320,7 +2330,7 @@ int sev_cpu_init(struct svm_cpu_data *sd)
  */
 static void sev_flush_encrypted_page(struct kvm_vcpu *vcpu, void *va)
 {
-	int asid = to_kvm_svm(vcpu->kvm)->sev_info.asid;
+	unsigned int asid = sev_get_asid(vcpu->kvm);
 
 	/*
 	 * Note! The address must be a kernel address, as regular page walk
@@ -2638,7 +2648,7 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm)
 void pre_sev_run(struct vcpu_svm *svm, int cpu)
 {
 	struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
-	int asid = sev_get_asid(svm->vcpu.kvm);
+	unsigned int asid = sev_get_asid(svm->vcpu.kvm);
 
 	/* Assign the asid allocated with this SEV guest */
 	svm->asid = asid;
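The ASID changes above hinge on one invariant: SEV-ES guests draw ASIDs from 1..min_sev_asid-1, plain SEV guests from min_sev_asid..max_sev_asid, and an empty range (min greater than max) means that flavor is effectively disabled. A compilable userspace sketch of that range selection, with plain arrays standing in for the kernel bitmaps and made-up constants:

```c
#include <stdbool.h>
#include <stdio.h>

#define MIN_SEV_ASID 17	/* illustrative firmware-reported boundary */
#define MAX_SEV_ASID 64

static bool asid_used[MAX_SEV_ASID + 1];

static int asid_new(bool es_active)
{
	unsigned int min_asid = es_active ? 1 : MIN_SEV_ASID;
	unsigned int max_asid = es_active ? MIN_SEV_ASID - 1 : MAX_SEV_ASID;

	if (min_asid > max_asid)	/* empty range: flavor disabled */
		return -1;

	/* linear scan models find_next_zero_bit() over the ASID bitmap */
	for (unsigned int asid = min_asid; asid <= max_asid; asid++) {
		if (!asid_used[asid]) {
			asid_used[asid] = true;
			return (int)asid;
		}
	}
	return -1;	/* exhausted; the kernel would try to recycle */
}

int main(void)
{
	printf("first SEV-ES ASID: %d\n", asid_new(true));	/* 1 */
	printf("first SEV ASID:    %d\n", asid_new(false));	/* 17 */
	return 0;
}
```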
@@ -735,13 +735,13 @@ TRACE_EVENT(kvm_nested_intr_vmexit,
  * Tracepoint for nested #vmexit because of interrupt pending
  */
 TRACE_EVENT(kvm_invlpga,
-	    TP_PROTO(__u64 rip, int asid, u64 address),
+	    TP_PROTO(__u64 rip, unsigned int asid, u64 address),
 	    TP_ARGS(rip, asid, address),
 
 	TP_STRUCT__entry(
-		__field(	__u64,	rip	)
-		__field(	int,	asid	)
-		__field(	__u64,	address	)
+		__field(	__u64,		rip	)
+		__field(	unsigned int,	asid	)
+		__field(	__u64,		address	)
 	),
 
 	TP_fast_assign(
@@ -750,7 +750,7 @@ TRACE_EVENT(kvm_invlpga,
 		__entry->address = address;
 	),
 
-	TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx",
+	TP_printk("rip: 0x%016llx asid: %u address: 0x%016llx",
 		  __entry->rip, __entry->asid, __entry->address)
 );
 

@@ -163,6 +163,7 @@ SYM_CODE_START_NOALIGN(srso_alias_untrain_ret)
 	lfence
 	jmp srso_alias_return_thunk
 SYM_FUNC_END(srso_alias_untrain_ret)
+__EXPORT_THUNK(srso_alias_untrain_ret)
 .popsection
 
 .pushsection .text..__x86.rethunk_safe
@@ -224,10 +225,15 @@ SYM_CODE_START(srso_return_thunk)
 SYM_CODE_END(srso_return_thunk)
 
 #define JMP_SRSO_UNTRAIN_RET "jmp srso_untrain_ret"
-#define JMP_SRSO_ALIAS_UNTRAIN_RET "jmp srso_alias_untrain_ret"
 #else /* !CONFIG_MITIGATION_SRSO */
+/* Dummy for the alternative in CALL_UNTRAIN_RET. */
+SYM_CODE_START(srso_alias_untrain_ret)
+	ANNOTATE_UNRET_SAFE
+	ret
+	int3
+SYM_FUNC_END(srso_alias_untrain_ret)
+__EXPORT_THUNK(srso_alias_untrain_ret)
 #define JMP_SRSO_UNTRAIN_RET "ud2"
-#define JMP_SRSO_ALIAS_UNTRAIN_RET "ud2"
 #endif /* CONFIG_MITIGATION_SRSO */
 
 #ifdef CONFIG_MITIGATION_UNRET_ENTRY
@@ -319,9 +325,7 @@ SYM_FUNC_END(retbleed_untrain_ret)
 #if defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_MITIGATION_SRSO)
 
 SYM_FUNC_START(entry_untrain_ret)
-	ALTERNATIVE_2 JMP_RETBLEED_UNTRAIN_RET,				\
-		      JMP_SRSO_UNTRAIN_RET, X86_FEATURE_SRSO,		\
-		      JMP_SRSO_ALIAS_UNTRAIN_RET, X86_FEATURE_SRSO_ALIAS
+	ALTERNATIVE JMP_RETBLEED_UNTRAIN_RET, JMP_SRSO_UNTRAIN_RET, X86_FEATURE_SRSO
 SYM_FUNC_END(entry_untrain_ret)
 __EXPORT_THUNK(entry_untrain_ret)
 
@@ -26,31 +26,18 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
 	for (; addr < end; addr = next) {
 		pud_t *pud = pud_page + pud_index(addr);
 		pmd_t *pmd;
-		bool use_gbpage;
 
 		next = (addr & PUD_MASK) + PUD_SIZE;
 		if (next > end)
 			next = end;
 
-		/* if this is already a gbpage, this portion is already mapped */
-		if (pud_leaf(*pud))
-			continue;
-
-		/* Is using a gbpage allowed? */
-		use_gbpage = info->direct_gbpages;
-
-		/* Don't use gbpage if it maps more than the requested region. */
-		/* at the begining: */
-		use_gbpage &= ((addr & ~PUD_MASK) == 0);
-		/* ... or at the end: */
-		use_gbpage &= ((next & ~PUD_MASK) == 0);
-
-		/* Never overwrite existing mappings */
-		use_gbpage &= !pud_present(*pud);
-
-		if (use_gbpage) {
+		if (info->direct_gbpages) {
 			pud_t pudval;
 
+			if (pud_present(*pud))
+				continue;
+
+			addr &= PUD_MASK;
 			pudval = __pud((addr - info->offset) | info->page_flag);
 			set_pud(pud, pudval);
 			continue;
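The removed logic above used a pair of mask tests to decide whether a 1 GiB page would spill past the requested range. Since the same alignment idiom shows up elsewhere in the mm code, here is a small self-contained C demonstration of it; the constants mirror the x86 definitions but the program itself is only illustrative:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PUD_SHIFT 30
#define PUD_SIZE  (1ULL << PUD_SHIFT)
#define PUD_MASK  (~(PUD_SIZE - 1))

/* A 1 GiB page is only safe when both ends sit on 1 GiB boundaries. */
static bool range_fits_gbpage(uint64_t addr, uint64_t next)
{
	return (addr & ~PUD_MASK) == 0 && (next & ~PUD_MASK) == 0;
}

int main(void)
{
	printf("%d\n", range_fits_gbpage(0x40000000, 0x80000000)); /* 1 */
	printf("%d\n", range_fits_gbpage(0x40000000, 0x7ff00000)); /* 0 */
	return 0;
}
```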
@@ -492,6 +492,24 @@ void __init sme_early_init(void)
 	 */
 	if (sev_status & MSR_AMD64_SEV_ENABLED)
 		ia32_disable();
+
+	/*
+	 * Override init functions that scan the ROM region in SEV-SNP guests,
+	 * as this memory is not pre-validated and would thus cause a crash.
+	 */
+	if (sev_status & MSR_AMD64_SEV_SNP_ENABLED) {
+		x86_init.mpparse.find_mptable = x86_init_noop;
+		x86_init.pci.init_irq = x86_init_noop;
+		x86_init.resources.probe_roms = x86_init_noop;
+
+		/*
+		 * DMI setup behavior for SEV-SNP guests depends on
+		 * efi_enabled(EFI_CONFIG_TABLES), which hasn't been
+		 * parsed yet. snp_dmi_setup() will run after that
+		 * parsing has happened.
+		 */
+		x86_init.resources.dmi_setup = snp_dmi_setup;
+	}
 }
 
 void __init mem_encrypt_free_decrypted_mem(void)
@@ -480,7 +480,7 @@ static int emit_call(u8 **pprog, void *func, void *ip)
 static int emit_rsb_call(u8 **pprog, void *func, void *ip)
 {
 	OPTIMIZER_HIDE_VAR(func);
-	x86_call_depth_emit_accounting(pprog, func);
+	ip += x86_call_depth_emit_accounting(pprog, func, ip);
 	return emit_patch(pprog, func, ip, 0xE8);
 }
 
@@ -1971,20 +1971,17 @@ populate_extable:
 
 			/* call */
 		case BPF_JMP | BPF_CALL: {
-			int offs;
+			u8 *ip = image + addrs[i - 1];
 
 			func = (u8 *) __bpf_call_base + imm32;
 			if (tail_call_reachable) {
 				RESTORE_TAIL_CALL_CNT(bpf_prog->aux->stack_depth);
-				if (!imm32)
-					return -EINVAL;
-				offs = 7 + x86_call_depth_emit_accounting(&prog, func);
-			} else {
-				if (!imm32)
-					return -EINVAL;
-				offs = x86_call_depth_emit_accounting(&prog, func);
+				ip += 7;
 			}
-			if (emit_call(&prog, func, image + addrs[i - 1] + offs))
+			if (!imm32)
+				return -EINVAL;
+			ip += x86_call_depth_emit_accounting(&prog, func, ip);
+			if (emit_call(&prog, func, ip))
 				return -EINVAL;
 			break;
 		}
@@ -2834,7 +2831,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 		 * Direct-call fentry stub, as such it needs accounting for the
 		 * __fentry__ call.
 		 */
-		x86_call_depth_emit_accounting(&prog, NULL);
+		x86_call_depth_emit_accounting(&prog, NULL, image);
 	}
 	EMIT1(0x55);		 /* push rbp */
 	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
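The JIT fix above makes the accounting emitter return the number of bytes it wrote so the caller can advance the notional instruction pointer before emitting an ip-relative call. A minimal, compilable model of that pattern follows; all names and sizes are hypothetical, not the kernel JIT's:

```c
#include <stdint.h>
#include <stdio.h>

static int emit_accounting(uint8_t **prog)
{
	/* pretend the accounting thunk is 5 bytes of code */
	for (int i = 0; i < 5; i++)
		*(*prog)++ = 0x90;	/* nop placeholder */
	return 5;
}

static void emit_call(uint8_t **prog, uint64_t func, uint64_t ip)
{
	/* rel32 is measured from the end of the 5-byte call insn */
	int32_t rel = (int32_t)(func - (ip + 5));

	*(*prog)++ = 0xE8;
	for (int i = 0; i < 4; i++)
		*(*prog)++ = (uint8_t)(rel >> (8 * i));
}

int main(void)
{
	uint8_t image[32], *prog = image;
	uint64_t ip = 0x1000;	/* where this code will eventually run */

	ip += emit_accounting(&prog);	/* the bug class: forgetting += */
	emit_call(&prog, 0x2000, ip);
	printf("emitted %td bytes\n", prog - image);
	return 0;
}
```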
@@ -1,2 +1,2 @@
 # SPDX-License-Identifier: GPL-2.0-only
-obj-y	+= vmx/
+obj-y	+= svm/ vmx/

@@ -726,7 +726,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
  * which can be mixed are set in each bio and mark @rq as mixed
  * merged.
  */
-void blk_rq_set_mixed_merge(struct request *rq)
+static void blk_rq_set_mixed_merge(struct request *rq)
 {
 	blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK;
 	struct bio *bio;
@@ -770,16 +770,11 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
 		/*
 		 * Partial zone append completions cannot be supported as the
 		 * BIO fragments may end up not being written sequentially.
-		 * For such case, force the completed nbytes to be equal to
-		 * the BIO size so that bio_advance() sets the BIO remaining
-		 * size to 0 and we end up calling bio_endio() before returning.
 		 */
-		if (bio->bi_iter.bi_size != nbytes) {
+		if (bio->bi_iter.bi_size != nbytes)
 			bio->bi_status = BLK_STS_IOERR;
-			nbytes = bio->bi_iter.bi_size;
-		} else {
+		else
 			bio->bi_iter.bi_sector = rq->__sector;
-		}
 	}
 
 	bio_advance(bio, nbytes);

@@ -146,8 +146,7 @@ static int blk_validate_limits(struct queue_limits *lim)
 	max_hw_sectors = min_not_zero(lim->max_hw_sectors,
 				lim->max_dev_sectors);
 	if (lim->max_user_sectors) {
-		if (lim->max_user_sectors > max_hw_sectors ||
-		    lim->max_user_sectors < PAGE_SIZE / SECTOR_SIZE)
+		if (lim->max_user_sectors < PAGE_SIZE / SECTOR_SIZE)
 			return -EINVAL;
 		lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
 	} else {
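The blk-settings hunk relaxes validation: a user limit above the hardware limit is no longer rejected, because the next line already clamps it with min(). A compilable sketch of the resulting behavior, with invented names and the same page/sector constants used only for illustration:

```c
#include <stdio.h>

#define PAGE_SIZE   4096u
#define SECTOR_SIZE  512u

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* User limit must stay >= one page; anything above the hardware
 * limit is silently clamped instead of returning an error. */
static int set_max_sectors(unsigned int max_hw, unsigned int user,
			   unsigned int *out)
{
	if (user && user < PAGE_SIZE / SECTOR_SIZE)
		return -1;	/* -EINVAL in the kernel */
	*out = user ? min_u(max_hw, user) : max_hw;
	return 0;
}

int main(void)
{
	unsigned int v;

	set_max_sectors(2048, 1u << 20, &v);	/* oversized request */
	printf("clamped to %u sectors\n", v);	/* 2048 */
	return 0;
}
```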
@@ -339,7 +339,6 @@ int ll_back_merge_fn(struct request *req, struct bio *bio,
 bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
 				struct request *next);
 unsigned int blk_recalc_rq_segments(struct request *rq);
-void blk_rq_set_mixed_merge(struct request *rq);
 bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
 enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);
 

@@ -550,8 +550,12 @@ acpi_db_walk_for_fields(acpi_handle obj_handle,
 	ACPI_FREE(buffer.pointer);
 
 	buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
-	acpi_evaluate_object(obj_handle, NULL, NULL, &buffer);
-
+	status = acpi_evaluate_object(obj_handle, NULL, NULL, &buffer);
+	if (ACPI_FAILURE(status)) {
+		acpi_os_printf("Could Not evaluate object %p\n",
+			       obj_handle);
+		return (AE_OK);
+	}
 	/*
 	 * Since this is a field unit, surround the output in braces
 	 */
@@ -851,7 +851,7 @@ err_put_table:
 	return rc;
 }
 
-static void __exit einj_remove(struct platform_device *pdev)
+static void einj_remove(struct platform_device *pdev)
 {
 	struct apei_exec_context ctx;
 

@@ -712,8 +712,10 @@ void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
 				ehc->saved_ncq_enabled |= 1 << devno;
 
 			/* If we are resuming, wake up the device */
-			if (ap->pflags & ATA_PFLAG_RESUMING)
+			if (ap->pflags & ATA_PFLAG_RESUMING) {
+				dev->flags |= ATA_DFLAG_RESUMING;
 				ehc->i.dev_action[devno] |= ATA_EH_SET_ACTIVE;
+			}
 		}
 	}
 
@@ -3169,6 +3171,7 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
 	return 0;
 
  err:
+	dev->flags &= ~ATA_DFLAG_RESUMING;
 	*r_failed_dev = dev;
 	return rc;
 }
@@ -4730,6 +4730,7 @@ void ata_scsi_dev_rescan(struct work_struct *work)
 	struct ata_link *link;
 	struct ata_device *dev;
 	unsigned long flags;
+	bool do_resume;
 	int ret = 0;
 
 	mutex_lock(&ap->scsi_scan_mutex);
@@ -4751,7 +4752,15 @@ void ata_scsi_dev_rescan(struct work_struct *work)
 			if (scsi_device_get(sdev))
 				continue;
 
+			do_resume = dev->flags & ATA_DFLAG_RESUMING;
+
 			spin_unlock_irqrestore(ap->lock, flags);
+			if (do_resume) {
+				ret = scsi_resume_device(sdev);
+				if (ret == -EWOULDBLOCK)
+					goto unlock;
+				dev->flags &= ~ATA_DFLAG_RESUMING;
+			}
 			ret = scsi_rescan_device(sdev);
 			scsi_device_put(sdev);
 			spin_lock_irqsave(ap->lock, flags);
@@ -826,11 +826,15 @@ EXPORT_SYMBOL_GPL(qca_uart_setup);
 
 int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
 {
+	bdaddr_t bdaddr_swapped;
 	struct sk_buff *skb;
 	int err;
 
-	skb = __hci_cmd_sync_ev(hdev, EDL_WRITE_BD_ADDR_OPCODE, 6, bdaddr,
-				HCI_EV_VENDOR, HCI_INIT_TIMEOUT);
+	baswap(&bdaddr_swapped, bdaddr);
+
+	skb = __hci_cmd_sync_ev(hdev, EDL_WRITE_BD_ADDR_OPCODE, 6,
+				&bdaddr_swapped, HCI_EV_VENDOR,
+				HCI_INIT_TIMEOUT);
 	if (IS_ERR(skb)) {
 		err = PTR_ERR(skb);
 		bt_dev_err(hdev, "QCA Change address cmd failed (%d)", err);
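The fix above swaps the BD address byte order before sending it to the controller. baswap() simply reverses the six bytes; a standalone sketch of that semantics (types modeled loosely on the Bluetooth ones, not the kernel definitions):

```c
#include <stdint.h>
#include <stdio.h>

typedef struct { uint8_t b[6]; } bdaddr_t;

/* Reverse the 6 bytes: HCI wire order is little-endian, so an address
 * held in "human readable" (big-endian) order must be swapped first. */
static void baswap_sketch(bdaddr_t *dst, const bdaddr_t *src)
{
	for (int i = 0; i < 6; i++)
		dst->b[i] = src->b[5 - i];
}

int main(void)
{
	bdaddr_t in = { { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 } }, out;

	baswap_sketch(&out, &in);
	for (int i = 0; i < 6; i++)
		printf("%02x%s", out.b[i], i < 5 ? ":" : "\n");
	return 0;
}
```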
@@ -7,7 +7,6 @@
  *
  *  Copyright (C) 2007 Texas Instruments, Inc.
  *  Copyright (c) 2010, 2012, 2018 The Linux Foundation. All rights reserved.
- *  Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
  *
  *  Acknowledgements:
  *  This file is based on hci_ll.c, which was...
@@ -226,6 +225,7 @@ struct qca_serdev {
 	struct qca_power *bt_power;
 	u32 init_speed;
 	u32 oper_speed;
+	bool bdaddr_property_broken;
 	const char *firmware_name;
 };
 
@@ -1843,6 +1843,7 @@ static int qca_setup(struct hci_uart *hu)
 	const char *firmware_name = qca_get_firmware_name(hu);
 	int ret;
 	struct qca_btsoc_version ver;
+	struct qca_serdev *qcadev;
 	const char *soc_name;
 
 	ret = qca_check_speeds(hu);
@@ -1904,16 +1905,11 @@ retry:
 	case QCA_WCN6750:
 	case QCA_WCN6855:
 	case QCA_WCN7850:
+		set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
 
-		/* Set BDA quirk bit for reading BDA value from fwnode property
-		 * only if that property exist in DT.
-		 */
-		if (fwnode_property_present(dev_fwnode(hdev->dev.parent), "local-bd-address")) {
-			set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
-			bt_dev_info(hdev, "setting quirk bit to read BDA from fwnode later");
-		} else {
-			bt_dev_dbg(hdev, "local-bd-address` is not present in the devicetree so not setting quirk bit for BDA");
-		}
+		qcadev = serdev_device_get_drvdata(hu->serdev);
+		if (qcadev->bdaddr_property_broken)
+			set_bit(HCI_QUIRK_BDADDR_PROPERTY_BROKEN, &hdev->quirks);
 
 		hci_set_aosp_capable(hdev);
 
@@ -2295,6 +2291,9 @@ static int qca_serdev_probe(struct serdev_device *serdev)
 	if (!qcadev->oper_speed)
 		BT_DBG("UART will pick default operating speed");
 
+	qcadev->bdaddr_property_broken = device_property_read_bool(&serdev->dev,
+			"qcom,local-bd-address-broken");
+
 	if (data)
 		qcadev->btsoc_type = data->soc_type;
 	else
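The driver now caches a devicetree boolean at probe time and turns it into a quirk bit during setup. A compilable toy model of that property-to-quirk handoff; every name and flag value here is invented for illustration:

```c
#include <stdbool.h>
#include <stdio.h>

#define QUIRK_USE_BDADDR_PROPERTY	(1u << 0)
#define QUIRK_BDADDR_PROPERTY_BROKEN	(1u << 1)

struct model_dev {
	bool bdaddr_property_broken;	/* cached at probe time */
	unsigned int quirks;
};

/* Setup unconditionally trusts the property, then flags it as
 * byte-reversed when the DT says the boot firmware got it wrong. */
static void model_setup(struct model_dev *d)
{
	d->quirks |= QUIRK_USE_BDADDR_PROPERTY;
	if (d->bdaddr_property_broken)
		d->quirks |= QUIRK_BDADDR_PROPERTY_BROKEN;
}

int main(void)
{
	struct model_dev d = { .bdaddr_property_broken = true };

	model_setup(&d);
	printf("quirks: %#x\n", d.quirks);	/* 0x3 */
	return 0;
}
```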
@@ -144,17 +144,4 @@ config CXL_REGION_INVALIDATION_TEST
 	  If unsure, or if this kernel is meant for production environments,
 	  say N.
 
-config CXL_PMU
-	tristate "CXL Performance Monitoring Unit"
-	default CXL_BUS
-	depends on PERF_EVENTS
-	help
-	  Support performance monitoring as defined in CXL rev 3.0
-	  section 13.2: Performance Monitoring. CXL components may have
-	  one or more CXL Performance Monitoring Units (CPMUs).
-
-	  Say 'y/m' to enable a driver that will attach to performance
-	  monitoring units and provide standard perf based interfaces.
-
-	  If unsure say 'm'.
 endif

@@ -84,11 +84,11 @@ static int sanitycheck(void *arg)
 		return -ENOMEM;
 
 	chain = mock_chain(NULL, f, 1);
-	if (!chain)
+	if (chain)
+		dma_fence_enable_sw_signaling(chain);
+	else
 		err = -ENOMEM;
 
-	dma_fence_enable_sw_signaling(chain);
-
 	dma_fence_signal(f);
 	dma_fence_put(f);
 
@@ -120,7 +120,7 @@ efi_status_t efi_random_alloc(unsigned long size,
 			continue;
 		}
 
-		target = round_up(max(md->phys_addr, alloc_min), align) + target_slot * align;
+		target = round_up(max_t(u64, md->phys_addr, alloc_min), align) + target_slot * align;
 		pages = size / EFI_PAGE_SIZE;
 
 		status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,

@@ -496,6 +496,7 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
 	hdr->vid_mode	= 0xffff;
 
 	hdr->type_of_loader = 0x21;
+	hdr->initrd_addr_max = INT_MAX;
 
 	/* Convert unicode cmdline to ascii */
 	cmdline_ptr = efi_convert_cmdline(image, &options_size);
|
@ -1083,10 +1083,20 @@ static u32 gpio_v2_line_config_debounce_period(struct gpio_v2_line_config *lc,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline char *make_irq_label(const char *orig)
|
||||
{
|
||||
return kstrdup_and_replace(orig, '/', ':', GFP_KERNEL);
|
||||
}
|
||||
|
||||
static inline void free_irq_label(const char *label)
|
||||
{
|
||||
kfree(label);
|
||||
}
|
||||
|
||||
static void edge_detector_stop(struct line *line)
|
||||
{
|
||||
if (line->irq) {
|
||||
free_irq(line->irq, line);
|
||||
free_irq_label(free_irq(line->irq, line));
|
||||
line->irq = 0;
|
||||
}
|
||||
|
||||
@ -1110,6 +1120,7 @@ static int edge_detector_setup(struct line *line,
|
||||
unsigned long irqflags = 0;
|
||||
u64 eflags;
|
||||
int irq, ret;
|
||||
char *label;
|
||||
|
||||
eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
|
||||
if (eflags && !kfifo_initialized(&line->req->events)) {
|
||||
@ -1146,11 +1157,17 @@ static int edge_detector_setup(struct line *line,
|
||||
IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
|
||||
irqflags |= IRQF_ONESHOT;
|
||||
|
||||
label = make_irq_label(line->req->label);
|
||||
if (!label)
|
||||
return -ENOMEM;
|
||||
|
||||
/* Request a thread to read the events */
|
||||
ret = request_threaded_irq(irq, edge_irq_handler, edge_irq_thread,
|
||||
irqflags, line->req->label, line);
|
||||
if (ret)
|
||||
irqflags, label, line);
|
||||
if (ret) {
|
||||
free_irq_label(label);
|
||||
return ret;
|
||||
}
|
||||
|
||||
line->irq = irq;
|
||||
return 0;
|
||||
@ -1973,7 +1990,7 @@ static void lineevent_free(struct lineevent_state *le)
|
||||
blocking_notifier_chain_unregister(&le->gdev->device_notifier,
|
||||
&le->device_unregistered_nb);
|
||||
if (le->irq)
|
||||
free_irq(le->irq, le);
|
||||
free_irq_label(free_irq(le->irq, le));
|
||||
if (le->desc)
|
||||
gpiod_free(le->desc);
|
||||
kfree(le->label);
|
||||
@ -2114,6 +2131,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
|
||||
int fd;
|
||||
int ret;
|
||||
int irq, irqflags = 0;
|
||||
char *label;
|
||||
|
||||
if (copy_from_user(&eventreq, ip, sizeof(eventreq)))
|
||||
return -EFAULT;
|
||||
@ -2198,15 +2216,23 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
|
||||
if (ret)
|
||||
goto out_free_le;
|
||||
|
||||
label = make_irq_label(le->label);
|
||||
if (!label) {
|
||||
ret = -ENOMEM;
|
||||
goto out_free_le;
|
||||
}
|
||||
|
||||
/* Request a thread to read the events */
|
||||
ret = request_threaded_irq(irq,
|
||||
lineevent_irq_handler,
|
||||
lineevent_irq_thread,
|
||||
irqflags,
|
||||
le->label,
|
||||
label,
|
||||
le);
|
||||
if (ret)
|
||||
if (ret) {
|
||||
free_irq_label(label);
|
||||
goto out_free_le;
|
||||
}
|
||||
|
||||
le->irq = irq;
|
||||
|
||||
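The pattern above is worth calling out: free_irq() returns the devname cookie that was passed to request_threaded_irq(), which here is exactly the duplicated label, so `free_irq_label(free_irq(...))` releases both in one expression. A userspace model of that ownership chain, with the kernel APIs replaced by stand-ins:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* strdup-with-replacement, like kstrdup_and_replace(orig, '/', ':') */
static char *make_irq_label(const char *orig)
{
	char *s = strdup(orig);

	if (s)
		for (char *p = s; *p; p++)
			if (*p == '/')
				*p = ':';
	return s;
}

static const char *registered_name;	/* stands in for the irq core */

static void request_irq_model(const char *name)
{
	registered_name = name;
}

static const char *free_irq_model(void)	/* returns the cookie */
{
	const char *name = registered_name;

	registered_name = NULL;
	return name;
}

int main(void)
{
	request_irq_model(make_irq_label("gpio/line7"));
	/* chained free: release the irq, then the label it owned */
	free((char *)free_irq_model());
	return 0;
}
```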
@@ -2397,6 +2397,11 @@ char *gpiochip_dup_line_label(struct gpio_chip *gc, unsigned int offset)
 }
 EXPORT_SYMBOL_GPL(gpiochip_dup_line_label);
 
+static inline const char *function_name_or_default(const char *con_id)
+{
+	return con_id ?: "(default)";
+}
+
 /**
  * gpiochip_request_own_desc - Allow GPIO chip to request its own descriptor
  * @gc: GPIO chip
@@ -2425,10 +2430,11 @@ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *gc,
 					    enum gpiod_flags dflags)
 {
 	struct gpio_desc *desc = gpiochip_get_desc(gc, hwnum);
+	const char *name = function_name_or_default(label);
 	int ret;
 
 	if (IS_ERR(desc)) {
-		chip_err(gc, "failed to get GPIO descriptor\n");
+		chip_err(gc, "failed to get GPIO %s descriptor\n", name);
 		return desc;
 	}
 
@@ -2438,8 +2444,8 @@ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *gc,
 
 	ret = gpiod_configure_flags(desc, label, lflags, dflags);
 	if (ret) {
-		chip_err(gc, "setup of own GPIO %s failed\n", label);
 		gpiod_free_commit(desc);
+		chip_err(gc, "setup of own GPIO %s failed\n", name);
 		return ERR_PTR(ret);
 	}
 
@@ -4153,19 +4159,17 @@ static struct gpio_desc *gpiod_find_by_fwnode(struct fwnode_handle *fwnode,
 					      enum gpiod_flags *flags,
 					      unsigned long *lookupflags)
 {
+	const char *name = function_name_or_default(con_id);
 	struct gpio_desc *desc = ERR_PTR(-ENOENT);
 
 	if (is_of_node(fwnode)) {
-		dev_dbg(consumer, "using DT '%pfw' for '%s' GPIO lookup\n",
-			fwnode, con_id);
+		dev_dbg(consumer, "using DT '%pfw' for '%s' GPIO lookup\n", fwnode, name);
 		desc = of_find_gpio(to_of_node(fwnode), con_id, idx, lookupflags);
 	} else if (is_acpi_node(fwnode)) {
-		dev_dbg(consumer, "using ACPI '%pfw' for '%s' GPIO lookup\n",
-			fwnode, con_id);
+		dev_dbg(consumer, "using ACPI '%pfw' for '%s' GPIO lookup\n", fwnode, name);
 		desc = acpi_find_gpio(fwnode, con_id, idx, flags, lookupflags);
 	} else if (is_software_node(fwnode)) {
-		dev_dbg(consumer, "using swnode '%pfw' for '%s' GPIO lookup\n",
-			fwnode, con_id);
+		dev_dbg(consumer, "using swnode '%pfw' for '%s' GPIO lookup\n", fwnode, name);
 		desc = swnode_find_gpio(fwnode, con_id, idx, lookupflags);
 	}
 
@@ -4181,6 +4185,7 @@ struct gpio_desc *gpiod_find_and_request(struct device *consumer,
 					 bool platform_lookup_allowed)
 {
 	unsigned long lookupflags = GPIO_LOOKUP_FLAGS_DEFAULT;
+	const char *name = function_name_or_default(con_id);
 	/*
 	 * scoped_guard() is implemented as a for loop, meaning static
 	 * analyzers will complain about these two not being initialized.
@@ -4203,8 +4208,7 @@ struct gpio_desc *gpiod_find_and_request(struct device *consumer,
 	}
 
 	if (IS_ERR(desc)) {
-		dev_dbg(consumer, "No GPIO consumer %s found\n",
-			con_id);
+		dev_dbg(consumer, "No GPIO consumer %s found\n", name);
 		return desc;
 	}
 
@@ -4226,15 +4230,14 @@ struct gpio_desc *gpiod_find_and_request(struct device *consumer,
 		 *
 		 * FIXME: Make this more sane and safe.
 		 */
-		dev_info(consumer,
-			 "nonexclusive access to GPIO for %s\n", con_id);
+		dev_info(consumer, "nonexclusive access to GPIO for %s\n", name);
 		return desc;
 	}
 
 	ret = gpiod_configure_flags(desc, con_id, lookupflags, flags);
 	if (ret < 0) {
-		dev_dbg(consumer, "setup of GPIO %s failed\n", con_id);
 		gpiod_put(desc);
+		dev_dbg(consumer, "setup of GPIO %s failed\n", name);
 		return ERR_PTR(ret);
 	}
 
@@ -4350,6 +4353,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_optional);
 int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
 		unsigned long lflags, enum gpiod_flags dflags)
 {
+	const char *name = function_name_or_default(con_id);
 	int ret;
 
 	if (lflags & GPIO_ACTIVE_LOW)
@@ -4393,7 +4397,7 @@ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
 
 	/* No particular flag request, return here... */
 	if (!(dflags & GPIOD_FLAGS_BIT_DIR_SET)) {
-		gpiod_dbg(desc, "no flags found for %s\n", con_id);
+		gpiod_dbg(desc, "no flags found for GPIO %s\n", name);
 		return 0;
 	}
 
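The helper introduced above leans on the GNU `?:` extension, which keeps the left operand when it is non-NULL without evaluating it twice. A tiny portable sketch of the same idea (the function name mirrors the one in the diff; the rest is illustrative):

```c
#include <stdio.h>

static const char *function_name_or_default_sketch(const char *con_id)
{
	/* portable spelling of the GNU `con_id ?: "(default)"` */
	return con_id ? con_id : "(default)";
}

int main(void)
{
	printf("%s\n", function_name_or_default_sketch(NULL));    /* (default) */
	printf("%s\n", function_name_or_default_sketch("reset")); /* reset */
	return 0;
}
```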
@@ -4539,6 +4539,8 @@ int amdgpu_device_prepare(struct drm_device *dev)
 	if (r)
 		goto unprepare;
 
+	flush_delayed_work(&adev->gfx.gfx_off_delay_work);
+
 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		if (!adev->ip_blocks[i].status.valid)
 			continue;

@@ -2237,6 +2237,7 @@ static int amdgpu_discovery_set_umsch_mm_ip_blocks(struct amdgpu_device *adev)
 {
 	switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
 	case IP_VERSION(4, 0, 5):
+	case IP_VERSION(4, 0, 6):
 		if (amdgpu_umsch_mm & 0x1) {
 			amdgpu_device_ip_block_add(adev, &umsch_mm_v4_0_ip_block);
 			adev->enable_umsch_mm = true;
@@ -524,46 +524,58 @@ static ssize_t amdgpu_debugfs_mqd_read(struct file *f, char __user *buf,
 {
 	struct amdgpu_ring *ring = file_inode(f)->i_private;
 	volatile u32 *mqd;
-	int r;
+	u32 *kbuf;
+	int r, i;
 	uint32_t value, result;
 
 	if (*pos & 3 || size & 3)
 		return -EINVAL;
 
-	result = 0;
+	kbuf = kmalloc(ring->mqd_size, GFP_KERNEL);
+	if (!kbuf)
+		return -ENOMEM;
 
 	r = amdgpu_bo_reserve(ring->mqd_obj, false);
 	if (unlikely(r != 0))
-		return r;
+		goto err_free;
 
 	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&mqd);
-	if (r) {
-		amdgpu_bo_unreserve(ring->mqd_obj);
-		return r;
-	}
+	if (r)
+		goto err_unreserve;
 
+	/*
+	 * Copy to local buffer to avoid put_user(), which might fault
+	 * and acquire mmap_sem, under reservation_ww_class_mutex.
+	 */
+	for (i = 0; i < ring->mqd_size/sizeof(u32); i++)
+		kbuf[i] = mqd[i];
+
+	amdgpu_bo_kunmap(ring->mqd_obj);
+	amdgpu_bo_unreserve(ring->mqd_obj);
+
+	result = 0;
 	while (size) {
 		if (*pos >= ring->mqd_size)
-			goto done;
+			break;
 
-		value = mqd[*pos/4];
+		value = kbuf[*pos/4];
 		r = put_user(value, (uint32_t *)buf);
 		if (r)
-			goto done;
+			goto err_free;
 		buf += 4;
 		result += 4;
 		size -= 4;
 		*pos += 4;
 	}
 
-done:
-	amdgpu_bo_kunmap(ring->mqd_obj);
-	mqd = NULL;
-	amdgpu_bo_unreserve(ring->mqd_obj);
-	if (r)
-		return r;
-
+	kfree(kbuf);
 	return result;
+
+err_unreserve:
+	amdgpu_bo_unreserve(ring->mqd_obj);
+err_free:
+	kfree(kbuf);
+	return r;
 }
 
 static const struct file_operations amdgpu_debugfs_mqd_fops = {
@@ -189,10 +189,13 @@ static void setup_vpe_queue(struct amdgpu_device *adev,
 	mqd->rptr_val = 0;
 	mqd->unmapped = 1;
 
+	if (adev->vpe.collaborate_mode)
+		memcpy(++mqd, test->mqd_data_cpu_addr, sizeof(struct MQD_INFO));
+
 	qinfo->mqd_addr = test->mqd_data_gpu_addr;
 	qinfo->csa_addr = test->ctx_data_gpu_addr +
 		offsetof(struct umsch_mm_test_ctx_data, vpe_ctx_csa);
-	qinfo->doorbell_offset_0 = (adev->doorbell_index.vpe_ring + 1) << 1;
+	qinfo->doorbell_offset_0 = 0;
 	qinfo->doorbell_offset_1 = 0;
 }
 
@@ -287,7 +290,10 @@ static int submit_vpe_queue(struct amdgpu_device *adev, struct umsch_mm_test *te
 	ring[5] = 0;
 
 	mqd->wptr_val = (6 << 2);
-	// WDOORBELL32(adev->umsch_mm.agdb_index[CONTEXT_PRIORITY_LEVEL_NORMAL], mqd->wptr_val);
+	if (adev->vpe.collaborate_mode)
+		(++mqd)->wptr_val = (6 << 2);
+
+	WDOORBELL32(adev->umsch_mm.agdb_index[CONTEXT_PRIORITY_LEVEL_NORMAL], mqd->wptr_val);
 
 	for (i = 0; i < adev->usec_timeout; i++) {
 		if (*fence == test_pattern)
@@ -571,6 +577,7 @@ int amdgpu_umsch_mm_init_microcode(struct amdgpu_umsch_mm *umsch)
 
 	switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
 	case IP_VERSION(4, 0, 5):
+	case IP_VERSION(4, 0, 6):
 		fw_name = "amdgpu/umsch_mm_4_0_0.bin";
 		break;
 	default:
@@ -750,6 +757,7 @@ static int umsch_mm_early_init(void *handle)
 
 	switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
 	case IP_VERSION(4, 0, 5):
+	case IP_VERSION(4, 0, 6):
 		umsch_mm_v4_0_set_funcs(&adev->umsch_mm);
 		break;
 	default:
@@ -33,13 +33,6 @@ enum UMSCH_SWIP_ENGINE_TYPE {
 	UMSCH_SWIP_ENGINE_TYPE_MAX
 };
 
-enum UMSCH_SWIP_AFFINITY_TYPE {
-	UMSCH_SWIP_AFFINITY_TYPE_ANY = 0,
-	UMSCH_SWIP_AFFINITY_TYPE_VCN0 = 1,
-	UMSCH_SWIP_AFFINITY_TYPE_VCN1 = 2,
-	UMSCH_SWIP_AFFINITY_TYPE_MAX
-};
-
 enum UMSCH_CONTEXT_PRIORITY_LEVEL {
 	CONTEXT_PRIORITY_LEVEL_IDLE = 0,
 	CONTEXT_PRIORITY_LEVEL_NORMAL = 1,
@@ -51,13 +44,15 @@ enum UMSCH_CONTEXT_PRIORITY_LEVEL {
 struct umsch_mm_set_resource_input {
 	uint32_t vmid_mask_mm_vcn;
 	uint32_t vmid_mask_mm_vpe;
+	uint32_t collaboration_mask_vpe;
 	uint32_t logging_vmid;
 	uint32_t engine_mask;
 	union {
 		struct {
 			uint32_t disable_reset : 1;
 			uint32_t disable_umsch_mm_log : 1;
-			uint32_t reserved : 30;
+			uint32_t use_rs64mem_for_proc_ctx_csa : 1;
+			uint32_t reserved : 29;
 		};
 		uint32_t uint32_all;
 	};
@@ -78,15 +73,18 @@ struct umsch_mm_add_queue_input {
 	uint32_t doorbell_offset_1;
 	enum UMSCH_SWIP_ENGINE_TYPE engine_type;
 	uint32_t affinity;
-	enum UMSCH_SWIP_AFFINITY_TYPE affinity_type;
 	uint64_t mqd_addr;
 	uint64_t h_context;
 	uint64_t h_queue;
 	uint32_t vm_context_cntl;
 
+	uint32_t process_csa_array_index;
+	uint32_t context_csa_array_index;
+
 	struct {
 		uint32_t is_context_suspended : 1;
-		uint32_t reserved : 31;
+		uint32_t collaboration_mode : 1;
+		uint32_t reserved : 30;
 	};
 };
 
@@ -94,6 +92,7 @@ struct umsch_mm_remove_queue_input {
 	uint32_t doorbell_offset_0;
 	uint32_t doorbell_offset_1;
 	uint64_t context_csa_addr;
+	uint32_t context_csa_array_index;
 };
 
 struct MQD_INFO {
@@ -103,6 +102,7 @@ struct MQD_INFO {
 	uint32_t wptr_val;
 	uint32_t rptr_val;
 	uint32_t unmapped;
+	uint32_t vmid;
 };
 
 struct amdgpu_umsch_mm;
@@ -396,6 +396,12 @@ static int vpe_hw_init(void *handle)
 	struct amdgpu_vpe *vpe = &adev->vpe;
 	int ret;
 
+	/* Power on VPE */
+	ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE,
+						     AMD_PG_STATE_UNGATE);
+	if (ret)
+		return ret;
+
 	ret = vpe_load_microcode(vpe);
 	if (ret)
 		return ret;

@@ -60,7 +60,7 @@ static int umsch_mm_v4_0_load_microcode(struct amdgpu_umsch_mm *umsch)
 
 	umsch->cmd_buf_curr_ptr = umsch->cmd_buf_ptr;
 
-	if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 5)) {
+	if (amdgpu_ip_version(adev, VCN_HWIP, 0) >= IP_VERSION(4, 0, 5)) {
 		WREG32_SOC15(VCN, 0, regUVD_IPX_DLDO_CONFIG,
 			1 << UVD_IPX_DLDO_CONFIG__ONO0_PWR_CONFIG__SHIFT);
 		SOC15_WAIT_ON_RREG(VCN, 0, regUVD_IPX_DLDO_STATUS,
@@ -248,7 +248,7 @@ static int umsch_mm_v4_0_ring_stop(struct amdgpu_umsch_mm *umsch)
 	data = REG_SET_FIELD(data, VCN_UMSCH_RB_DB_CTRL, EN, 0);
 	WREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_DB_CTRL, data);
 
-	if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 5)) {
+	if (amdgpu_ip_version(adev, VCN_HWIP, 0) >= IP_VERSION(4, 0, 5)) {
 		WREG32_SOC15(VCN, 0, regUVD_IPX_DLDO_CONFIG,
 			2 << UVD_IPX_DLDO_CONFIG__ONO0_PWR_CONFIG__SHIFT);
 		SOC15_WAIT_ON_RREG(VCN, 0, regUVD_IPX_DLDO_STATUS,
@@ -271,6 +271,8 @@ static int umsch_mm_v4_0_set_hw_resources(struct amdgpu_umsch_mm *umsch)
 
 	set_hw_resources.vmid_mask_mm_vcn = umsch->vmid_mask_mm_vcn;
 	set_hw_resources.vmid_mask_mm_vpe = umsch->vmid_mask_mm_vpe;
+	set_hw_resources.collaboration_mask_vpe =
+		adev->vpe.collaborate_mode ? 0x3 : 0x0;
 	set_hw_resources.engine_mask = umsch->engine_mask;
 
 	set_hw_resources.vcn0_hqd_mask[0] = umsch->vcn0_hqd_mask;
@@ -346,6 +348,7 @@ static int umsch_mm_v4_0_add_queue(struct amdgpu_umsch_mm *umsch,
 	add_queue.h_queue = input_ptr->h_queue;
 	add_queue.vm_context_cntl = input_ptr->vm_context_cntl;
 	add_queue.is_context_suspended = input_ptr->is_context_suspended;
+	add_queue.collaboration_mode = adev->vpe.collaborate_mode ? 1 : 0;
 
 	add_queue.api_status.api_completion_fence_addr = umsch->ring.fence_drv.gpu_addr;
 	add_queue.api_status.api_completion_fence_value = ++umsch->ring.fence_drv.sync_seq;
@@ -1523,7 +1523,7 @@ static int kfd_ioctl_get_dmabuf_info(struct file *filep,
 
 	/* Find a KFD GPU device that supports the get_dmabuf_info query */
 	for (i = 0; kfd_topology_enum_kfd_devices(i, &dev) == 0; i++)
-		if (dev)
+		if (dev && !kfd_devcgroup_check_permission(dev))
 			break;
 	if (!dev)
 		return -EINVAL;
@@ -1545,7 +1545,7 @@ static int kfd_ioctl_get_dmabuf_info(struct file *filep,
 	if (xcp_id >= 0)
 		args->gpu_id = dmabuf_adev->kfd.dev->nodes[xcp_id]->id;
 	else
-		args->gpu_id = dmabuf_adev->kfd.dev->nodes[0]->id;
+		args->gpu_id = dev->id;
 	args->flags = flags;
 
 	/* Copy metadata buffer to user mode */

@@ -339,7 +339,8 @@ static void event_interrupt_wq_v10(struct kfd_node *dev,
 				break;
 			}
 			kfd_signal_event_interrupt(pasid, context_id0 & 0x7fffff, 23);
-		} else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) {
+		} else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE &&
+			   KFD_DBG_EC_TYPE_IS_PACKET(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0))) {
 			kfd_set_dbg_ev_from_interrupt(dev, pasid,
 				KFD_DEBUG_DOORBELL_ID(context_id0),
 				KFD_EC_MASK(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0)),

@@ -328,7 +328,8 @@ static void event_interrupt_wq_v11(struct kfd_node *dev,
 		/* CP */
 		if (source_id == SOC15_INTSRC_CP_END_OF_PIPE)
 			kfd_signal_event_interrupt(pasid, context_id0, 32);
-		else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE)
+		else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE &&
+			 KFD_DBG_EC_TYPE_IS_PACKET(KFD_CTXID0_CP_BAD_OP_ECODE(context_id0)))
 			kfd_set_dbg_ev_from_interrupt(dev, pasid,
 				KFD_CTXID0_DOORBELL_ID(context_id0),
 				KFD_EC_MASK(KFD_CTXID0_CP_BAD_OP_ECODE(context_id0)),

@@ -388,7 +388,8 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
 				break;
 			}
 			kfd_signal_event_interrupt(pasid, sq_int_data, 24);
-		} else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) {
+		} else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE &&
+			   KFD_DBG_EC_TYPE_IS_PACKET(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0))) {
 			kfd_set_dbg_ev_from_interrupt(dev, pasid,
 				KFD_DEBUG_DOORBELL_ID(context_id0),
 				KFD_EC_MASK(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0)),
@@ -1473,7 +1473,7 @@ static inline void kfd_flush_tlb(struct kfd_process_device *pdd,
 
 static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
 {
-	return KFD_GC_VERSION(dev) > IP_VERSION(9, 4, 2) ||
+	return KFD_GC_VERSION(dev) >= IP_VERSION(9, 4, 2) ||
 	       (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) && dev->sdma_fw_version >= 18) ||
 	       KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 0);
 }
@@ -6305,9 +6305,8 @@ create_stream_for_sink(struct drm_connector *connector,
 
 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
-	else if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
-		 stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
-		 stream->signal == SIGNAL_TYPE_EDP) {
+
+	if (stream->link->psr_settings.psr_feature_enabled || stream->link->replay_settings.replay_feature_enabled) {
 		//
 		// should decide stream support vsc sdp colorimetry capability
 		// before building vsc info packet
@@ -6323,9 +6322,8 @@ create_stream_for_sink(struct drm_connector *connector,
 		if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22)
 			tf = TRANSFER_FUNC_GAMMA_22;
 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
-		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
 
+		if (stream->link->psr_settings.psr_feature_enabled)
+			aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
 	}
 finish:
 	dc_sink_release(sink);

@@ -141,9 +141,8 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
  * amdgpu_dm_psr_enable() - enable psr f/w
  * @stream: stream state
  *
- * Return: true if success
  */
-bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
+void amdgpu_dm_psr_enable(struct dc_stream_state *stream)
 {
 	struct dc_link *link = stream->link;
 	unsigned int vsync_rate_hz = 0;
@@ -190,7 +189,10 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
 	if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1)
 		power_opt |= psr_power_opt_z10_static_screen;
 
-	return dc_link_set_psr_allow_active(link, &psr_enable, false, false, &power_opt);
+	dc_link_set_psr_allow_active(link, &psr_enable, false, false, &power_opt);
+
+	if (link->ctx->dc->caps.ips_support)
+		dc_allow_idle_optimizations(link->ctx->dc, true);
 }
 
 /*
@@ -32,7 +32,7 @@
 #define AMDGPU_DM_PSR_ENTRY_DELAY 5
 
 void amdgpu_dm_set_psr_caps(struct dc_link *link);
-bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
+void amdgpu_dm_psr_enable(struct dc_stream_state *stream);
 bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
 bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
 bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

@@ -73,6 +73,8 @@
 #define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L
 #define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK 0x000F0000L
 
+#define SMU_VER_THRESHOLD 0x5D4A00 //93.74.0
+
 #define REG(reg_name) \
 	(ctx->clk_reg_offsets[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
 
@@ -411,9 +413,12 @@ static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs
 
 static void init_clk_states(struct clk_mgr *clk_mgr)
 {
+	struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
 	uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
+
 	memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
 
+	if (clk_mgr_int->smu_ver >= SMU_VER_THRESHOLD)
 		clk_mgr->clks.dtbclk_en = true; // request DTBCLK disable on first commit
 	clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk;	// restore ref_dtbclk
 	clk_mgr->clks.p_state_change_support = true;
 	clk_mgr->clks.prev_p_state_change_support = true;
@@ -709,7 +714,7 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
 			clock_table->NumFclkLevelsEnabled;
 	max_fclk = find_max_clk_value(clock_table->FclkClocks_Freq, num_fclk);
 
-	num_dcfclk = (clock_table->NumFclkLevelsEnabled > NUM_DCFCLK_DPM_LEVELS) ? NUM_DCFCLK_DPM_LEVELS :
+	num_dcfclk = (clock_table->NumDcfClkLevelsEnabled > NUM_DCFCLK_DPM_LEVELS) ? NUM_DCFCLK_DPM_LEVELS :
 			clock_table->NumDcfClkLevelsEnabled;
 	for (i = 0; i < num_dcfclk; i++) {
 		int j;
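init_clk_states() above uses a save/memset/restore idiom so one field survives the wholesale reset of the clock state. A compilable sketch of that idiom, with an invented struct in place of dc_clocks:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct clocks {
	uint32_t dispclk_khz;
	uint32_t ref_dtbclk_khz;	/* must survive the reset */
	int dtbclk_en;
};

static void init_clk_states_sketch(struct clocks *clks, int new_enough_smu)
{
	uint32_t ref_dtbclk = clks->ref_dtbclk_khz;	/* save */

	memset(clks, 0, sizeof(*clks));			/* wipe everything */

	if (new_enough_smu)
		clks->dtbclk_en = 1;	/* request DTBCLK disable on first commit */
	clks->ref_dtbclk_khz = ref_dtbclk;		/* restore */
}

int main(void)
{
	struct clocks c = { .dispclk_khz = 600000, .ref_dtbclk_khz = 600000 };

	init_clk_states_sketch(&c, 1);
	printf("%u %u\n", c.dispclk_khz, c.ref_dtbclk_khz);	/* 0 600000 */
	return 0;
}
```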
@@ -3024,7 +3024,8 @@ static void backup_planes_and_stream_state(
 		scratch->blend_tf[i] = *status->plane_states[i]->blend_tf;
 	}
 	scratch->stream_state = *stream;
-	scratch->out_transfer_func = *stream->out_transfer_func;
+	if (stream->out_transfer_func)
+		scratch->out_transfer_func = *stream->out_transfer_func;
 }
 
 static void restore_planes_and_stream_state(
@@ -3046,7 +3047,8 @@ static void restore_planes_and_stream_state(
 		*status->plane_states[i]->blend_tf = scratch->blend_tf[i];
 	}
 	*stream = scratch->stream_state;
-	*stream->out_transfer_func = scratch->out_transfer_func;
+	if (stream->out_transfer_func)
+		*stream->out_transfer_func = scratch->out_transfer_func;
 }
 
 static bool update_planes_and_stream_state(struct dc *dc,
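Both hunks guard a by-value struct copy behind a NULL check on the pointer being dereferenced. A minimal sketch of the bug class and the fix, using invented types:

```c
#include <stdio.h>

struct transfer_func { int tf; };

struct stream {
	struct transfer_func *out_transfer_func;	/* may be NULL */
};

struct scratch {
	struct transfer_func out_transfer_func;		/* held by value */
};

static void backup_sketch(struct scratch *s, const struct stream *st)
{
	/* dereferencing st->out_transfer_func unconditionally was the bug */
	if (st->out_transfer_func)
		s->out_transfer_func = *st->out_transfer_func;
}

int main(void)
{
	struct stream st = { .out_transfer_func = NULL };
	struct scratch s = { { 0 } };

	backup_sketch(&s, &st);		/* safe even with a NULL pointer */
	printf("ok\n");
	return 0;
}
```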
@@ -23,7 +23,7 @@
 # Makefile for the 'controller' sub-component of DAL.
 # It provides the control and status of HW CRTC block.
 
-CFLAGS_$(AMDDALPATH)/dc/dce110/dce110_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce110/dce110_resource.o = -Wno-override-init
 
 DCE110 = dce110_timing_generator.o \
 	dce110_compressor.o dce110_opp_regamma_v.o \

@@ -23,7 +23,7 @@
 # Makefile for the 'controller' sub-component of DAL.
 # It provides the control and status of HW CRTC block.
 
-CFLAGS_$(AMDDALPATH)/dc/dce112/dce112_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce112/dce112_resource.o = -Wno-override-init
 
 DCE112 = dce112_compressor.o
 

@@ -24,7 +24,7 @@
 # It provides the control and status of HW CRTC block.
 
 
-CFLAGS_$(AMDDALPATH)/dc/dce120/dce120_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce120/dce120_resource.o = -Wno-override-init
 
 DCE120 = dce120_timing_generator.o
 

@@ -23,7 +23,7 @@
 # Makefile for the 'controller' sub-component of DAL.
 # It provides the control and status of HW CRTC block.
 
-CFLAGS_$(AMDDALPATH)/dc/dce60/dce60_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce60/dce60_resource.o = -Wno-override-init
 
 DCE60 = dce60_timing_generator.o dce60_hw_sequencer.o \
 	dce60_resource.o

@@ -23,7 +23,7 @@
 # Makefile for the 'controller' sub-component of DAL.
 # It provides the control and status of HW CRTC block.
 
-CFLAGS_$(AMDDALPATH)/dc/dce80/dce80_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce80/dce80_resource.o = -Wno-override-init
 
 DCE80 = dce80_timing_generator.o
 
@@ -44,6 +44,36 @@
 #define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
 
 
+void mpc3_mpc_init(struct mpc *mpc)
+{
+	struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+	int opp_id;
+
+	mpc1_mpc_init(mpc);
+
+	for (opp_id = 0; opp_id < MAX_OPP; opp_id++) {
+		if (REG(MUX[opp_id]))
+			/* disable mpc out rate and flow control */
+			REG_UPDATE_2(MUX[opp_id], MPC_OUT_RATE_CONTROL_DISABLE,
+					1, MPC_OUT_FLOW_CONTROL_COUNT, 0);
+	}
+}
+
+void mpc3_mpc_init_single_inst(struct mpc *mpc, unsigned int mpcc_id)
+{
+	struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+
+	mpc1_mpc_init_single_inst(mpc, mpcc_id);
+
+	/* assuming mpc out mux is connected to opp with the same index at this
+	 * point in time (e.g. transitioning from vbios to driver)
+	 */
+	if (mpcc_id < MAX_OPP && REG(MUX[mpcc_id]))
+		/* disable mpc out rate and flow control */
+		REG_UPDATE_2(MUX[mpcc_id], MPC_OUT_RATE_CONTROL_DISABLE,
+				1, MPC_OUT_FLOW_CONTROL_COUNT, 0);
+}
+
 bool mpc3_is_dwb_idle(
 	struct mpc *mpc,
 	int dwb_id)
@@ -80,25 +110,6 @@ void mpc3_disable_dwb_mux(
 		MPC_DWB0_MUX, 0xf);
 }
 
-void mpc3_set_out_rate_control(
-	struct mpc *mpc,
-	int opp_id,
-	bool enable,
-	bool rate_2x_mode,
-	struct mpc_dwb_flow_control *flow_control)
-{
-	struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
-
-	REG_UPDATE_2(MUX[opp_id],
-			MPC_OUT_RATE_CONTROL_DISABLE, !enable,
-			MPC_OUT_RATE_CONTROL, rate_2x_mode);
-
-	if (flow_control)
-		REG_UPDATE_2(MUX[opp_id],
-			MPC_OUT_FLOW_CONTROL_MODE, flow_control->flow_ctrl_mode,
-			MPC_OUT_FLOW_CONTROL_COUNT, flow_control->flow_ctrl_cnt1);
-}
-
 enum dc_lut_mode mpc3_get_ogam_current(struct mpc *mpc, int mpcc_id)
 {
 	/*Contrary to DCN2 and DCN1 wherein a single status register field holds this info;
@@ -1490,8 +1501,8 @@ static const struct mpc_funcs dcn30_mpc_funcs = {
 	.read_mpcc_state = mpc3_read_mpcc_state,
 	.insert_plane = mpc1_insert_plane,
 	.remove_mpcc = mpc1_remove_mpcc,
-	.mpc_init = mpc1_mpc_init,
-	.mpc_init_single_inst = mpc1_mpc_init_single_inst,
+	.mpc_init = mpc3_mpc_init,
+	.mpc_init_single_inst = mpc3_mpc_init_single_inst,
 	.update_blending = mpc2_update_blending,
 	.cursor_lock = mpc1_cursor_lock,
 	.get_mpcc_for_dpp = mpc1_get_mpcc_for_dpp,
@@ -1508,7 +1519,6 @@ static const struct mpc_funcs dcn30_mpc_funcs = {
 	.set_dwb_mux = mpc3_set_dwb_mux,
 	.disable_dwb_mux = mpc3_disable_dwb_mux,
 	.is_dwb_idle = mpc3_is_dwb_idle,
-	.set_out_rate_control = mpc3_set_out_rate_control,
 	.set_gamut_remap = mpc3_set_gamut_remap,
 	.program_shaper = mpc3_program_shaper,
 	.acquire_rmu = mpcc3_acquire_rmu,

@@ -1007,6 +1007,13 @@ void dcn30_mpc_construct(struct dcn30_mpc *mpc30,
 	int num_mpcc,
 	int num_rmu);
 
+void mpc3_mpc_init(
+	struct mpc *mpc);
+
+void mpc3_mpc_init_single_inst(
+	struct mpc *mpc,
+	unsigned int mpcc_id);
+
 bool mpc3_program_shaper(
 	struct mpc *mpc,
 	const struct pwl_params *params,
@@ -1078,13 +1085,6 @@ bool mpc3_is_dwb_idle(
 	struct mpc *mpc,
 	int dwb_id);
 
-void mpc3_set_out_rate_control(
-	struct mpc *mpc,
-	int opp_id,
-	bool enable,
-	bool rate_2x_mode,
-	struct mpc_dwb_flow_control *flow_control);
-
 void mpc3_power_on_ogam_lut(
 	struct mpc *mpc, int mpcc_id,
 	bool power_on);
@@ -47,7 +47,7 @@ void mpc32_mpc_init(struct mpc *mpc)
 	struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
 	int mpcc_id;
 
-	mpc1_mpc_init(mpc);
+	mpc3_mpc_init(mpc);
 
 	if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) {
 		if (mpc30->mpc_mask->MPCC_MCM_SHAPER_MEM_LOW_PWR_MODE && mpc30->mpc_mask->MPCC_MCM_3DLUT_MEM_LOW_PWR_MODE) {
@@ -991,7 +991,7 @@ static const struct mpc_funcs dcn32_mpc_funcs = {
 	.insert_plane = mpc1_insert_plane,
 	.remove_mpcc = mpc1_remove_mpcc,
 	.mpc_init = mpc32_mpc_init,
-	.mpc_init_single_inst = mpc1_mpc_init_single_inst,
+	.mpc_init_single_inst = mpc3_mpc_init_single_inst,
 	.update_blending = mpc2_update_blending,
 	.cursor_lock = mpc1_cursor_lock,
 	.get_mpcc_for_dpp = mpc1_get_mpcc_for_dpp,
@@ -1008,7 +1008,6 @@ static const struct mpc_funcs dcn32_mpc_funcs = {
 	.set_dwb_mux = mpc3_set_dwb_mux,
 	.disable_dwb_mux = mpc3_disable_dwb_mux,
 	.is_dwb_idle = mpc3_is_dwb_idle,
-	.set_out_rate_control = mpc3_set_out_rate_control,
 	.set_gamut_remap = mpc3_set_gamut_remap,
 	.program_shaper = mpc32_program_shaper,
 	.program_3dlut = mpc32_program_3dlut,

@@ -166,8 +166,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
 	.num_states = 5,
 	.sr_exit_time_us = 28.0,
 	.sr_enter_plus_exit_time_us = 30.0,
-	.sr_exit_z8_time_us = 210.0,
-	.sr_enter_plus_exit_z8_time_us = 320.0,
+	.sr_exit_z8_time_us = 250.0,
+	.sr_enter_plus_exit_z8_time_us = 350.0,
 	.fclk_change_latency_us = 24.0,
 	.usr_retraining_latency_us = 2,
 	.writeback_latency_us = 12.0,
Some files were not shown because too many files have changed in this diff.