Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
No conflicts. Signed-off-by: Jakub Kicinski <kuba@kernel.org>
@@ -526,6 +526,7 @@ What: /sys/devices/system/cpu/vulnerabilities
		/sys/devices/system/cpu/vulnerabilities/srbds
		/sys/devices/system/cpu/vulnerabilities/tsx_async_abort
		/sys/devices/system/cpu/vulnerabilities/itlb_multihit
		/sys/devices/system/cpu/vulnerabilities/mmio_stale_data
Date:		January 2018
Contact:	Linux kernel mailing list <linux-kernel@vger.kernel.org>
Description:	Information about CPU vulnerabilities
@@ -17,3 +17,4 @@ are configurable at compile, boot or run time.
   special-register-buffer-data-sampling.rst
   core-scheduling.rst
   l1d_flush.rst
   processor_mmio_stale_data.rst
246	Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst (new file)
@@ -0,0 +1,246 @@
=========================================
Processor MMIO Stale Data Vulnerabilities
=========================================

Processor MMIO Stale Data Vulnerabilities are a class of memory-mapped I/O
(MMIO) vulnerabilities that can expose data. The sequences of operations for
exposing data range from simple to very complex. Because most of the
vulnerabilities require the attacker to have access to MMIO, many environments
are not affected. System environments using virtualization where MMIO access is
provided to untrusted guests may need mitigation. These vulnerabilities are
not transient execution attacks. However, these vulnerabilities may propagate
stale data into core fill buffers where the data can subsequently be inferred
by an unmitigated transient execution attack. Mitigation for these
vulnerabilities includes a combination of microcode update and software
changes, depending on the platform and usage model. Some of these mitigations
are similar to those used to mitigate Microarchitectural Data Sampling (MDS) or
those used to mitigate Special Register Buffer Data Sampling (SRBDS).

Data Propagators
================
Propagators are operations that result in stale data being copied or moved from
one microarchitectural buffer or register to another. Processor MMIO Stale Data
Vulnerabilities are operations that may result in stale data being directly
read into an architectural, software-visible state or sampled from a buffer or
register.

Fill Buffer Stale Data Propagator (FBSDP)
-----------------------------------------
Stale data may propagate from fill buffers (FB) into the non-coherent portion
of the uncore on some non-coherent writes. Fill buffer propagation by itself
does not make stale data architecturally visible. Stale data must be propagated
to a location where it is subject to reading or sampling.

Sideband Stale Data Propagator (SSDP)
-------------------------------------
The sideband stale data propagator (SSDP) is limited to the client (including
Intel Xeon server E3) uncore implementation. The sideband response buffer is
shared by all client cores. For non-coherent reads that go to sideband
destinations, the uncore logic returns 64 bytes of data to the core, including
both requested data and unrequested stale data, from a transaction buffer and
the sideband response buffer. As a result, stale data from the sideband
response and transaction buffers may now reside in a core fill buffer.

Primary Stale Data Propagator (PSDP)
------------------------------------
The primary stale data propagator (PSDP) is limited to the client (including
Intel Xeon server E3) uncore implementation. Similar to the sideband response
buffer, the primary response buffer is shared by all client cores. For some
processors, MMIO primary reads will return 64 bytes of data to the core fill
buffer including both requested data and unrequested stale data. This is
similar to the sideband stale data propagator.

Vulnerabilities
===============
Device Register Partial Write (DRPW) (CVE-2022-21166)
-----------------------------------------------------
Some endpoint MMIO registers incorrectly handle writes that are smaller than
the register size. Instead of aborting the write or only copying the correct
subset of bytes (for example, 2 bytes for a 2-byte write), more bytes than
specified by the write transaction may be written to the register. On
processors affected by FBSDP, this may expose stale data from the fill buffers
of the core that created the write transaction.

Shared Buffers Data Sampling (SBDS) (CVE-2022-21125)
----------------------------------------------------
After propagators may have moved data around the uncore and copied stale data
into client core fill buffers, processors affected by MFBDS can leak data from
the fill buffer. It is limited to the client (including Intel Xeon server E3)
uncore implementation.

Shared Buffers Data Read (SBDR) (CVE-2022-21123)
------------------------------------------------
It is similar to Shared Buffer Data Sampling (SBDS) except that the data is
directly read into the architectural software-visible state. It is limited to
the client (including Intel Xeon server E3) uncore implementation.

Affected Processors
===================
Not all the CPUs are affected by all the variants. For instance, most
processors for the server market (excluding Intel Xeon E3 processors) are
impacted by only Device Register Partial Write (DRPW).

Below is the list of affected Intel processors [#f1]_:

   ===================  ============  ==========
   Common name          Family_Model  Steppings
   ===================  ============  ==========
   HASWELL_X            06_3FH        2,4
   SKYLAKE_L            06_4EH        3
   BROADWELL_X          06_4FH        All
   SKYLAKE_X            06_55H        3,4,6,7,11
   BROADWELL_D          06_56H        3,4,5
   SKYLAKE              06_5EH        3
   ICELAKE_X            06_6AH        4,5,6
   ICELAKE_D            06_6CH        1
   ICELAKE_L            06_7EH        5
   ATOM_TREMONT_D       06_86H        All
   LAKEFIELD            06_8AH        1
   KABYLAKE_L           06_8EH        9 to 12
   ATOM_TREMONT         06_96H        1
   ATOM_TREMONT_L       06_9CH        0
   KABYLAKE             06_9EH        9 to 13
   COMETLAKE            06_A5H        2,3,5
   COMETLAKE_L          06_A6H        0,1
   ROCKETLAKE           06_A7H        1
   ===================  ============  ==========

If a CPU is in the affected processor list, but not affected by a variant, it
is indicated by new bits in MSR IA32_ARCH_CAPABILITIES. As described in a later
section, mitigation largely remains the same for all the variants, i.e. to
clear the CPU fill buffers via VERW instruction.
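
The Family_Model and Steppings columns above follow the CPUID signature
reported by leaf 1 (EAX). As an illustration only (this sketch is not part of
the patch and assumes a user-space build with the compiler-provided
<cpuid.h>), the running CPU's signature can be printed in the same notation
and compared against the table by hand::

    /* Hypothetical helper: print family/model/stepping in 06_xxH notation. */
    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;
            unsigned int family, model, stepping;

            if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                    return 1;

            stepping = eax & 0xf;
            family   = (eax >> 8) & 0xf;
            model    = (eax >> 4) & 0xf;

            /* Families 0x6 and 0xf encode extended family/model bits. */
            if (family == 0xf)
                    family += (eax >> 20) & 0xff;
            if (family == 0x6 || family == 0xf)
                    model |= ((eax >> 16) & 0xf) << 4;

            printf("Family_Model: %02X_%02XH  Stepping: %u\n",
                   family, model, stepping);
            return 0;
    }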

New bits in MSRs
================
Newer processors and microcode update on existing affected processors added new
bits to IA32_ARCH_CAPABILITIES MSR. These bits can be used to enumerate
specific variants of Processor MMIO Stale Data vulnerabilities and mitigation
capability.

MSR IA32_ARCH_CAPABILITIES
--------------------------
Bit 13 - SBDR_SSDP_NO - When set, processor is not affected by either the
         Shared Buffers Data Read (SBDR) vulnerability or the sideband stale
         data propagator (SSDP).
Bit 14 - FBSDP_NO - When set, processor is not affected by the Fill Buffer
         Stale Data Propagator (FBSDP).
Bit 15 - PSDP_NO - When set, processor is not affected by Primary Stale Data
         Propagator (PSDP).
Bit 17 - FB_CLEAR - When set, VERW instruction will overwrite CPU fill buffer
         values as part of MD_CLEAR operations. Processors that do not
         enumerate MDS_NO (meaning they are affected by MDS) but that do
         enumerate support for both L1D_FLUSH and MD_CLEAR implicitly enumerate
         FB_CLEAR as part of their MD_CLEAR support.
Bit 18 - FB_CLEAR_CTRL - Processor supports read and write to MSR
         IA32_MCU_OPT_CTRL[FB_CLEAR_DIS]. On such processors, the FB_CLEAR_DIS
         bit can be set to cause the VERW instruction to not perform the
         FB_CLEAR action. Not all processors that support FB_CLEAR will support
         FB_CLEAR_CTRL.
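
As an illustrative sketch only (not part of this patch), the enumeration bits
above can be inspected from user space through the msr driver; this assumes a
kernel with CONFIG_X86_MSR (providing /dev/cpu/*/msr), root privileges, and the
architectural MSR number 0x10a for IA32_ARCH_CAPABILITIES::

    /* Illustrative only: decode the new IA32_ARCH_CAPABILITIES bits. */
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    #define MSR_IA32_ARCH_CAPABILITIES 0x10a

    int main(void)
    {
            uint64_t cap;
            int fd = open("/dev/cpu/0/msr", O_RDONLY);

            if (fd < 0 ||
                pread(fd, &cap, sizeof(cap), MSR_IA32_ARCH_CAPABILITIES) != sizeof(cap)) {
                    perror("IA32_ARCH_CAPABILITIES");
                    return 1;
            }
            close(fd);

            printf("SBDR_SSDP_NO  (bit 13): %llu\n", (unsigned long long)(cap >> 13) & 1);
            printf("FBSDP_NO      (bit 14): %llu\n", (unsigned long long)(cap >> 14) & 1);
            printf("PSDP_NO       (bit 15): %llu\n", (unsigned long long)(cap >> 15) & 1);
            printf("FB_CLEAR      (bit 17): %llu\n", (unsigned long long)(cap >> 17) & 1);
            printf("FB_CLEAR_CTRL (bit 18): %llu\n", (unsigned long long)(cap >> 18) & 1);
            return 0;
    }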

MSR IA32_MCU_OPT_CTRL
---------------------
Bit 3 - FB_CLEAR_DIS - When set, VERW instruction does not perform the FB_CLEAR
action. This may be useful to reduce the performance impact of FB_CLEAR in
cases where system software deems it warranted (for example, when performance
is more critical, or the untrusted software has no MMIO access). Note that
FB_CLEAR_DIS has no impact on enumeration (for example, it does not change
FB_CLEAR or MD_CLEAR enumeration) and it may not be supported on all processors
that enumerate FB_CLEAR.

Mitigation
==========
Like MDS, all variants of Processor MMIO Stale Data vulnerabilities have the
same mitigation strategy to force the CPU to clear the affected buffers before
an attacker can extract the secrets.

This is achieved by using the otherwise unused and obsolete VERW instruction in
combination with a microcode update. The microcode clears the affected CPU
buffers when the VERW instruction is executed.

Kernel reuses the MDS function to invoke the buffer clearing:

	mds_clear_cpu_buffers()
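
As a simplified sketch of the mechanism (based on the description above, not a
verbatim copy of the kernel helper), the buffer clear executes VERW with a
memory operand that references a valid, writable data segment selector; with
the updated microcode this side effect overwrites the CPU fill buffers::

    /*
     * Simplified sketch of the VERW-based buffer clear. The memory-operand
     * form of VERW is used because only that form is documented to provide
     * the buffer-clearing side effect.
     */
    static inline void clear_cpu_buffers_sketch(void)
    {
            static const unsigned short ds = __KERNEL_DS; /* any valid writable selector */

            asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
    }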

On MDS affected CPUs, the kernel already invokes CPU buffer clear on
kernel/userspace, hypervisor/guest and C-state (idle) transitions. No
additional mitigation is needed on such CPUs.

For CPUs not affected by MDS or TAA, mitigation is needed only for the attacker
with MMIO capability. Therefore, VERW is not required for kernel/userspace. For
virtualization case, VERW is only needed at VMENTER for a guest with MMIO
capability.

Mitigation points
-----------------
Return to user space
^^^^^^^^^^^^^^^^^^^^
Same mitigation as MDS when affected by MDS/TAA, otherwise no mitigation
needed.

C-State transition
^^^^^^^^^^^^^^^^^^
Control register writes by CPU during C-state transition can propagate data
from fill buffer to uncore buffers. Execute VERW before C-state transition to
clear CPU fill buffers.

Guest entry point
^^^^^^^^^^^^^^^^^
Same mitigation as MDS when processor is also affected by MDS/TAA, otherwise
execute VERW at VMENTER only for MMIO capable guests. On CPUs not affected by
MDS/TAA, guest without MMIO access cannot extract secrets using Processor MMIO
Stale Data vulnerabilities, so there is no need to execute VERW for such guests.

Mitigation control on the kernel command line
---------------------------------------------
The kernel command line allows to control the Processor MMIO Stale Data
mitigations at boot time with the option "mmio_stale_data=". The valid
arguments for this option are:

  ==========  =================================================================
  full        If the CPU is vulnerable, enable mitigation; CPU buffer clearing
              on exit to userspace and when entering a VM. Idle transitions are
              protected as well. It does not automatically disable SMT.
  full,nosmt  Same as full, with SMT disabled on vulnerable CPUs. This is the
              complete mitigation.
  off         Disables mitigation completely.
  ==========  =================================================================

If the CPU is affected and mmio_stale_data=off is not supplied on the kernel
command line, then the kernel selects the appropriate mitigation.

Mitigation status information
-----------------------------
The Linux kernel provides a sysfs interface to enumerate the current
vulnerability status of the system: whether the system is vulnerable, and
which mitigations are active. The relevant sysfs file is:

	/sys/devices/system/cpu/vulnerabilities/mmio_stale_data

The possible values in this file are:

  .. list-table::

     * - 'Not affected'
       - The processor is not vulnerable
     * - 'Vulnerable'
       - The processor is vulnerable, but no mitigation enabled
     * - 'Vulnerable: Clear CPU buffers attempted, no microcode'
       - The processor is vulnerable, but microcode is not updated. The
         mitigation is enabled on a best effort basis.
     * - 'Mitigation: Clear CPU buffers'
       - The processor is vulnerable and the CPU buffer clearing mitigation is
         enabled.

If the processor is vulnerable then the following information is appended to
the above information:

  ========================  ===========================================
  'SMT vulnerable'          SMT is enabled
  'SMT disabled'            SMT is disabled
  'SMT Host state unknown'  Kernel runs in a VM, Host SMT state unknown
  ========================  ===========================================
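
For completeness, a minimal user-space sketch (illustrative only, not part of
this patch) that reads the sysfs file above and prints the reported status::

    /* Illustrative only: print the Processor MMIO Stale Data mitigation status. */
    #include <stdio.h>

    int main(void)
    {
            char buf[128];
            FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/mmio_stale_data", "r");

            if (!f) {
                    perror("mmio_stale_data");
                    return 1;
            }
            if (fgets(buf, sizeof(buf), f))
                    printf("mmio_stale_data: %s", buf); /* e.g. "Mitigation: Clear CPU buffers; SMT disabled" */
            fclose(f);
            return 0;
    }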

References
----------
.. [#f1] Affected Processors
   https://www.intel.com/content/www/us/en/developer/topic-technology/software-security-guidance/processors-affected-consolidated-product-cpu-model.html

@@ -2469,7 +2469,6 @@
			protected: nVHE-based mode with support for guests whose
				   state is kept private from the host.
				   Not valid if the kernel is running in EL2.

			Defaults to VHE/nVHE based on hardware support. Setting
			mode to "protected" will disable kexec and hibernation
@@ -3176,6 +3175,7 @@
				srbds=off [X86,INTEL]
				no_entry_flush [PPC]
				no_uaccess_flush [PPC]
				mmio_stale_data=off [X86]

			Exceptions:
				This does not have any effect on
@@ -3197,6 +3197,7 @@
			Equivalent to: l1tf=flush,nosmt [X86]
				       mds=full,nosmt [X86]
				       tsx_async_abort=full,nosmt [X86]
				       mmio_stale_data=full,nosmt [X86]

	mminit_loglevel=
			[KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this
@@ -3206,6 +3207,40 @@
			log everything. Information is printed at KERN_DEBUG
			so loglevel=8 may also need to be specified.

	mmio_stale_data=
			[X86,INTEL] Control mitigation for the Processor
			MMIO Stale Data vulnerabilities.

			Processor MMIO Stale Data is a class of
			vulnerabilities that may expose data after an MMIO
			operation. Exposed data could originate or end in
			the same CPU buffers as affected by MDS and TAA.
			Therefore, similar to MDS and TAA, the mitigation
			is to clear the affected CPU buffers.

			This parameter controls the mitigation. The
			options are:

			full       - Enable mitigation on vulnerable CPUs

			full,nosmt - Enable mitigation and disable SMT on
				     vulnerable CPUs.

			off        - Unconditionally disable mitigation

			On MDS or TAA affected machines,
			mmio_stale_data=off can be prevented by an active
			MDS or TAA mitigation as these vulnerabilities are
			mitigated with the same mechanism so in order to
			disable this mitigation, you need to specify
			mds=off and tsx_async_abort=off too.

			Not specifying this option is equivalent to
			mmio_stale_data=full.

			For details see:
			Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst

	module.sig_enforce
			[KNL] When CONFIG_MODULE_SIG is set, this means that
			modules without (valid) signatures will fail to load.
@ -79,7 +79,7 @@ To help deal with the per-inode context, a number helper functions are
|
||||
provided. Firstly, a function to perform basic initialisation on a context and
|
||||
set the operations table pointer::
|
||||
|
||||
void netfs_inode_init(struct inode *inode,
|
||||
void netfs_inode_init(struct netfs_inode *ctx,
|
||||
const struct netfs_request_ops *ops);
|
||||
|
||||
then a function to cast from the VFS inode structure to the netfs context::
|
||||
@ -89,7 +89,7 @@ then a function to cast from the VFS inode structure to the netfs context::
|
||||
and finally, a function to get the cache cookie pointer from the context
|
||||
attached to an inode (or NULL if fscache is disabled)::
|
||||
|
||||
struct fscache_cookie *netfs_i_cookie(struct inode *inode);
|
||||
struct fscache_cookie *netfs_i_cookie(struct netfs_inode *ctx);
|
||||
|
||||
|
||||
Buffered Read Helpers
|
||||
@ -136,8 +136,9 @@ Three read helpers are provided::
|
||||
|
||||
void netfs_readahead(struct readahead_control *ractl);
|
||||
int netfs_read_folio(struct file *file,
|
||||
struct folio *folio);
|
||||
int netfs_write_begin(struct file *file,
|
||||
struct folio *folio);
|
||||
int netfs_write_begin(struct netfs_inode *ctx,
|
||||
struct file *file,
|
||||
struct address_space *mapping,
|
||||
loff_t pos,
|
||||
unsigned int len,
|
||||
@ -157,9 +158,10 @@ The helpers manage the read request, calling back into the network filesystem
|
||||
through the suppplied table of operations. Waits will be performed as
|
||||
necessary before returning for helpers that are meant to be synchronous.
|
||||
|
||||
If an error occurs and netfs_priv is non-NULL, ops->cleanup() will be called to
|
||||
deal with it. If some parts of the request are in progress when an error
|
||||
occurs, the request will get partially completed if sufficient data is read.
|
||||
If an error occurs, the ->free_request() will be called to clean up the
|
||||
netfs_io_request struct allocated. If some parts of the request are in
|
||||
progress when an error occurs, the request will get partially completed if
|
||||
sufficient data is read.
|
||||
|
||||
Additionally, there is::
|
||||
|
||||
@ -207,8 +209,7 @@ The above fields are the ones the netfs can use. They are:
|
||||
* ``netfs_priv``
|
||||
|
||||
The network filesystem's private data. The value for this can be passed in
|
||||
to the helper functions or set during the request. The ->cleanup() op will
|
||||
be called if this is non-NULL at the end.
|
||||
to the helper functions or set during the request.
|
||||
|
||||
* ``start``
|
||||
* ``len``
|
||||
@ -293,6 +294,7 @@ through which it can issue requests and negotiate::
|
||||
|
||||
struct netfs_request_ops {
|
||||
void (*init_request)(struct netfs_io_request *rreq, struct file *file);
|
||||
void (*free_request)(struct netfs_io_request *rreq);
|
||||
int (*begin_cache_operation)(struct netfs_io_request *rreq);
|
||||
void (*expand_readahead)(struct netfs_io_request *rreq);
|
||||
bool (*clamp_length)(struct netfs_io_subrequest *subreq);
|
||||
@ -301,7 +303,6 @@ through which it can issue requests and negotiate::
|
||||
int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
|
||||
struct folio *folio, void **_fsdata);
|
||||
void (*done)(struct netfs_io_request *rreq);
|
||||
void (*cleanup)(struct address_space *mapping, void *netfs_priv);
|
||||
};
|
||||
|
||||
The operations are as follows:
|
||||
@ -309,7 +310,12 @@ The operations are as follows:
|
||||
* ``init_request()``
|
||||
|
||||
[Optional] This is called to initialise the request structure. It is given
|
||||
the file for reference and can modify the ->netfs_priv value.
|
||||
the file for reference.
|
||||
|
||||
* ``free_request()``
|
||||
|
||||
[Optional] This is called as the request is being deallocated so that the
|
||||
filesystem can clean up any state it has attached there.
|
||||
|
||||
* ``begin_cache_operation()``
|
||||
|
||||
@ -383,11 +389,6 @@ The operations are as follows:
|
||||
[Optional] This is called after the folios in the request have all been
|
||||
unlocked (and marked uptodate if applicable).
|
||||
|
||||
* ``cleanup``
|
||||
|
||||
[Optional] This is called as the request is being deallocated so that the
|
||||
filesystem can clean up ->netfs_priv.
|
||||
|
||||
|
||||
|
||||
Read Helper Procedure
|
||||
|
@ -2925,6 +2925,43 @@ plpmtud_probe_interval - INTEGER
|
||||
|
||||
Default: 0
|
||||
|
||||
reconf_enable - BOOLEAN
|
||||
Enable or disable extension of Stream Reconfiguration functionality
|
||||
specified in RFC6525. This extension provides the ability to "reset"
|
||||
a stream, and it includes the Parameters of "Outgoing/Incoming SSN
|
||||
Reset", "SSN/TSN Reset" and "Add Outgoing/Incoming Streams".
|
||||
|
||||
- 1: Enable extension.
|
||||
- 0: Disable extension.
|
||||
|
||||
Default: 0
|
||||
|
||||
intl_enable - BOOLEAN
|
||||
Enable or disable extension of User Message Interleaving functionality
|
||||
specified in RFC8260. This extension allows the interleaving of user
|
||||
messages sent on different streams. With this feature enabled, I-DATA
|
||||
chunk will replace DATA chunk to carry user messages if also supported
|
||||
by the peer. Note that to use this feature, one needs to set this option
|
||||
to 1 and also needs to set socket options SCTP_FRAGMENT_INTERLEAVE to 2
|
||||
and SCTP_INTERLEAVING_SUPPORTED to 1.
|
||||
|
||||
- 1: Enable extension.
|
||||
- 0: Disable extension.
|
||||
|
||||
Default: 0
|
||||
|
||||
ecn_enable - BOOLEAN
|
||||
Control use of Explicit Congestion Notification (ECN) by SCTP.
|
||||
Like in TCP, ECN is used only when both ends of the SCTP connection
|
||||
indicate support for it. This feature is useful in avoiding losses
|
||||
due to congestion by allowing supporting routers to signal congestion
|
||||
before having to drop packets.
|
||||
|
||||
1: Enable ecn.
|
||||
0: Disable ecn.
|
||||
|
||||
Default: 1
|
||||
|
||||
|
||||
``/proc/sys/net/core/*``
|
||||
========================
|
||||
|
@ -104,7 +104,7 @@ Whenever possible, use the PHY side RGMII delay for these reasons:
|
||||
|
||||
* PHY device drivers in PHYLIB being reusable by nature, being able to
|
||||
configure correctly a specified delay enables more designs with similar delay
|
||||
requirements to be operate correctly
|
||||
requirements to be operated correctly
|
||||
|
||||
For cases where the PHY is not capable of providing this delay, but the
|
||||
Ethernet MAC driver is capable of doing so, the correct phy_interface_t value
|
||||
|
@ -32,6 +32,7 @@ you probably needn't concern yourself with pcmciautils.
|
||||
GNU C 5.1 gcc --version
|
||||
Clang/LLVM (optional) 11.0.0 clang --version
|
||||
GNU make 3.81 make --version
|
||||
bash 4.2 bash --version
|
||||
binutils 2.23 ld -v
|
||||
flex 2.5.35 flex --version
|
||||
bison 2.0 bison --version
|
||||
@ -84,6 +85,12 @@ Make
|
||||
|
||||
You will need GNU make 3.81 or later to build the kernel.
|
||||
|
||||
Bash
|
||||
----
|
||||
|
||||
Some bash scripts are used for the kernel build.
|
||||
Bash 4.2 or newer is needed.
|
||||
|
||||
Binutils
|
||||
--------
|
||||
|
||||
@ -362,6 +369,11 @@ Make
|
||||
|
||||
- <ftp://ftp.gnu.org/gnu/make/>
|
||||
|
||||
Bash
|
||||
----
|
||||
|
||||
- <ftp://ftp.gnu.org/gnu/bash/>
|
||||
|
||||
Binutils
|
||||
--------
|
||||
|
||||
|
@ -7653,6 +7653,7 @@ F: include/uapi/scsi/fc/
|
||||
|
||||
FILE LOCKING (flock() and fcntl()/lockf())
|
||||
M: Jeff Layton <jlayton@kernel.org>
|
||||
M: Chuck Lever <chuck.lever@oracle.com>
|
||||
L: linux-fsdevel@vger.kernel.org
|
||||
S: Maintained
|
||||
F: fs/fcntl.c
|
||||
@ -10745,6 +10746,7 @@ W: http://kernelnewbies.org/KernelJanitors
|
||||
|
||||
KERNEL NFSD, SUNRPC, AND LOCKD SERVERS
|
||||
M: Chuck Lever <chuck.lever@oracle.com>
|
||||
M: Jeff Layton <jlayton@kernel.org>
|
||||
L: linux-nfs@vger.kernel.org
|
||||
S: Supported
|
||||
W: http://nfs.sourceforge.net/
|
||||
@ -10869,7 +10871,6 @@ F: arch/riscv/include/asm/kvm*
|
||||
F: arch/riscv/include/uapi/asm/kvm*
|
||||
F: arch/riscv/kvm/
|
||||
F: tools/testing/selftests/kvm/*/riscv/
|
||||
F: tools/testing/selftests/kvm/riscv/
|
||||
|
||||
KERNEL VIRTUAL MACHINE for s390 (KVM/s390)
|
||||
M: Christian Borntraeger <borntraeger@linux.ibm.com>
|
||||
@ -13798,6 +13799,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
|
||||
F: Documentation/devicetree/bindings/net/
|
||||
F: drivers/connector/
|
||||
F: drivers/net/
|
||||
F: include/dt-bindings/net/
|
||||
F: include/linux/etherdevice.h
|
||||
F: include/linux/fcdevice.h
|
||||
F: include/linux/fddidevice.h
|
||||
|
2
Makefile
2
Makefile
@ -2,7 +2,7 @@
|
||||
VERSION = 5
|
||||
PATCHLEVEL = 19
|
||||
SUBLEVEL = 0
|
||||
EXTRAVERSION = -rc1
|
||||
EXTRAVERSION = -rc2
|
||||
NAME = Superb Owl
|
||||
|
||||
# *DOCUMENTATION*
|
||||
|
@ -120,26 +120,31 @@
|
||||
port@0 {
|
||||
reg = <0>;
|
||||
label = "lan1";
|
||||
phy-mode = "internal";
|
||||
};
|
||||
|
||||
port@1 {
|
||||
reg = <1>;
|
||||
label = "lan2";
|
||||
phy-mode = "internal";
|
||||
};
|
||||
|
||||
port@2 {
|
||||
reg = <2>;
|
||||
label = "lan3";
|
||||
phy-mode = "internal";
|
||||
};
|
||||
|
||||
port@3 {
|
||||
reg = <3>;
|
||||
label = "lan4";
|
||||
phy-mode = "internal";
|
||||
};
|
||||
|
||||
port@4 {
|
||||
reg = <4>;
|
||||
label = "lan5";
|
||||
phy-mode = "internal";
|
||||
};
|
||||
|
||||
port@5 {
|
||||
|
@ -362,11 +362,6 @@ struct kvm_vcpu_arch {
|
||||
struct arch_timer_cpu timer_cpu;
|
||||
struct kvm_pmu pmu;
|
||||
|
||||
/*
|
||||
* Anything that is not used directly from assembly code goes
|
||||
* here.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Guest registers we preserve during guest debugging.
|
||||
*
|
||||
|
@ -113,6 +113,9 @@ static __always_inline bool has_vhe(void)
|
||||
/*
|
||||
* Code only run in VHE/NVHE hyp context can assume VHE is present or
|
||||
* absent. Otherwise fall back to caps.
|
||||
* This allows the compiler to discard VHE-specific code from the
|
||||
* nVHE object, reducing the number of external symbol references
|
||||
* needed to link.
|
||||
*/
|
||||
if (is_vhe_hyp_code())
|
||||
return true;
|
||||
|
@ -1974,15 +1974,7 @@ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
|
||||
#ifdef CONFIG_KVM
|
||||
static bool is_kvm_protected_mode(const struct arm64_cpu_capabilities *entry, int __unused)
|
||||
{
|
||||
if (kvm_get_mode() != KVM_MODE_PROTECTED)
|
||||
return false;
|
||||
|
||||
if (is_kernel_in_hyp_mode()) {
|
||||
pr_warn("Protected KVM not available with VHE\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
return kvm_get_mode() == KVM_MODE_PROTECTED;
|
||||
}
|
||||
#endif /* CONFIG_KVM */
|
||||
|
||||
|
@ -1230,6 +1230,9 @@ bool kvm_arch_timer_get_input_level(int vintid)
|
||||
struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
|
||||
struct arch_timer_context *timer;
|
||||
|
||||
if (WARN(!vcpu, "No vcpu context!\n"))
|
||||
return false;
|
||||
|
||||
if (vintid == vcpu_vtimer(vcpu)->irq.irq)
|
||||
timer = vcpu_vtimer(vcpu);
|
||||
else if (vintid == vcpu_ptimer(vcpu)->irq.irq)
|
||||
|
@ -150,8 +150,10 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
|
||||
if (ret)
|
||||
goto out_free_stage2_pgd;
|
||||
|
||||
if (!zalloc_cpumask_var(&kvm->arch.supported_cpus, GFP_KERNEL))
|
||||
if (!zalloc_cpumask_var(&kvm->arch.supported_cpus, GFP_KERNEL)) {
|
||||
ret = -ENOMEM;
|
||||
goto out_free_stage2_pgd;
|
||||
}
|
||||
cpumask_copy(kvm->arch.supported_cpus, cpu_possible_mask);
|
||||
|
||||
kvm_vgic_early_init(kvm);
|
||||
@ -2271,7 +2273,11 @@ static int __init early_kvm_mode_cfg(char *arg)
|
||||
return -EINVAL;
|
||||
|
||||
if (strcmp(arg, "protected") == 0) {
|
||||
kvm_mode = KVM_MODE_PROTECTED;
|
||||
if (!is_kernel_in_hyp_mode())
|
||||
kvm_mode = KVM_MODE_PROTECTED;
|
||||
else
|
||||
pr_warn_once("Protected KVM not available with VHE\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -80,6 +80,7 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
|
||||
vcpu->arch.flags &= ~KVM_ARM64_FP_ENABLED;
|
||||
vcpu->arch.flags |= KVM_ARM64_FP_HOST;
|
||||
|
||||
vcpu->arch.flags &= ~KVM_ARM64_HOST_SVE_ENABLED;
|
||||
if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
|
||||
vcpu->arch.flags |= KVM_ARM64_HOST_SVE_ENABLED;
|
||||
|
||||
@ -93,6 +94,7 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
|
||||
* operations. Do this for ZA as well for now for simplicity.
|
||||
*/
|
||||
if (system_supports_sme()) {
|
||||
vcpu->arch.flags &= ~KVM_ARM64_HOST_SME_ENABLED;
|
||||
if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN)
|
||||
vcpu->arch.flags |= KVM_ARM64_HOST_SME_ENABLED;
|
||||
|
||||
|
@ -314,15 +314,11 @@ static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
|
||||
int host_stage2_idmap_locked(phys_addr_t addr, u64 size,
|
||||
enum kvm_pgtable_prot prot)
|
||||
{
|
||||
hyp_assert_lock_held(&host_kvm.lock);
|
||||
|
||||
return host_stage2_try(__host_stage2_idmap, addr, addr + size, prot);
|
||||
}
|
||||
|
||||
int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id)
|
||||
{
|
||||
hyp_assert_lock_held(&host_kvm.lock);
|
||||
|
||||
return host_stage2_try(kvm_pgtable_stage2_set_owner, &host_kvm.pgt,
|
||||
addr, size, &host_s2_pool, owner_id);
|
||||
}
|
||||
|
@ -243,15 +243,9 @@ u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id)
|
||||
case SYS_ID_AA64MMFR2_EL1:
|
||||
return get_pvm_id_aa64mmfr2(vcpu);
|
||||
default:
|
||||
/*
|
||||
* Should never happen because all cases are covered in
|
||||
* pvm_sys_reg_descs[].
|
||||
*/
|
||||
WARN_ON(1);
|
||||
break;
|
||||
/* Unhandled ID register, RAZ */
|
||||
return 0;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static u64 read_id_reg(const struct kvm_vcpu *vcpu,
|
||||
@ -332,6 +326,16 @@ static bool pvm_gic_read_sre(struct kvm_vcpu *vcpu,
|
||||
/* Mark the specified system register as an AArch64 feature id register. */
|
||||
#define AARCH64(REG) { SYS_DESC(REG), .access = pvm_access_id_aarch64 }
|
||||
|
||||
/*
|
||||
* sys_reg_desc initialiser for architecturally unallocated cpufeature ID
|
||||
* register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
|
||||
* (1 <= crm < 8, 0 <= Op2 < 8).
|
||||
*/
|
||||
#define ID_UNALLOCATED(crm, op2) { \
|
||||
Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2), \
|
||||
.access = pvm_access_id_aarch64, \
|
||||
}
|
||||
|
||||
/* Mark the specified system register as Read-As-Zero/Write-Ignored */
|
||||
#define RAZ_WI(REG) { SYS_DESC(REG), .access = pvm_access_raz_wi }
|
||||
|
||||
@ -375,24 +379,46 @@ static const struct sys_reg_desc pvm_sys_reg_descs[] = {
|
||||
AARCH32(SYS_MVFR0_EL1),
|
||||
AARCH32(SYS_MVFR1_EL1),
|
||||
AARCH32(SYS_MVFR2_EL1),
|
||||
ID_UNALLOCATED(3,3),
|
||||
AARCH32(SYS_ID_PFR2_EL1),
|
||||
AARCH32(SYS_ID_DFR1_EL1),
|
||||
AARCH32(SYS_ID_MMFR5_EL1),
|
||||
ID_UNALLOCATED(3,7),
|
||||
|
||||
/* AArch64 ID registers */
|
||||
/* CRm=4 */
|
||||
AARCH64(SYS_ID_AA64PFR0_EL1),
|
||||
AARCH64(SYS_ID_AA64PFR1_EL1),
|
||||
ID_UNALLOCATED(4,2),
|
||||
ID_UNALLOCATED(4,3),
|
||||
AARCH64(SYS_ID_AA64ZFR0_EL1),
|
||||
ID_UNALLOCATED(4,5),
|
||||
ID_UNALLOCATED(4,6),
|
||||
ID_UNALLOCATED(4,7),
|
||||
AARCH64(SYS_ID_AA64DFR0_EL1),
|
||||
AARCH64(SYS_ID_AA64DFR1_EL1),
|
||||
ID_UNALLOCATED(5,2),
|
||||
ID_UNALLOCATED(5,3),
|
||||
AARCH64(SYS_ID_AA64AFR0_EL1),
|
||||
AARCH64(SYS_ID_AA64AFR1_EL1),
|
||||
ID_UNALLOCATED(5,6),
|
||||
ID_UNALLOCATED(5,7),
|
||||
AARCH64(SYS_ID_AA64ISAR0_EL1),
|
||||
AARCH64(SYS_ID_AA64ISAR1_EL1),
|
||||
AARCH64(SYS_ID_AA64ISAR2_EL1),
|
||||
ID_UNALLOCATED(6,3),
|
||||
ID_UNALLOCATED(6,4),
|
||||
ID_UNALLOCATED(6,5),
|
||||
ID_UNALLOCATED(6,6),
|
||||
ID_UNALLOCATED(6,7),
|
||||
AARCH64(SYS_ID_AA64MMFR0_EL1),
|
||||
AARCH64(SYS_ID_AA64MMFR1_EL1),
|
||||
AARCH64(SYS_ID_AA64MMFR2_EL1),
|
||||
ID_UNALLOCATED(7,3),
|
||||
ID_UNALLOCATED(7,4),
|
||||
ID_UNALLOCATED(7,5),
|
||||
ID_UNALLOCATED(7,6),
|
||||
ID_UNALLOCATED(7,7),
|
||||
|
||||
/* Scalable Vector Registers are restricted. */
|
||||
|
||||
|
@ -429,11 +429,11 @@ static const struct vgic_register_region vgic_v2_dist_registers[] = {
|
||||
VGIC_ACCESS_32bit),
|
||||
REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET,
|
||||
vgic_mmio_read_pending, vgic_mmio_write_spending,
|
||||
NULL, vgic_uaccess_write_spending, 1,
|
||||
vgic_uaccess_read_pending, vgic_uaccess_write_spending, 1,
|
||||
VGIC_ACCESS_32bit),
|
||||
REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_CLEAR,
|
||||
vgic_mmio_read_pending, vgic_mmio_write_cpending,
|
||||
NULL, vgic_uaccess_write_cpending, 1,
|
||||
vgic_uaccess_read_pending, vgic_uaccess_write_cpending, 1,
|
||||
VGIC_ACCESS_32bit),
|
||||
REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET,
|
||||
vgic_mmio_read_active, vgic_mmio_write_sactive,
|
||||
|
@ -353,42 +353,6 @@ static unsigned long vgic_mmio_read_v3_idregs(struct kvm_vcpu *vcpu,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static unsigned long vgic_v3_uaccess_read_pending(struct kvm_vcpu *vcpu,
|
||||
gpa_t addr, unsigned int len)
|
||||
{
|
||||
u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
|
||||
u32 value = 0;
|
||||
int i;
|
||||
|
||||
/*
|
||||
* pending state of interrupt is latched in pending_latch variable.
|
||||
* Userspace will save and restore pending state and line_level
|
||||
* separately.
|
||||
* Refer to Documentation/virt/kvm/devices/arm-vgic-v3.rst
|
||||
* for handling of ISPENDR and ICPENDR.
|
||||
*/
|
||||
for (i = 0; i < len * 8; i++) {
|
||||
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
|
||||
bool state = irq->pending_latch;
|
||||
|
||||
if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
|
||||
int err;
|
||||
|
||||
err = irq_get_irqchip_state(irq->host_irq,
|
||||
IRQCHIP_STATE_PENDING,
|
||||
&state);
|
||||
WARN_ON(err);
|
||||
}
|
||||
|
||||
if (state)
|
||||
value |= (1U << i);
|
||||
|
||||
vgic_put_irq(vcpu->kvm, irq);
|
||||
}
|
||||
|
||||
return value;
|
||||
}
|
||||
|
||||
static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
|
||||
gpa_t addr, unsigned int len,
|
||||
unsigned long val)
|
||||
@ -666,7 +630,7 @@ static const struct vgic_register_region vgic_v3_dist_registers[] = {
|
||||
VGIC_ACCESS_32bit),
|
||||
REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISPENDR,
|
||||
vgic_mmio_read_pending, vgic_mmio_write_spending,
|
||||
vgic_v3_uaccess_read_pending, vgic_v3_uaccess_write_pending, 1,
|
||||
vgic_uaccess_read_pending, vgic_v3_uaccess_write_pending, 1,
|
||||
VGIC_ACCESS_32bit),
|
||||
REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICPENDR,
|
||||
vgic_mmio_read_pending, vgic_mmio_write_cpending,
|
||||
@ -750,7 +714,7 @@ static const struct vgic_register_region vgic_v3_rd_registers[] = {
|
||||
VGIC_ACCESS_32bit),
|
||||
REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISPENDR0,
|
||||
vgic_mmio_read_pending, vgic_mmio_write_spending,
|
||||
vgic_v3_uaccess_read_pending, vgic_v3_uaccess_write_pending, 4,
|
||||
vgic_uaccess_read_pending, vgic_v3_uaccess_write_pending, 4,
|
||||
VGIC_ACCESS_32bit),
|
||||
REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ICPENDR0,
|
||||
vgic_mmio_read_pending, vgic_mmio_write_cpending,
|
||||
|
@ -226,8 +226,9 @@ int vgic_uaccess_write_cenable(struct kvm_vcpu *vcpu,
|
||||
return 0;
|
||||
}
|
||||
|
||||
unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
|
||||
gpa_t addr, unsigned int len)
|
||||
static unsigned long __read_pending(struct kvm_vcpu *vcpu,
|
||||
gpa_t addr, unsigned int len,
|
||||
bool is_user)
|
||||
{
|
||||
u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
|
||||
u32 value = 0;
|
||||
@ -239,6 +240,15 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
|
||||
unsigned long flags;
|
||||
bool val;
|
||||
|
||||
/*
|
||||
* When used from userspace with a GICv3 model:
|
||||
*
|
||||
* Pending state of interrupt is latched in pending_latch
|
||||
* variable. Userspace will save and restore pending state
|
||||
* and line_level separately.
|
||||
* Refer to Documentation/virt/kvm/devices/arm-vgic-v3.rst
|
||||
* for handling of ISPENDR and ICPENDR.
|
||||
*/
|
||||
raw_spin_lock_irqsave(&irq->irq_lock, flags);
|
||||
if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
|
||||
int err;
|
||||
@ -248,10 +258,20 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
|
||||
IRQCHIP_STATE_PENDING,
|
||||
&val);
|
||||
WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
|
||||
} else if (vgic_irq_is_mapped_level(irq)) {
|
||||
} else if (!is_user && vgic_irq_is_mapped_level(irq)) {
|
||||
val = vgic_get_phys_line_level(irq);
|
||||
} else {
|
||||
val = irq_is_pending(irq);
|
||||
switch (vcpu->kvm->arch.vgic.vgic_model) {
|
||||
case KVM_DEV_TYPE_ARM_VGIC_V3:
|
||||
if (is_user) {
|
||||
val = irq->pending_latch;
|
||||
break;
|
||||
}
|
||||
fallthrough;
|
||||
default:
|
||||
val = irq_is_pending(irq);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
value |= ((u32)val << i);
|
||||
@ -263,6 +283,18 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
|
||||
return value;
|
||||
}
|
||||
|
||||
unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
|
||||
gpa_t addr, unsigned int len)
|
||||
{
|
||||
return __read_pending(vcpu, addr, len, false);
|
||||
}
|
||||
|
||||
unsigned long vgic_uaccess_read_pending(struct kvm_vcpu *vcpu,
|
||||
gpa_t addr, unsigned int len)
|
||||
{
|
||||
return __read_pending(vcpu, addr, len, true);
|
||||
}
|
||||
|
||||
static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
|
||||
{
|
||||
return (vgic_irq_is_sgi(irq->intid) &&
|
||||
|
@ -149,6 +149,9 @@ int vgic_uaccess_write_cenable(struct kvm_vcpu *vcpu,
|
||||
unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
|
||||
gpa_t addr, unsigned int len);
|
||||
|
||||
unsigned long vgic_uaccess_read_pending(struct kvm_vcpu *vcpu,
|
||||
gpa_t addr, unsigned int len);
|
||||
|
||||
void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
|
||||
gpa_t addr, unsigned int len,
|
||||
unsigned long val);
|
||||
|
@ -66,7 +66,7 @@ static void flush_context(void)
|
||||
* the next context-switch, we broadcast TLB flush + I-cache
|
||||
* invalidation over the inner shareable domain on rollover.
|
||||
*/
|
||||
kvm_call_hyp(__kvm_flush_vm_context);
|
||||
kvm_call_hyp(__kvm_flush_vm_context);
|
||||
}
|
||||
|
||||
static bool check_update_reserved_vmid(u64 vmid, u64 newvmid)
|
||||
|
@ -343,6 +343,7 @@ config NR_CPUS
|
||||
|
||||
config NUMA
|
||||
bool "NUMA Support"
|
||||
select SMP
|
||||
select ACPI_NUMA if ACPI
|
||||
help
|
||||
Say Y to compile the kernel with NUMA (Non-Uniform Memory Access)
|
||||
|
@ -19,7 +19,7 @@ typedef struct {
|
||||
unsigned int __softirq_pending;
|
||||
} ____cacheline_aligned irq_cpustat_t;
|
||||
|
||||
DECLARE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
|
||||
DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
|
||||
|
||||
#define __ARCH_IRQ_STAT
|
||||
|
||||
|
@ -6,6 +6,7 @@
|
||||
#define __ASM_PERCPU_H
|
||||
|
||||
#include <asm/cmpxchg.h>
|
||||
#include <asm/loongarch.h>
|
||||
|
||||
/* Use r21 for fast access */
|
||||
register unsigned long __my_cpu_offset __asm__("$r21");
|
||||
|
@ -9,10 +9,16 @@
|
||||
#include <linux/atomic.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/linkage.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/threads.h>
|
||||
#include <linux/cpumask.h>
|
||||
|
||||
extern int smp_num_siblings;
|
||||
extern int num_processors;
|
||||
extern int disabled_cpus;
|
||||
extern cpumask_t cpu_sibling_map[];
|
||||
extern cpumask_t cpu_core_map[];
|
||||
extern cpumask_t cpu_foreign_map[];
|
||||
|
||||
void loongson3_smp_setup(void);
|
||||
void loongson3_prepare_cpus(unsigned int max_cpus);
|
||||
void loongson3_boot_secondary(int cpu, struct task_struct *idle);
|
||||
@ -25,26 +31,11 @@ int loongson3_cpu_disable(void);
|
||||
void loongson3_cpu_die(unsigned int cpu);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
|
||||
static inline void plat_smp_setup(void)
|
||||
{
|
||||
loongson3_smp_setup();
|
||||
}
|
||||
|
||||
#else /* !CONFIG_SMP */
|
||||
|
||||
static inline void plat_smp_setup(void) { }
|
||||
|
||||
#endif /* !CONFIG_SMP */
|
||||
|
||||
extern int smp_num_siblings;
|
||||
extern int num_processors;
|
||||
extern int disabled_cpus;
|
||||
extern cpumask_t cpu_sibling_map[];
|
||||
extern cpumask_t cpu_core_map[];
|
||||
extern cpumask_t cpu_foreign_map[];
|
||||
|
||||
static inline int raw_smp_processor_id(void)
|
||||
{
|
||||
#if defined(__VDSO__)
|
||||
|
@ -12,13 +12,6 @@
|
||||
#include <asm/cpu.h>
|
||||
#include <asm/cpu-features.h>
|
||||
|
||||
/*
|
||||
* Standard way to access the cycle counter.
|
||||
* Currently only used on SMP for scheduling.
|
||||
*
|
||||
* We know that all SMP capable CPUs have cycle counters.
|
||||
*/
|
||||
|
||||
typedef unsigned long cycles_t;
|
||||
|
||||
#define get_cycles get_cycles
|
||||
|
@ -138,6 +138,7 @@ void __init acpi_boot_table_init(void)
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
static int set_processor_mask(u32 id, u32 flags)
|
||||
{
|
||||
|
||||
@ -166,15 +167,18 @@ static int set_processor_mask(u32 id, u32 flags)
|
||||
|
||||
return cpu;
|
||||
}
|
||||
#endif
|
||||
|
||||
static void __init acpi_process_madt(void)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
int i;
|
||||
|
||||
for (i = 0; i < NR_CPUS; i++) {
|
||||
__cpu_number_map[i] = -1;
|
||||
__cpu_logical_map[i] = -1;
|
||||
}
|
||||
#endif
|
||||
|
||||
loongson_sysconf.nr_cpus = num_processors;
|
||||
}
|
||||
|
@ -4,6 +4,7 @@
|
||||
*
|
||||
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
|
||||
*/
|
||||
#include <asm/cpu-info.h>
|
||||
#include <linux/cacheinfo.h>
|
||||
|
||||
/* Populates leaf and increments to next leaf */
|
||||
|
@ -22,6 +22,8 @@
|
||||
#include <asm/setup.h>
|
||||
|
||||
DEFINE_PER_CPU(unsigned long, irq_stack);
|
||||
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
|
||||
EXPORT_PER_CPU_SYMBOL(irq_stat);
|
||||
|
||||
struct irq_domain *cpu_domain;
|
||||
struct irq_domain *liointc_domain;
|
||||
@ -56,8 +58,11 @@ int arch_show_interrupts(struct seq_file *p, int prec)
|
||||
|
||||
void __init init_IRQ(void)
|
||||
{
|
||||
int i, r, ipi_irq;
|
||||
int i;
|
||||
#ifdef CONFIG_SMP
|
||||
int r, ipi_irq;
|
||||
static int ipi_dummy_dev;
|
||||
#endif
|
||||
unsigned int order = get_order(IRQ_STACK_SIZE);
|
||||
struct page *page;
|
||||
|
||||
|
@ -120,10 +120,12 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
|
||||
/*
|
||||
* Copy architecture-specific thread state
|
||||
*/
|
||||
int copy_thread(unsigned long clone_flags, unsigned long usp,
|
||||
unsigned long kthread_arg, struct task_struct *p, unsigned long tls)
|
||||
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
|
||||
{
|
||||
unsigned long childksp;
|
||||
unsigned long tls = args->tls;
|
||||
unsigned long usp = args->stack;
|
||||
unsigned long clone_flags = args->flags;
|
||||
struct pt_regs *childregs, *regs = current_pt_regs();
|
||||
|
||||
childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;
|
||||
@ -136,12 +138,12 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
|
||||
p->thread.csr_crmd = csr_read32(LOONGARCH_CSR_CRMD);
|
||||
p->thread.csr_prmd = csr_read32(LOONGARCH_CSR_PRMD);
|
||||
p->thread.csr_ecfg = csr_read32(LOONGARCH_CSR_ECFG);
|
||||
if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
|
||||
if (unlikely(args->fn)) {
|
||||
/* kernel thread */
|
||||
p->thread.reg23 = usp; /* fn */
|
||||
p->thread.reg24 = kthread_arg;
|
||||
p->thread.reg03 = childksp;
|
||||
p->thread.reg01 = (unsigned long) ret_from_kernel_thread;
|
||||
p->thread.reg23 = (unsigned long)args->fn;
|
||||
p->thread.reg24 = (unsigned long)args->fn_arg;
|
||||
p->thread.reg01 = (unsigned long)ret_from_kernel_thread;
|
||||
memset(childregs, 0, sizeof(struct pt_regs));
|
||||
childregs->csr_euen = p->thread.csr_euen;
|
||||
childregs->csr_crmd = p->thread.csr_crmd;
|
||||
|
@ -39,7 +39,6 @@
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/sections.h>
|
||||
#include <asm/setup.h>
|
||||
#include <asm/smp.h>
|
||||
#include <asm/time.h>
|
||||
|
||||
#define SMBIOS_BIOSSIZE_OFFSET 0x09
|
||||
@ -349,8 +348,6 @@ static void __init prefill_possible_map(void)
|
||||
|
||||
nr_cpu_ids = possible;
|
||||
}
|
||||
#else
|
||||
static inline void prefill_possible_map(void) {}
|
||||
#endif
|
||||
|
||||
void __init setup_arch(char **cmdline_p)
|
||||
@ -367,8 +364,10 @@ void __init setup_arch(char **cmdline_p)
|
||||
arch_mem_init(cmdline_p);
|
||||
|
||||
resource_init();
|
||||
#ifdef CONFIG_SMP
|
||||
plat_smp_setup();
|
||||
prefill_possible_map();
|
||||
#endif
|
||||
|
||||
paging_init();
|
||||
}
|
||||
|
@ -66,8 +66,6 @@ static cpumask_t cpu_core_setup_map;
|
||||
|
||||
struct secondary_data cpuboot_data;
|
||||
static DEFINE_PER_CPU(int, cpu_state);
|
||||
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
|
||||
EXPORT_PER_CPU_SYMBOL(irq_stat);
|
||||
|
||||
enum ipi_msg_type {
|
||||
IPI_RESCHEDULE,
|
||||
|
@ -97,7 +97,7 @@ void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu)
|
||||
* We ran out of VMIDs so we increment vmid_version and
|
||||
* start assigning VMIDs from 1.
|
||||
*
|
||||
* This also means existing VMIDs assignement to all Guest
|
||||
* This also means existing VMIDs assignment to all Guest
|
||||
* instances is invalid and we have force VMID re-assignement
|
||||
* for all Guest instances. The Guest instances that were not
|
||||
* running will automatically pick-up new VMIDs because will
|
||||
|
@ -544,6 +544,8 @@ static int um_pci_init_vqs(struct um_pci_device *dev)
|
||||
dev->cmd_vq = vqs[0];
|
||||
dev->irq_vq = vqs[1];
|
||||
|
||||
virtio_device_ready(dev->vdev);
|
||||
|
||||
for (i = 0; i < NUM_IRQ_MSGS; i++) {
|
||||
void *msg = kzalloc(MAX_IRQ_MSG_SIZE, GFP_KERNEL);
|
||||
|
||||
@ -587,7 +589,7 @@ static int um_pci_virtio_probe(struct virtio_device *vdev)
|
||||
dev->irq = irq_alloc_desc(numa_node_id());
|
||||
if (dev->irq < 0) {
|
||||
err = dev->irq;
|
||||
goto error;
|
||||
goto err_reset;
|
||||
}
|
||||
um_pci_devices[free].dev = dev;
|
||||
vdev->priv = dev;
|
||||
@ -604,6 +606,9 @@ static int um_pci_virtio_probe(struct virtio_device *vdev)
|
||||
|
||||
um_pci_rescan();
|
||||
return 0;
|
||||
err_reset:
|
||||
virtio_reset_device(vdev);
|
||||
vdev->config->del_vqs(vdev);
|
||||
error:
|
||||
mutex_unlock(&um_pci_mtx);
|
||||
kfree(dev);
|
||||
|
@ -446,5 +446,6 @@
|
||||
#define X86_BUG_TAA X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */
|
||||
#define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
|
||||
#define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
|
||||
#define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */
|
||||
|
||||
#endif /* _ASM_X86_CPUFEATURES_H */
|
||||
|
@ -1047,14 +1047,77 @@ struct kvm_x86_msr_filter {
|
||||
};
|
||||
|
||||
enum kvm_apicv_inhibit {
|
||||
|
||||
/********************************************************************/
|
||||
/* INHIBITs that are relevant to both Intel's APICv and AMD's AVIC. */
|
||||
/********************************************************************/
|
||||
|
||||
/*
|
||||
* APIC acceleration is disabled by a module parameter
|
||||
* and/or not supported in hardware.
|
||||
*/
|
||||
APICV_INHIBIT_REASON_DISABLE,
|
||||
|
||||
/*
|
||||
* APIC acceleration is inhibited because AutoEOI feature is
|
||||
* being used by a HyperV guest.
|
||||
*/
|
||||
APICV_INHIBIT_REASON_HYPERV,
|
||||
APICV_INHIBIT_REASON_NESTED,
|
||||
APICV_INHIBIT_REASON_IRQWIN,
|
||||
APICV_INHIBIT_REASON_PIT_REINJ,
|
||||
APICV_INHIBIT_REASON_X2APIC,
|
||||
APICV_INHIBIT_REASON_BLOCKIRQ,
|
||||
|
||||
/*
|
||||
* APIC acceleration is inhibited because the userspace didn't yet
|
||||
* enable the kernel/split irqchip.
|
||||
*/
|
||||
APICV_INHIBIT_REASON_ABSENT,
|
||||
|
||||
/* APIC acceleration is inhibited because KVM_GUESTDBG_BLOCKIRQ
|
||||
* (out of band, debug measure of blocking all interrupts on this vCPU)
|
||||
* was enabled, to avoid AVIC/APICv bypassing it.
|
||||
*/
|
||||
APICV_INHIBIT_REASON_BLOCKIRQ,
|
||||
|
||||
/*
|
||||
* For simplicity, the APIC acceleration is inhibited
|
||||
* first time either APIC ID or APIC base are changed by the guest
|
||||
* from their reset values.
|
||||
*/
|
||||
APICV_INHIBIT_REASON_APIC_ID_MODIFIED,
|
||||
APICV_INHIBIT_REASON_APIC_BASE_MODIFIED,
|
||||
|
||||
/******************************************************/
|
||||
/* INHIBITs that are relevant only to the AMD's AVIC. */
|
||||
/******************************************************/
|
||||
|
||||
/*
|
||||
* AVIC is inhibited on a vCPU because it runs a nested guest.
|
||||
*
|
||||
* This is needed because unlike APICv, the peers of this vCPU
|
||||
* cannot use the doorbell mechanism to signal interrupts via AVIC when
|
||||
* a vCPU runs nested.
|
||||
*/
|
||||
APICV_INHIBIT_REASON_NESTED,
|
||||
|
||||
/*
|
||||
* On SVM, the wait for the IRQ window is implemented with pending vIRQ,
|
||||
* which cannot be injected when the AVIC is enabled, thus AVIC
|
||||
* is inhibited while KVM waits for IRQ window.
|
||||
*/
|
||||
APICV_INHIBIT_REASON_IRQWIN,
|
||||
|
||||
/*
|
||||
* PIT (i8254) 're-inject' mode, relies on EOI intercept,
|
||||
* which AVIC doesn't support for edge triggered interrupts.
|
||||
*/
|
||||
APICV_INHIBIT_REASON_PIT_REINJ,
|
||||
|
||||
/*
|
||||
* AVIC is inhibited because the guest has x2apic in its CPUID.
|
||||
*/
|
||||
APICV_INHIBIT_REASON_X2APIC,
|
||||
|
||||
/*
|
||||
* AVIC is disabled because SEV doesn't support it.
|
||||
*/
|
||||
APICV_INHIBIT_REASON_SEV,
|
||||
};
|
||||
|
||||
|
@ -116,6 +116,30 @@
|
||||
* Not susceptible to
|
||||
* TSX Async Abort (TAA) vulnerabilities.
|
||||
*/
|
||||
#define ARCH_CAP_SBDR_SSDP_NO BIT(13) /*
|
||||
* Not susceptible to SBDR and SSDP
|
||||
* variants of Processor MMIO stale data
|
||||
* vulnerabilities.
|
||||
*/
|
||||
#define ARCH_CAP_FBSDP_NO BIT(14) /*
|
||||
* Not susceptible to FBSDP variant of
|
||||
* Processor MMIO stale data
|
||||
* vulnerabilities.
|
||||
*/
|
||||
#define ARCH_CAP_PSDP_NO BIT(15) /*
|
||||
* Not susceptible to PSDP variant of
|
||||
* Processor MMIO stale data
|
||||
* vulnerabilities.
|
||||
*/
|
||||
#define ARCH_CAP_FB_CLEAR BIT(17) /*
|
||||
* VERW clears CPU fill buffer
|
||||
* even on MDS_NO CPUs.
|
||||
*/
|
||||
#define ARCH_CAP_FB_CLEAR_CTRL BIT(18) /*
|
||||
* MSR_IA32_MCU_OPT_CTRL[FB_CLEAR_DIS]
|
||||
* bit available to control VERW
|
||||
* behavior.
|
||||
*/
|
||||
|
||||
#define MSR_IA32_FLUSH_CMD 0x0000010b
|
||||
#define L1D_FLUSH BIT(0) /*
|
||||
@ -133,6 +157,7 @@
|
||||
#define MSR_IA32_MCU_OPT_CTRL 0x00000123
|
||||
#define RNGDS_MITG_DIS BIT(0) /* SRBDS support */
|
||||
#define RTM_ALLOW BIT(1) /* TSX development mode */
|
||||
#define FB_CLEAR_DIS BIT(3) /* CPU Fill buffer clear disable */
|
||||
|
||||
#define MSR_IA32_SYSENTER_CS 0x00000174
|
||||
#define MSR_IA32_SYSENTER_ESP 0x00000175
|
||||
|
@ -269,6 +269,8 @@ DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
|
||||
|
||||
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
|
||||
|
||||
DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);
|
||||
|
||||
#include <asm/segment.h>
|
||||
|
||||
/**
|
||||
|
@ -41,8 +41,10 @@ static void __init spectre_v2_select_mitigation(void);
|
||||
static void __init ssb_select_mitigation(void);
|
||||
static void __init l1tf_select_mitigation(void);
|
||||
static void __init mds_select_mitigation(void);
|
||||
static void __init mds_print_mitigation(void);
|
||||
static void __init md_clear_update_mitigation(void);
|
||||
static void __init md_clear_select_mitigation(void);
|
||||
static void __init taa_select_mitigation(void);
|
||||
static void __init mmio_select_mitigation(void);
|
||||
static void __init srbds_select_mitigation(void);
|
||||
static void __init l1d_flush_select_mitigation(void);
|
||||
|
||||
@ -85,6 +87,10 @@ EXPORT_SYMBOL_GPL(mds_idle_clear);
|
||||
*/
|
||||
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
|
||||
|
||||
/* Controls CPU Fill buffer clear before KVM guest MMIO accesses */
|
||||
DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear);
|
||||
EXPORT_SYMBOL_GPL(mmio_stale_data_clear);
|
||||
|
||||
void __init check_bugs(void)
|
||||
{
|
||||
identify_boot_cpu();
|
||||
@ -117,17 +123,10 @@ void __init check_bugs(void)
|
||||
spectre_v2_select_mitigation();
|
||||
ssb_select_mitigation();
|
||||
l1tf_select_mitigation();
|
||||
mds_select_mitigation();
|
||||
taa_select_mitigation();
|
||||
md_clear_select_mitigation();
|
||||
srbds_select_mitigation();
|
||||
l1d_flush_select_mitigation();
|
||||
|
||||
/*
|
||||
* As MDS and TAA mitigations are inter-related, print MDS
|
||||
* mitigation until after TAA mitigation selection is done.
|
||||
*/
|
||||
mds_print_mitigation();
|
||||
|
||||
arch_smt_update();
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
@ -267,14 +266,6 @@ static void __init mds_select_mitigation(void)
|
||||
}
|
||||
}
|
||||
|
||||
static void __init mds_print_mitigation(void)
|
||||
{
|
||||
if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off())
|
||||
return;
|
||||
|
||||
pr_info("%s\n", mds_strings[mds_mitigation]);
|
||||
}
|
||||
|
||||
static int __init mds_cmdline(char *str)
|
||||
{
|
||||
if (!boot_cpu_has_bug(X86_BUG_MDS))
|
||||
@ -329,7 +320,7 @@ static void __init taa_select_mitigation(void)
|
||||
/* TSX previously disabled by tsx=off */
|
||||
if (!boot_cpu_has(X86_FEATURE_RTM)) {
|
||||
taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
|
||||
goto out;
|
||||
return;
|
||||
}
|
||||
|
||||
if (cpu_mitigations_off()) {
|
||||
@ -343,7 +334,7 @@ static void __init taa_select_mitigation(void)
|
||||
*/
|
||||
if (taa_mitigation == TAA_MITIGATION_OFF &&
|
||||
mds_mitigation == MDS_MITIGATION_OFF)
|
||||
goto out;
|
||||
return;
|
||||
|
||||
if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
|
||||
taa_mitigation = TAA_MITIGATION_VERW;
|
||||
@ -375,18 +366,6 @@ static void __init taa_select_mitigation(void)
|
||||
|
||||
if (taa_nosmt || cpu_mitigations_auto_nosmt())
|
||||
cpu_smt_disable(false);
|
||||
|
||||
/*
|
||||
* Update MDS mitigation, if necessary, as the mds_user_clear is
|
||||
* now enabled for TAA mitigation.
|
||||
*/
|
||||
if (mds_mitigation == MDS_MITIGATION_OFF &&
|
||||
boot_cpu_has_bug(X86_BUG_MDS)) {
|
||||
mds_mitigation = MDS_MITIGATION_FULL;
|
||||
mds_select_mitigation();
|
||||
}
|
||||
out:
|
||||
pr_info("%s\n", taa_strings[taa_mitigation]);
|
||||
}
|
||||
|
||||
static int __init tsx_async_abort_parse_cmdline(char *str)
|
||||
@ -410,6 +389,151 @@ static int __init tsx_async_abort_parse_cmdline(char *str)
|
||||
}
|
||||
early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
|
||||
|
||||
#undef pr_fmt
#define pr_fmt(fmt)	"MMIO Stale Data: " fmt

enum mmio_mitigations {
	MMIO_MITIGATION_OFF,
	MMIO_MITIGATION_UCODE_NEEDED,
	MMIO_MITIGATION_VERW,
};

/* Default mitigation for Processor MMIO Stale Data vulnerabilities */
static enum mmio_mitigations mmio_mitigation __ro_after_init = MMIO_MITIGATION_VERW;
static bool mmio_nosmt __ro_after_init = false;

static const char * const mmio_strings[] = {
	[MMIO_MITIGATION_OFF]		= "Vulnerable",
	[MMIO_MITIGATION_UCODE_NEEDED]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
	[MMIO_MITIGATION_VERW]		= "Mitigation: Clear CPU buffers",
};

static void __init mmio_select_mitigation(void)
{
	u64 ia32_cap;

	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
	    cpu_mitigations_off()) {
		mmio_mitigation = MMIO_MITIGATION_OFF;
		return;
	}

	if (mmio_mitigation == MMIO_MITIGATION_OFF)
		return;

	ia32_cap = x86_read_arch_cap_msr();

	/*
	 * Enable CPU buffer clear mitigation for host and VMM, if also affected
	 * by MDS or TAA. Otherwise, enable mitigation for VMM only.
	 */
	if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) &&
	    boot_cpu_has(X86_FEATURE_RTM)))
		static_branch_enable(&mds_user_clear);
	else
		static_branch_enable(&mmio_stale_data_clear);

	/*
	 * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can
	 * be propagated to uncore buffers, clearing the Fill buffers on idle
	 * is required irrespective of SMT state.
	 */
	if (!(ia32_cap & ARCH_CAP_FBSDP_NO))
		static_branch_enable(&mds_idle_clear);

	/*
	 * Check if the system has the right microcode.
	 *
	 * CPU Fill buffer clear mitigation is enumerated by either an explicit
	 * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
	 * affected systems.
	 */
	if ((ia32_cap & ARCH_CAP_FB_CLEAR) ||
	    (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
	     boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
	     !(ia32_cap & ARCH_CAP_MDS_NO)))
		mmio_mitigation = MMIO_MITIGATION_VERW;
	else
		mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;

	if (mmio_nosmt || cpu_mitigations_auto_nosmt())
		cpu_smt_disable(false);
}

static int __init mmio_stale_data_parse_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off")) {
		mmio_mitigation = MMIO_MITIGATION_OFF;
	} else if (!strcmp(str, "full")) {
		mmio_mitigation = MMIO_MITIGATION_VERW;
	} else if (!strcmp(str, "full,nosmt")) {
		mmio_mitigation = MMIO_MITIGATION_VERW;
		mmio_nosmt = true;
	}

	return 0;
}
early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"" fmt

static void __init md_clear_update_mitigation(void)
{
	if (cpu_mitigations_off())
		return;

	if (!static_key_enabled(&mds_user_clear))
		goto out;

	/*
	 * mds_user_clear is now enabled. Update MDS, TAA and MMIO Stale Data
	 * mitigation, if necessary.
	 */
	if (mds_mitigation == MDS_MITIGATION_OFF &&
	    boot_cpu_has_bug(X86_BUG_MDS)) {
		mds_mitigation = MDS_MITIGATION_FULL;
		mds_select_mitigation();
	}
	if (taa_mitigation == TAA_MITIGATION_OFF &&
	    boot_cpu_has_bug(X86_BUG_TAA)) {
		taa_mitigation = TAA_MITIGATION_VERW;
		taa_select_mitigation();
	}
	if (mmio_mitigation == MMIO_MITIGATION_OFF &&
	    boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
		mmio_mitigation = MMIO_MITIGATION_VERW;
		mmio_select_mitigation();
	}
out:
	if (boot_cpu_has_bug(X86_BUG_MDS))
		pr_info("MDS: %s\n", mds_strings[mds_mitigation]);
	if (boot_cpu_has_bug(X86_BUG_TAA))
		pr_info("TAA: %s\n", taa_strings[taa_mitigation]);
	if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
		pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
}

static void __init md_clear_select_mitigation(void)
{
	mds_select_mitigation();
	taa_select_mitigation();
	mmio_select_mitigation();

	/*
	 * As MDS, TAA and MMIO Stale Data mitigations are inter-related, update
	 * and print their mitigation after MDS, TAA and MMIO Stale Data
	 * mitigation selection is done.
	 */
	md_clear_update_mitigation();
}

#undef pr_fmt
#define pr_fmt(fmt)	"SRBDS: " fmt

@ -478,11 +602,13 @@ static void __init srbds_select_mitigation(void)
		return;

	/*
	 * Check to see if this is one of the MDS_NO systems supporting
	 * TSX that are only exposed to SRBDS when TSX is enabled.
	 * Check to see if this is one of the MDS_NO systems supporting TSX that
	 * are only exposed to SRBDS when TSX is enabled or when CPU is affected
	 * by Processor MMIO Stale Data vulnerability.
	 */
	ia32_cap = x86_read_arch_cap_msr();
	if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM))
	if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
	    !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
		srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
	else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
@ -1116,6 +1242,8 @@ static void update_indir_branch_cond(void)
/* Update the static key controlling the MDS CPU buffer clear in idle */
static void update_mds_branch_idle(void)
{
	u64 ia32_cap = x86_read_arch_cap_msr();

	/*
	 * Enable the idle clearing if SMT is active on CPUs which are
	 * affected only by MSBDS and not any other MDS variant.
@ -1127,14 +1255,17 @@ static void update_mds_branch_idle(void)
	if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
		return;

	if (sched_smt_active())
	if (sched_smt_active()) {
		static_branch_enable(&mds_idle_clear);
	else
	} else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
		   (ia32_cap & ARCH_CAP_FBSDP_NO)) {
		static_branch_disable(&mds_idle_clear);
	}
}

#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
#define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
#define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"

void cpu_bugs_smt_update(void)
{
@ -1179,6 +1310,16 @@ void cpu_bugs_smt_update(void)
		break;
	}

	switch (mmio_mitigation) {
	case MMIO_MITIGATION_VERW:
	case MMIO_MITIGATION_UCODE_NEEDED:
		if (sched_smt_active())
			pr_warn_once(MMIO_MSG_SMT);
		break;
	case MMIO_MITIGATION_OFF:
		break;
	}

	mutex_unlock(&spec_ctrl_mutex);
}

@ -1781,6 +1922,20 @@ static ssize_t tsx_async_abort_show_state(char *buf)
		       sched_smt_active() ? "vulnerable" : "disabled");
}

static ssize_t mmio_stale_data_show_state(char *buf)
{
	if (mmio_mitigation == MMIO_MITIGATION_OFF)
		return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		return sysfs_emit(buf, "%s; SMT Host state unknown\n",
				  mmio_strings[mmio_mitigation]);
	}

	return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation],
			  sched_smt_active() ? "vulnerable" : "disabled");
}

static char *stibp_state(void)
{
	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled))
@ -1881,6 +2036,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
	case X86_BUG_SRBDS:
		return srbds_show_state(buf);

	case X86_BUG_MMIO_STALE_DATA:
		return mmio_stale_data_show_state(buf);

	default:
		break;
	}
@ -1932,4 +2090,9 @@ ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
}

ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
}
#endif

@ -1211,18 +1211,42 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
					    X86_FEATURE_ANY, issues)

#define SRBDS		BIT(0)
/* CPU is affected by X86_BUG_MMIO_STALE_DATA */
#define MMIO		BIT(1)
/* CPU is affected by Shared Buffers Data Sampling (SBDS), a variant of X86_BUG_MMIO_STALE_DATA */
#define MMIO_SBDS	BIT(2)

static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
	VULNBL_INTEL_STEPPINGS(IVYBRIDGE,	X86_STEPPING_ANY,		SRBDS),
	VULNBL_INTEL_STEPPINGS(HASWELL,		X86_STEPPING_ANY,		SRBDS),
	VULNBL_INTEL_STEPPINGS(HASWELL_L,	X86_STEPPING_ANY,		SRBDS),
	VULNBL_INTEL_STEPPINGS(HASWELL_G,	X86_STEPPING_ANY,		SRBDS),
	VULNBL_INTEL_STEPPINGS(HASWELL_X,	BIT(2) | BIT(4),		MMIO),
	VULNBL_INTEL_STEPPINGS(BROADWELL_D,	X86_STEPPINGS(0x3, 0x5),	MMIO),
	VULNBL_INTEL_STEPPINGS(BROADWELL_G,	X86_STEPPING_ANY,		SRBDS),
	VULNBL_INTEL_STEPPINGS(BROADWELL_X,	X86_STEPPING_ANY,		MMIO),
	VULNBL_INTEL_STEPPINGS(BROADWELL,	X86_STEPPING_ANY,		SRBDS),
	VULNBL_INTEL_STEPPINGS(SKYLAKE_L,	X86_STEPPINGS(0x3, 0x3),	SRBDS | MMIO),
	VULNBL_INTEL_STEPPINGS(SKYLAKE_L,	X86_STEPPING_ANY,		SRBDS),
	VULNBL_INTEL_STEPPINGS(SKYLAKE_X,	BIT(3) | BIT(4) | BIT(6) |
						BIT(7) | BIT(0xB),		MMIO),
	VULNBL_INTEL_STEPPINGS(SKYLAKE,		X86_STEPPINGS(0x3, 0x3),	SRBDS | MMIO),
	VULNBL_INTEL_STEPPINGS(SKYLAKE,		X86_STEPPING_ANY,		SRBDS),
	VULNBL_INTEL_STEPPINGS(KABYLAKE_L,	X86_STEPPINGS(0x0, 0xC),	SRBDS),
	VULNBL_INTEL_STEPPINGS(KABYLAKE,	X86_STEPPINGS(0x0, 0xD),	SRBDS),
	VULNBL_INTEL_STEPPINGS(KABYLAKE_L,	X86_STEPPINGS(0x9, 0xC),	SRBDS | MMIO),
	VULNBL_INTEL_STEPPINGS(KABYLAKE_L,	X86_STEPPINGS(0x0, 0x8),	SRBDS),
	VULNBL_INTEL_STEPPINGS(KABYLAKE,	X86_STEPPINGS(0x9, 0xD),	SRBDS | MMIO),
	VULNBL_INTEL_STEPPINGS(KABYLAKE,	X86_STEPPINGS(0x0, 0x8),	SRBDS),
	VULNBL_INTEL_STEPPINGS(ICELAKE_L,	X86_STEPPINGS(0x5, 0x5),	MMIO | MMIO_SBDS),
	VULNBL_INTEL_STEPPINGS(ICELAKE_D,	X86_STEPPINGS(0x1, 0x1),	MMIO),
	VULNBL_INTEL_STEPPINGS(ICELAKE_X,	X86_STEPPINGS(0x4, 0x6),	MMIO),
	VULNBL_INTEL_STEPPINGS(COMETLAKE,	BIT(2) | BIT(3) | BIT(5),	MMIO | MMIO_SBDS),
	VULNBL_INTEL_STEPPINGS(COMETLAKE_L,	X86_STEPPINGS(0x1, 0x1),	MMIO | MMIO_SBDS),
	VULNBL_INTEL_STEPPINGS(COMETLAKE_L,	X86_STEPPINGS(0x0, 0x0),	MMIO),
	VULNBL_INTEL_STEPPINGS(LAKEFIELD,	X86_STEPPINGS(0x1, 0x1),	MMIO | MMIO_SBDS),
	VULNBL_INTEL_STEPPINGS(ROCKETLAKE,	X86_STEPPINGS(0x1, 0x1),	MMIO),
	VULNBL_INTEL_STEPPINGS(ATOM_TREMONT,	X86_STEPPINGS(0x1, 0x1),	MMIO | MMIO_SBDS),
	VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D,	X86_STEPPING_ANY,		MMIO),
	VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L,	X86_STEPPINGS(0x0, 0x0),	MMIO | MMIO_SBDS),
	{}
};

@ -1243,6 +1267,13 @@ u64 x86_read_arch_cap_msr(void)
	return ia32_cap;
}

static bool arch_cap_mmio_immune(u64 ia32_cap)
{
	return (ia32_cap & ARCH_CAP_FBSDP_NO &&
		ia32_cap & ARCH_CAP_PSDP_NO &&
		ia32_cap & ARCH_CAP_SBDR_SSDP_NO);
}

static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
{
	u64 ia32_cap = x86_read_arch_cap_msr();
@ -1296,12 +1327,27 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
	/*
	 * SRBDS affects CPUs which support RDRAND or RDSEED and are listed
	 * in the vulnerability blacklist.
	 *
	 * Some of the implications and mitigation of Shared Buffers Data
	 * Sampling (SBDS) are similar to SRBDS. Give SBDS same treatment as
	 * SRBDS.
	 */
	if ((cpu_has(c, X86_FEATURE_RDRAND) ||
	     cpu_has(c, X86_FEATURE_RDSEED)) &&
	    cpu_matches(cpu_vuln_blacklist, SRBDS))
	    cpu_matches(cpu_vuln_blacklist, SRBDS | MMIO_SBDS))
		setup_force_cpu_bug(X86_BUG_SRBDS);

	/*
	 * Processor MMIO Stale Data bug enumeration
	 *
	 * Affected CPU list is generally enough to enumerate the vulnerability,
	 * but for virtualization case check for ARCH_CAP MSR bits also, VMM may
	 * not want the guest to enumerate the bug.
	 */
	if (cpu_matches(cpu_vuln_blacklist, MMIO) &&
	    !arch_cap_mmio_immune(ia32_cap))
		setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);

	if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
		return;
@ -2039,6 +2039,19 @@ static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
|
||||
}
|
||||
}
|
||||
|
||||
static void kvm_lapic_xapic_id_updated(struct kvm_lapic *apic)
|
||||
{
|
||||
struct kvm *kvm = apic->vcpu->kvm;
|
||||
|
||||
if (KVM_BUG_ON(apic_x2apic_mode(apic), kvm))
|
||||
return;
|
||||
|
||||
if (kvm_xapic_id(apic) == apic->vcpu->vcpu_id)
|
||||
return;
|
||||
|
||||
kvm_set_apicv_inhibit(apic->vcpu->kvm, APICV_INHIBIT_REASON_APIC_ID_MODIFIED);
|
||||
}
|
||||
|
||||
static int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
|
||||
{
|
||||
int ret = 0;
|
||||
@ -2047,10 +2060,12 @@ static int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
|
||||
|
||||
switch (reg) {
|
||||
case APIC_ID: /* Local APIC ID */
|
||||
if (!apic_x2apic_mode(apic))
|
||||
if (!apic_x2apic_mode(apic)) {
|
||||
kvm_apic_set_xapic_id(apic, val >> 24);
|
||||
else
|
||||
kvm_lapic_xapic_id_updated(apic);
|
||||
} else {
|
||||
ret = 1;
|
||||
}
|
||||
break;
|
||||
|
||||
case APIC_TASKPRI:
|
||||
@ -2336,8 +2351,10 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
|
||||
MSR_IA32_APICBASE_BASE;
|
||||
|
||||
if ((value & MSR_IA32_APICBASE_ENABLE) &&
|
||||
apic->base_address != APIC_DEFAULT_PHYS_BASE)
|
||||
pr_warn_once("APIC base relocation is unsupported by KVM");
|
||||
apic->base_address != APIC_DEFAULT_PHYS_BASE) {
|
||||
kvm_set_apicv_inhibit(apic->vcpu->kvm,
|
||||
APICV_INHIBIT_REASON_APIC_BASE_MODIFIED);
|
||||
}
|
||||
}
|
||||
|
||||
void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
|
||||
@ -2648,6 +2665,8 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
|
||||
icr = __kvm_lapic_get_reg64(s->regs, APIC_ICR);
|
||||
__kvm_lapic_set_reg(s->regs, APIC_ICR2, icr >> 32);
|
||||
}
|
||||
} else {
|
||||
kvm_lapic_xapic_id_updated(vcpu->arch.apic);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -3411,7 +3411,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
|
||||
root = mmu_alloc_root(vcpu, i << (30 - PAGE_SHIFT),
|
||||
i << 30, PT32_ROOT_LEVEL, true);
|
||||
mmu->pae_root[i] = root | PT_PRESENT_MASK |
|
||||
shadow_me_mask;
|
||||
shadow_me_value;
|
||||
}
|
||||
mmu->root.hpa = __pa(mmu->pae_root);
|
||||
} else {
|
||||
|
@ -291,58 +291,91 @@ void avic_ring_doorbell(struct kvm_vcpu *vcpu)
|
||||
static int avic_kick_target_vcpus_fast(struct kvm *kvm, struct kvm_lapic *source,
|
||||
u32 icrl, u32 icrh, u32 index)
|
||||
{
|
||||
u32 dest, apic_id;
|
||||
struct kvm_vcpu *vcpu;
|
||||
u32 l1_physical_id, dest;
|
||||
struct kvm_vcpu *target_vcpu;
|
||||
int dest_mode = icrl & APIC_DEST_MASK;
|
||||
int shorthand = icrl & APIC_SHORT_MASK;
|
||||
struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
|
||||
u32 *avic_logical_id_table = page_address(kvm_svm->avic_logical_id_table_page);
|
||||
|
||||
if (shorthand != APIC_DEST_NOSHORT)
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* The AVIC incomplete IPI #vmexit info provides index into
|
||||
* the physical APIC ID table, which can be used to derive
|
||||
* guest physical APIC ID.
|
||||
*/
|
||||
if (dest_mode == APIC_DEST_PHYSICAL) {
|
||||
apic_id = index;
|
||||
} else {
|
||||
if (!apic_x2apic_mode(source)) {
|
||||
/* For xAPIC logical mode, the index is for logical APIC table. */
|
||||
apic_id = avic_logical_id_table[index] & 0x1ff;
|
||||
} else {
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Assuming vcpu ID is the same as physical apic ID,
|
||||
* and use it to retrieve the target vCPU.
|
||||
*/
|
||||
vcpu = kvm_get_vcpu_by_id(kvm, apic_id);
|
||||
if (!vcpu)
|
||||
return -EINVAL;
|
||||
|
||||
if (apic_x2apic_mode(vcpu->arch.apic))
|
||||
if (apic_x2apic_mode(source))
|
||||
dest = icrh;
|
||||
else
|
||||
dest = GET_APIC_DEST_FIELD(icrh);
|
||||
|
||||
/*
|
||||
* Try matching the destination APIC ID with the vCPU.
|
||||
*/
|
||||
if (kvm_apic_match_dest(vcpu, source, shorthand, dest, dest_mode)) {
|
||||
vcpu->arch.apic->irr_pending = true;
|
||||
svm_complete_interrupt_delivery(vcpu,
|
||||
icrl & APIC_MODE_MASK,
|
||||
icrl & APIC_INT_LEVELTRIG,
|
||||
icrl & APIC_VECTOR_MASK);
|
||||
return 0;
|
||||
if (dest_mode == APIC_DEST_PHYSICAL) {
|
||||
/* broadcast destination, use slow path */
|
||||
if (apic_x2apic_mode(source) && dest == X2APIC_BROADCAST)
|
||||
return -EINVAL;
|
||||
if (!apic_x2apic_mode(source) && dest == APIC_BROADCAST)
|
||||
return -EINVAL;
|
||||
|
||||
l1_physical_id = dest;
|
||||
|
||||
if (WARN_ON_ONCE(l1_physical_id != index))
|
||||
return -EINVAL;
|
||||
|
||||
} else {
|
||||
u32 bitmap, cluster;
|
||||
int logid_index;
|
||||
|
||||
if (apic_x2apic_mode(source)) {
|
||||
/* 16 bit dest mask, 16 bit cluster id */
|
||||
bitmap = dest & 0xFFFF0000;
|
||||
cluster = (dest >> 16) << 4;
|
||||
} else if (kvm_lapic_get_reg(source, APIC_DFR) == APIC_DFR_FLAT) {
|
||||
/* 8 bit dest mask*/
|
||||
bitmap = dest;
|
||||
cluster = 0;
|
||||
} else {
|
||||
/* 4 bit desk mask, 4 bit cluster id */
|
||||
bitmap = dest & 0xF;
|
||||
cluster = (dest >> 4) << 2;
|
||||
}
|
||||
|
||||
if (unlikely(!bitmap))
|
||||
/* guest bug: nobody to send the logical interrupt to */
|
||||
return 0;
|
||||
|
||||
if (!is_power_of_2(bitmap))
|
||||
/* multiple logical destinations, use slow path */
|
||||
return -EINVAL;
|
||||
|
||||
logid_index = cluster + __ffs(bitmap);
|
||||
|
||||
if (apic_x2apic_mode(source)) {
|
||||
l1_physical_id = logid_index;
|
||||
} else {
|
||||
u32 *avic_logical_id_table =
|
||||
page_address(kvm_svm->avic_logical_id_table_page);
|
||||
|
||||
u32 logid_entry = avic_logical_id_table[logid_index];
|
||||
|
||||
if (WARN_ON_ONCE(index != logid_index))
|
||||
return -EINVAL;
|
||||
|
||||
/* guest bug: non existing/reserved logical destination */
|
||||
if (unlikely(!(logid_entry & AVIC_LOGICAL_ID_ENTRY_VALID_MASK)))
|
||||
return 0;
|
||||
|
||||
l1_physical_id = logid_entry &
|
||||
AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK;
|
||||
}
|
||||
}
|
||||
|
||||
return -EINVAL;
|
||||
target_vcpu = kvm_get_vcpu_by_id(kvm, l1_physical_id);
|
||||
if (unlikely(!target_vcpu))
|
||||
/* guest bug: non existing vCPU is a target of this IPI*/
|
||||
return 0;
|
||||
|
||||
target_vcpu->arch.apic->irr_pending = true;
|
||||
svm_complete_interrupt_delivery(target_vcpu,
|
||||
icrl & APIC_MODE_MASK,
|
||||
icrl & APIC_INT_LEVELTRIG,
|
||||
icrl & APIC_VECTOR_MASK);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void avic_kick_target_vcpus(struct kvm *kvm, struct kvm_lapic *source,
|
||||
@ -508,35 +541,6 @@ static int avic_handle_ldr_update(struct kvm_vcpu *vcpu)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int avic_handle_apic_id_update(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
u64 *old, *new;
|
||||
struct vcpu_svm *svm = to_svm(vcpu);
|
||||
u32 id = kvm_xapic_id(vcpu->arch.apic);
|
||||
|
||||
if (vcpu->vcpu_id == id)
|
||||
return 0;
|
||||
|
||||
old = avic_get_physical_id_entry(vcpu, vcpu->vcpu_id);
|
||||
new = avic_get_physical_id_entry(vcpu, id);
|
||||
if (!new || !old)
|
||||
return 1;
|
||||
|
||||
/* We need to move physical_id_entry to new offset */
|
||||
*new = *old;
|
||||
*old = 0ULL;
|
||||
to_svm(vcpu)->avic_physical_id_cache = new;
|
||||
|
||||
/*
|
||||
* Also update the guest physical APIC ID in the logical
|
||||
* APIC ID table entry if already setup the LDR.
|
||||
*/
|
||||
if (svm->ldr_reg)
|
||||
avic_handle_ldr_update(vcpu);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void avic_handle_dfr_update(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct vcpu_svm *svm = to_svm(vcpu);
|
||||
@ -555,10 +559,6 @@ static int avic_unaccel_trap_write(struct kvm_vcpu *vcpu)
|
||||
AVIC_UNACCEL_ACCESS_OFFSET_MASK;
|
||||
|
||||
switch (offset) {
|
||||
case APIC_ID:
|
||||
if (avic_handle_apic_id_update(vcpu))
|
||||
return 0;
|
||||
break;
|
||||
case APIC_LDR:
|
||||
if (avic_handle_ldr_update(vcpu))
|
||||
return 0;
|
||||
@ -650,8 +650,6 @@ int avic_init_vcpu(struct vcpu_svm *svm)
|
||||
|
||||
void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
if (avic_handle_apic_id_update(vcpu) != 0)
|
||||
return;
|
||||
avic_handle_dfr_update(vcpu);
|
||||
avic_handle_ldr_update(vcpu);
|
||||
}
|
||||
@ -910,7 +908,9 @@ bool avic_check_apicv_inhibit_reasons(enum kvm_apicv_inhibit reason)
|
||||
BIT(APICV_INHIBIT_REASON_PIT_REINJ) |
|
||||
BIT(APICV_INHIBIT_REASON_X2APIC) |
|
||||
BIT(APICV_INHIBIT_REASON_BLOCKIRQ) |
|
||||
BIT(APICV_INHIBIT_REASON_SEV);
|
||||
BIT(APICV_INHIBIT_REASON_SEV) |
|
||||
BIT(APICV_INHIBIT_REASON_APIC_ID_MODIFIED) |
|
||||
BIT(APICV_INHIBIT_REASON_APIC_BASE_MODIFIED);
|
||||
|
||||
return supported & BIT(reason);
|
||||
}
|
||||
@ -946,7 +946,7 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
|
||||
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
|
||||
{
|
||||
u64 entry;
|
||||
int h_physical_id = kvm_cpu_get_apicid(cpu);
|
||||
@ -978,7 +978,7 @@ void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
|
||||
avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, true);
|
||||
}
|
||||
|
||||
void __avic_vcpu_put(struct kvm_vcpu *vcpu)
|
||||
void avic_vcpu_put(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
u64 entry;
|
||||
struct vcpu_svm *svm = to_svm(vcpu);
|
||||
@ -997,25 +997,6 @@ void __avic_vcpu_put(struct kvm_vcpu *vcpu)
|
||||
WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
|
||||
}
|
||||
|
||||
static void avic_vcpu_load(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
int cpu = get_cpu();
|
||||
|
||||
WARN_ON(cpu != vcpu->cpu);
|
||||
|
||||
__avic_vcpu_load(vcpu, cpu);
|
||||
|
||||
put_cpu();
|
||||
}
|
||||
|
||||
static void avic_vcpu_put(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
preempt_disable();
|
||||
|
||||
__avic_vcpu_put(vcpu);
|
||||
|
||||
preempt_enable();
|
||||
}
|
||||
|
||||
void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
@ -1042,7 +1023,7 @@ void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
|
||||
vmcb_mark_dirty(vmcb, VMCB_AVIC);
|
||||
|
||||
if (activated)
|
||||
avic_vcpu_load(vcpu);
|
||||
avic_vcpu_load(vcpu, vcpu->cpu);
|
||||
else
|
||||
avic_vcpu_put(vcpu);
|
||||
|
||||
@ -1075,5 +1056,5 @@ void avic_vcpu_unblocking(struct kvm_vcpu *vcpu)
|
||||
if (!kvm_vcpu_apicv_active(vcpu))
|
||||
return;
|
||||
|
||||
avic_vcpu_load(vcpu);
|
||||
avic_vcpu_load(vcpu, vcpu->cpu);
|
||||
}
|
||||
|
@ -616,6 +616,8 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
|
||||
struct kvm_vcpu *vcpu = &svm->vcpu;
|
||||
struct vmcb *vmcb01 = svm->vmcb01.ptr;
|
||||
struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
|
||||
u32 pause_count12;
|
||||
u32 pause_thresh12;
|
||||
|
||||
/*
|
||||
* Filled at exit: exit_code, exit_code_hi, exit_info_1, exit_info_2,
|
||||
@ -671,27 +673,25 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
|
||||
if (!nested_vmcb_needs_vls_intercept(svm))
|
||||
vmcb02->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
|
||||
|
||||
pause_count12 = svm->pause_filter_enabled ? svm->nested.ctl.pause_filter_count : 0;
|
||||
pause_thresh12 = svm->pause_threshold_enabled ? svm->nested.ctl.pause_filter_thresh : 0;
|
||||
if (kvm_pause_in_guest(svm->vcpu.kvm)) {
|
||||
/* use guest values since host doesn't use them */
|
||||
vmcb02->control.pause_filter_count =
|
||||
svm->pause_filter_enabled ?
|
||||
svm->nested.ctl.pause_filter_count : 0;
|
||||
/* use guest values since host doesn't intercept PAUSE */
|
||||
vmcb02->control.pause_filter_count = pause_count12;
|
||||
vmcb02->control.pause_filter_thresh = pause_thresh12;
|
||||
|
||||
vmcb02->control.pause_filter_thresh =
|
||||
svm->pause_threshold_enabled ?
|
||||
svm->nested.ctl.pause_filter_thresh : 0;
|
||||
|
||||
} else if (!vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_PAUSE)) {
|
||||
/* use host values when guest doesn't use them */
|
||||
} else {
|
||||
/* start from host values otherwise */
|
||||
vmcb02->control.pause_filter_count = vmcb01->control.pause_filter_count;
|
||||
vmcb02->control.pause_filter_thresh = vmcb01->control.pause_filter_thresh;
|
||||
} else {
|
||||
/*
|
||||
* Intercept every PAUSE otherwise and
|
||||
* ignore both host and guest values
|
||||
*/
|
||||
vmcb02->control.pause_filter_count = 0;
|
||||
vmcb02->control.pause_filter_thresh = 0;
|
||||
|
||||
/* ... but ensure filtering is disabled if so requested. */
|
||||
if (vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_PAUSE)) {
|
||||
if (!pause_count12)
|
||||
vmcb02->control.pause_filter_count = 0;
|
||||
if (!pause_thresh12)
|
||||
vmcb02->control.pause_filter_thresh = 0;
|
||||
}
|
||||
}
|
||||
|
||||
nested_svm_transition_tlb_flush(vcpu);
|
||||
@ -951,8 +951,11 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
|
||||
vmcb12->control.event_inj = svm->nested.ctl.event_inj;
|
||||
vmcb12->control.event_inj_err = svm->nested.ctl.event_inj_err;
|
||||
|
||||
if (!kvm_pause_in_guest(vcpu->kvm) && vmcb02->control.pause_filter_count)
|
||||
if (!kvm_pause_in_guest(vcpu->kvm)) {
|
||||
vmcb01->control.pause_filter_count = vmcb02->control.pause_filter_count;
|
||||
vmcb_mark_dirty(vmcb01, VMCB_INTERCEPTS);
|
||||
|
||||
}
|
||||
|
||||
nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr);
|
||||
|
||||
|
@ -921,7 +921,7 @@ static void grow_ple_window(struct kvm_vcpu *vcpu)
|
||||
struct vmcb_control_area *control = &svm->vmcb->control;
|
||||
int old = control->pause_filter_count;
|
||||
|
||||
if (kvm_pause_in_guest(vcpu->kvm) || !old)
|
||||
if (kvm_pause_in_guest(vcpu->kvm))
|
||||
return;
|
||||
|
||||
control->pause_filter_count = __grow_ple_window(old,
|
||||
@ -942,7 +942,7 @@ static void shrink_ple_window(struct kvm_vcpu *vcpu)
|
||||
struct vmcb_control_area *control = &svm->vmcb->control;
|
||||
int old = control->pause_filter_count;
|
||||
|
||||
if (kvm_pause_in_guest(vcpu->kvm) || !old)
|
||||
if (kvm_pause_in_guest(vcpu->kvm))
|
||||
return;
|
||||
|
||||
control->pause_filter_count =
|
||||
@ -1400,13 +1400,13 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
|
||||
indirect_branch_prediction_barrier();
|
||||
}
|
||||
if (kvm_vcpu_apicv_active(vcpu))
|
||||
__avic_vcpu_load(vcpu, cpu);
|
||||
avic_vcpu_load(vcpu, cpu);
|
||||
}
|
||||
|
||||
static void svm_vcpu_put(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
if (kvm_vcpu_apicv_active(vcpu))
|
||||
__avic_vcpu_put(vcpu);
|
||||
avic_vcpu_put(vcpu);
|
||||
|
||||
svm_prepare_host_switch(vcpu);
|
||||
|
||||
|
@ -610,8 +610,8 @@ void avic_init_vmcb(struct vcpu_svm *svm, struct vmcb *vmcb);
|
||||
int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
|
||||
int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
|
||||
int avic_init_vcpu(struct vcpu_svm *svm);
|
||||
void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
|
||||
void __avic_vcpu_put(struct kvm_vcpu *vcpu);
|
||||
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
|
||||
void avic_vcpu_put(struct kvm_vcpu *vcpu);
|
||||
void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu);
|
||||
void avic_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
|
||||
void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
|
||||
|
@ -229,6 +229,9 @@ static const struct {
|
||||
#define L1D_CACHE_ORDER 4
|
||||
static void *vmx_l1d_flush_pages;
|
||||
|
||||
/* Control for disabling CPU Fill buffer clear */
|
||||
static bool __read_mostly vmx_fb_clear_ctrl_available;
|
||||
|
||||
static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
|
||||
{
|
||||
struct page *page;
|
||||
@ -360,6 +363,60 @@ static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
|
||||
return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option);
|
||||
}
|
||||
|
||||
static void vmx_setup_fb_clear_ctrl(void)
|
||||
{
|
||||
u64 msr;
|
||||
|
||||
if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES) &&
|
||||
!boot_cpu_has_bug(X86_BUG_MDS) &&
|
||||
!boot_cpu_has_bug(X86_BUG_TAA)) {
|
||||
rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr);
|
||||
if (msr & ARCH_CAP_FB_CLEAR_CTRL)
|
||||
vmx_fb_clear_ctrl_available = true;
|
||||
}
|
||||
}
|
||||
|
||||
static __always_inline void vmx_disable_fb_clear(struct vcpu_vmx *vmx)
|
||||
{
|
||||
u64 msr;
|
||||
|
||||
if (!vmx->disable_fb_clear)
|
||||
return;
|
||||
|
||||
rdmsrl(MSR_IA32_MCU_OPT_CTRL, msr);
|
||||
msr |= FB_CLEAR_DIS;
|
||||
wrmsrl(MSR_IA32_MCU_OPT_CTRL, msr);
|
||||
/* Cache the MSR value to avoid reading it later */
|
||||
vmx->msr_ia32_mcu_opt_ctrl = msr;
|
||||
}
|
||||
|
||||
static __always_inline void vmx_enable_fb_clear(struct vcpu_vmx *vmx)
|
||||
{
|
||||
if (!vmx->disable_fb_clear)
|
||||
return;
|
||||
|
||||
vmx->msr_ia32_mcu_opt_ctrl &= ~FB_CLEAR_DIS;
|
||||
wrmsrl(MSR_IA32_MCU_OPT_CTRL, vmx->msr_ia32_mcu_opt_ctrl);
|
||||
}
|
||||
|
||||
static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
|
||||
{
|
||||
vmx->disable_fb_clear = vmx_fb_clear_ctrl_available;
|
||||
|
||||
/*
|
||||
* If guest will not execute VERW, there is no need to set FB_CLEAR_DIS
|
||||
* at VMEntry. Skip the MSR read/write when a guest has no use case to
|
||||
* execute VERW.
|
||||
*/
|
||||
if ((vcpu->arch.arch_capabilities & ARCH_CAP_FB_CLEAR) ||
|
||||
((vcpu->arch.arch_capabilities & ARCH_CAP_MDS_NO) &&
|
||||
(vcpu->arch.arch_capabilities & ARCH_CAP_TAA_NO) &&
|
||||
(vcpu->arch.arch_capabilities & ARCH_CAP_PSDP_NO) &&
|
||||
(vcpu->arch.arch_capabilities & ARCH_CAP_FBSDP_NO) &&
|
||||
(vcpu->arch.arch_capabilities & ARCH_CAP_SBDR_SSDP_NO)))
|
||||
vmx->disable_fb_clear = false;
|
||||
}
|
||||
|
||||
static const struct kernel_param_ops vmentry_l1d_flush_ops = {
|
||||
.set = vmentry_l1d_flush_set,
|
||||
.get = vmentry_l1d_flush_get,
|
||||
@ -2252,6 +2309,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
|
||||
ret = kvm_set_msr_common(vcpu, msr_info);
|
||||
}
|
||||
|
||||
/* FB_CLEAR may have changed, also update the FB_CLEAR_DIS behavior */
|
||||
if (msr_index == MSR_IA32_ARCH_CAPABILITIES)
|
||||
vmx_update_fb_clear_dis(vcpu, vmx);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -4553,6 +4614,8 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
|
||||
kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
|
||||
|
||||
vpid_sync_context(vmx->vpid);
|
||||
|
||||
vmx_update_fb_clear_dis(vcpu, vmx);
|
||||
}
|
||||
|
||||
static void vmx_enable_irq_window(struct kvm_vcpu *vcpu)
|
||||
@ -6772,6 +6835,11 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
|
||||
vmx_l1d_flush(vcpu);
|
||||
else if (static_branch_unlikely(&mds_user_clear))
|
||||
mds_clear_cpu_buffers();
|
||||
else if (static_branch_unlikely(&mmio_stale_data_clear) &&
|
||||
kvm_arch_has_assigned_device(vcpu->kvm))
|
||||
mds_clear_cpu_buffers();
|
||||
|
||||
vmx_disable_fb_clear(vmx);
|
||||
|
||||
if (vcpu->arch.cr2 != native_read_cr2())
|
||||
native_write_cr2(vcpu->arch.cr2);
|
||||
@ -6781,6 +6849,8 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
|
||||
|
||||
vcpu->arch.cr2 = native_read_cr2();
|
||||
|
||||
vmx_enable_fb_clear(vmx);
|
||||
|
||||
guest_state_exit_irqoff();
|
||||
}
|
||||
|
||||
@ -7709,7 +7779,9 @@ static bool vmx_check_apicv_inhibit_reasons(enum kvm_apicv_inhibit reason)
|
||||
ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
|
||||
BIT(APICV_INHIBIT_REASON_ABSENT) |
|
||||
BIT(APICV_INHIBIT_REASON_HYPERV) |
|
||||
BIT(APICV_INHIBIT_REASON_BLOCKIRQ);
|
||||
BIT(APICV_INHIBIT_REASON_BLOCKIRQ) |
|
||||
BIT(APICV_INHIBIT_REASON_APIC_ID_MODIFIED) |
|
||||
BIT(APICV_INHIBIT_REASON_APIC_BASE_MODIFIED);
|
||||
|
||||
return supported & BIT(reason);
|
||||
}
|
||||
@ -8212,6 +8284,8 @@ static int __init vmx_init(void)
|
||||
return r;
|
||||
}
|
||||
|
||||
vmx_setup_fb_clear_ctrl();
|
||||
|
||||
for_each_possible_cpu(cpu) {
|
||||
INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
|
||||
|
||||
|
@ -348,6 +348,8 @@ struct vcpu_vmx {
|
||||
u64 msr_ia32_feature_control_valid_bits;
|
||||
/* SGX Launch Control public key hash */
|
||||
u64 msr_ia32_sgxlepubkeyhash[4];
|
||||
u64 msr_ia32_mcu_opt_ctrl;
|
||||
bool disable_fb_clear;
|
||||
|
||||
struct pt_desc pt_desc;
|
||||
struct lbr_desc lbr_desc;
|
||||
|
@ -1617,6 +1617,9 @@ static u64 kvm_get_arch_capabilities(void)
|
||||
*/
|
||||
}
|
||||
|
||||
/* Guests don't need to know "Fill buffer clear control" exists */
|
||||
data &= ~ARCH_CAP_FB_CLEAR_CTRL;
|
||||
|
||||
return data;
|
||||
}
|
||||
|
||||
@ -9850,6 +9853,7 @@ void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
|
||||
return;
|
||||
|
||||
down_read(&vcpu->kvm->arch.apicv_update_lock);
|
||||
preempt_disable();
|
||||
|
||||
activate = kvm_vcpu_apicv_activated(vcpu);
|
||||
|
||||
@ -9870,6 +9874,7 @@ void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
|
||||
kvm_make_request(KVM_REQ_EVENT, vcpu);
|
||||
|
||||
out:
|
||||
preempt_enable();
|
||||
up_read(&vcpu->kvm->arch.apicv_update_lock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(kvm_vcpu_update_apicv);
|
||||
|
20
block/bio.c
@ -1747,26 +1747,6 @@ bad:
|
||||
}
|
||||
EXPORT_SYMBOL(bioset_init);
|
||||
|
||||
/*
|
||||
* Initialize and setup a new bio_set, based on the settings from
|
||||
* another bio_set.
|
||||
*/
|
||||
int bioset_init_from_src(struct bio_set *bs, struct bio_set *src)
|
||||
{
|
||||
int flags;
|
||||
|
||||
flags = 0;
|
||||
if (src->bvec_pool.min_nr)
|
||||
flags |= BIOSET_NEED_BVECS;
|
||||
if (src->rescue_workqueue)
|
||||
flags |= BIOSET_NEED_RESCUER;
|
||||
if (src->cache)
|
||||
flags |= BIOSET_PERCPU_CACHE;
|
||||
|
||||
return bioset_init(bs, src->bio_pool.min_nr, src->front_pad, flags);
|
||||
}
|
||||
EXPORT_SYMBOL(bioset_init_from_src);
|
||||
|
||||
static int __init init_bio(void)
|
||||
{
|
||||
int i;
|
||||
|
2
certs/.gitignore
@ -1,5 +1,5 @@
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
/blacklist_hashes_checked
|
||||
/blacklist_hash_list
|
||||
/extract-cert
|
||||
/x509_certificate_list
|
||||
/x509_revocation_list
|
||||
|
@ -7,22 +7,22 @@ obj-$(CONFIG_SYSTEM_TRUSTED_KEYRING) += system_keyring.o system_certificates.o c
|
||||
obj-$(CONFIG_SYSTEM_BLACKLIST_KEYRING) += blacklist.o common.o
|
||||
obj-$(CONFIG_SYSTEM_REVOCATION_LIST) += revocation_certificates.o
|
||||
ifneq ($(CONFIG_SYSTEM_BLACKLIST_HASH_LIST),)
|
||||
quiet_cmd_check_blacklist_hashes = CHECK $(patsubst "%",%,$(2))
|
||||
cmd_check_blacklist_hashes = $(AWK) -f $(srctree)/scripts/check-blacklist-hashes.awk $(2); touch $@
|
||||
|
||||
$(eval $(call config_filename,SYSTEM_BLACKLIST_HASH_LIST))
|
||||
$(obj)/blacklist_hashes.o: $(obj)/blacklist_hash_list
|
||||
CFLAGS_blacklist_hashes.o := -I $(obj)
|
||||
|
||||
$(obj)/blacklist_hashes.o: $(obj)/blacklist_hashes_checked
|
||||
quiet_cmd_check_and_copy_blacklist_hash_list = GEN $@
|
||||
cmd_check_and_copy_blacklist_hash_list = \
|
||||
$(AWK) -f $(srctree)/scripts/check-blacklist-hashes.awk $(CONFIG_SYSTEM_BLACKLIST_HASH_LIST) >&2; \
|
||||
cat $(CONFIG_SYSTEM_BLACKLIST_HASH_LIST) > $@
|
||||
|
||||
CFLAGS_blacklist_hashes.o += -I$(srctree)
|
||||
|
||||
targets += blacklist_hashes_checked
|
||||
$(obj)/blacklist_hashes_checked: $(SYSTEM_BLACKLIST_HASH_LIST_SRCPREFIX)$(SYSTEM_BLACKLIST_HASH_LIST_FILENAME) scripts/check-blacklist-hashes.awk FORCE
|
||||
$(call if_changed,check_blacklist_hashes,$(SYSTEM_BLACKLIST_HASH_LIST_SRCPREFIX)$(CONFIG_SYSTEM_BLACKLIST_HASH_LIST))
|
||||
$(obj)/blacklist_hash_list: $(CONFIG_SYSTEM_BLACKLIST_HASH_LIST) FORCE
|
||||
$(call if_changed,check_and_copy_blacklist_hash_list)
|
||||
obj-$(CONFIG_SYSTEM_BLACKLIST_KEYRING) += blacklist_hashes.o
|
||||
else
|
||||
obj-$(CONFIG_SYSTEM_BLACKLIST_KEYRING) += blacklist_nohashes.o
|
||||
endif
|
||||
targets += blacklist_hash_list
|
||||
|
||||
quiet_cmd_extract_certs = CERT $@
|
||||
cmd_extract_certs = $(obj)/extract-cert $(extract-cert-in) $@
|
||||
@ -33,7 +33,7 @@ $(obj)/system_certificates.o: $(obj)/x509_certificate_list
|
||||
$(obj)/x509_certificate_list: $(CONFIG_SYSTEM_TRUSTED_KEYS) $(obj)/extract-cert FORCE
|
||||
$(call if_changed,extract_certs)
|
||||
|
||||
targets += x509_certificate_list blacklist_hashes_checked
|
||||
targets += x509_certificate_list
|
||||
|
||||
# If module signing is requested, say by allyesconfig, but a key has not been
|
||||
# supplied, then one will need to be generated to make sure the build does not
|
||||
|
@ -1,7 +1,7 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include "blacklist.h"
|
||||
|
||||
const char __initdata *const blacklist_hashes[] = {
|
||||
#include CONFIG_SYSTEM_BLACKLIST_HASH_LIST
|
||||
const char __initconst *const blacklist_hashes[] = {
|
||||
#include "blacklist_hash_list"
|
||||
, NULL
|
||||
};
|
||||
|
@ -564,6 +564,12 @@ ssize_t __weak cpu_show_srbds(struct device *dev,
|
||||
return sysfs_emit(buf, "Not affected\n");
|
||||
}
|
||||
|
||||
ssize_t __weak cpu_show_mmio_stale_data(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
return sysfs_emit(buf, "Not affected\n");
|
||||
}
|
||||
|
||||
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
|
||||
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
|
||||
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
|
||||
@ -573,6 +579,7 @@ static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL);
|
||||
static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL);
|
||||
static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL);
|
||||
static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL);
|
||||
static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL);
|
||||
|
||||
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
|
||||
&dev_attr_meltdown.attr,
|
||||
@ -584,6 +591,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
|
||||
&dev_attr_tsx_async_abort.attr,
|
||||
&dev_attr_itlb_multihit.attr,
|
||||
&dev_attr_srbds.attr,
|
||||
&dev_attr_mmio_stale_data.attr,
|
||||
NULL
|
||||
};
|
||||
|
||||
|
@ -429,28 +429,40 @@ config ADI
|
||||
driver include crash and makedumpfile.
|
||||
|
||||
config RANDOM_TRUST_CPU
|
||||
bool "Trust the CPU manufacturer to initialize Linux's CRNG"
|
||||
bool "Initialize RNG using CPU RNG instructions"
|
||||
default y
|
||||
depends on ARCH_RANDOM
|
||||
default n
|
||||
help
|
||||
Assume that CPU manufacturer (e.g., Intel or AMD for RDSEED or
|
||||
RDRAND, IBM for the S390 and Power PC architectures) is trustworthy
|
||||
for the purposes of initializing Linux's CRNG. Since this is not
|
||||
something that can be independently audited, this amounts to trusting
|
||||
that CPU manufacturer (perhaps with the insistence or mandate
|
||||
of a Nation State's intelligence or law enforcement agencies)
|
||||
has not installed a hidden back door to compromise the CPU's
|
||||
random number generation facilities. This can also be configured
|
||||
at boot with "random.trust_cpu=on/off".
|
||||
Initialize the RNG using random numbers supplied by the CPU's
|
||||
RNG instructions (e.g. RDRAND), if supported and available. These
|
||||
random numbers are never used directly, but are rather hashed into
|
||||
the main input pool, and this happens regardless of whether or not
|
||||
this option is enabled. Instead, this option controls whether the
|
||||
they are credited and hence can initialize the RNG. Additionally,
|
||||
other sources of randomness are always used, regardless of this
|
||||
setting. Enabling this implies trusting that the CPU can supply high
|
||||
quality and non-backdoored random numbers.
|
||||
|
||||
Say Y here unless you have reason to mistrust your CPU or believe
|
||||
its RNG facilities may be faulty. This may also be configured at
|
||||
boot time with "random.trust_cpu=on/off".
|
||||
|
||||
config RANDOM_TRUST_BOOTLOADER
|
||||
bool "Trust the bootloader to initialize Linux's CRNG"
|
||||
bool "Initialize RNG using bootloader-supplied seed"
|
||||
default y
|
||||
help
|
||||
Some bootloaders can provide entropy to increase the kernel's initial
|
||||
device randomness. Say Y here to assume the entropy provided by the
|
||||
booloader is trustworthy so it will be added to the kernel's entropy
|
||||
pool. Otherwise, say N here so it will be regarded as device input that
|
||||
only mixes the entropy pool. This can also be configured at boot with
|
||||
"random.trust_bootloader=on/off".
|
||||
Initialize the RNG using a seed supplied by the bootloader or boot
|
||||
environment (e.g. EFI or a bootloader-generated device tree). This
|
||||
seed is not used directly, but is rather hashed into the main input
|
||||
pool, and this happens regardless of whether or not this option is
|
||||
enabled. Instead, this option controls whether the seed is credited
|
||||
and hence can initialize the RNG. Additionally, other sources of
|
||||
randomness are always used, regardless of this setting. Enabling
|
||||
this implies trusting that the bootloader can supply high quality and
|
||||
non-backdoored seeds.
|
||||
|
||||
Say Y here unless you have reason to mistrust your bootloader or
|
||||
believe its RNG facilities may be faulty. This may also be configured
|
||||
at boot time with "random.trust_bootloader=on/off".
|
||||
|
||||
endmenu
|
||||
|
@ -159,6 +159,8 @@ static int probe_common(struct virtio_device *vdev)
|
||||
goto err_find;
|
||||
}
|
||||
|
||||
virtio_device_ready(vdev);
|
||||
|
||||
/* we always have a pending entropy request */
|
||||
request_entropy(vi);
|
||||
|
||||
|
@ -650,7 +650,8 @@ static void __cold _credit_init_bits(size_t bits)
|
||||
|
||||
if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
|
||||
crng_reseed(); /* Sets crng_init to CRNG_READY under base_crng.lock. */
|
||||
execute_in_process_context(crng_set_ready, &set_ready);
|
||||
if (static_key_initialized)
|
||||
execute_in_process_context(crng_set_ready, &set_ready);
|
||||
wake_up_interruptible(&crng_init_wait);
|
||||
kill_fasync(&fasync, SIGIO, POLL_IN);
|
||||
pr_notice("crng init done\n");
|
||||
@ -724,9 +725,8 @@ static void __cold _credit_init_bits(size_t bits)
|
||||
*
|
||||
**********************************************************************/
|
||||
|
||||
static bool used_arch_random;
|
||||
static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
|
||||
static bool trust_bootloader __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER);
|
||||
static bool trust_cpu __initdata = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
|
||||
static bool trust_bootloader __initdata = IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER);
|
||||
static int __init parse_trust_cpu(char *arg)
|
||||
{
|
||||
return kstrtobool(arg, &trust_cpu);
|
||||
@ -776,7 +776,7 @@ static struct notifier_block pm_notifier = { .notifier_call = random_pm_notifica
|
||||
int __init random_init(const char *command_line)
|
||||
{
|
||||
ktime_t now = ktime_get_real();
|
||||
unsigned int i, arch_bytes;
|
||||
unsigned int i, arch_bits;
|
||||
unsigned long entropy;
|
||||
|
||||
#if defined(LATENT_ENTROPY_PLUGIN)
|
||||
@ -784,12 +784,12 @@ int __init random_init(const char *command_line)
|
||||
_mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed));
|
||||
#endif
|
||||
|
||||
for (i = 0, arch_bytes = BLAKE2S_BLOCK_SIZE;
|
||||
for (i = 0, arch_bits = BLAKE2S_BLOCK_SIZE * 8;
|
||||
i < BLAKE2S_BLOCK_SIZE; i += sizeof(entropy)) {
|
||||
if (!arch_get_random_seed_long_early(&entropy) &&
|
||||
!arch_get_random_long_early(&entropy)) {
|
||||
entropy = random_get_entropy();
|
||||
arch_bytes -= sizeof(entropy);
|
||||
arch_bits -= sizeof(entropy) * 8;
|
||||
}
|
||||
_mix_pool_bytes(&entropy, sizeof(entropy));
|
||||
}
|
||||
@ -798,11 +798,18 @@ int __init random_init(const char *command_line)
|
||||
_mix_pool_bytes(command_line, strlen(command_line));
|
||||
add_latent_entropy();
|
||||
|
||||
/*
|
||||
* If we were initialized by the bootloader before jump labels are
|
||||
* initialized, then we should enable the static branch here, where
|
||||
* it's guaranteed that jump labels have been initialized.
|
||||
*/
|
||||
if (!static_branch_likely(&crng_is_ready) && crng_init >= CRNG_READY)
|
||||
crng_set_ready(NULL);
|
||||
|
||||
if (crng_ready())
|
||||
crng_reseed();
|
||||
else if (trust_cpu)
|
||||
credit_init_bits(arch_bytes * 8);
|
||||
used_arch_random = arch_bytes * 8 >= POOL_READY_BITS;
|
||||
_credit_init_bits(arch_bits);
|
||||
|
||||
WARN_ON(register_pm_notifier(&pm_notifier));
|
||||
|
||||
@ -811,17 +818,6 @@ int __init random_init(const char *command_line)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns whether arch randomness has been mixed into the initial
|
||||
* state of the RNG, regardless of whether or not that randomness
|
||||
* was credited. Knowing this is only good for a very limited set
|
||||
* of uses, such as early init printk pointer obfuscation.
|
||||
*/
|
||||
bool rng_has_arch_random(void)
|
||||
{
|
||||
return used_arch_random;
|
||||
}
|
||||
|
||||
/*
|
||||
* Add device- or boot-specific data to the input pool to help
|
||||
* initialize it.
|
||||
@ -865,13 +861,12 @@ EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
|
||||
* Handle random seed passed by bootloader, and credit it if
|
||||
* CONFIG_RANDOM_TRUST_BOOTLOADER is set.
|
||||
*/
|
||||
void __cold add_bootloader_randomness(const void *buf, size_t len)
|
||||
void __init add_bootloader_randomness(const void *buf, size_t len)
|
||||
{
|
||||
mix_pool_bytes(buf, len);
|
||||
if (trust_bootloader)
|
||||
credit_init_bits(len * 8);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(add_bootloader_randomness);
|
||||
|
||||
#if IS_ENABLED(CONFIG_VMGENID)
|
||||
static BLOCKING_NOTIFIER_HEAD(vmfork_chain);
|
||||
|
@ -15,6 +15,7 @@
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/regmap.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
#define CRYSTALCOVE_GPIO_NUM 16
|
||||
#define CRYSTALCOVE_VGPIO_NUM 95
|
||||
@ -110,8 +111,7 @@ static inline int to_reg(int gpio, enum ctrl_register reg_type)
|
||||
return reg + gpio % 8;
|
||||
}
|
||||
|
||||
static void crystalcove_update_irq_mask(struct crystalcove_gpio *cg,
|
||||
int gpio)
|
||||
static void crystalcove_update_irq_mask(struct crystalcove_gpio *cg, int gpio)
|
||||
{
|
||||
u8 mirqs0 = gpio < 8 ? MGPIO0IRQS0 : MGPIO1IRQS0;
|
||||
int mask = BIT(gpio % 8);
|
||||
@ -140,8 +140,7 @@ static int crystalcove_gpio_dir_in(struct gpio_chip *chip, unsigned int gpio)
|
||||
return regmap_write(cg->regmap, reg, CTLO_INPUT_SET);
|
||||
}
|
||||
|
||||
static int crystalcove_gpio_dir_out(struct gpio_chip *chip, unsigned int gpio,
|
||||
int value)
|
||||
static int crystalcove_gpio_dir_out(struct gpio_chip *chip, unsigned int gpio, int value)
|
||||
{
|
||||
struct crystalcove_gpio *cg = gpiochip_get_data(chip);
|
||||
int reg = to_reg(gpio, CTRL_OUT);
|
||||
@ -168,8 +167,7 @@ static int crystalcove_gpio_get(struct gpio_chip *chip, unsigned int gpio)
|
||||
return val & 0x1;
|
||||
}
|
||||
|
||||
static void crystalcove_gpio_set(struct gpio_chip *chip,
|
||||
unsigned int gpio, int value)
|
||||
static void crystalcove_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
|
||||
{
|
||||
struct crystalcove_gpio *cg = gpiochip_get_data(chip);
|
||||
int reg = to_reg(gpio, CTRL_OUT);
|
||||
@ -185,10 +183,10 @@ static void crystalcove_gpio_set(struct gpio_chip *chip,
|
||||
|
||||
static int crystalcove_irq_type(struct irq_data *data, unsigned int type)
|
||||
{
|
||||
struct crystalcove_gpio *cg =
|
||||
gpiochip_get_data(irq_data_get_irq_chip_data(data));
|
||||
struct crystalcove_gpio *cg = gpiochip_get_data(irq_data_get_irq_chip_data(data));
|
||||
irq_hw_number_t hwirq = irqd_to_hwirq(data);
|
||||
|
||||
if (data->hwirq >= CRYSTALCOVE_GPIO_NUM)
|
||||
if (hwirq >= CRYSTALCOVE_GPIO_NUM)
|
||||
return 0;
|
||||
|
||||
switch (type) {
|
||||
@ -215,22 +213,20 @@ static int crystalcove_irq_type(struct irq_data *data, unsigned int type)
|
||||
|
||||
static void crystalcove_bus_lock(struct irq_data *data)
|
||||
{
|
||||
struct crystalcove_gpio *cg =
|
||||
gpiochip_get_data(irq_data_get_irq_chip_data(data));
|
||||
struct crystalcove_gpio *cg = gpiochip_get_data(irq_data_get_irq_chip_data(data));
|
||||
|
||||
mutex_lock(&cg->buslock);
|
||||
}
|
||||
|
||||
static void crystalcove_bus_sync_unlock(struct irq_data *data)
|
||||
{
|
||||
struct crystalcove_gpio *cg =
|
||||
gpiochip_get_data(irq_data_get_irq_chip_data(data));
|
||||
int gpio = data->hwirq;
|
||||
struct crystalcove_gpio *cg = gpiochip_get_data(irq_data_get_irq_chip_data(data));
|
||||
irq_hw_number_t hwirq = irqd_to_hwirq(data);
|
||||
|
||||
if (cg->update & UPDATE_IRQ_TYPE)
|
||||
crystalcove_update_irq_ctrl(cg, gpio);
|
||||
crystalcove_update_irq_ctrl(cg, hwirq);
|
||||
if (cg->update & UPDATE_IRQ_MASK)
|
||||
crystalcove_update_irq_mask(cg, gpio);
|
||||
crystalcove_update_irq_mask(cg, hwirq);
|
||||
cg->update = 0;
|
||||
|
||||
mutex_unlock(&cg->buslock);
|
||||
@ -238,34 +234,43 @@ static void crystalcove_bus_sync_unlock(struct irq_data *data)
|
||||
|
||||
static void crystalcove_irq_unmask(struct irq_data *data)
|
||||
{
|
||||
struct crystalcove_gpio *cg =
|
||||
gpiochip_get_data(irq_data_get_irq_chip_data(data));
|
||||
struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
|
||||
struct crystalcove_gpio *cg = gpiochip_get_data(gc);
|
||||
irq_hw_number_t hwirq = irqd_to_hwirq(data);
|
||||
|
||||
if (data->hwirq < CRYSTALCOVE_GPIO_NUM) {
|
||||
cg->set_irq_mask = false;
|
||||
cg->update |= UPDATE_IRQ_MASK;
|
||||
}
|
||||
if (hwirq >= CRYSTALCOVE_GPIO_NUM)
|
||||
return;
|
||||
|
||||
gpiochip_enable_irq(gc, hwirq);
|
||||
|
||||
cg->set_irq_mask = false;
|
||||
cg->update |= UPDATE_IRQ_MASK;
|
||||
}
|
||||
|
||||
static void crystalcove_irq_mask(struct irq_data *data)
|
||||
{
|
||||
struct crystalcove_gpio *cg =
|
||||
gpiochip_get_data(irq_data_get_irq_chip_data(data));
|
||||
struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
|
||||
struct crystalcove_gpio *cg = gpiochip_get_data(gc);
|
||||
irq_hw_number_t hwirq = irqd_to_hwirq(data);
|
||||
|
||||
if (data->hwirq < CRYSTALCOVE_GPIO_NUM) {
|
||||
cg->set_irq_mask = true;
|
||||
cg->update |= UPDATE_IRQ_MASK;
|
||||
}
|
||||
if (hwirq >= CRYSTALCOVE_GPIO_NUM)
|
||||
return;
|
||||
|
||||
cg->set_irq_mask = true;
|
||||
cg->update |= UPDATE_IRQ_MASK;
|
||||
|
||||
gpiochip_disable_irq(gc, hwirq);
|
||||
}
|
||||
|
||||
static struct irq_chip crystalcove_irqchip = {
|
||||
static const struct irq_chip crystalcove_irqchip = {
|
||||
.name = "Crystal Cove",
|
||||
.irq_mask = crystalcove_irq_mask,
|
||||
.irq_unmask = crystalcove_irq_unmask,
|
||||
.irq_set_type = crystalcove_irq_type,
|
||||
.irq_bus_lock = crystalcove_bus_lock,
|
||||
.irq_bus_sync_unlock = crystalcove_bus_sync_unlock,
|
||||
.flags = IRQCHIP_SKIP_SET_WAKE,
|
||||
.flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_IMMUTABLE,
|
||||
GPIOCHIP_IRQ_RESOURCE_HELPERS,
|
||||
};
|
||||
|
||||
static irqreturn_t crystalcove_gpio_irq_handler(int irq, void *data)
|
||||
@ -293,8 +298,7 @@ static irqreturn_t crystalcove_gpio_irq_handler(int irq, void *data)
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
static void crystalcove_gpio_dbg_show(struct seq_file *s,
|
||||
struct gpio_chip *chip)
|
||||
static void crystalcove_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
|
||||
{
|
||||
struct crystalcove_gpio *cg = gpiochip_get_data(chip);
|
||||
int gpio, offset;
|
||||
@ -353,7 +357,7 @@ static int crystalcove_gpio_probe(struct platform_device *pdev)
|
||||
cg->regmap = pmic->regmap;
|
||||
|
||||
girq = &cg->chip.irq;
|
||||
girq->chip = &crystalcove_irqchip;
|
||||
gpio_irq_chip_set_chip(girq, &crystalcove_irqchip);
|
||||
/* This will let us handle the parent IRQ in the driver */
|
||||
girq->parent_handler = NULL;
|
||||
girq->num_parents = 0;
|
||||
|
@ -46,7 +46,6 @@
|
||||
struct dln2_gpio {
|
||||
struct platform_device *pdev;
|
||||
struct gpio_chip gpio;
|
||||
struct irq_chip irqchip;
|
||||
|
||||
/*
|
||||
* Cache pin direction to save us one transfer, since the hardware has
|
||||
@ -306,6 +305,7 @@ static void dln2_irq_unmask(struct irq_data *irqd)
|
||||
struct dln2_gpio *dln2 = gpiochip_get_data(gc);
|
||||
int pin = irqd_to_hwirq(irqd);
|
||||
|
||||
gpiochip_enable_irq(gc, pin);
|
||||
set_bit(pin, dln2->unmasked_irqs);
|
||||
}
|
||||
|
||||
@ -316,6 +316,7 @@ static void dln2_irq_mask(struct irq_data *irqd)
|
||||
int pin = irqd_to_hwirq(irqd);
|
||||
|
||||
clear_bit(pin, dln2->unmasked_irqs);
|
||||
gpiochip_disable_irq(gc, pin);
|
||||
}
|
||||
|
||||
static int dln2_irq_set_type(struct irq_data *irqd, unsigned type)
|
||||
@ -384,6 +385,17 @@ static void dln2_irq_bus_unlock(struct irq_data *irqd)
|
||||
mutex_unlock(&dln2->irq_lock);
|
||||
}
|
||||
|
||||
static const struct irq_chip dln2_irqchip = {
|
||||
.name = "dln2-irq",
|
||||
.irq_mask = dln2_irq_mask,
|
||||
.irq_unmask = dln2_irq_unmask,
|
||||
.irq_set_type = dln2_irq_set_type,
|
||||
.irq_bus_lock = dln2_irq_bus_lock,
|
||||
.irq_bus_sync_unlock = dln2_irq_bus_unlock,
|
||||
.flags = IRQCHIP_IMMUTABLE,
|
||||
GPIOCHIP_IRQ_RESOURCE_HELPERS,
|
||||
};
|
||||
|
||||
static void dln2_gpio_event(struct platform_device *pdev, u16 echo,
|
||||
const void *data, int len)
|
||||
{
|
||||
@ -465,15 +477,8 @@ static int dln2_gpio_probe(struct platform_device *pdev)
|
||||
dln2->gpio.direction_output = dln2_gpio_direction_output;
|
||||
dln2->gpio.set_config = dln2_gpio_set_config;
|
||||
|
||||
dln2->irqchip.name = "dln2-irq",
|
||||
dln2->irqchip.irq_mask = dln2_irq_mask,
|
||||
dln2->irqchip.irq_unmask = dln2_irq_unmask,
|
||||
dln2->irqchip.irq_set_type = dln2_irq_set_type,
|
||||
dln2->irqchip.irq_bus_lock = dln2_irq_bus_lock,
|
||||
dln2->irqchip.irq_bus_sync_unlock = dln2_irq_bus_unlock,
|
||||
|
||||
girq = &dln2->gpio.irq;
|
||||
girq->chip = &dln2->irqchip;
|
||||
gpio_irq_chip_set_chip(girq, &dln2_irqchip);
|
||||
/* The event comes from the outside so no parent handler */
|
||||
girq->parent_handler = NULL;
|
||||
girq->num_parents = 0;
|
||||
|
@ -662,10 +662,9 @@ static int dwapb_get_clks(struct dwapb_gpio *gpio)
|
||||
gpio->clks[1].id = "db";
|
||||
err = devm_clk_bulk_get_optional(gpio->dev, DWAPB_NR_CLOCKS,
|
||||
gpio->clks);
|
||||
if (err) {
|
||||
dev_err(gpio->dev, "Cannot get APB/Debounce clocks\n");
|
||||
return err;
|
||||
}
|
||||
if (err)
|
||||
return dev_err_probe(gpio->dev, err,
|
||||
"Cannot get APB/Debounce clocks\n");
|
||||
|
||||
err = clk_bulk_prepare_enable(DWAPB_NR_CLOCKS, gpio->clks);
|
||||
if (err) {
|
||||
|
@ -220,10 +220,8 @@ static void mrfld_irq_ack(struct irq_data *d)
|
||||
raw_spin_unlock_irqrestore(&priv->lock, flags);
|
||||
}
|
||||
|
||||
static void mrfld_irq_unmask_mask(struct irq_data *d, bool unmask)
|
||||
static void mrfld_irq_unmask_mask(struct mrfld_gpio *priv, u32 gpio, bool unmask)
|
||||
{
|
||||
struct mrfld_gpio *priv = irq_data_get_irq_chip_data(d);
|
||||
u32 gpio = irqd_to_hwirq(d);
|
||||
void __iomem *gimr = gpio_reg(&priv->chip, gpio, GIMR);
|
||||
unsigned long flags;
|
||||
u32 value;
|
||||
@ -241,12 +239,20 @@ static void mrfld_irq_unmask_mask(struct irq_data *d, bool unmask)
|
||||
|
||||
static void mrfld_irq_mask(struct irq_data *d)
|
||||
{
|
||||
mrfld_irq_unmask_mask(d, false);
|
||||
struct mrfld_gpio *priv = irq_data_get_irq_chip_data(d);
|
||||
u32 gpio = irqd_to_hwirq(d);
|
||||
|
||||
mrfld_irq_unmask_mask(priv, gpio, false);
|
||||
gpiochip_disable_irq(&priv->chip, gpio);
|
||||
}
|
||||
|
||||
static void mrfld_irq_unmask(struct irq_data *d)
|
||||
{
|
||||
mrfld_irq_unmask_mask(d, true);
|
||||
struct mrfld_gpio *priv = irq_data_get_irq_chip_data(d);
|
||||
u32 gpio = irqd_to_hwirq(d);
|
||||
|
||||
gpiochip_enable_irq(&priv->chip, gpio);
|
||||
mrfld_irq_unmask_mask(priv, gpio, true);
|
||||
}
|
||||
|
||||
static int mrfld_irq_set_type(struct irq_data *d, unsigned int type)
|
||||
@ -329,13 +335,15 @@ static int mrfld_irq_set_wake(struct irq_data *d, unsigned int on)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct irq_chip mrfld_irqchip = {
|
||||
static const struct irq_chip mrfld_irqchip = {
|
||||
.name = "gpio-merrifield",
|
||||
.irq_ack = mrfld_irq_ack,
|
||||
.irq_mask = mrfld_irq_mask,
|
||||
.irq_unmask = mrfld_irq_unmask,
|
||||
.irq_set_type = mrfld_irq_set_type,
|
||||
.irq_set_wake = mrfld_irq_set_wake,
|
||||
.flags = IRQCHIP_IMMUTABLE,
|
||||
GPIOCHIP_IRQ_RESOURCE_HELPERS,
|
||||
};
|
||||
|
||||
static void mrfld_irq_handler(struct irq_desc *desc)
|
||||
@ -482,7 +490,7 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
|
||||
return retval;
|
||||
|
||||
girq = &priv->chip.irq;
|
||||
girq->chip = &mrfld_irqchip;
|
||||
gpio_irq_chip_set_chip(girq, &mrfld_irqchip);
|
||||
girq->init_hw = mrfld_irq_init_hw;
|
||||
girq->parent_handler = mrfld_irq_handler;
|
||||
girq->num_parents = 1;
|
||||
|
@@ -38,7 +38,6 @@

struct sch_gpio {
struct gpio_chip chip;
struct irq_chip irqchip;
spinlock_t lock;
unsigned short iobase;
unsigned short resume_base;
@@ -218,11 +217,9 @@ static void sch_irq_ack(struct irq_data *d)
spin_unlock_irqrestore(&sch->lock, flags);
}

static void sch_irq_mask_unmask(struct irq_data *d, int val)
static void sch_irq_mask_unmask(struct gpio_chip *gc, irq_hw_number_t gpio_num, int val)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct sch_gpio *sch = gpiochip_get_data(gc);
irq_hw_number_t gpio_num = irqd_to_hwirq(d);
unsigned long flags;

spin_lock_irqsave(&sch->lock, flags);
@@ -232,14 +229,32 @@ static void sch_irq_mask_unmask(struct irq_data *d, int val)

static void sch_irq_mask(struct irq_data *d)
{
sch_irq_mask_unmask(d, 0);
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
irq_hw_number_t gpio_num = irqd_to_hwirq(d);

sch_irq_mask_unmask(gc, gpio_num, 0);
gpiochip_disable_irq(gc, gpio_num);
}

static void sch_irq_unmask(struct irq_data *d)
{
sch_irq_mask_unmask(d, 1);
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
irq_hw_number_t gpio_num = irqd_to_hwirq(d);

gpiochip_enable_irq(gc, gpio_num);
sch_irq_mask_unmask(gc, gpio_num, 1);
}

static const struct irq_chip sch_irqchip = {
.name = "sch_gpio",
.irq_ack = sch_irq_ack,
.irq_mask = sch_irq_mask,
.irq_unmask = sch_irq_unmask,
.irq_set_type = sch_irq_type,
.flags = IRQCHIP_IMMUTABLE,
GPIOCHIP_IRQ_RESOURCE_HELPERS,
};

static u32 sch_gpio_gpe_handler(acpi_handle gpe_device, u32 gpe, void *context)
{
struct sch_gpio *sch = context;
@@ -367,14 +382,8 @@ static int sch_gpio_probe(struct platform_device *pdev)

platform_set_drvdata(pdev, sch);

sch->irqchip.name = "sch_gpio";
sch->irqchip.irq_ack = sch_irq_ack;
sch->irqchip.irq_mask = sch_irq_mask;
sch->irqchip.irq_unmask = sch_irq_unmask;
sch->irqchip.irq_set_type = sch_irq_type;

girq = &sch->chip.irq;
girq->chip = &sch->irqchip;
gpio_irq_chip_set_chip(girq, &sch_irqchip);
girq->num_parents = 0;
girq->parents = NULL;
girq->parent_handler = NULL;
@@ -299,6 +299,8 @@ static void wcove_irq_unmask(struct irq_data *data)
if (gpio >= WCOVE_GPIO_NUM)
return;

gpiochip_enable_irq(chip, gpio);

wg->set_irq_mask = false;
wg->update |= UPDATE_IRQ_MASK;
}
@@ -314,15 +316,19 @@ static void wcove_irq_mask(struct irq_data *data)

wg->set_irq_mask = true;
wg->update |= UPDATE_IRQ_MASK;

gpiochip_disable_irq(chip, gpio);
}

static struct irq_chip wcove_irqchip = {
static const struct irq_chip wcove_irqchip = {
.name = "Whiskey Cove",
.irq_mask = wcove_irq_mask,
.irq_unmask = wcove_irq_unmask,
.irq_set_type = wcove_irq_type,
.irq_bus_lock = wcove_bus_lock,
.irq_bus_sync_unlock = wcove_bus_sync_unlock,
.flags = IRQCHIP_IMMUTABLE,
GPIOCHIP_IRQ_RESOURCE_HELPERS,
};

static irqreturn_t wcove_gpio_irq_handler(int irq, void *data)
@@ -452,7 +458,7 @@ static int wcove_gpio_probe(struct platform_device *pdev)
}

girq = &wg->chip.irq;
girq->chip = &wcove_irqchip;
gpio_irq_chip_set_chip(girq, &wcove_irqchip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
@ -33,6 +33,14 @@ struct dm_kobject_holder {
|
||||
* access their members!
|
||||
*/
|
||||
|
||||
/*
|
||||
* For mempools pre-allocation at the table loading time.
|
||||
*/
|
||||
struct dm_md_mempools {
|
||||
struct bio_set bs;
|
||||
struct bio_set io_bs;
|
||||
};
|
||||
|
||||
struct mapped_device {
|
||||
struct mutex suspend_lock;
|
||||
|
||||
@ -110,8 +118,7 @@ struct mapped_device {
|
||||
/*
|
||||
* io objects are allocated from here.
|
||||
*/
|
||||
struct bio_set io_bs;
|
||||
struct bio_set bs;
|
||||
struct dm_md_mempools *mempools;
|
||||
|
||||
/* kobject and completion */
|
||||
struct dm_kobject_holder kobj_holder;
|
||||
|
@ -319,7 +319,7 @@ static int setup_clone(struct request *clone, struct request *rq,
|
||||
{
|
||||
int r;
|
||||
|
||||
r = blk_rq_prep_clone(clone, rq, &tio->md->bs, gfp_mask,
|
||||
r = blk_rq_prep_clone(clone, rq, &tio->md->mempools->bs, gfp_mask,
|
||||
dm_rq_bio_constructor, tio);
|
||||
if (r)
|
||||
return r;
|
||||
|
@ -1038,17 +1038,6 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
|
||||
return 0;
|
||||
}
|
||||
|
||||
void dm_table_free_md_mempools(struct dm_table *t)
|
||||
{
|
||||
dm_free_md_mempools(t->mempools);
|
||||
t->mempools = NULL;
|
||||
}
|
||||
|
||||
struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
|
||||
{
|
||||
return t->mempools;
|
||||
}
|
||||
|
||||
static int setup_indexes(struct dm_table *t)
|
||||
{
|
||||
int i;
110
drivers/md/dm.c
@ -136,14 +136,6 @@ static int get_swap_bios(void)
|
||||
return latch;
|
||||
}
|
||||
|
||||
/*
|
||||
* For mempools pre-allocation at the table loading time.
|
||||
*/
|
||||
struct dm_md_mempools {
|
||||
struct bio_set bs;
|
||||
struct bio_set io_bs;
|
||||
};
|
||||
|
||||
struct table_device {
|
||||
struct list_head list;
|
||||
refcount_t count;
|
||||
@ -581,7 +573,7 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
|
||||
struct dm_target_io *tio;
|
||||
struct bio *clone;
|
||||
|
||||
clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->io_bs);
|
||||
clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->mempools->io_bs);
|
||||
/* Set default bdev, but target must bio_set_dev() before issuing IO */
|
||||
clone->bi_bdev = md->disk->part0;
|
||||
|
||||
@ -628,7 +620,8 @@ static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
|
||||
} else {
|
||||
struct mapped_device *md = ci->io->md;
|
||||
|
||||
clone = bio_alloc_clone(NULL, ci->bio, gfp_mask, &md->bs);
|
||||
clone = bio_alloc_clone(NULL, ci->bio, gfp_mask,
|
||||
&md->mempools->bs);
|
||||
if (!clone)
|
||||
return NULL;
|
||||
/* Set default bdev, but target must bio_set_dev() before issuing IO */
|
||||
@ -1023,23 +1016,19 @@ static void clone_endio(struct bio *bio)
|
||||
struct dm_io *io = tio->io;
|
||||
struct mapped_device *md = io->md;
|
||||
|
||||
if (likely(bio->bi_bdev != md->disk->part0)) {
|
||||
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
|
||||
|
||||
if (unlikely(error == BLK_STS_TARGET)) {
|
||||
if (bio_op(bio) == REQ_OP_DISCARD &&
|
||||
!bdev_max_discard_sectors(bio->bi_bdev))
|
||||
disable_discard(md);
|
||||
else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
|
||||
!q->limits.max_write_zeroes_sectors)
|
||||
disable_write_zeroes(md);
|
||||
}
|
||||
|
||||
if (static_branch_unlikely(&zoned_enabled) &&
|
||||
unlikely(blk_queue_is_zoned(q)))
|
||||
dm_zone_endio(io, bio);
|
||||
if (unlikely(error == BLK_STS_TARGET)) {
|
||||
if (bio_op(bio) == REQ_OP_DISCARD &&
|
||||
!bdev_max_discard_sectors(bio->bi_bdev))
|
||||
disable_discard(md);
|
||||
else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
|
||||
!bdev_write_zeroes_sectors(bio->bi_bdev))
|
||||
disable_write_zeroes(md);
|
||||
}
|
||||
|
||||
if (static_branch_unlikely(&zoned_enabled) &&
|
||||
unlikely(blk_queue_is_zoned(bdev_get_queue(bio->bi_bdev))))
|
||||
dm_zone_endio(io, bio);
|
||||
|
||||
if (endio) {
|
||||
int r = endio(ti, bio, &error);
|
||||
switch (r) {
|
||||
@ -1876,8 +1865,7 @@ static void cleanup_mapped_device(struct mapped_device *md)
|
||||
{
|
||||
if (md->wq)
|
||||
destroy_workqueue(md->wq);
|
||||
bioset_exit(&md->bs);
|
||||
bioset_exit(&md->io_bs);
|
||||
dm_free_md_mempools(md->mempools);
|
||||
|
||||
if (md->dax_dev) {
|
||||
dax_remove_host(md->disk);
|
||||
@ -2049,48 +2037,6 @@ static void free_dev(struct mapped_device *md)
|
||||
kvfree(md);
|
||||
}
|
||||
|
||||
static int __bind_mempools(struct mapped_device *md, struct dm_table *t)
|
||||
{
|
||||
struct dm_md_mempools *p = dm_table_get_md_mempools(t);
|
||||
int ret = 0;
|
||||
|
||||
if (dm_table_bio_based(t)) {
|
||||
/*
|
||||
* The md may already have mempools that need changing.
|
||||
* If so, reload bioset because front_pad may have changed
|
||||
* because a different table was loaded.
|
||||
*/
|
||||
bioset_exit(&md->bs);
|
||||
bioset_exit(&md->io_bs);
|
||||
|
||||
} else if (bioset_initialized(&md->bs)) {
|
||||
/*
|
||||
* There's no need to reload with request-based dm
|
||||
* because the size of front_pad doesn't change.
|
||||
* Note for future: If you are to reload bioset,
|
||||
* prep-ed requests in the queue may refer
|
||||
* to bio from the old bioset, so you must walk
|
||||
* through the queue to unprep.
|
||||
*/
|
||||
goto out;
|
||||
}
|
||||
|
||||
BUG_ON(!p ||
|
||||
bioset_initialized(&md->bs) ||
|
||||
bioset_initialized(&md->io_bs));
|
||||
|
||||
ret = bioset_init_from_src(&md->bs, &p->bs);
|
||||
if (ret)
|
||||
goto out;
|
||||
ret = bioset_init_from_src(&md->io_bs, &p->io_bs);
|
||||
if (ret)
|
||||
bioset_exit(&md->bs);
|
||||
out:
|
||||
/* mempool bind completed, no longer need any mempools in the table */
|
||||
dm_table_free_md_mempools(t);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Bind a table to the device.
|
||||
*/
|
||||
@ -2144,12 +2090,28 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
|
||||
* immutable singletons - used to optimize dm_mq_queue_rq.
|
||||
*/
|
||||
md->immutable_target = dm_table_get_immutable_target(t);
|
||||
}
|
||||
|
||||
ret = __bind_mempools(md, t);
|
||||
if (ret) {
|
||||
old_map = ERR_PTR(ret);
|
||||
goto out;
|
||||
/*
|
||||
* There is no need to reload with request-based dm because the
|
||||
* size of front_pad doesn't change.
|
||||
*
|
||||
* Note for future: If you are to reload bioset, prep-ed
|
||||
* requests in the queue may refer to bio from the old bioset,
|
||||
* so you must walk through the queue to unprep.
|
||||
*/
|
||||
if (!md->mempools) {
|
||||
md->mempools = t->mempools;
|
||||
t->mempools = NULL;
|
||||
}
|
||||
} else {
|
||||
/*
|
||||
* The md may already have mempools that need changing.
|
||||
* If so, reload bioset because front_pad may have changed
|
||||
* because a different table was loaded.
|
||||
*/
|
||||
dm_free_md_mempools(md->mempools);
|
||||
md->mempools = t->mempools;
|
||||
t->mempools = NULL;
|
||||
}
|
||||
|
||||
ret = dm_table_set_restrictions(t, md->queue, limits);
|
||||
|
@ -71,8 +71,6 @@ struct dm_target *dm_table_get_immutable_target(struct dm_table *t);
|
||||
struct dm_target *dm_table_get_wildcard_target(struct dm_table *t);
|
||||
bool dm_table_bio_based(struct dm_table *t);
|
||||
bool dm_table_request_based(struct dm_table *t);
|
||||
void dm_table_free_md_mempools(struct dm_table *t);
|
||||
struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
|
||||
|
||||
void dm_lock_md_type(struct mapped_device *md);
|
||||
void dm_unlock_md_type(struct mapped_device *md);
|
||||
|
@ -338,7 +338,7 @@ static int xgbe_platform_probe(struct platform_device *pdev)
|
||||
* the PHY resources listed last
|
||||
*/
|
||||
phy_memnum = xgbe_resource_count(pdev, IORESOURCE_MEM) - 3;
|
||||
phy_irqnum = xgbe_resource_count(pdev, IORESOURCE_IRQ) - 1;
|
||||
phy_irqnum = platform_irq_count(pdev) - 1;
|
||||
dma_irqnum = 1;
|
||||
dma_irqend = phy_irqnum;
|
||||
} else {
|
||||
@ -348,7 +348,7 @@ static int xgbe_platform_probe(struct platform_device *pdev)
|
||||
phy_memnum = 0;
|
||||
phy_irqnum = 0;
|
||||
dma_irqnum = 1;
|
||||
dma_irqend = xgbe_resource_count(pdev, IORESOURCE_IRQ);
|
||||
dma_irqend = platform_irq_count(pdev);
|
||||
}
|
||||
|
||||
/* Obtain the mmio areas for the device */
|
||||
|
@ -332,7 +332,6 @@ static void bgmac_remove(struct bcma_device *core)
|
||||
bcma_mdio_mii_unregister(bgmac->mii_bus);
|
||||
bgmac_enet_remove(bgmac);
|
||||
bcma_set_drvdata(core, NULL);
|
||||
kfree(bgmac);
|
||||
}
|
||||
|
||||
static struct bcma_driver bgmac_bcma_driver = {
|
||||
|
@ -769,6 +769,7 @@ struct hnae3_tc_info {
|
||||
u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */
|
||||
u16 tqp_count[HNAE3_MAX_TC];
|
||||
u16 tqp_offset[HNAE3_MAX_TC];
|
||||
u8 max_tc; /* Total number of TCs */
|
||||
u8 num_tc; /* Total number of enabled TCs */
|
||||
bool mqprio_active;
|
||||
};
|
||||
|
@ -1129,7 +1129,7 @@ hns3_is_ringparam_changed(struct net_device *ndev,
|
||||
if (old_ringparam->tx_desc_num == new_ringparam->tx_desc_num &&
|
||||
old_ringparam->rx_desc_num == new_ringparam->rx_desc_num &&
|
||||
old_ringparam->rx_buf_len == new_ringparam->rx_buf_len) {
|
||||
netdev_info(ndev, "ringparam not changed\n");
|
||||
netdev_info(ndev, "descriptor number and rx buffer length not changed\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -3268,7 +3268,7 @@ static int hclge_tp_port_init(struct hclge_dev *hdev)
|
||||
static int hclge_update_port_info(struct hclge_dev *hdev)
|
||||
{
|
||||
struct hclge_mac *mac = &hdev->hw.mac;
|
||||
int speed = HCLGE_MAC_SPEED_UNKNOWN;
|
||||
int speed;
|
||||
int ret;
|
||||
|
||||
/* get the port info from SFP cmd if not copper port */
|
||||
@ -3279,10 +3279,13 @@ static int hclge_update_port_info(struct hclge_dev *hdev)
|
||||
if (!hdev->support_sfp_query)
|
||||
return 0;
|
||||
|
||||
if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
|
||||
if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
|
||||
speed = mac->speed;
|
||||
ret = hclge_get_sfp_info(hdev, mac);
|
||||
else
|
||||
} else {
|
||||
speed = HCLGE_MAC_SPEED_UNKNOWN;
|
||||
ret = hclge_get_sfp_speed(hdev, &speed);
|
||||
}
|
||||
|
||||
if (ret == -EOPNOTSUPP) {
|
||||
hdev->support_sfp_query = false;
|
||||
@ -3294,6 +3297,8 @@ static int hclge_update_port_info(struct hclge_dev *hdev)
|
||||
if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
|
||||
if (mac->speed_type == QUERY_ACTIVE_SPEED) {
|
||||
hclge_update_port_capability(hdev, mac);
|
||||
if (mac->speed != speed)
|
||||
(void)hclge_tm_port_shaper_cfg(hdev);
|
||||
return 0;
|
||||
}
|
||||
return hclge_cfg_mac_speed_dup(hdev, mac->speed,
|
||||
@ -3376,6 +3381,12 @@ static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
|
||||
link_state_old = vport->vf_info.link_state;
|
||||
vport->vf_info.link_state = link_state;
|
||||
|
||||
/* return success directly if the VF is unalive, VF will
|
||||
* query link state itself when it starts work.
|
||||
*/
|
||||
if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
|
||||
return 0;
|
||||
|
||||
ret = hclge_push_vf_link_status(vport);
|
||||
if (ret) {
|
||||
vport->vf_info.link_state = link_state_old;
|
||||
@ -10117,6 +10128,7 @@ static int hclge_modify_port_base_vlan_tag(struct hclge_vport *vport,
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
vport->port_base_vlan_cfg.tbl_sta = false;
|
||||
/* remove old VLAN tag */
|
||||
if (old_info->vlan_tag == 0)
|
||||
ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
|
||||
|
@ -282,8 +282,8 @@ static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
|
||||
return hclge_cmd_send(&hdev->hw, &desc, 1);
|
||||
}
|
||||
|
||||
static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
|
||||
u16 qs_id, u8 pri)
|
||||
static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev, u16 qs_id, u8 pri,
|
||||
bool link_vld)
|
||||
{
|
||||
struct hclge_qs_to_pri_link_cmd *map;
|
||||
struct hclge_desc desc;
|
||||
@ -294,7 +294,7 @@ static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
|
||||
|
||||
map->qs_id = cpu_to_le16(qs_id);
|
||||
map->priority = pri;
|
||||
map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK;
|
||||
map->link_vld = link_vld ? HCLGE_TM_QS_PRI_LINK_VLD_MSK : 0;
|
||||
|
||||
return hclge_cmd_send(&hdev->hw, &desc, 1);
|
||||
}
|
||||
@ -420,7 +420,7 @@ static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
|
||||
return hclge_cmd_send(&hdev->hw, &desc, 1);
|
||||
}
|
||||
|
||||
static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
|
||||
int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
|
||||
{
|
||||
struct hclge_port_shapping_cmd *shap_cfg_cmd;
|
||||
struct hclge_shaper_ir_para ir_para;
|
||||
@ -642,11 +642,13 @@ static void hclge_tm_update_kinfo_rss_size(struct hclge_vport *vport)
|
||||
* one tc for VF for simplicity. VF's vport_id is non zero.
|
||||
*/
|
||||
if (vport->vport_id) {
|
||||
kinfo->tc_info.max_tc = 1;
|
||||
kinfo->tc_info.num_tc = 1;
|
||||
vport->qs_offset = HNAE3_MAX_TC +
|
||||
vport->vport_id - HCLGE_VF_VPORT_START_NUM;
|
||||
vport_max_rss_size = hdev->vf_rss_size_max;
|
||||
} else {
|
||||
kinfo->tc_info.max_tc = hdev->tc_max;
|
||||
kinfo->tc_info.num_tc =
|
||||
min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
|
||||
vport->qs_offset = 0;
|
||||
@ -679,7 +681,9 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
|
||||
kinfo->num_tqps = hclge_vport_get_tqp_num(vport);
|
||||
vport->dwrr = 100; /* 100 percent as init */
|
||||
vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
|
||||
hdev->rss_cfg.rss_size = kinfo->rss_size;
|
||||
|
||||
if (vport->vport_id == PF_VPORT_ID)
|
||||
hdev->rss_cfg.rss_size = kinfo->rss_size;
|
||||
|
||||
/* when enable mqprio, the tc_info has been updated. */
|
||||
if (kinfo->tc_info.mqprio_active)
|
||||
@ -714,14 +718,22 @@ static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
|
||||
|
||||
static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
|
||||
{
|
||||
u8 i;
|
||||
u8 i, tc_sch_mode;
|
||||
u32 bw_limit;
|
||||
|
||||
for (i = 0; i < hdev->tc_max; i++) {
|
||||
if (i < hdev->tm_info.num_tc) {
|
||||
tc_sch_mode = HCLGE_SCH_MODE_DWRR;
|
||||
bw_limit = hdev->tm_info.pg_info[0].bw_limit;
|
||||
} else {
|
||||
tc_sch_mode = HCLGE_SCH_MODE_SP;
|
||||
bw_limit = 0;
|
||||
}
|
||||
|
||||
for (i = 0; i < hdev->tm_info.num_tc; i++) {
|
||||
hdev->tm_info.tc_info[i].tc_id = i;
|
||||
hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
|
||||
hdev->tm_info.tc_info[i].tc_sch_mode = tc_sch_mode;
|
||||
hdev->tm_info.tc_info[i].pgid = 0;
|
||||
hdev->tm_info.tc_info[i].bw_limit =
|
||||
hdev->tm_info.pg_info[0].bw_limit;
|
||||
hdev->tm_info.tc_info[i].bw_limit = bw_limit;
|
||||
}
|
||||
|
||||
for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
|
||||
@ -926,10 +938,13 @@ static int hclge_tm_pri_q_qs_cfg_tc_base(struct hclge_dev *hdev)
|
||||
for (k = 0; k < hdev->num_alloc_vport; k++) {
|
||||
struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo;
|
||||
|
||||
for (i = 0; i < kinfo->tc_info.num_tc; i++) {
|
||||
for (i = 0; i < kinfo->tc_info.max_tc; i++) {
|
||||
u8 pri = i < kinfo->tc_info.num_tc ? i : 0;
|
||||
bool link_vld = i < kinfo->tc_info.num_tc;
|
||||
|
||||
ret = hclge_tm_qs_to_pri_map_cfg(hdev,
|
||||
vport[k].qs_offset + i,
|
||||
i);
|
||||
pri, link_vld);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
@ -949,7 +964,7 @@ static int hclge_tm_pri_q_qs_cfg_vnet_base(struct hclge_dev *hdev)
|
||||
for (i = 0; i < HNAE3_MAX_TC; i++) {
|
||||
ret = hclge_tm_qs_to_pri_map_cfg(hdev,
|
||||
vport[k].qs_offset + i,
|
||||
k);
|
||||
k, true);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
@ -989,33 +1004,39 @@ static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
|
||||
{
|
||||
u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
|
||||
struct hclge_shaper_ir_para ir_para;
|
||||
u32 shaper_para;
|
||||
u32 shaper_para_c, shaper_para_p;
|
||||
int ret;
|
||||
u32 i;
|
||||
|
||||
for (i = 0; i < hdev->tm_info.num_tc; i++) {
|
||||
for (i = 0; i < hdev->tc_max; i++) {
|
||||
u32 rate = hdev->tm_info.tc_info[i].bw_limit;
|
||||
|
||||
ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PRI,
|
||||
&ir_para, max_tm_rate);
|
||||
if (ret)
|
||||
return ret;
|
||||
if (rate) {
|
||||
ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PRI,
|
||||
&ir_para, max_tm_rate);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
shaper_para_c = hclge_tm_get_shapping_para(0, 0, 0,
|
||||
HCLGE_SHAPER_BS_U_DEF,
|
||||
HCLGE_SHAPER_BS_S_DEF);
|
||||
shaper_para_p = hclge_tm_get_shapping_para(ir_para.ir_b,
|
||||
ir_para.ir_u,
|
||||
ir_para.ir_s,
|
||||
HCLGE_SHAPER_BS_U_DEF,
|
||||
HCLGE_SHAPER_BS_S_DEF);
|
||||
} else {
|
||||
shaper_para_c = 0;
|
||||
shaper_para_p = 0;
|
||||
}
|
||||
|
||||
shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
|
||||
HCLGE_SHAPER_BS_U_DEF,
|
||||
HCLGE_SHAPER_BS_S_DEF);
|
||||
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i,
|
||||
shaper_para, rate);
|
||||
shaper_para_c, rate);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b,
|
||||
ir_para.ir_u,
|
||||
ir_para.ir_s,
|
||||
HCLGE_SHAPER_BS_U_DEF,
|
||||
HCLGE_SHAPER_BS_S_DEF);
|
||||
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
|
||||
shaper_para, rate);
|
||||
shaper_para_p, rate);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
@ -1125,7 +1146,7 @@ static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
|
||||
int ret;
|
||||
u32 i, k;
|
||||
|
||||
for (i = 0; i < hdev->tm_info.num_tc; i++) {
|
||||
for (i = 0; i < hdev->tc_max; i++) {
|
||||
pg_info =
|
||||
&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
|
||||
dwrr = pg_info->tc_dwrr[i];
|
||||
@ -1135,9 +1156,15 @@ static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
|
||||
return ret;
|
||||
|
||||
for (k = 0; k < hdev->num_alloc_vport; k++) {
|
||||
struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo;
|
||||
|
||||
if (i >= kinfo->tc_info.max_tc)
|
||||
continue;
|
||||
|
||||
dwrr = i < kinfo->tc_info.num_tc ? vport[k].dwrr : 0;
|
||||
ret = hclge_tm_qs_weight_cfg(
|
||||
hdev, vport[k].qs_offset + i,
|
||||
vport[k].dwrr);
|
||||
dwrr);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
@ -1303,6 +1330,7 @@ static int hclge_tm_schd_mode_tc_base_cfg(struct hclge_dev *hdev, u8 pri_id)
|
||||
{
|
||||
struct hclge_vport *vport = hdev->vport;
|
||||
int ret;
|
||||
u8 mode;
|
||||
u16 i;
|
||||
|
||||
ret = hclge_tm_pri_schd_mode_cfg(hdev, pri_id);
|
||||
@ -1310,9 +1338,16 @@ static int hclge_tm_schd_mode_tc_base_cfg(struct hclge_dev *hdev, u8 pri_id)
|
||||
return ret;
|
||||
|
||||
for (i = 0; i < hdev->num_alloc_vport; i++) {
|
||||
struct hnae3_knic_private_info *kinfo = &vport[i].nic.kinfo;
|
||||
|
||||
if (pri_id >= kinfo->tc_info.max_tc)
|
||||
continue;
|
||||
|
||||
mode = pri_id < kinfo->tc_info.num_tc ? HCLGE_SCH_MODE_DWRR :
|
||||
HCLGE_SCH_MODE_SP;
|
||||
ret = hclge_tm_qs_schd_mode_cfg(hdev,
|
||||
vport[i].qs_offset + pri_id,
|
||||
HCLGE_SCH_MODE_DWRR);
|
||||
mode);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
@ -1353,7 +1388,7 @@ static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
|
||||
u8 i;
|
||||
|
||||
if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
|
||||
for (i = 0; i < hdev->tm_info.num_tc; i++) {
|
||||
for (i = 0; i < hdev->tc_max; i++) {
|
||||
ret = hclge_tm_schd_mode_tc_base_cfg(hdev, i);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -237,6 +237,7 @@ int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr);
|
||||
void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats);
|
||||
void hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats);
|
||||
int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate);
|
||||
int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev);
|
||||
int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num);
|
||||
int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num);
|
||||
int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority,
|
||||
|
@ -2588,15 +2588,16 @@ static void i40e_diag_test(struct net_device *netdev,
|
||||
|
||||
set_bit(__I40E_TESTING, pf->state);
|
||||
|
||||
if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
|
||||
test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
|
||||
dev_warn(&pf->pdev->dev,
|
||||
"Cannot start offline testing when PF is in reset state.\n");
|
||||
goto skip_ol_tests;
|
||||
}
|
||||
|
||||
if (i40e_active_vfs(pf) || i40e_active_vmdqs(pf)) {
|
||||
dev_warn(&pf->pdev->dev,
|
||||
"Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n");
|
||||
data[I40E_ETH_TEST_REG] = 1;
|
||||
data[I40E_ETH_TEST_EEPROM] = 1;
|
||||
data[I40E_ETH_TEST_INTR] = 1;
|
||||
data[I40E_ETH_TEST_LINK] = 1;
|
||||
eth_test->flags |= ETH_TEST_FL_FAILED;
|
||||
clear_bit(__I40E_TESTING, pf->state);
|
||||
goto skip_ol_tests;
|
||||
}
|
||||
|
||||
@ -2643,9 +2644,17 @@ static void i40e_diag_test(struct net_device *netdev,
|
||||
data[I40E_ETH_TEST_INTR] = 0;
|
||||
}
|
||||
|
||||
skip_ol_tests:
|
||||
|
||||
netif_info(pf, drv, netdev, "testing finished\n");
|
||||
return;
|
||||
|
||||
skip_ol_tests:
|
||||
data[I40E_ETH_TEST_REG] = 1;
|
||||
data[I40E_ETH_TEST_EEPROM] = 1;
|
||||
data[I40E_ETH_TEST_INTR] = 1;
|
||||
data[I40E_ETH_TEST_LINK] = 1;
|
||||
eth_test->flags |= ETH_TEST_FL_FAILED;
|
||||
clear_bit(__I40E_TESTING, pf->state);
|
||||
netif_info(pf, drv, netdev, "testing failed\n");
|
||||
}
|
||||
|
||||
static void i40e_get_wol(struct net_device *netdev,
|
||||
|
@ -8667,6 +8667,11 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi,
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
if (!tc) {
|
||||
dev_err(&pf->pdev->dev, "Unable to add filter because of invalid destination");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
|
||||
test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
|
||||
return -EBUSY;
|
||||
|
@ -2282,7 +2282,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
|
||||
}
|
||||
|
||||
if (vf->adq_enabled) {
|
||||
for (i = 0; i < I40E_MAX_VF_VSI; i++)
|
||||
for (i = 0; i < vf->num_tc; i++)
|
||||
num_qps_all += vf->ch[i].num_qps;
|
||||
if (num_qps_all != qci->num_queue_pairs) {
|
||||
aq_ret = I40E_ERR_PARAM;
|
||||
|
@ -985,7 +985,7 @@ struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
|
||||
f->add = true;
|
||||
f->add_handled = false;
|
||||
f->is_new_mac = true;
|
||||
f->is_primary = false;
|
||||
f->is_primary = ether_addr_equal(macaddr, adapter->hw.mac.addr);
|
||||
adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
|
||||
} else {
|
||||
f->remove = false;
|
||||
|
@ -5763,25 +5763,38 @@ static netdev_features_t
|
||||
ice_fix_features(struct net_device *netdev, netdev_features_t features)
|
||||
{
|
||||
struct ice_netdev_priv *np = netdev_priv(netdev);
|
||||
netdev_features_t supported_vlan_filtering;
|
||||
netdev_features_t requested_vlan_filtering;
|
||||
struct ice_vsi *vsi = np->vsi;
|
||||
netdev_features_t req_vlan_fltr, cur_vlan_fltr;
|
||||
bool cur_ctag, cur_stag, req_ctag, req_stag;
|
||||
|
||||
requested_vlan_filtering = features & NETIF_VLAN_FILTERING_FEATURES;
|
||||
cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES;
|
||||
cur_ctag = cur_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
|
||||
cur_stag = cur_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
|
||||
|
||||
/* make sure supported_vlan_filtering works for both SVM and DVM */
|
||||
supported_vlan_filtering = NETIF_F_HW_VLAN_CTAG_FILTER;
|
||||
if (ice_is_dvm_ena(&vsi->back->hw))
|
||||
supported_vlan_filtering |= NETIF_F_HW_VLAN_STAG_FILTER;
|
||||
req_vlan_fltr = features & NETIF_VLAN_FILTERING_FEATURES;
|
||||
req_ctag = req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
|
||||
req_stag = req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
|
||||
|
||||
if (requested_vlan_filtering &&
|
||||
requested_vlan_filtering != supported_vlan_filtering) {
|
||||
if (requested_vlan_filtering & NETIF_F_HW_VLAN_CTAG_FILTER) {
|
||||
netdev_warn(netdev, "cannot support requested VLAN filtering settings, enabling all supported VLAN filtering settings\n");
|
||||
features |= supported_vlan_filtering;
|
||||
if (req_vlan_fltr != cur_vlan_fltr) {
|
||||
if (ice_is_dvm_ena(&np->vsi->back->hw)) {
|
||||
if (req_ctag && req_stag) {
|
||||
features |= NETIF_VLAN_FILTERING_FEATURES;
|
||||
} else if (!req_ctag && !req_stag) {
|
||||
features &= ~NETIF_VLAN_FILTERING_FEATURES;
|
||||
} else if ((!cur_ctag && req_ctag && !cur_stag) ||
|
||||
(!cur_stag && req_stag && !cur_ctag)) {
|
||||
features |= NETIF_VLAN_FILTERING_FEATURES;
|
||||
netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been enabled for both types.\n");
|
||||
} else if ((cur_ctag && !req_ctag && cur_stag) ||
|
||||
(cur_stag && !req_stag && cur_ctag)) {
|
||||
features &= ~NETIF_VLAN_FILTERING_FEATURES;
|
||||
netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been disabled for both types.\n");
|
||||
}
|
||||
} else {
|
||||
netdev_warn(netdev, "cannot support requested VLAN filtering settings, clearing all supported VLAN filtering settings\n");
|
||||
features &= ~supported_vlan_filtering;
|
||||
if (req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER)
|
||||
netdev_warn(netdev, "cannot support requested 802.1ad filtering setting in SVM mode\n");
|
||||
|
||||
if (req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER)
|
||||
features |= NETIF_F_HW_VLAN_CTAG_FILTER;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2271,7 +2271,7 @@ static int
|
||||
ice_ptp_init_tx_e822(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
|
||||
{
|
||||
tx->quad = port / ICE_PORTS_PER_QUAD;
|
||||
tx->quad_offset = tx->quad * INDEX_PER_PORT;
|
||||
tx->quad_offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT;
|
||||
tx->len = INDEX_PER_PORT;
|
||||
|
||||
return ice_ptp_alloc_tx_tracker(tx);
|
||||
|
@ -49,6 +49,37 @@ struct ice_perout_channel {
|
||||
* To allow multiple ports to access the shared register block independently,
|
||||
* the blocks are split up so that indexes are assigned to each port based on
|
||||
* hardware logical port number.
|
||||
*
|
||||
* The timestamp blocks are handled differently for E810- and E822-based
|
||||
* devices. In E810 devices, each port has its own block of timestamps, while in
|
||||
* E822 there is a need to logically break the block of registers into smaller
|
||||
* chunks based on the port number to avoid collisions.
|
||||
*
|
||||
* Example for port 5 in E810:
|
||||
* +--------+--------+--------+--------+--------+--------+--------+--------+
|
||||
* |register|register|register|register|register|register|register|register|
|
||||
* | block | block | block | block | block | block | block | block |
|
||||
* | for | for | for | for | for | for | for | for |
|
||||
* | port 0 | port 1 | port 2 | port 3 | port 4 | port 5 | port 6 | port 7 |
|
||||
* +--------+--------+--------+--------+--------+--------+--------+--------+
|
||||
* ^^
|
||||
* ||
|
||||
* |--- quad offset is always 0
|
||||
* ---- quad number
|
||||
*
|
||||
* Example for port 5 in E822:
|
||||
* +-----------------------------+-----------------------------+
|
||||
* | register block for quad 0 | register block for quad 1 |
|
||||
* |+------+------+------+------+|+------+------+------+------+|
|
||||
* ||port 0|port 1|port 2|port 3|||port 0|port 1|port 2|port 3||
|
||||
* |+------+------+------+------+|+------+------+------+------+|
|
||||
* +-----------------------------+-------^---------------------+
|
||||
* ^ |
|
||||
* | --- quad offset*
|
||||
* ---- quad number
|
||||
*
|
||||
* * PHY port 5 is port 1 in quad 1
|
||||
*
|
||||
*/
|
||||
|
||||
/**
|
||||
|
@ -504,6 +504,11 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
|
||||
}
|
||||
|
||||
if (ice_is_vf_disabled(vf)) {
|
||||
vsi = ice_get_vf_vsi(vf);
|
||||
if (WARN_ON(!vsi))
|
||||
return -EINVAL;
|
||||
ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
|
||||
ice_vsi_stop_all_rx_rings(vsi);
|
||||
dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
|
||||
vf->vf_id);
|
||||
return 0;
|
||||
|
@ -1592,35 +1592,27 @@ error_param:
|
||||
*/
|
||||
static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
|
||||
{
|
||||
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
|
||||
struct virtchnl_vsi_queue_config_info *qci =
|
||||
(struct virtchnl_vsi_queue_config_info *)msg;
|
||||
struct virtchnl_queue_pair_info *qpi;
|
||||
struct ice_pf *pf = vf->pf;
|
||||
struct ice_vsi *vsi;
|
||||
int i, q_idx;
|
||||
int i = -1, q_idx;
|
||||
|
||||
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
|
||||
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
|
||||
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
|
||||
goto error_param;
|
||||
}
|
||||
|
||||
if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
|
||||
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
|
||||
if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id))
|
||||
goto error_param;
|
||||
}
|
||||
|
||||
vsi = ice_get_vf_vsi(vf);
|
||||
if (!vsi) {
|
||||
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
|
||||
if (!vsi)
|
||||
goto error_param;
|
||||
}
|
||||
|
||||
if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
|
||||
qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
|
||||
dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
|
||||
vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
|
||||
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
|
||||
goto error_param;
|
||||
}
|
||||
|
||||
@ -1633,7 +1625,6 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
|
||||
!ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
|
||||
!ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
|
||||
!ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
|
||||
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
|
||||
goto error_param;
|
||||
}
|
||||
|
||||
@ -1643,7 +1634,6 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
|
||||
* for selected "vsi"
|
||||
*/
|
||||
if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) {
|
||||
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
|
||||
goto error_param;
|
||||
}
|
||||
|
||||
@ -1653,14 +1643,13 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
|
||||
vsi->tx_rings[i]->count = qpi->txq.ring_len;
|
||||
|
||||
/* Disable any existing queue first */
|
||||
if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx)) {
|
||||
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
|
||||
if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx))
|
||||
goto error_param;
|
||||
}
|
||||
|
||||
/* Configure a queue with the requested settings */
|
||||
if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
|
||||
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
|
||||
dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure TX queue %d\n",
|
||||
vf->vf_id, i);
|
||||
goto error_param;
|
||||
}
|
||||
}
|
||||
@ -1674,17 +1663,13 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
|
||||
|
||||
if (qpi->rxq.databuffer_size != 0 &&
|
||||
(qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
|
||||
qpi->rxq.databuffer_size < 1024)) {
|
||||
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
|
||||
qpi->rxq.databuffer_size < 1024))
|
||||
goto error_param;
|
||||
}
|
||||
vsi->rx_buf_len = qpi->rxq.databuffer_size;
|
||||
vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
|
||||
if (qpi->rxq.max_pkt_size > max_frame_size ||
|
||||
qpi->rxq.max_pkt_size < 64) {
|
||||
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
|
||||
qpi->rxq.max_pkt_size < 64)
|
||||
goto error_param;
|
||||
}
|
||||
|
||||
vsi->max_frame = qpi->rxq.max_pkt_size;
|
||||
/* add space for the port VLAN since the VF driver is
|
||||
@ -1695,16 +1680,30 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
|
||||
vsi->max_frame += VLAN_HLEN;
|
||||
|
||||
if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
|
||||
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
|
||||
dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure RX queue %d\n",
|
||||
vf->vf_id, i);
|
||||
goto error_param;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
error_param:
|
||||
/* send the response to the VF */
|
||||
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret,
|
||||
NULL, 0);
|
||||
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
|
||||
VIRTCHNL_STATUS_SUCCESS, NULL, 0);
|
||||
error_param:
|
||||
/* disable whatever we can */
|
||||
for (; i >= 0; i--) {
|
||||
if (ice_vsi_ctrl_one_rx_ring(vsi, false, i, true))
|
||||
dev_err(ice_pf_to_dev(pf), "VF-%d could not disable RX queue %d\n",
|
||||
vf->vf_id, i);
|
||||
if (ice_vf_vsi_dis_single_txq(vf, vsi, i))
|
||||
dev_err(ice_pf_to_dev(pf), "VF-%d could not disable TX queue %d\n",
|
||||
vf->vf_id, i);
|
||||
}
|
||||
|
||||
/* send the response to the VF */
|
||||
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
|
||||
VIRTCHNL_STATUS_ERR_PARAM, NULL, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1390,7 +1390,8 @@ static int otx2vf_get_link_ksettings(struct net_device *netdev,
|
||||
|
||||
static const struct ethtool_ops otx2vf_ethtool_ops = {
|
||||
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
|
||||
ETHTOOL_COALESCE_MAX_FRAMES,
|
||||
ETHTOOL_COALESCE_MAX_FRAMES |
|
||||
ETHTOOL_COALESCE_USE_ADAPTIVE,
|
||||
.supported_ring_params = ETHTOOL_RING_USE_RX_BUF_LEN |
|
||||
ETHTOOL_RING_USE_CQE_SIZE,
|
||||
.get_link = otx2_get_link,
|
||||
|
@ -8,8 +8,8 @@
|
||||
#include "spectrum.h"
|
||||
|
||||
enum mlxsw_sp_counter_sub_pool_id {
|
||||
MLXSW_SP_COUNTER_SUB_POOL_FLOW,
|
||||
MLXSW_SP_COUNTER_SUB_POOL_RIF,
|
||||
MLXSW_SP_COUNTER_SUB_POOL_FLOW,
|
||||
};
|
||||
|
||||
int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp,
|
||||
|
@ -547,6 +547,57 @@ static inline void axienet_iow(struct axienet_local *lp, off_t offset,
|
||||
iowrite32(value, lp->regs + offset);
|
||||
}
|
||||
|
||||
/**
|
||||
* axienet_dma_out32 - Memory mapped Axi DMA register write.
|
||||
* @lp: Pointer to axienet local structure
|
||||
* @reg: Address offset from the base address of the Axi DMA core
|
||||
* @value: Value to be written into the Axi DMA register
|
||||
*
|
||||
* This function writes the desired value into the corresponding Axi DMA
|
||||
* register.
|
||||
*/
|
||||
|
||||
static inline void axienet_dma_out32(struct axienet_local *lp,
|
||||
off_t reg, u32 value)
|
||||
{
|
||||
iowrite32(value, lp->dma_regs + reg);
|
||||
}
|
||||
|
||||
#if defined(CONFIG_64BIT) && defined(iowrite64)
|
||||
/**
|
||||
* axienet_dma_out64 - Memory mapped Axi DMA register write.
|
||||
* @lp: Pointer to axienet local structure
|
||||
* @reg: Address offset from the base address of the Axi DMA core
|
||||
* @value: Value to be written into the Axi DMA register
|
||||
*
|
||||
* This function writes the desired value into the corresponding Axi DMA
|
||||
* register.
|
||||
*/
|
||||
static inline void axienet_dma_out64(struct axienet_local *lp,
|
||||
off_t reg, u64 value)
|
||||
{
|
||||
iowrite64(value, lp->dma_regs + reg);
|
||||
}
|
||||
|
||||
static inline void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
|
||||
dma_addr_t addr)
|
||||
{
|
||||
if (lp->features & XAE_FEATURE_DMA_64BIT)
|
||||
axienet_dma_out64(lp, reg, addr);
|
||||
else
|
||||
axienet_dma_out32(lp, reg, lower_32_bits(addr));
|
||||
}
|
||||
|
||||
#else /* CONFIG_64BIT */
|
||||
|
||||
static inline void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
|
||||
dma_addr_t addr)
|
||||
{
|
||||
axienet_dma_out32(lp, reg, lower_32_bits(addr));
|
||||
}
|
||||
|
||||
#endif /* CONFIG_64BIT */
|
||||
|
||||
/* Function prototypes visible in xilinx_axienet_mdio.c for other files */
|
||||
int axienet_mdio_enable(struct axienet_local *lp);
|
||||
void axienet_mdio_disable(struct axienet_local *lp);
|
||||
|
@ -133,30 +133,6 @@ static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
|
||||
return ioread32(lp->dma_regs + reg);
|
||||
}
|
||||
|
||||
/**
|
||||
* axienet_dma_out32 - Memory mapped Axi DMA register write.
|
||||
* @lp: Pointer to axienet local structure
|
||||
* @reg: Address offset from the base address of the Axi DMA core
|
||||
* @value: Value to be written into the Axi DMA register
|
||||
*
|
||||
* This function writes the desired value into the corresponding Axi DMA
|
||||
* register.
|
||||
*/
|
||||
static inline void axienet_dma_out32(struct axienet_local *lp,
|
||||
off_t reg, u32 value)
|
||||
{
|
||||
iowrite32(value, lp->dma_regs + reg);
|
||||
}
|
||||
|
||||
static void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
|
||||
dma_addr_t addr)
|
||||
{
|
||||
axienet_dma_out32(lp, reg, lower_32_bits(addr));
|
||||
|
||||
if (lp->features & XAE_FEATURE_DMA_64BIT)
|
||||
axienet_dma_out32(lp, reg + 4, upper_32_bits(addr));
|
||||
}
|
||||
|
||||
static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
|
||||
struct axidma_bd *desc)
|
||||
{
|
||||
@ -2061,6 +2037,11 @@ static int axienet_probe(struct platform_device *pdev)
|
||||
iowrite32(0x0, desc);
|
||||
}
|
||||
}
|
||||
if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
|
||||
dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit archecture\n");
|
||||
ret = -EINVAL;
|
||||
goto cleanup_clk;
|
||||
}
|
||||
|
||||
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
|
||||
if (ret) {
|
||||
|
@ -1750,7 +1750,7 @@ static const struct driver_info ax88179_info = {
|
||||
.link_reset = ax88179_link_reset,
|
||||
.reset = ax88179_reset,
|
||||
.stop = ax88179_stop,
|
||||
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
|
||||
.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
|
||||
.rx_fixup = ax88179_rx_fixup,
|
||||
.tx_fixup = ax88179_tx_fixup,
|
||||
};
|
||||
@ -1763,7 +1763,7 @@ static const struct driver_info ax88178a_info = {
|
||||
.link_reset = ax88179_link_reset,
|
||||
.reset = ax88179_reset,
|
||||
.stop = ax88179_stop,
|
||||
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
|
||||
.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
|
||||
.rx_fixup = ax88179_rx_fixup,
|
||||
.tx_fixup = ax88179_tx_fixup,
|
||||
};
|
||||
@ -1776,7 +1776,7 @@ static const struct driver_info cypress_GX3_info = {
|
||||
.link_reset = ax88179_link_reset,
|
||||
.reset = ax88179_reset,
|
||||
.stop = ax88179_stop,
|
||||
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
|
||||
.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
|
||||
.rx_fixup = ax88179_rx_fixup,
|
||||
.tx_fixup = ax88179_tx_fixup,
|
||||
};
|
||||
@ -1789,7 +1789,7 @@ static const struct driver_info dlink_dub1312_info = {
|
||||
.link_reset = ax88179_link_reset,
|
||||
.reset = ax88179_reset,
|
||||
.stop = ax88179_stop,
|
||||
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
|
||||
.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
|
||||
.rx_fixup = ax88179_rx_fixup,
|
||||
.tx_fixup = ax88179_tx_fixup,
|
||||
};
|
||||
@ -1802,7 +1802,7 @@ static const struct driver_info sitecom_info = {
|
||||
.link_reset = ax88179_link_reset,
|
||||
.reset = ax88179_reset,
|
||||
.stop = ax88179_stop,
|
||||
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
|
||||
.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
|
||||
.rx_fixup = ax88179_rx_fixup,
|
||||
.tx_fixup = ax88179_tx_fixup,
|
||||
};
|
||||
@ -1815,7 +1815,7 @@ static const struct driver_info samsung_info = {
|
||||
.link_reset = ax88179_link_reset,
|
||||
.reset = ax88179_reset,
|
||||
.stop = ax88179_stop,
|
||||
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
|
||||
.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
|
||||
.rx_fixup = ax88179_rx_fixup,
|
||||
.tx_fixup = ax88179_tx_fixup,
|
||||
};
|
||||
@ -1828,7 +1828,7 @@ static const struct driver_info lenovo_info = {
|
||||
.link_reset = ax88179_link_reset,
|
||||
.reset = ax88179_reset,
|
||||
.stop = ax88179_stop,
|
||||
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
|
||||
.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
|
||||
.rx_fixup = ax88179_rx_fixup,
|
||||
.tx_fixup = ax88179_tx_fixup,
|
||||
};
|
||||
@ -1841,7 +1841,7 @@ static const struct driver_info belkin_info = {
|
||||
.link_reset = ax88179_link_reset,
|
||||
.reset = ax88179_reset,
|
||||
.stop = ax88179_stop,
|
||||
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
|
||||
.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
|
||||
.rx_fixup = ax88179_rx_fixup,
|
||||
.tx_fixup = ax88179_tx_fixup,
|
||||
};
|
||||
@ -1854,7 +1854,7 @@ static const struct driver_info toshiba_info = {
|
||||
.link_reset = ax88179_link_reset,
|
||||
.reset = ax88179_reset,
|
||||
.stop = ax88179_stop,
|
||||
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
|
||||
.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
|
||||
.rx_fixup = ax88179_rx_fixup,
|
||||
.tx_fixup = ax88179_tx_fixup,
|
||||
};
|
||||
@ -1867,7 +1867,7 @@ static const struct driver_info mct_info = {
|
||||
.link_reset = ax88179_link_reset,
|
||||
.reset = ax88179_reset,
|
||||
.stop = ax88179_stop,
|
||||
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
|
||||
.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
|
||||
.rx_fixup = ax88179_rx_fixup,
|
||||
.tx_fixup = ax88179_tx_fixup,
|
||||
};
|
||||
@ -1880,7 +1880,7 @@ static const struct driver_info at_umc2000_info = {
|
||||
.link_reset = ax88179_link_reset,
|
||||
.reset = ax88179_reset,
|
||||
.stop = ax88179_stop,
|
||||
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
|
||||
.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
|
||||
.rx_fixup = ax88179_rx_fixup,
|
||||
.tx_fixup = ax88179_tx_fixup,
|
||||
};
|
||||
@ -1893,7 +1893,7 @@ static const struct driver_info at_umc200_info = {
|
||||
.link_reset = ax88179_link_reset,
|
||||
.reset = ax88179_reset,
|
||||
.stop = ax88179_stop,
|
||||
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
|
||||
.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
|
||||
.rx_fixup = ax88179_rx_fixup,
|
||||
.tx_fixup = ax88179_tx_fixup,
|
||||
};
|
||||
@ -1906,7 +1906,7 @@ static const struct driver_info at_umc2000sp_info = {
|
||||
.link_reset = ax88179_link_reset,
|
||||
.reset = ax88179_reset,
|
||||
.stop = ax88179_stop,
|
||||
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
|
||||
.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
|
||||
.rx_fixup = ax88179_rx_fixup,
|
||||
.tx_fixup = ax88179_tx_fixup,
|
||||
};
|
||||
|
@ -85,7 +85,7 @@ config NVSW_SN2201
|
||||
depends on I2C
|
||||
depends on REGMAP_I2C
|
||||
help
|
||||
This driver provides support for the Nvidia SN2201 platfom.
|
||||
This driver provides support for the Nvidia SN2201 platform.
|
||||
The SN2201 is a highly integrated for one rack unit system with
|
||||
L3 management switches. It has 48 x 1Gbps RJ45 + 4 x 100G QSFP28
|
||||
ports in a compact 1RU form factor. The system also including a
|
||||
|
@ -326,7 +326,7 @@ static struct resource nvsw_sn2201_lpc_res[] = {
|
||||
};
|
||||
|
||||
/* SN2201 I2C platform data. */
|
||||
struct mlxreg_core_hotplug_platform_data nvsw_sn2201_i2c_data = {
|
||||
static struct mlxreg_core_hotplug_platform_data nvsw_sn2201_i2c_data = {
|
||||
.irq = NVSW_SN2201_CPLD_SYSIRQ,
|
||||
};
|
||||
|
||||
|
@ -405,11 +405,14 @@ MODULE_DEVICE_TABLE(dmi, dmi_ids);
|
||||
static int __init p50_module_init(void)
|
||||
{
|
||||
struct resource res = DEFINE_RES_IO(P50_GPIO_IO_PORT_BASE, P50_PORT_CMD + 1);
|
||||
int ret;
|
||||
|
||||
if (!dmi_first_match(dmi_ids))
|
||||
return -ENODEV;
|
||||
|
||||
platform_driver_register(&p50_gpio_driver);
|
||||
ret = platform_driver_register(&p50_gpio_driver);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
gpio_pdev = platform_device_register_simple(DRIVER_NAME, PLATFORM_DEVID_NONE, &res, 1);
|
||||
if (IS_ERR(gpio_pdev)) {
|
||||
|
@ -140,6 +140,7 @@ static u8 gigabyte_wmi_detect_sensor_usability(struct wmi_device *wdev)
|
||||
}}
|
||||
|
||||
static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
|
||||
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M DS3H-CF"),
|
||||
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M S2H V2"),
|
||||
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE AX V2"),
|
||||
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE"),
|
||||
@ -156,6 +157,7 @@ static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
|
||||
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 GAMING X"),
|
||||
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 I AORUS PRO WIFI"),
|
||||
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 UD"),
|
||||
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z690M AORUS ELITE AX DDR4"),
|
||||
{ }
|
||||
};
|
||||
|
||||
|
@ -38,6 +38,7 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
|
||||
#define HPWMI_EVENT_GUID "95F24279-4D7B-4334-9387-ACCDC67EF61C"
|
||||
#define HPWMI_BIOS_GUID "5FB7F034-2C63-45e9-BE91-3D44E2C707E4"
|
||||
#define HP_OMEN_EC_THERMAL_PROFILE_OFFSET 0x95
|
||||
#define zero_if_sup(tmp) (zero_insize_support?0:sizeof(tmp)) // use when zero insize is required
|
||||
|
||||
/* DMI board names of devices that should use the omen specific path for
|
||||
* thermal profiles.
|
||||
@ -220,6 +221,7 @@ static struct input_dev *hp_wmi_input_dev;
|
||||
static struct platform_device *hp_wmi_platform_dev;
|
||||
static struct platform_profile_handler platform_profile_handler;
|
||||
static bool platform_profile_support;
|
||||
static bool zero_insize_support;
|
||||
|
||||
static struct rfkill *wifi_rfkill;
|
||||
static struct rfkill *bluetooth_rfkill;
|
||||
@ -290,14 +292,16 @@ static int hp_wmi_perform_query(int query, enum hp_wmi_command command,
|
||||
struct bios_return *bios_return;
|
||||
union acpi_object *obj = NULL;
|
||||
struct bios_args *args = NULL;
|
||||
int mid, actual_outsize, ret;
|
||||
int mid, actual_insize, actual_outsize;
|
||||
size_t bios_args_size;
|
||||
int ret;
|
||||
|
||||
mid = encode_outsize_for_pvsz(outsize);
|
||||
if (WARN_ON(mid < 0))
|
||||
return mid;
|
||||
|
||||
bios_args_size = struct_size(args, data, insize);
|
||||
actual_insize = max(insize, 128);
|
||||
bios_args_size = struct_size(args, data, actual_insize);
|
||||
args = kmalloc(bios_args_size, GFP_KERNEL);
|
||||
if (!args)
|
||||
return -ENOMEM;
|
||||
@ -374,7 +378,7 @@ static int hp_wmi_read_int(int query)
|
||||
int val = 0, ret;
|
||||
|
||||
ret = hp_wmi_perform_query(query, HPWMI_READ, &val,
|
||||
0, sizeof(val));
|
||||
zero_if_sup(val), sizeof(val));
|
||||
|
||||
if (ret)
|
||||
return ret < 0 ? ret : -EINVAL;
|
||||
@ -410,7 +414,8 @@ static int hp_wmi_get_tablet_mode(void)
|
||||
return -ENODEV;
|
||||
|
||||
ret = hp_wmi_perform_query(HPWMI_SYSTEM_DEVICE_MODE, HPWMI_READ,
|
||||
system_device_mode, 0, sizeof(system_device_mode));
|
||||
system_device_mode, zero_if_sup(system_device_mode),
|
||||
sizeof(system_device_mode));
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
@ -497,7 +502,7 @@ static int hp_wmi_fan_speed_max_get(void)
|
||||
int val = 0, ret;
|
||||
|
||||
ret = hp_wmi_perform_query(HPWMI_FAN_SPEED_MAX_GET_QUERY, HPWMI_GM,
|
||||
&val, 0, sizeof(val));
|
||||
&val, zero_if_sup(val), sizeof(val));
|
||||
|
||||
if (ret)
|
||||
return ret < 0 ? ret : -EINVAL;
|
||||
@ -509,7 +514,7 @@ static int __init hp_wmi_bios_2008_later(void)
|
||||
{
|
||||
int state = 0;
|
||||
int ret = hp_wmi_perform_query(HPWMI_FEATURE_QUERY, HPWMI_READ, &state,
|
||||
0, sizeof(state));
|
||||
zero_if_sup(state), sizeof(state));
|
||||
if (!ret)
|
||||
return 1;
|
||||
|
||||
@ -520,7 +525,7 @@ static int __init hp_wmi_bios_2009_later(void)
|
||||
{
|
||||
u8 state[128];
|
||||
int ret = hp_wmi_perform_query(HPWMI_FEATURE2_QUERY, HPWMI_READ, &state,
|
||||
0, sizeof(state));
|
||||
zero_if_sup(state), sizeof(state));
|
||||
if (!ret)
|
||||
return 1;
|
||||
|
||||
@ -598,7 +603,7 @@ static int hp_wmi_rfkill2_refresh(void)
|
||||
int err, i;
|
||||
|
||||
err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_READ, &state,
|
||||
0, sizeof(state));
|
||||
zero_if_sup(state), sizeof(state));
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
@ -1007,7 +1012,7 @@ static int __init hp_wmi_rfkill2_setup(struct platform_device *device)
|
||||
int err, i;
|
||||
|
||||
err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_READ, &state,
|
||||
0, sizeof(state));
|
||||
zero_if_sup(state), sizeof(state));
|
||||
if (err)
|
||||
return err < 0 ? err : -EINVAL;
|
||||
|
||||
@ -1483,11 +1488,15 @@ static int __init hp_wmi_init(void)
|
||||
{
|
||||
int event_capable = wmi_has_guid(HPWMI_EVENT_GUID);
|
||||
int bios_capable = wmi_has_guid(HPWMI_BIOS_GUID);
|
||||
int err;
|
||||
int err, tmp = 0;
|
||||
|
||||
if (!bios_capable && !event_capable)
|
||||
return -ENODEV;
|
||||
|
||||
if (hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, HPWMI_READ, &tmp,
|
||||
sizeof(tmp), sizeof(tmp)) == HPWMI_RET_INVALID_PARAMETERS)
|
||||
zero_insize_support = true;
|
||||
|
||||
if (event_capable) {
|
||||
err = hp_wmi_input_setup();
|
||||
if (err)
|
||||
|
@ -122,6 +122,12 @@ static const struct dmi_system_id dmi_vgbs_allow_list[] = {
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "HP Spectre x360 Convertible 15-df0xxx"),
|
||||
},
|
||||
},
|
||||
{
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go"),
|
||||
},
|
||||
},
|
||||
{ }
|
||||
};
|
||||
|
||||
|
@ -1912,6 +1912,7 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = {
|
||||
X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, &tgl_reg_map),
|
||||
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &tgl_reg_map),
|
||||
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &adl_reg_map),
|
||||
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &tgl_reg_map),
|
||||
{}
|
||||
};
|
||||
|
||||
|
@ -282,7 +282,7 @@ static int pmt_crashlog_probe(struct auxiliary_device *auxdev,
|
||||
auxiliary_set_drvdata(auxdev, priv);
|
||||
|
||||
for (i = 0; i < intel_vsec_dev->num_resources; i++) {
|
||||
struct intel_pmt_entry *entry = &priv->entry[i].entry;
|
||||
struct intel_pmt_entry *entry = &priv->entry[priv->num_entries].entry;
|
||||
|
||||
ret = intel_pmt_dev_create(entry, &pmt_crashlog_ns, intel_vsec_dev, i);
|
||||
if (ret < 0)
Some files were not shown because too many files have changed in this diff.