commit 93b350f884

Merge branch 'kvm-on-hv-msrbm-fix' into HEAD

Merge bugfix for enlightened MSR Bitmap, before adding support to KVM for exposing the feature to nested guests.
@@ -53,11 +53,10 @@ The number of bits that the PAC occupies in a pointer is 55 minus the
virtual address size configured by the kernel. For example, with a
virtual address size of 48, the PAC is 7 bits wide.

Recent versions of GCC can compile code with APIAKey-based return
address protection when passed the -msign-return-address option. This
uses instructions in the HINT space (unless -march=armv8.3-a or higher
is also passed), and such code can run on systems without the pointer
authentication extension.

When ARM64_PTR_AUTH_KERNEL is selected, the kernel will be compiled
with HINT space pointer authentication instructions protecting
function returns. Kernels built with this option will work on hardware
with or without pointer authentication support.

In addition to exec(), keys can also be reinitialized to random values
using the PR_PAC_RESET_KEYS prctl. A bitmask of PR_PAC_APIAKEY,
@@ -73,12 +73,12 @@ CPUFREQ_POSTCHANGE.
The third argument is a struct cpufreq_freqs with the following
values:

===== ===========================
cpu number of the affected CPU
====== ======================================
policy a pointer to the struct cpufreq_policy
old old frequency
new new frequency
flags flags of the cpufreq driver
===== ===========================
====== ======================================

3. CPUFreq Table Generation with Operating Performance Point (OPP)
==================================================================
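The hunk above documents that the notifier payload now carries a policy pointer rather than a raw CPU number. As a hedged illustration (not part of this commit; the module name is made up), a minimal transition notifier consuming those fields might look like this:

	#include <linux/cpufreq.h>
	#include <linux/module.h>

	static int demo_cpufreq_notify(struct notifier_block *nb,
				       unsigned long action, void *data)
	{
		struct cpufreq_freqs *freqs = data; /* third argument, per the table above */

		if (action == CPUFREQ_POSTCHANGE)
			pr_info("cpu%u: %u kHz -> %u kHz\n",
				freqs->policy->cpu, freqs->old, freqs->new);
		return NOTIFY_OK;
	}

	static struct notifier_block demo_nb = { .notifier_call = demo_cpufreq_notify };

	static int __init demo_init(void)
	{
		return cpufreq_register_notifier(&demo_nb, CPUFREQ_TRANSITION_NOTIFIER);
	}
	module_init(demo_init);

	static void __exit demo_exit(void)
	{
		cpufreq_unregister_notifier(&demo_nb, CPUFREQ_TRANSITION_NOTIFIER);
	}
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");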
@@ -17,9 +17,10 @@ properties:
oneOf:
- enum:
- fsl,imx7ulp-lpi2c
- fsl,imx8qm-lpi2c
- items:
- const: fsl,imx8qxp-lpi2c
- enum:
- fsl,imx8qxp-lpi2c
- fsl,imx8qm-lpi2c
- const: fsl,imx7ulp-lpi2c

reg:
@@ -50,11 +50,11 @@ ksmbd.mountd (user space daemon)
--------------------------------

ksmbd.mountd is userspace process to, transfer user account and password that
are registered using ksmbd.adduser(part of utils for user space). Further it
are registered using ksmbd.adduser (part of utils for user space). Further it
allows sharing information parameters that parsed from smb.conf to ksmbd in
kernel. For the execution part it has a daemon which is continuously running
and connected to the kernel interface using netlink socket, it waits for the
requests(dcerpc and share/user info). It handles RPC calls (at a minimum few
requests (dcerpc and share/user info). It handles RPC calls (at a minimum few
dozen) that are most important for file server from NetShareEnum and
NetServerGetInfo. Complete DCE/RPC response is prepared from the user space
and passed over to the associated kernel thread for the client.
@@ -154,11 +154,11 @@ Each layer
1. Enable all component prints
# sudo ksmbd.control -d "all"

2. Enable one of components(smb, auth, vfs, oplock, ipc, conn, rdma)
2. Enable one of components (smb, auth, vfs, oplock, ipc, conn, rdma)
# sudo ksmbd.control -d "smb"

3. Show what prints are enable.
# cat/sys/class/ksmbd-control/debug
3. Show what prints are enabled.
# cat /sys/class/ksmbd-control/debug
[smb] auth vfs oplock ipc conn [rdma]

4. Disable prints:
@@ -1,7 +1,7 @@
.. SPDX-License-Identifier: GPL-2.0

=================================
NETWORK FILESYSTEM HELPER LIBRARY
Network Filesystem Helper Library
=================================

.. Contents:
@@ -37,22 +37,22 @@ into a common call framework.

The following services are provided:

* Handles transparent huge pages (THPs).
* Handle folios that span multiple pages.

* Insulates the netfs from VM interface changes.
* Insulate the netfs from VM interface changes.

* Allows the netfs to arbitrarily split reads up into pieces, even ones that
don't match page sizes or page alignments and that may cross pages.
* Allow the netfs to arbitrarily split reads up into pieces, even ones that
don't match folio sizes or folio alignments and that may cross folios.

* Allows the netfs to expand a readahead request in both directions to meet
its needs.
* Allow the netfs to expand a readahead request in both directions to meet its
needs.

* Allows the netfs to partially fulfil a read, which will then be resubmitted.
* Allow the netfs to partially fulfil a read, which will then be resubmitted.

* Handles local caching, allowing cached data and server-read data to be
* Handle local caching, allowing cached data and server-read data to be
interleaved for a single request.

* Handles clearing of bufferage that aren't on the server.
* Handle clearing of bufferage that aren't on the server.

* Handle retrying of reads that failed, switching reads from the cache to the
server as necessary.
@@ -70,22 +70,22 @@ Read Helper Functions

Three read helpers are provided::

* void netfs_readahead(struct readahead_control *ractl,
const struct netfs_read_request_ops *ops,
void *netfs_priv);``
* int netfs_readpage(struct file *file,
struct page *page,
const struct netfs_read_request_ops *ops,
void *netfs_priv);
* int netfs_write_begin(struct file *file,
struct address_space *mapping,
loff_t pos,
unsigned int len,
unsigned int flags,
struct page **_page,
void **_fsdata,
const struct netfs_read_request_ops *ops,
void *netfs_priv);
void netfs_readahead(struct readahead_control *ractl,
const struct netfs_read_request_ops *ops,
void *netfs_priv);
int netfs_readpage(struct file *file,
struct folio *folio,
const struct netfs_read_request_ops *ops,
void *netfs_priv);
int netfs_write_begin(struct file *file,
struct address_space *mapping,
loff_t pos,
unsigned int len,
unsigned int flags,
struct folio **_folio,
void **_fsdata,
const struct netfs_read_request_ops *ops,
void *netfs_priv);

Each corresponds to a VM operation, with the addition of a couple of parameters
for the use of the read helpers:
@@ -103,8 +103,8 @@ Both of these values will be stored into the read request structure.
For ->readahead() and ->readpage(), the network filesystem should just jump
into the corresponding read helper; whereas for ->write_begin(), it may be a
little more complicated as the network filesystem might want to flush
conflicting writes or track dirty data and needs to put the acquired page if an
error occurs after calling the helper.
conflicting writes or track dirty data and needs to put the acquired folio if
an error occurs after calling the helper.

The helpers manage the read request, calling back into the network filesystem
through the suppplied table of operations. Waits will be performed as
@@ -253,7 +253,7 @@ through which it can issue requests and negotiate::
void (*issue_op)(struct netfs_read_subrequest *subreq);
bool (*is_still_valid)(struct netfs_read_request *rreq);
int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
struct page *page, void **_fsdata);
struct folio *folio, void **_fsdata);
void (*done)(struct netfs_read_request *rreq);
void (*cleanup)(struct address_space *mapping, void *netfs_priv);
};
@@ -313,13 +313,14 @@ The operations are as follows:

There is no return value; the netfs_subreq_terminated() function should be
called to indicate whether or not the operation succeeded and how much data
it transferred. The filesystem also should not deal with setting pages
it transferred. The filesystem also should not deal with setting folios
uptodate, unlocking them or dropping their refs - the helpers need to deal
with this as they have to coordinate with copying to the local cache.

Note that the helpers have the pages locked, but not pinned. It is possible
to use the ITER_XARRAY iov iterator to refer to the range of the inode that
is being operated upon without the need to allocate large bvec tables.
Note that the helpers have the folios locked, but not pinned. It is
possible to use the ITER_XARRAY iov iterator to refer to the range of the
inode that is being operated upon without the need to allocate large bvec
tables.

* ``is_still_valid()``

@@ -330,15 +331,15 @@ The operations are as follows:
* ``check_write_begin()``

[Optional] This is called from the netfs_write_begin() helper once it has
allocated/grabbed the page to be modified to allow the filesystem to flush
allocated/grabbed the folio to be modified to allow the filesystem to flush
conflicting state before allowing it to be modified.

It should return 0 if everything is now fine, -EAGAIN if the page should be
It should return 0 if everything is now fine, -EAGAIN if the folio should be
regrabbed and any other error code to abort the operation.

* ``done``

[Optional] This is called after the pages in the request have all been
[Optional] This is called after the folios in the request have all been
unlocked (and marked uptodate if applicable).

* ``cleanup``
@@ -390,7 +391,7 @@ The read helpers work by the following general procedure:
* If NETFS_SREQ_CLEAR_TAIL was set, a short read will be cleared to the
end of the slice instead of reissuing.

* Once the data is read, the pages that have been fully read/cleared:
* Once the data is read, the folios that have been fully read/cleared:

* Will be marked uptodate.

@@ -398,11 +399,11 @@ The read helpers work by the following general procedure:

* Unlocked

* Any pages that need writing to the cache will then have DIO writes issued.
* Any folios that need writing to the cache will then have DIO writes issued.

* Synchronous operations will wait for reading to be complete.

* Writes to the cache will proceed asynchronously and the pages will have the
* Writes to the cache will proceed asynchronously and the folios will have the
PG_fscache mark removed when that completes.

* The request structures will be cleaned up when everything has completed.
@@ -452,6 +453,9 @@ operation table looks like the following::
netfs_io_terminated_t term_func,
void *term_func_priv);

int (*prepare_write)(struct netfs_cache_resources *cres,
loff_t *_start, size_t *_len, loff_t i_size);

int (*write)(struct netfs_cache_resources *cres,
loff_t start_pos,
struct iov_iter *iter,
@@ -509,6 +513,14 @@ The methods defined in the table are:
indicating whether the termination is definitely happening in the caller's
context.

* ``prepare_write()``

[Required] Called to adjust a write to the cache and check that there is
sufficient space in the cache. The start and length values indicate the
size of the write that netfslib is proposing, and this can be adjusted by
the cache to respect DIO boundaries. The file size is passed for
information.

* ``write()``

[Required] Called to write to the cache. The start file offset is given
@@ -525,4 +537,9 @@ not the read request structure as they could be used in other situations where
there isn't a read request structure as well, such as writing dirty data to the
cache.

API Function Reference
======================

.. kernel-doc:: include/linux/netfs.h
.. kernel-doc:: fs/netfs/read_helper.c
@@ -36,6 +36,8 @@ Key to symbols

=============== =============================================================
S Start condition
Sr Repeated start condition, used to switch from write to
read mode.
P Stop condition
Rd/Wr (1 bit) Read/Write bit. Rd equals 1, Wr equals 0.
A, NA (1 bit) Acknowledge (ACK) and Not Acknowledge (NACK) bit
@@ -100,7 +102,7 @@ Implemented by i2c_smbus_read_byte_data()
This reads a single byte from a device, from a designated register.
The register is specified through the Comm byte::

S Addr Wr [A] Comm [A] S Addr Rd [A] [Data] NA P
S Addr Wr [A] Comm [A] Sr Addr Rd [A] [Data] NA P

Functionality flag: I2C_FUNC_SMBUS_READ_BYTE_DATA

@@ -114,7 +116,7 @@ This operation is very like Read Byte; again, data is read from a
device, from a designated register that is specified through the Comm
byte. But this time, the data is a complete word (16 bits)::

S Addr Wr [A] Comm [A] S Addr Rd [A] [DataLow] A [DataHigh] NA P
S Addr Wr [A] Comm [A] Sr Addr Rd [A] [DataLow] A [DataHigh] NA P

Functionality flag: I2C_FUNC_SMBUS_READ_WORD_DATA

@@ -164,7 +166,7 @@ This command selects a device register (through the Comm byte), sends
16 bits of data to it, and reads 16 bits of data in return::

S Addr Wr [A] Comm [A] DataLow [A] DataHigh [A]
S Addr Rd [A] [DataLow] A [DataHigh] NA P
Sr Addr Rd [A] [DataLow] A [DataHigh] NA P

Functionality flag: I2C_FUNC_SMBUS_PROC_CALL

@@ -181,7 +183,7 @@ of data is specified by the device in the Count byte.
::

S Addr Wr [A] Comm [A]
S Addr Rd [A] [Count] A [Data] A [Data] A ... A [Data] NA P
Sr Addr Rd [A] [Count] A [Data] A [Data] A ... A [Data] NA P

Functionality flag: I2C_FUNC_SMBUS_READ_BLOCK_DATA

@@ -212,7 +214,7 @@ This command selects a device register (through the Comm byte), sends
1 to 31 bytes of data to it, and reads 1 to 31 bytes of data in return::

S Addr Wr [A] Comm [A] Count [A] Data [A] ...
S Addr Rd [A] [Count] A [Data] ... A P
Sr Addr Rd [A] [Count] A [Data] ... A P

Functionality flag: I2C_FUNC_SMBUS_BLOCK_PROC_CALL

@@ -300,7 +302,7 @@ This command reads a block of bytes from a device, from a
designated register that is specified through the Comm byte::

S Addr Wr [A] Comm [A]
S Addr Rd [A] [Data] A [Data] A ... A [Data] NA P
Sr Addr Rd [A] [Data] A [Data] A ... A [Data] NA P

Functionality flag: I2C_FUNC_SMBUS_READ_I2C_BLOCK
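All of the corrected sequences above replace a plain start (S) with a repeated start (Sr) before the read phase. As a hedged userspace illustration (assumes libi2c from i2c-tools, and a made-up bus, device address, and register), a Read Byte Data transaction producing the S ... Sr ... P pattern can be issued like this:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/i2c-dev.h>
	#include <i2c/smbus.h>

	int main(void)
	{
		int fd = open("/dev/i2c-1", O_RDWR);	/* hypothetical bus */
		if (fd < 0)
			return 1;
		if (ioctl(fd, I2C_SLAVE, 0x48) < 0)	/* hypothetical device address */
			return 1;

		/* S Addr Wr [A] Comm [A] Sr Addr Rd [A] [Data] NA P */
		__s32 val = i2c_smbus_read_byte_data(fd, 0x00);
		if (val < 0)
			return 1;
		printf("reg 0x00 = 0x%02x\n", val);
		close(fd);
		return 0;
	}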
@@ -37,8 +37,7 @@ conn_reuse_mode - INTEGER

0: disable any special handling on port reuse. The new
connection will be delivered to the same real server that was
servicing the previous connection. This will effectively
disable expire_nodest_conn.
servicing the previous connection.

bit 1: enable rescheduling of new connections when it is safe.
That is, whenever expire_nodest_conn and for TCP sockets, when
@@ -486,8 +486,8 @@ of packets.

Drivers are free to use a more permissive configuration than the requested
configuration. It is expected that drivers should only implement directly the
most generic mode that can be supported. For example if the hardware can
support HWTSTAMP_FILTER_V2_EVENT, then it should generally always upscale
HWTSTAMP_FILTER_V2_L2_SYNC_MESSAGE, and so forth, as HWTSTAMP_FILTER_V2_EVENT
support HWTSTAMP_FILTER_PTP_V2_EVENT, then it should generally always upscale
HWTSTAMP_FILTER_PTP_V2_L2_SYNC, and so forth, as HWTSTAMP_FILTER_PTP_V2_EVENT
is more generic (and more useful to applications).

A driver which supports hardware time stamping shall update the struct
MAINTAINERS
@@ -2263,6 +2263,15 @@ L: linux-iio@vger.kernel.org
S: Maintained
F: drivers/counter/microchip-tcb-capture.c

ARM/MILBEAUT ARCHITECTURE
M: Taichi Sugaya <sugaya.taichi@socionext.com>
M: Takao Orito <orito.takao@socionext.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/boot/dts/milbeaut*
F: arch/arm/mach-milbeaut/
N: milbeaut

ARM/MIOA701 MACHINE SUPPORT
M: Robert Jarzmik <robert.jarzmik@free.fr>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -2729,10 +2738,11 @@ S: Maintained
F: drivers/memory/*emif*

ARM/TEXAS INSTRUMENT KEYSTONE ARCHITECTURE
M: Nishanth Menon <nm@ti.com>
M: Santosh Shilimkar <ssantosh@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/ti/linux.git
F: arch/arm/boot/dts/keystone-*
F: arch/arm/mach-keystone/

@@ -3570,13 +3580,14 @@ L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/broadcom/b44.*

BROADCOM B53 ETHERNET SWITCH DRIVER
BROADCOM B53/SF2 ETHERNET SWITCH DRIVER
M: Florian Fainelli <f.fainelli@gmail.com>
L: netdev@vger.kernel.org
L: openwrt-devel@lists.openwrt.org (subscribers-only)
S: Supported
F: Documentation/devicetree/bindings/net/dsa/brcm,b53.yaml
F: drivers/net/dsa/b53/*
F: drivers/net/dsa/bcm_sf2*
F: include/linux/dsa/brcm.h
F: include/linux/platform_data/b53.h

@@ -15968,6 +15979,7 @@ F: arch/mips/generic/board-ranchu.c

RANDOM NUMBER DRIVER
M: "Theodore Ts'o" <tytso@mit.edu>
M: Jason A. Donenfeld <Jason@zx2c4.com>
S: Maintained
F: drivers/char/random.c

@@ -16490,6 +16502,12 @@ T: git git://linuxtv.org/media_tree.git
F: Documentation/devicetree/bindings/media/allwinner,sun8i-a83t-de2-rotate.yaml
F: drivers/media/platform/sunxi/sun8i-rotate/

RPMSG TTY DRIVER
M: Arnaud Pouliquen <arnaud.pouliquen@foss.st.com>
L: linux-remoteproc@vger.kernel.org
S: Maintained
F: drivers/tty/rpmsg_tty.c

RTL2830 MEDIA DRIVER
M: Antti Palosaari <crope@iki.fi>
L: linux-media@vger.kernel.org
@@ -16612,7 +16630,8 @@ F: drivers/iommu/s390-iommu.c

S390 IUCV NETWORK LAYER
M: Julian Wiedmann <jwi@linux.ibm.com>
M: Karsten Graul <kgraul@linux.ibm.com>
M: Alexandra Winter <wintera@linux.ibm.com>
M: Wenjia Zhang <wenjia@linux.ibm.com>
L: linux-s390@vger.kernel.org
L: netdev@vger.kernel.org
S: Supported
@@ -16623,7 +16642,8 @@ F: net/iucv/

S390 NETWORK DRIVERS
M: Julian Wiedmann <jwi@linux.ibm.com>
M: Karsten Graul <kgraul@linux.ibm.com>
M: Alexandra Winter <wintera@linux.ibm.com>
M: Wenjia Zhang <wenjia@linux.ibm.com>
L: linux-s390@vger.kernel.org
L: netdev@vger.kernel.org
S: Supported
@@ -18483,6 +18503,7 @@ F: include/uapi/linux/pkt_sched.h
F: include/uapi/linux/tc_act/
F: include/uapi/linux/tc_ematch/
F: net/sched/
F: tools/testing/selftests/tc-testing

TC90522 MEDIA DRIVER
M: Akihiro Tsukada <tskd08@gmail.com>
@@ -19031,11 +19052,12 @@ F: drivers/mmc/host/tifm_sd.c
F: include/linux/tifm.h

TI KEYSTONE MULTICORE NAVIGATOR DRIVERS
M: Nishanth Menon <nm@ti.com>
M: Santosh Shilimkar <ssantosh@kernel.org>
L: linux-kernel@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/ti/linux.git
F: drivers/soc/ti/*

TI LM49xxx FAMILY ASoC CODEC DRIVERS
Makefile
@@ -2,8 +2,8 @@
VERSION = 5
PATCHLEVEL = 16
SUBLEVEL = 0
EXTRAVERSION = -rc2
NAME = Trick or Treat
EXTRAVERSION = -rc4
NAME = Gobble Gobble

# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
arch/Kconfig
@@ -991,6 +991,16 @@ config HAVE_ARCH_COMPAT_MMAP_BASES
and vice-versa 32-bit applications to call 64-bit mmap().
Required for applications doing different bitness syscalls.

config PAGE_SIZE_LESS_THAN_64KB
def_bool y
depends on !ARM64_64K_PAGES
depends on !IA64_PAGE_SIZE_64KB
depends on !PAGE_SIZE_64KB
depends on !PARISC_PAGE_SIZE_64KB
depends on !PPC_64K_PAGES
depends on !PPC_256K_PAGES
depends on !PAGE_SIZE_256KB

# This allows to use a set of generic functions to determine mmap base
# address by giving priority to top-down scheme only if the process
# is not in legacy mode (compat task, unlimited stack size or
@@ -488,3 +488,4 @@
556 common landlock_restrict_self sys_landlock_restrict_self
# 557 reserved for memfd_secret
558 common process_mrelease sys_process_mrelease
559 common futex_waitv sys_futex_waitv
@@ -36,7 +36,6 @@ void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1

void flush_dcache_page(struct page *page);
void flush_dcache_folio(struct folio *folio);

void dma_cache_wback_inv(phys_addr_t start, unsigned long sz);
void dma_cache_inv(phys_addr_t start, unsigned long sz);
@@ -506,11 +506,17 @@
#address-cells = <3>;
#interrupt-cells = <1>;
#size-cells = <2>;
interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
interrupts = <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "pcie", "msi";
interrupt-map-mask = <0x0 0x0 0x0 0x7>;
interrupt-map = <0 0 0 1 &gicv2 GIC_SPI 143
IRQ_TYPE_LEVEL_HIGH>,
<0 0 0 2 &gicv2 GIC_SPI 144
IRQ_TYPE_LEVEL_HIGH>,
<0 0 0 3 &gicv2 GIC_SPI 145
IRQ_TYPE_LEVEL_HIGH>,
<0 0 0 4 &gicv2 GIC_SPI 146
IRQ_TYPE_LEVEL_HIGH>;
msi-controller;
msi-parent = <&pcie0>;
@@ -242,6 +242,8 @@

gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
};

pcie0: pcie@12000 {
@@ -408,7 +410,7 @@
i2c0: i2c@18009000 {
compatible = "brcm,iproc-i2c";
reg = <0x18009000 0x50>;
interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
clock-frequency = <100000>;
@@ -290,7 +290,6 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
*/
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
void flush_dcache_folio(struct folio *folio);

#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
static inline void flush_kernel_vmap_range(void *addr, int size)
@@ -33,7 +33,7 @@ extern void __iomem *sdr_ctl_base_addr;
u32 socfpga_sdram_self_refresh(u32 sdr_base);
extern unsigned int socfpga_sdram_self_refresh_sz;

extern char secondary_trampoline, secondary_trampoline_end;
extern char secondary_trampoline[], secondary_trampoline_end[];

extern unsigned long socfpga_cpu1start_addr;
@@ -20,14 +20,14 @@

static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;
int trampoline_size = secondary_trampoline_end - secondary_trampoline;

if (socfpga_cpu1start_addr) {
/* This will put CPU #1 into reset. */
writel(RSTMGR_MPUMODRST_CPU1,
rst_manager_base_addr + SOCFPGA_RSTMGR_MODMPURST);

memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size);

writel(__pa_symbol(secondary_startup),
sys_manager_base_addr + (socfpga_cpu1start_addr & 0x000000ff));
@@ -45,12 +45,12 @@ static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)

static int socfpga_a10_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;
int trampoline_size = secondary_trampoline_end - secondary_trampoline;

if (socfpga_cpu1start_addr) {
writel(RSTMGR_MPUMODRST_CPU1, rst_manager_base_addr +
SOCFPGA_A10_RSTMGR_MODMPURST);
memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size);

writel(__pa_symbol(secondary_startup),
sys_manager_base_addr + (socfpga_cpu1start_addr & 0x00000fff));
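The two hunks above (together with the core.h change) switch the trampoline symbols from single chars to incomplete arrays. A minimal sketch of why that matters, assuming the symbols are defined in assembly or a linker script (so this fragment illustrates declarations only and does not link on its own): with "extern char x;" the compiler believes the object is exactly one byte, so a memcpy() of trampoline_size bytes out of it can be flagged by fortified string routines, and "&end - &start" is pointer arithmetic between two distinct one-byte objects.

	/* Hedged sketch, not the kernel code itself. */
	extern char secondary_trampoline[];	/* incomplete array: size unknown to the compiler */
	extern char secondary_trampoline_end[];

	static long trampoline_size(void)
	{
		/* Array names decay to pointers, so no '&' is needed and no
		 * one-byte object bound is assumed by a fortified memcpy(). */
		return secondary_trampoline_end - secondary_trampoline;
	}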
@@ -296,8 +296,7 @@
pinctrl-0 = <&ufs_rst_n &ufs_refclk_out>;
phys = <&ufs_0_phy>;
phy-names = "ufs-phy";
samsung,sysreg = <&syscon_fsys2>;
samsung,ufs-shareability-reg-offset = <0x710>;
samsung,sysreg = <&syscon_fsys2 0x710>;
status = "disabled";
};
};
@@ -12,6 +12,17 @@

#define HAVE_FUNCTION_GRAPH_FP_TEST

/*
* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR means that the architecture can provide a
* "return address pointer" which can be used to uniquely identify a return
* address which has been overwritten.
*
* On arm64 we use the address of the caller's frame record, which remains the
* same for the lifetime of the instrumented function, unlike the return
* address in the LR.
*/
#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
#define ARCH_SUPPORTS_FTRACE_OPS 1
#else
@@ -76,7 +76,7 @@ static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t ptep,
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
{
VM_BUG_ON(mm != &init_mm);
VM_BUG_ON(mm && mm != &init_mm);
__pmd_populate(pmdp, __pa(ptep), PMD_TYPE_TABLE | PMD_TABLE_UXN);
}
@@ -47,9 +47,6 @@ struct stack_info {
* @prev_type: The type of stack this frame record was on, or a synthetic
* value of STACK_TYPE_UNKNOWN. This is used to detect a
* transition from one stack to another.
*
* @graph: When FUNCTION_GRAPH_TRACER is selected, holds the index of a
* replacement lr value in the ftrace graph stack.
*/
struct stackframe {
unsigned long fp;
@@ -57,9 +54,6 @@ struct stackframe {
DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
unsigned long prev_fp;
enum stack_type prev_type;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
int graph;
#endif
#ifdef CONFIG_KRETPROBES
struct llist_node *kr_cur;
#endif
@@ -281,12 +281,22 @@ do { \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
} while (0)

/*
* We must not call into the scheduler between uaccess_ttbr0_enable() and
* uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
* we must evaluate these outside of the critical section.
*/
#define __raw_get_user(x, ptr, err) \
do { \
__typeof__(*(ptr)) __user *__rgu_ptr = (ptr); \
__typeof__(x) __rgu_val; \
__chk_user_ptr(ptr); \
\
uaccess_ttbr0_enable(); \
__raw_get_mem("ldtr", x, ptr, err); \
__raw_get_mem("ldtr", __rgu_val, __rgu_ptr, err); \
uaccess_ttbr0_disable(); \
\
(x) = __rgu_val; \
} while (0)

#define __get_user_error(x, ptr, err) \
@@ -310,14 +320,22 @@ do { \

#define get_user __get_user

/*
* We must not call into the scheduler between __uaccess_enable_tco_async() and
* __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
* functions, we must evaluate these outside of the critical section.
*/
#define __get_kernel_nofault(dst, src, type, err_label) \
do { \
__typeof__(dst) __gkn_dst = (dst); \
__typeof__(src) __gkn_src = (src); \
int __gkn_err = 0; \
\
__uaccess_enable_tco_async(); \
__raw_get_mem("ldr", *((type *)(dst)), \
(__force type *)(src), __gkn_err); \
__raw_get_mem("ldr", *((type *)(__gkn_dst)), \
(__force type *)(__gkn_src), __gkn_err); \
__uaccess_disable_tco_async(); \
\
if (unlikely(__gkn_err)) \
goto err_label; \
} while (0)
@@ -351,11 +369,19 @@ do { \
} \
} while (0)

/*
* We must not call into the scheduler between uaccess_ttbr0_enable() and
* uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
* we must evaluate these outside of the critical section.
*/
#define __raw_put_user(x, ptr, err) \
do { \
__chk_user_ptr(ptr); \
__typeof__(*(ptr)) __user *__rpu_ptr = (ptr); \
__typeof__(*(ptr)) __rpu_val = (x); \
__chk_user_ptr(__rpu_ptr); \
\
uaccess_ttbr0_enable(); \
__raw_put_mem("sttr", x, ptr, err); \
__raw_put_mem("sttr", __rpu_val, __rpu_ptr, err); \
uaccess_ttbr0_disable(); \
} while (0)

@@ -380,14 +406,22 @@ do { \

#define put_user __put_user

/*
* We must not call into the scheduler between __uaccess_enable_tco_async() and
* __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
* functions, we must evaluate these outside of the critical section.
*/
#define __put_kernel_nofault(dst, src, type, err_label) \
do { \
__typeof__(dst) __pkn_dst = (dst); \
__typeof__(src) __pkn_src = (src); \
int __pkn_err = 0; \
\
__uaccess_enable_tco_async(); \
__raw_put_mem("str", *((type *)(src)), \
(__force type *)(dst), __pkn_err); \
__raw_put_mem("str", *((type *)(__pkn_src)), \
(__force type *)(__pkn_dst), __pkn_err); \
__uaccess_disable_tco_async(); \
\
if (unlikely(__pkn_err)) \
goto err_label; \
} while(0)
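The pattern in all four hunks is the same: a macro argument with side effects must be evaluated before entering a critical section, because the expansion would otherwise run those side effects (possibly blocking calls) inside it. A standalone, compilable sketch of the hazard, using stand-in functions rather than the kernel's uaccess primitives:

	#include <stdio.h>

	static void critical_enter(void) { puts("enter"); }  /* stand-in */
	static void critical_exit(void)  { puts("exit"); }   /* stand-in */
	static int may_block(void)       { puts("blocking call"); return 42; }

	/* Hazard: 'v' is evaluated inside the critical section. */
	#define GET_BAD(x, v)  do { critical_enter(); (x) = (v); critical_exit(); } while (0)

	/* Fix, mirroring the hunks above: snapshot the argument first. */
	#define GET_GOOD(x, v)                                              \
	do {                                                                \
		__typeof__(v) _tmp = (v); /* evaluated before the section */ \
		critical_enter();                                           \
		(x) = _tmp;                                                 \
		critical_exit();                                            \
	} while (0)

	int main(void)
	{
		int a, b;
		GET_BAD(a, may_block());  /* "blocking call" prints between enter/exit */
		GET_GOOD(b, may_block()); /* "blocking call" prints before enter */
		return a == b ? 0 : 1;
	}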
@@ -77,11 +77,17 @@
.endm

SYM_CODE_START(ftrace_regs_caller)
#ifdef BTI_C
BTI_C
#endif
ftrace_regs_entry 1
b ftrace_common
SYM_CODE_END(ftrace_regs_caller)

SYM_CODE_START(ftrace_caller)
#ifdef BTI_C
BTI_C
#endif
ftrace_regs_entry 0
b ftrace_common
SYM_CODE_END(ftrace_caller)
@@ -244,8 +244,6 @@ void arch_ftrace_update_code(int command)
* on the way back to parent. For this purpose, this function is called
* in _mcount() or ftrace_caller() to replace return address (*parent) on
* the call stack to return_to_handler.
*
* Note that @frame_pointer is used only for sanity check later.
*/
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
unsigned long frame_pointer)
@@ -263,8 +261,10 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
*/
old = *parent;

if (!function_graph_enter(old, self_addr, frame_pointer, NULL))
if (!function_graph_enter(old, self_addr, frame_pointer,
(void *)frame_pointer)) {
*parent = return_hooker;
}
}

#ifdef CONFIG_DYNAMIC_FTRACE
@@ -147,7 +147,7 @@ int machine_kexec_post_load(struct kimage *kimage)
if (rc)
return rc;
kimage->arch.ttbr1 = __pa(trans_pgd);
kimage->arch.zero_page = __pa(empty_zero_page);
kimage->arch.zero_page = __pa_symbol(empty_zero_page);

reloc_size = __relocate_new_kernel_end - __relocate_new_kernel_start;
memcpy(reloc_code, __relocate_new_kernel_start, reloc_size);
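The fix swaps __pa() for __pa_symbol() because empty_zero_page is a kernel-image symbol, not a linear-map address. A toy model of the distinction, with entirely made-up layout constants (the real macros and addresses differ):

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_OFFSET  0xffff000000000000ULL /* linear-map base (toy value) */
	#define KIMAGE_VADDR 0xffff800010000000ULL /* kernel-image base (toy value) */
	#define PHYS_OFFSET  0x40000000ULL         /* RAM base (toy value) */
	#define KIMAGE_PHYS  0x41000000ULL         /* image load address (toy value) */

	/* __pa(): valid only for linear-map virtual addresses. */
	static uint64_t pa_linear(uint64_t va) { return va - PAGE_OFFSET + PHYS_OFFSET; }
	/* __pa_symbol(): valid for symbols inside the kernel image. */
	static uint64_t pa_symbol(uint64_t va) { return va - KIMAGE_VADDR + KIMAGE_PHYS; }

	int main(void)
	{
		uint64_t empty_zero_page = KIMAGE_VADDR + 0x2000; /* an image symbol */
		printf("__pa_symbol -> %#llx (correct)\n",
		       (unsigned long long)pa_symbol(empty_zero_page));
		printf("__pa        -> %#llx (bogus for image symbols)\n",
		       (unsigned long long)pa_linear(empty_zero_page));
		return 0;
	}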
@@ -38,9 +38,6 @@ void start_backtrace(struct stackframe *frame, unsigned long fp,
{
frame->fp = fp;
frame->pc = pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
frame->graph = 0;
#endif
#ifdef CONFIG_KRETPROBES
frame->kr_cur = NULL;
#endif
@@ -116,20 +113,23 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
frame->prev_fp = fp;
frame->prev_type = info.type;

frame->pc = ptrauth_strip_insn_pac(frame->pc);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if (tsk->ret_stack &&
(ptrauth_strip_insn_pac(frame->pc) == (unsigned long)return_to_handler)) {
struct ftrace_ret_stack *ret_stack;
(frame->pc == (unsigned long)return_to_handler)) {
unsigned long orig_pc;
/*
* This is a case where function graph tracer has
* modified a return address (LR) in a stack frame
* to hook a function return.
* So replace it to an original value.
*/
ret_stack = ftrace_graph_get_ret_stack(tsk, frame->graph++);
if (WARN_ON_ONCE(!ret_stack))
orig_pc = ftrace_graph_ret_addr(tsk, NULL, frame->pc,
(void *)frame->fp);
if (WARN_ON_ONCE(frame->pc == orig_pc))
return -EINVAL;
frame->pc = ret_stack->ret;
frame->pc = orig_pc;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_KRETPROBES
@@ -137,8 +137,6 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
frame->pc = kretprobe_find_ret_addr(tsk, (void *)frame->fp, &frame->kr_cur);
#endif

frame->pc = ptrauth_strip_insn_pac(frame->pc);

return 0;
}
NOKPROBE_SYMBOL(unwind_frame);
@@ -369,3 +369,4 @@
446 common landlock_restrict_self sys_landlock_restrict_self
# 447 reserved for memfd_secret
448 common process_mrelease sys_process_mrelease
449 common futex_waitv sys_futex_waitv
@@ -250,7 +250,6 @@ static inline void __flush_page_to_ram(void *vaddr)

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
#define flush_dcache_page(page) __flush_page_to_ram(page_address(page))
void flush_dcache_folio(struct folio *folio);
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_page(vma, page) __flush_page_to_ram(page_address(page))
@@ -448,3 +448,4 @@
446 common landlock_restrict_self sys_landlock_restrict_self
# 447 reserved for memfd_secret
448 common process_mrelease sys_process_mrelease
449 common futex_waitv sys_futex_waitv
@@ -454,3 +454,4 @@
446 common landlock_restrict_self sys_landlock_restrict_self
# 447 reserved for memfd_secret
448 common process_mrelease sys_process_mrelease
449 common futex_waitv sys_futex_waitv
@@ -3097,7 +3097,7 @@ config STACKTRACE_SUPPORT
config PGTABLE_LEVELS
int
default 4 if PAGE_SIZE_4KB && MIPS_VA_BITS_48
default 3 if 64BIT && !PAGE_SIZE_64KB
default 3 if 64BIT && (!PAGE_SIZE_64KB || MIPS_VA_BITS_48)
default 2

config MIPS_AUTO_PFN_OFFSET
@@ -52,7 +52,7 @@ endif

vmlinuzobjs-$(CONFIG_KERNEL_XZ) += $(obj)/ashldi3.o

vmlinuzobjs-$(CONFIG_KERNEL_ZSTD) += $(obj)/bswapdi.o
vmlinuzobjs-$(CONFIG_KERNEL_ZSTD) += $(obj)/bswapdi.o $(obj)/ashldi3.o

targets := $(notdir $(vmlinuzobjs-y))
@@ -61,8 +61,6 @@ static inline void flush_dcache_page(struct page *page)
SetPageDcacheDirty(page);
}

void flush_dcache_folio(struct folio *folio);

#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
@@ -1734,8 +1734,6 @@ static inline void decode_cpucfg(struct cpuinfo_mips *c)

static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
{
decode_configs(c);

/* All Loongson processors covered here define ExcCode 16 as GSExc. */
c->options |= MIPS_CPU_GSEXCEX;

@@ -1796,6 +1794,8 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
panic("Unknown Loongson Processor ID!");
break;
}

decode_configs(c);
}
#else
static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) { }
@@ -185,7 +185,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_puts(m, " tx39_cache");
if (cpu_has_octeon_cache)
seq_puts(m, " octeon_cache");
if (cpu_has_fpu)
if (raw_cpu_has_fpu)
seq_puts(m, " fpu");
if (cpu_has_32fpr)
seq_puts(m, " 32fpr");
@@ -27,7 +27,6 @@ void flush_cache_vunmap(unsigned long start, unsigned long end);

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
void flush_dcache_page(struct page *page);
void flush_dcache_folio(struct folio *folio);
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long vaddr, void *dst, void *src, int len);
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
@@ -29,7 +29,6 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
unsigned long pfn);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
void flush_dcache_page(struct page *page);
void flush_dcache_folio(struct folio *folio);

extern void flush_icache_range(unsigned long start, unsigned long end);
extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
@@ -15,7 +15,12 @@
# Mike Shaver, Helge Deller and Martin K. Petersen
#

ifdef CONFIG_PARISC_SELF_EXTRACT
boot := arch/parisc/boot
KBUILD_IMAGE := $(boot)/bzImage
else
KBUILD_IMAGE := vmlinuz
endif

NM = sh $(srctree)/arch/parisc/nm
CHECKFLAGS += -D__hppa__=1
@@ -1,7 +1,9 @@
CONFIG_LOCALVERSION="-64bit"
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_KERNEL_LZ4=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_AUDIT=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_TASKSTATS=y
@@ -35,6 +37,7 @@ CONFIG_MODVERSIONS=y
CONFIG_BLK_DEV_INTEGRITY=y
CONFIG_BINFMT_MISC=m
# CONFIG_COMPACTION is not set
CONFIG_MEMORY_FAILURE=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -65,12 +68,15 @@ CONFIG_SCSI_ISCSI_ATTRS=y
CONFIG_SCSI_SRP_ATTRS=y
CONFIG_ISCSI_BOOT_SYSFS=y
CONFIG_SCSI_MPT2SAS=y
CONFIG_SCSI_LASI700=m
CONFIG_SCSI_LASI700=y
CONFIG_SCSI_SYM53C8XX_2=y
CONFIG_SCSI_ZALON=y
CONFIG_SCSI_QLA_ISCSI=m
CONFIG_SCSI_DH=y
CONFIG_ATA=y
CONFIG_SATA_SIL=y
CONFIG_SATA_SIS=y
CONFIG_SATA_VIA=y
CONFIG_PATA_NS87415=y
CONFIG_PATA_SIL680=y
CONFIG_ATA_GENERIC=y
@@ -79,6 +85,7 @@ CONFIG_MD_LINEAR=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_RAID=m
CONFIG_DM_UEVENT=y
CONFIG_DM_AUDIT=y
CONFIG_FUSION=y
CONFIG_FUSION_SPI=y
CONFIG_FUSION_SAS=y
@@ -196,10 +203,15 @@ CONFIG_FB_MATROX_G=y
CONFIG_FB_MATROX_I2C=y
CONFIG_FB_MATROX_MAVEN=y
CONFIG_FB_RADEON=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_CLUT224 is not set
CONFIG_HIDRAW=y
CONFIG_HID_PID=y
CONFIG_USB_HIDDEV=y
CONFIG_USB=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PLATFORM=y
CONFIG_UIO=y
CONFIG_UIO_PDRV_GENIRQ=m
CONFIG_UIO_AEC=m
@@ -147,6 +147,17 @@
extrd,u \r, 63-(\sa), 64-(\sa), \t
.endm

/* Extract unsigned for 32- and 64-bit
* The extru instruction leaves the most significant 32 bits of the
* target register in an undefined state on PA 2.0 systems. */
.macro extru_safe r, p, len, t
#ifdef CONFIG_64BIT
extrd,u \r, 32+(\p), \len, \t
#else
extru \r, \p, \len, \t
#endif
.endm

/* load 32-bit 'value' into 'reg' compensating for the ldil
* sign-extension when running in wide mode.
* WARNING!! neither 'value' nor 'reg' can be expressions
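For readers not fluent in PA-RISC: extru r,p,len,t copies the len-bit field whose rightmost bit sits at big-endian bit position p of a 32-bit value into t. A small C model of that semantics (an illustration, not kernel code), using the same bits 3-11 hash that the syscall.S hunks below apply:

	#include <stdint.h>
	#include <stdio.h>

	/* C model of "extru r,p,len,t": on PA 2.0 the real 32-bit extru
	 * leaves the upper half of the 64-bit register undefined, which is
	 * why extru_safe uses extrd,u with a shifted position on 64-bit. */
	static uint32_t extru32(uint32_t r, int p, int len)
	{
		return (r >> (31 - p)) & ((1u << len) - 1);
	}

	int main(void)
	{
		/* The lock hash below is "extru_safe %r26, 28, 8, %r20". */
		printf("%#x\n", extru32(0x12345678, 28, 8)); /* prints 0xcf */
		return 0;
	}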
@@ -50,7 +50,6 @@ void invalidate_kernel_vmap_range(void *vaddr, int size);

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
void flush_dcache_page(struct page *page);
void flush_dcache_folio(struct folio *folio);

#define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
#define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
@@ -39,6 +39,7 @@ verify "$3"
if [ -n "${INSTALLKERNEL}" ]; then
if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
if [ -x /usr/sbin/${INSTALLKERNEL} ]; then exec /usr/sbin/${INSTALLKERNEL} "$@"; fi
fi

# Default install
@@ -366,17 +366,9 @@
*/
.macro L2_ptep pmd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3
extru \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
extru_safe \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
#else
# if defined(CONFIG_64BIT)
extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
#else
# if PAGE_SIZE > 4096
extru \va,31-ASM_PGDIR_SHIFT,32-ASM_PGDIR_SHIFT,\index
# else
extru \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
# endif
# endif
extru_safe \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
#endif
dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
#if CONFIG_PGTABLE_LEVELS < 3
@@ -386,7 +378,7 @@
bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault
dep %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
SHLREG \pmd,PxD_VALUE_SHIFT,\pmd
extru \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
extru_safe \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
.endm
@@ -566,7 +566,7 @@ lws_compare_and_swap:
ldo R%lws_lock_start(%r20), %r28

/* Extract eight bits from r26 and hash lock (Bits 3-11) */
extru %r26, 28, 8, %r20
extru_safe %r26, 28, 8, %r20

/* Find lock to use, the hash is either one of 0 to
15, multiplied by 16 (keep it 16-byte aligned)
@@ -751,7 +751,7 @@ cas2_lock_start:
ldo R%lws_lock_start(%r20), %r28

/* Extract eight bits from r26 and hash lock (Bits 3-11) */
extru %r26, 28, 8, %r20
extru_safe %r26, 28, 8, %r20

/* Find lock to use, the hash is either one of 0 to
15, multiplied by 16 (keep it 16-byte aligned)
@@ -249,30 +249,16 @@ void __init time_init(void)
static int __init init_cr16_clocksource(void)
{
/*
* The cr16 interval timers are not syncronized across CPUs on
* different sockets, so mark them unstable and lower rating on
* multi-socket SMP systems.
* The cr16 interval timers are not syncronized across CPUs, even if
* they share the same socket.
*/
if (num_online_cpus() > 1 && !running_on_qemu) {
int cpu;
unsigned long cpu0_loc;
cpu0_loc = per_cpu(cpu_data, 0).cpu_loc;
/* mark sched_clock unstable */
clear_sched_clock_stable();

for_each_online_cpu(cpu) {
if (cpu == 0)
continue;
if ((cpu0_loc != 0) &&
(cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc))
continue;

/* mark sched_clock unstable */
clear_sched_clock_stable();

clocksource_cr16.name = "cr16_unstable";
clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
clocksource_cr16.rating = 0;
break;
}
clocksource_cr16.name = "cr16_unstable";
clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
clocksource_cr16.rating = 0;
}

/* register at clocksource framework */
@@ -57,8 +57,6 @@ SECTIONS
{
. = KERNEL_BINARY_TEXT_START;

_stext = .; /* start of kernel text, includes init code & data */

__init_begin = .;
HEAD_TEXT_SECTION
MLONGCALL_DISCARD(INIT_TEXT_SECTION(8))
@@ -82,6 +80,7 @@ SECTIONS
/* freed after init ends here */

_text = .; /* Text and read-only data */
_stext = .;
MLONGCALL_KEEP(INIT_TEXT_SECTION(8))
.text ALIGN(PAGE_SIZE) : {
TEXT_TEXT
@@ -202,11 +202,11 @@ vmap_stack_overflow:
mfspr r1, SPRN_SPRG_THREAD
lwz r1, TASK_CPU - THREAD(r1)
slwi r1, r1, 3
addis r1, r1, emergency_ctx@ha
addis r1, r1, emergency_ctx-PAGE_OFFSET@ha
#else
lis r1, emergency_ctx@ha
lis r1, emergency_ctx-PAGE_OFFSET@ha
#endif
lwz r1, emergency_ctx@l(r1)
lwz r1, emergency_ctx-PAGE_OFFSET@l(r1)
addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE
EXCEPTION_PROLOG_2 0 vmap_stack_overflow
prepare_transfer_to_handler
@@ -528,3 +528,4 @@
446 common landlock_restrict_self sys_landlock_restrict_self
# 447 reserved for memfd_secret
448 common process_mrelease sys_process_mrelease
449 common futex_waitv sys_futex_waitv
@@ -695,6 +695,7 @@ static void flush_guest_tlb(struct kvm *kvm)
"r" (0) : "memory");
}
asm volatile("ptesync": : :"memory");
// POWER9 congruence-class TLBIEL leaves ERAT. Flush it now.
asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST : : :"memory");
} else {
for (set = 0; set < kvm->arch.tlb_sets; ++set) {
@@ -705,7 +706,9 @@ static void flush_guest_tlb(struct kvm *kvm)
rb += PPC_BIT(51); /* increment set number */
}
asm volatile("ptesync": : :"memory");
asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
// POWER9 congruence-class TLBIEL leaves ERAT. Flush it now.
if (cpu_has_feature(CPU_FTR_ARCH_300))
asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
}
}
@@ -403,7 +403,6 @@ CONFIG_DEVTMPFS=y
CONFIG_CONNECTOR=y
CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
@@ -476,6 +475,7 @@ CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_VXLAN=m
CONFIG_BAREUDP=m
CONFIG_AMT=m
CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_VIRTIO_NET=m
@@ -489,6 +489,7 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_AMD is not set
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_VENDOR_ASIX is not set
# CONFIG_NET_VENDOR_ATHEROS is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_BROCADE is not set
@@ -571,6 +572,7 @@ CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_NOWAYOUT=y
CONFIG_SOFT_WATCHDOG=m
CONFIG_DIAG288_WATCHDOG=m
# CONFIG_DRM_DEBUG_MODESET_LOCK is not set
CONFIG_FB=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
@@ -775,12 +777,14 @@ CONFIG_CRC4=m
CONFIG_CRC7=m
CONFIG_CRC8=m
CONFIG_RANDOM32_SELFTEST=y
CONFIG_XZ_DEC_MICROLZMA=y
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=0
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_INFO_DWARF4=y
CONFIG_DEBUG_INFO_BTF=y
CONFIG_GDB_SCRIPTS=y
CONFIG_HEADERS_INSTALL=y
CONFIG_DEBUG_SECTION_MISMATCH=y
@@ -807,6 +811,7 @@ CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
CONFIG_DEBUG_PER_CPU_MAPS=y
CONFIG_KFENCE=y
CONFIG_KFENCE_STATIC_KEYS=y
CONFIG_DEBUG_SHIRQ=y
CONFIG_PANIC_ON_OOPS=y
CONFIG_DETECT_HUNG_TASK=y
@@ -842,6 +847,7 @@ CONFIG_FTRACE_STARTUP_TEST=y
CONFIG_SAMPLES=y
CONFIG_SAMPLE_TRACE_PRINTK=m
CONFIG_SAMPLE_FTRACE_DIRECT=m
CONFIG_SAMPLE_FTRACE_DIRECT_MULTI=m
CONFIG_DEBUG_ENTRY=y
CONFIG_CIO_INJECT=y
CONFIG_KUNIT=m
@@ -860,7 +866,7 @@ CONFIG_FAIL_FUNCTION=y
CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
CONFIG_LKDTM=m
CONFIG_TEST_MIN_HEAP=y
CONFIG_KPROBES_SANITY_TEST=y
CONFIG_KPROBES_SANITY_TEST=m
CONFIG_RBTREE_TEST=y
CONFIG_INTERVAL_TREE_TEST=m
CONFIG_PERCPU_TEST=m
@@ -394,7 +394,6 @@ CONFIG_DEVTMPFS=y
CONFIG_CONNECTOR=y
CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
@@ -467,6 +466,7 @@ CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_VXLAN=m
CONFIG_BAREUDP=m
CONFIG_AMT=m
CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_VIRTIO_NET=m
@@ -480,6 +480,7 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_AMD is not set
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_VENDOR_ASIX is not set
# CONFIG_NET_VENDOR_ATHEROS is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_BROCADE is not set
@@ -762,12 +763,14 @@ CONFIG_PRIME_NUMBERS=m
CONFIG_CRC4=m
CONFIG_CRC7=m
CONFIG_CRC8=m
CONFIG_XZ_DEC_MICROLZMA=y
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=0
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_INFO_DWARF4=y
CONFIG_DEBUG_INFO_BTF=y
CONFIG_GDB_SCRIPTS=y
CONFIG_DEBUG_SECTION_MISMATCH=y
CONFIG_MAGIC_SYSRQ=y
@@ -792,9 +795,11 @@ CONFIG_HIST_TRIGGERS=y
CONFIG_SAMPLES=y
CONFIG_SAMPLE_TRACE_PRINTK=m
CONFIG_SAMPLE_FTRACE_DIRECT=m
CONFIG_SAMPLE_FTRACE_DIRECT_MULTI=m
CONFIG_KUNIT=m
CONFIG_KUNIT_DEBUGFS=y
CONFIG_LKDTM=m
CONFIG_KPROBES_SANITY_TEST=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
CONFIG_TEST_BPF=m
@@ -65,9 +65,11 @@ CONFIG_ZFCP=y
# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_LSM="yama,loadpin,safesetid,integrity"
# CONFIG_ZLIB_DFLTCC is not set
CONFIG_XZ_DEC_MICROLZMA=y
CONFIG_PRINTK_TIME=y
# CONFIG_SYMBOLIC_ERRNAME is not set
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_INFO_BTF=y
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
CONFIG_PANIC_ON_OOPS=y
@@ -14,12 +14,13 @@

/* I/O Map */
#define ZPCI_IOMAP_SHIFT 48
#define ZPCI_IOMAP_ADDR_BASE 0x8000000000000000UL
#define ZPCI_IOMAP_ADDR_SHIFT 62
#define ZPCI_IOMAP_ADDR_BASE (1UL << ZPCI_IOMAP_ADDR_SHIFT)
#define ZPCI_IOMAP_ADDR_OFF_MASK ((1UL << ZPCI_IOMAP_SHIFT) - 1)
#define ZPCI_IOMAP_MAX_ENTRIES \
((ULONG_MAX - ZPCI_IOMAP_ADDR_BASE + 1) / (1UL << ZPCI_IOMAP_SHIFT))
(1UL << (ZPCI_IOMAP_ADDR_SHIFT - ZPCI_IOMAP_SHIFT))
#define ZPCI_IOMAP_ADDR_IDX_MASK \
(~ZPCI_IOMAP_ADDR_OFF_MASK - ZPCI_IOMAP_ADDR_BASE)
((ZPCI_IOMAP_ADDR_BASE - 1) & ~ZPCI_IOMAP_ADDR_OFF_MASK)

struct zpci_iomap_entry {
u32 fh;
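The new constants carve a pseudo-MMIO address into three fields: bit 62 marks the region, bits 61-48 select the iomap entry, and bits 47-0 carry the offset. A quick self-contained check of that arithmetic (plain userspace C mirroring the macros above, assuming a 64-bit unsigned long):

	#include <stdio.h>

	#define ZPCI_IOMAP_SHIFT         48
	#define ZPCI_IOMAP_ADDR_SHIFT    62
	#define ZPCI_IOMAP_ADDR_BASE     (1UL << ZPCI_IOMAP_ADDR_SHIFT)
	#define ZPCI_IOMAP_ADDR_OFF_MASK ((1UL << ZPCI_IOMAP_SHIFT) - 1)
	#define ZPCI_IOMAP_MAX_ENTRIES   (1UL << (ZPCI_IOMAP_ADDR_SHIFT - ZPCI_IOMAP_SHIFT))
	#define ZPCI_IOMAP_ADDR_IDX_MASK ((ZPCI_IOMAP_ADDR_BASE - 1) & ~ZPCI_IOMAP_ADDR_OFF_MASK)

	int main(void)
	{
		/* Compose entry 5 at offset 0x1234, then decompose it again. */
		unsigned long addr = ZPCI_IOMAP_ADDR_BASE
				   | (5UL << ZPCI_IOMAP_SHIFT) | 0x1234;

		printf("entries: %lu\n", ZPCI_IOMAP_MAX_ENTRIES);              /* 16384 */
		printf("idx:     %lu\n",
		       (addr & ZPCI_IOMAP_ADDR_IDX_MASK) >> ZPCI_IOMAP_SHIFT); /* 5 */
		printf("off:     %#lx\n", addr & ZPCI_IOMAP_ADDR_OFF_MASK);    /* 0x1234 */
		return 0;
	}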
@@ -173,10 +173,11 @@ static noinline int unwindme_func4(struct unwindme *u)
}

/*
* trigger specification exception
* Trigger operation exception; use insn notation to bypass
* llvm's integrated assembler sanity checks.
*/
asm volatile(
" mvcl %%r1,%%r1\n"
" .insn e,0x0000\n" /* illegal opcode */
"0: nopr %%r7\n"
EX_TABLE(0b, 0b)
:);
@@ -43,7 +43,6 @@ extern void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
void flush_dcache_page(struct page *page);
void flush_dcache_folio(struct folio *folio);
extern void flush_icache_range(unsigned long start, unsigned long end);
#define flush_icache_user_range flush_icache_range
extern void flush_icache_page(struct vm_area_struct *vma,
@@ -451,3 +451,4 @@
446 common landlock_restrict_self sys_landlock_restrict_self
# 447 reserved for memfd_secret
448 common process_mrelease sys_process_mrelease
449 common futex_waitv sys_futex_waitv
@@ -494,3 +494,4 @@
446 common landlock_restrict_self sys_landlock_restrict_self
# 447 reserved for memfd_secret
448 common process_mrelease sys_process_mrelease
449 common futex_waitv sys_futex_waitv
@@ -574,6 +574,10 @@ SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
ud2
1:
#endif
#ifdef CONFIG_XEN_PV
ALTERNATIVE "", "jmp xenpv_restore_regs_and_return_to_usermode", X86_FEATURE_XENPV
#endif

POP_REGS pop_rdi=0

/*
@@ -890,6 +894,7 @@ SYM_CODE_START_LOCAL(paranoid_entry)
.Lparanoid_entry_checkgs:
/* EBX = 1 -> kernel GSBASE active, no restore required */
movl $1, %ebx

/*
* The kernel-enforced convention is a negative GSBASE indicates
* a kernel value. No SWAPGS needed on entry and exit.
@@ -897,21 +902,14 @@ SYM_CODE_START_LOCAL(paranoid_entry)
movl $MSR_GS_BASE, %ecx
rdmsr
testl %edx, %edx
jns .Lparanoid_entry_swapgs
ret

.Lparanoid_entry_swapgs:
swapgs

/*
* The above SAVE_AND_SWITCH_TO_KERNEL_CR3 macro doesn't do an
* unconditional CR3 write, even in the PTI case. So do an lfence
* to prevent GS speculation, regardless of whether PTI is enabled.
*/
FENCE_SWAPGS_KERNEL_ENTRY
js .Lparanoid_kernel_gsbase

/* EBX = 0 -> SWAPGS required on exit */
xorl %ebx, %ebx
swapgs
.Lparanoid_kernel_gsbase:

FENCE_SWAPGS_KERNEL_ENTRY
ret
SYM_CODE_END(paranoid_entry)
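The convention referenced in the comment is that a kernel GSBASE is a kernel-half canonical address, so bit 63 is set and the high dword read back by rdmsr (in %edx) is negative; that is all "testl %edx, %edx / js" checks. A hedged userspace model of the test, with made-up sample addresses:

	#include <stdint.h>
	#include <stdio.h>

	static int is_kernel_gsbase(uint64_t gsbase)
	{
		/* testl %edx, %edx; js ...: sign bit of the MSR's high dword */
		return (int32_t)(gsbase >> 32) < 0;
	}

	int main(void)
	{
		printf("%d\n", is_kernel_gsbase(0xffff888000000000ULL)); /* 1: kernel */
		printf("%d\n", is_kernel_gsbase(0x00007f1234560000ULL)); /* 0: user */
		return 0;
	}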
@@ -993,11 +991,6 @@ SYM_CODE_START_LOCAL(error_entry)
pushq %r12
ret

.Lerror_entry_done_lfence:
FENCE_SWAPGS_KERNEL_ENTRY
.Lerror_entry_done:
ret

/*
* There are two places in the kernel that can potentially fault with
* usergs. Handle them here. B stepping K8s sometimes report a
@@ -1020,8 +1013,14 @@ SYM_CODE_START_LOCAL(error_entry)
* .Lgs_change's error handler with kernel gsbase.
*/
SWAPGS
FENCE_SWAPGS_USER_ENTRY
jmp .Lerror_entry_done

/*
* Issue an LFENCE to prevent GS speculation, regardless of whether it is a
* kernel or user gsbase.
*/
.Lerror_entry_done_lfence:
FENCE_SWAPGS_KERNEL_ENTRY
ret

.Lbstep_iret:
/* Fix truncated RIP */
@@ -102,12 +102,6 @@ extern void switch_fpu_return(void);
*/
extern int cpu_has_xfeatures(u64 xfeatures_mask, const char **feature_name);

/*
* Tasks that are not using SVA have mm->pasid set to zero to note that they
* will not have the valid bit set in MSR_IA32_PASID while they are running.
*/
#define PASID_DISABLED 0

/* Trap handling */
extern int fpu__exception_code(struct fpu *fpu, int trap_nr);
extern void fpu_sync_fpstate(struct fpu *fpu);
@ -108,7 +108,7 @@
|
||||
#define INTEL_FAM6_ALDERLAKE 0x97 /* Golden Cove / Gracemont */
|
||||
#define INTEL_FAM6_ALDERLAKE_L 0x9A /* Golden Cove / Gracemont */
|
||||
|
||||
#define INTEL_FAM6_RAPTOR_LAKE 0xB7
|
||||
#define INTEL_FAM6_RAPTORLAKE 0xB7
|
||||
|
||||
/* "Small Core" Processors (Atom) */
|
||||
|
||||
|
@ -281,13 +281,13 @@ HYPERVISOR_callback_op(int cmd, void *arg)
|
||||
return _hypercall2(int, callback_op, cmd, arg);
|
||||
}
|
||||
|
||||
static inline int
|
||||
static __always_inline int
|
||||
HYPERVISOR_set_debugreg(int reg, unsigned long value)
|
||||
{
|
||||
return _hypercall2(int, set_debugreg, reg, value);
|
||||
}
|
||||
|
||||
static inline unsigned long
|
||||
static __always_inline unsigned long
|
||||
HYPERVISOR_get_debugreg(int reg)
|
||||
{
|
||||
return _hypercall1(unsigned long, get_debugreg, reg);
|
||||
|
@ -64,6 +64,7 @@ void xen_arch_unregister_cpu(int num);
|
||||
|
||||
#ifdef CONFIG_PVH
|
||||
void __init xen_pvh_init(struct boot_params *boot_params);
|
||||
void __init mem_map_via_hcall(struct boot_params *boot_params_p);
|
||||
#endif
|
||||
|
||||
#endif /* _ASM_X86_XEN_HYPERVISOR_H */
|
||||
|
@ -118,7 +118,7 @@ static inline bool save_xstate_epilog(void __user *buf, int ia32_frame,
|
||||
struct fpstate *fpstate)
|
||||
{
|
||||
struct xregs_state __user *x = buf;
|
||||
struct _fpx_sw_bytes sw_bytes;
|
||||
struct _fpx_sw_bytes sw_bytes = {};
|
||||
u32 xfeatures;
|
||||
int err;
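The only change in this hunk is the empty initializer on sw_bytes. The likely motivation (an inference from the shape of the change, not stated in the diff): the structure is later copied out to the user-space signal frame, and "= {}" zero-fills members and padding alike, so no stale kernel stack bytes can leak. The copy below is a stand-in sketch, not code from this diff:

    struct _fpx_sw_bytes sw_bytes = {};        /* members and padding zeroed */

    /* sketch: a later copy like this would otherwise expose whatever
     * happened to be on the stack in the unwritten bytes */
    if (copy_to_user(user_ptr, &sw_bytes, sizeof(sw_bytes)))
            return false;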
@ -742,7 +742,7 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
return 0;
}

static char *prepare_command_line(void)
static char * __init prepare_command_line(void)
{
#ifdef CONFIG_CMDLINE_BOOL
#ifdef CONFIG_CMDLINE_OVERRIDE

@ -294,11 +294,6 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
char *dst, char *buf, size_t size)
{
unsigned long error_code = X86_PF_PROT | X86_PF_WRITE;
char __user *target = (char __user *)dst;
u64 d8;
u32 d4;
u16 d2;
u8 d1;

/*
* This function uses __put_user() independent of whether kernel or user

@ -320,26 +315,42 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
* instructions here would cause infinite nesting.
*/
switch (size) {
case 1:
case 1: {
u8 d1;
u8 __user *target = (u8 __user *)dst;

memcpy(&d1, buf, 1);
if (__put_user(d1, target))
goto fault;
break;
case 2:
}
case 2: {
u16 d2;
u16 __user *target = (u16 __user *)dst;

memcpy(&d2, buf, 2);
if (__put_user(d2, target))
goto fault;
break;
case 4:
}
case 4: {
u32 d4;
u32 __user *target = (u32 __user *)dst;

memcpy(&d4, buf, 4);
if (__put_user(d4, target))
goto fault;
break;
case 8:
}
case 8: {
u64 d8;
u64 __user *target = (u64 __user *)dst;

memcpy(&d8, buf, 8);
if (__put_user(d8, target))
goto fault;
break;
}
default:
WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
return ES_UNSUPPORTED;

@ -362,11 +373,6 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
char *src, char *buf, size_t size)
{
unsigned long error_code = X86_PF_PROT;
char __user *s = (char __user *)src;
u64 d8;
u32 d4;
u16 d2;
u8 d1;

/*
* This function uses __get_user() independent of whether kernel or user

@ -388,26 +394,41 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
* instructions here would cause infinite nesting.
*/
switch (size) {
case 1:
case 1: {
u8 d1;
u8 __user *s = (u8 __user *)src;

if (__get_user(d1, s))
goto fault;
memcpy(buf, &d1, 1);
break;
case 2:
}
case 2: {
u16 d2;
u16 __user *s = (u16 __user *)src;

if (__get_user(d2, s))
goto fault;
memcpy(buf, &d2, 2);
break;
case 4:
}
case 4: {
u32 d4;
u32 __user *s = (u32 __user *)src;

if (__get_user(d4, s))
goto fault;
memcpy(buf, &d4, 4);
break;
case 8:
}
case 8: {
u64 d8;
u64 __user *s = (u64 __user *)src;
if (__get_user(d8, s))
goto fault;
memcpy(buf, &d8, 8);
break;
}
default:
WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
return ES_UNSUPPORTED;
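Both switches above move the temporaries and the __user pointer into each case block with a correctly sized type. The point of the retyping (my reading of the change): __put_user() and __get_user() derive the memory access width from the pointee type of the pointer they are given, so the old char __user * made every access a single byte regardless of size. A standalone illustration with made-up variable names:

    char __user *p = uptr;
    u64 v = 0x1122334455667788ULL;

    __put_user(v, p);                    /* 1-byte store: sizeof(*p) == 1 */
    __put_user(v, (u64 __user *)uptr);   /* 8-byte store, as intended */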
@ -1180,6 +1180,12 @@ void mark_tsc_unstable(char *reason)

EXPORT_SYMBOL_GPL(mark_tsc_unstable);

static void __init tsc_disable_clocksource_watchdog(void)
{
clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
}

static void __init check_system_tsc_reliable(void)
{
#if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)

@ -1196,6 +1202,23 @@ static void __init check_system_tsc_reliable(void)
#endif
if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
tsc_clocksource_reliable = 1;

/*
* Disable the clocksource watchdog when the system has:
* - TSC running at constant frequency
* - TSC which does not stop in C-States
* - the TSC_ADJUST register which allows to detect even minimal
* modifications
* - not more than two sockets. As the number of sockets cannot be
* evaluated at the early boot stage where this has to be
* invoked, check the number of online memory nodes as a
* fallback solution which is a reasonable estimate.
*/
if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
boot_cpu_has(X86_FEATURE_NONSTOP_TSC) &&
boot_cpu_has(X86_FEATURE_TSC_ADJUST) &&
nr_online_nodes <= 2)
tsc_disable_clocksource_watchdog();
}

/*

@ -1387,9 +1410,6 @@ static int __init init_tsc_clocksource(void)
if (tsc_unstable)
goto unreg;

if (tsc_clocksource_reliable || no_tsc_watchdog)
clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;

if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;

@ -1527,7 +1547,7 @@ void __init tsc_init(void)
}

if (tsc_clocksource_reliable || no_tsc_watchdog)
clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
tsc_disable_clocksource_watchdog();

clocksource_register_khz(&clocksource_tsc_early, tsc_khz);
detect_art();

@ -30,6 +30,7 @@ struct tsc_adjust {
};

static DEFINE_PER_CPU(struct tsc_adjust, tsc_adjust);
static struct timer_list tsc_sync_check_timer;

/*
* TSC's on different sockets may be reset asynchronously.

@ -77,6 +78,46 @@ void tsc_verify_tsc_adjust(bool resume)
}
}

/*
* Normally tsc_sync is checked every time the system enters idle, but
* there is still a caveat: a system may never enter idle, either
* because it is too busy or because it is purposely configured not to
* enter idle.
*
* So setup a periodic timer (every 10 minutes) to make sure the check
* is always on.
*/

#define SYNC_CHECK_INTERVAL (HZ * 600)

static void tsc_sync_check_timer_fn(struct timer_list *unused)
{
int next_cpu;

tsc_verify_tsc_adjust(false);

/* Run the check for all onlined CPUs in turn */
next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
if (next_cpu >= nr_cpu_ids)
next_cpu = cpumask_first(cpu_online_mask);

tsc_sync_check_timer.expires += SYNC_CHECK_INTERVAL;
add_timer_on(&tsc_sync_check_timer, next_cpu);
}

static int __init start_sync_check_timer(void)
{
if (!cpu_feature_enabled(X86_FEATURE_TSC_ADJUST) || tsc_clocksource_reliable)
return 0;

timer_setup(&tsc_sync_check_timer, tsc_sync_check_timer_fn, 0);
tsc_sync_check_timer.expires = jiffies + SYNC_CHECK_INTERVAL;
add_timer(&tsc_sync_check_timer);

return 0;
}
late_initcall(start_sync_check_timer);

static void tsc_sanitize_first_cpu(struct tsc_adjust *cur, s64 bootval,
unsigned int cpu, bool bootcpu)
{

@ -2652,15 +2652,6 @@ int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
if (!loaded_vmcs->msr_bitmap)
goto out_vmcs;
memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE);

if (IS_ENABLED(CONFIG_HYPERV) &&
static_branch_unlikely(&enable_evmcs) &&
(ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) {
struct hv_enlightened_vmcs *evmcs =
(struct hv_enlightened_vmcs *)loaded_vmcs->vmcs;

evmcs->hv_enlightenments_control.msr_bitmap = 1;
}
}

memset(&loaded_vmcs->host_state, 0, sizeof(struct vmcs_host_state));

@ -6851,6 +6842,19 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
if (err < 0)
goto free_pml;

/*
* Use Hyper-V 'Enlightened MSR Bitmap' feature when KVM runs as a
* nested (L1) hypervisor and Hyper-V in L0 supports it. Enable the
* feature only for vmcs01, KVM currently isn't equipped to realize any
* performance benefits from enabling it for vmcs02.
*/
if (IS_ENABLED(CONFIG_HYPERV) && static_branch_unlikely(&enable_evmcs) &&
(ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) {
struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs;

evmcs->hv_enlightenments_control.msr_bitmap = 1;
}

/* The MSR bitmap starts with all ones */
bitmap_fill(vmx->shadow_msr_intercept.read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
bitmap_fill(vmx->shadow_msr_intercept.write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
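The two hunks above move the enlightened-MSR-bitmap enablement out of alloc_loaded_vmcs() and into vmx_create_vcpu(), gated on the same three conditions and now applied to vmcs01 only. Factored into a single predicate purely for readability (a sketch; no such helper exists in this diff):

    static bool evmcs_msr_bitmap_supported(void)
    {
            return IS_ENABLED(CONFIG_HYPERV) &&
                   static_branch_unlikely(&enable_evmcs) &&
                   (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP);
    }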
@ -72,6 +72,7 @@ static void __init setup_real_mode(void)
#ifdef CONFIG_X86_64
u64 *trampoline_pgd;
u64 efer;
int i;
#endif

base = (unsigned char *)real_mode_header;

@ -128,8 +129,17 @@ static void __init setup_real_mode(void)
trampoline_header->flags = 0;

trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);

/* Map the real mode stub as virtual == physical */
trampoline_pgd[0] = trampoline_pgd_entry.pgd;
trampoline_pgd[511] = init_top_pgt[511].pgd;

/*
* Include the entirety of the kernel mapping into the trampoline
* PGD. This way, all mappings present in the normal kernel page
* tables are usable while running on trampoline_pgd.
*/
for (i = pgd_index(__PAGE_OFFSET); i < PTRS_PER_PGD; i++)
trampoline_pgd[i] = init_top_pgt[i].pgd;
#endif

sme_sev_setup_real_mode(trampoline_header);

@ -20,6 +20,7 @@

#include <linux/init.h>
#include <linux/linkage.h>
#include <../entry/calling.h>

.pushsection .noinstr.text, "ax"
/*

@ -192,6 +193,25 @@ SYM_CODE_START(xen_iret)
jmp hypercall_iret
SYM_CODE_END(xen_iret)

/*
* Xen PV doesn't use a trampoline stack; PER_CPU_VAR(cpu_tss_rw + TSS_sp0) is
* also the kernel stack. Reusing swapgs_restore_regs_and_return_to_usermode()
* in Xen PV would move %rsp up to the top of the kernel stack and leave the
* IRET frame below %rsp, where it could be corrupted if an #NMI interrupts.
* And having swapgs_restore_regs_and_return_to_usermode() push the IRET frame
* at the same address again is pointless.
*/
SYM_CODE_START(xenpv_restore_regs_and_return_to_usermode)
UNWIND_HINT_REGS
POP_REGS

/* stackleak_erase() can work safely on the kernel stack. */
STACKLEAK_ERASE_NOCLOBBER

addq $8, %rsp /* skip regs->orig_ax */
jmp xen_iret
SYM_CODE_END(xenpv_restore_regs_and_return_to_usermode)

/*
* Xen handles syscall callbacks much like ordinary exceptions, which
* means we have:

@ -121,7 +121,6 @@ void flush_cache_page(struct vm_area_struct*,

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
void flush_dcache_page(struct page *);
void flush_dcache_folio(struct folio *);

void local_flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);

@ -138,9 +137,7 @@ void local_flush_cache_page(struct vm_area_struct *vma,
#define flush_cache_vunmap(start,end) do { } while (0)

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
#define flush_dcache_page(page) do { } while (0)
static inline void flush_dcache_folio(struct folio *folio) { }

#define flush_icache_range local_flush_icache_range
#define flush_cache_page(vma, addr, pfn) do { } while (0)

@ -419,3 +419,4 @@
446 common landlock_restrict_self sys_landlock_restrict_self
# 447 reserved for memfd_secret
448 common process_mrelease sys_process_mrelease
449 common futex_waitv sys_futex_waitv

block/bdev.c

@ -753,8 +753,7 @@ struct block_device *blkdev_get_no_open(dev_t dev)

if (!bdev)
return NULL;
if ((bdev->bd_disk->flags & GENHD_FL_HIDDEN) ||
!try_module_get(bdev->bd_disk->fops->owner)) {
if ((bdev->bd_disk->flags & GENHD_FL_HIDDEN)) {
put_device(&bdev->bd_device);
return NULL;
}

@ -764,7 +763,6 @@ struct block_device *blkdev_get_no_open(dev_t dev)

void blkdev_put_no_open(struct block_device *bdev)
{
module_put(bdev->bd_disk->fops->owner);
put_device(&bdev->bd_device);
}

@ -820,12 +818,14 @@ struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
ret = -ENXIO;
if (!disk_live(disk))
goto abort_claiming;
if (!try_module_get(disk->fops->owner))
goto abort_claiming;
if (bdev_is_partition(bdev))
ret = blkdev_get_part(bdev, mode);
else
ret = blkdev_get_whole(bdev, mode);
if (ret)
goto abort_claiming;
goto put_module;
if (mode & FMODE_EXCL) {
bd_finish_claiming(bdev, holder);

@ -847,7 +847,8 @@ struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
if (unblock_events)
disk_unblock_events(disk);
return bdev;

put_module:
module_put(disk->fops->owner);
abort_claiming:
if (mode & FMODE_EXCL)
bd_abort_claiming(bdev, holder);

@ -956,6 +957,7 @@ void blkdev_put(struct block_device *bdev, fmode_t mode)
blkdev_put_whole(bdev, mode);
mutex_unlock(&disk->open_mutex);

module_put(disk->fops->owner);
blkdev_put_no_open(bdev);
}
EXPORT_SYMBOL(blkdev_put);

@ -1017,6 +1017,7 @@ EXPORT_SYMBOL(submit_bio);
/**
* bio_poll - poll for BIO completions
* @bio: bio to poll for
* @iob: batches of IO
* @flags: BLK_POLL_* flags that control the behavior
*
* Poll for completions on queue associated with the bio. Returns number of

@ -860,13 +860,14 @@ void blk_mq_end_request_batch(struct io_comp_batch *iob)
if (iob->need_ts)
__blk_mq_end_request_acct(rq, now);

rq_qos_done(rq->q, rq);

WRITE_ONCE(rq->state, MQ_RQ_IDLE);
if (!refcount_dec_and_test(&rq->ref))
continue;

blk_crypto_free_request(rq);
blk_pm_mark_last_busy(rq);
rq_qos_done(rq->q, rq);

if (nr_tags == TAG_COMP_BATCH || cur_hctx != rq->mq_hctx) {
if (cur_hctx)

@ -998,7 +998,14 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
static int cppc_get_perf(int cpunum, enum cppc_regs reg_idx, u64 *perf)
{
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
struct cpc_register_resource *reg = &cpc_desc->cpc_regs[reg_idx];
struct cpc_register_resource *reg;

if (!cpc_desc) {
pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
return -ENODEV;
}

reg = &cpc_desc->cpc_regs[reg_idx];

if (CPC_IN_PCC(reg)) {
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
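The cppc_get_perf() change is the classic dereference-before-NULL-check fix: the old code computed &cpc_desc->cpc_regs[reg_idx] in the initializer, before the !cpc_desc test. Taking a member's address through a NULL pointer is undefined behavior, and a compiler may even delete the later NULL check because of it, so the dereference has to move after the test. In pattern form:

    struct cpc_register_resource *reg;

    if (!cpc_desc)                          /* check first ... */
            return -ENODEV;
    reg = &cpc_desc->cpc_regs[reg_idx];     /* ... then dereference */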
@ -1084,21 +1084,17 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
* Returns parent node of an ACPI device or data firmware node or %NULL if
* not available.
*/
struct fwnode_handle *acpi_node_get_parent(const struct fwnode_handle *fwnode)
static struct fwnode_handle *
acpi_node_get_parent(const struct fwnode_handle *fwnode)
{
if (is_acpi_data_node(fwnode)) {
/* All data nodes have parent pointer so just return that */
return to_acpi_data_node(fwnode)->parent;
} else if (is_acpi_device_node(fwnode)) {
acpi_handle handle, parent_handle;
struct device *dev = to_acpi_device_node(fwnode)->dev.parent;

handle = to_acpi_device_node(fwnode)->handle;
if (ACPI_SUCCESS(acpi_get_parent(handle, &parent_handle))) {
struct acpi_device *adev;

if (!acpi_bus_get_device(parent_handle, &adev))
return acpi_fwnode_handle(adev);
}
if (dev)
return acpi_fwnode_handle(to_acpi_device(dev));
}

return NULL;

@ -2710,7 +2710,7 @@ static void binder_transaction(struct binder_proc *proc,
t->from = thread;
else
t->from = NULL;
t->sender_euid = proc->cred->euid;
t->sender_euid = task_euid(proc->tsk);
t->to_proc = target_proc;
t->to_thread = target_thread;
t->code = tr->code;

@ -827,7 +827,7 @@ static ssize_t ata_scsi_lpm_show(struct device *dev,
if (ap->target_lpm_policy >= ARRAY_SIZE(ata_lpm_policy_names))
return -EINVAL;

return snprintf(buf, PAGE_SIZE, "%s\n",
return sysfs_emit(buf, "%s\n",
ata_lpm_policy_names[ap->target_lpm_policy]);
}
DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
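sysfs_emit() is the preferred replacement for raw snprintf() in sysfs show() callbacks: it knows sysfs buffers are exactly PAGE_SIZE and page-aligned, and it sanity-checks both, so the size argument cannot silently be wrong. A generic show() routine using it (a sketch with a made-up attribute, not code from this diff):

    static ssize_t foo_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
    {
            /* sysfs_emit() asserts buf is a page-aligned PAGE_SIZE buffer */
            return sysfs_emit(buf, "%d\n", 42);
    }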
@ -55,14 +55,14 @@ static unsigned int pata_falcon_data_xfer(struct ata_queued_cmd *qc,
/* Transfer multiple of 2 bytes */
if (rw == READ) {
if (swap)
raw_insw_swapw((u16 *)data_addr, (u16 *)buf, words);
raw_insw_swapw(data_addr, (u16 *)buf, words);
else
raw_insw((u16 *)data_addr, (u16 *)buf, words);
raw_insw(data_addr, (u16 *)buf, words);
} else {
if (swap)
raw_outsw_swapw((u16 *)data_addr, (u16 *)buf, words);
raw_outsw_swapw(data_addr, (u16 *)buf, words);
else
raw_outsw((u16 *)data_addr, (u16 *)buf, words);
raw_outsw(data_addr, (u16 *)buf, words);
}

/* Transfer trailing byte, if any. */

@ -74,16 +74,16 @@ static unsigned int pata_falcon_data_xfer(struct ata_queued_cmd *qc,

if (rw == READ) {
if (swap)
raw_insw_swapw((u16 *)data_addr, (u16 *)pad, 1);
raw_insw_swapw(data_addr, (u16 *)pad, 1);
else
raw_insw((u16 *)data_addr, (u16 *)pad, 1);
raw_insw(data_addr, (u16 *)pad, 1);
*buf = pad[0];
} else {
pad[0] = *buf;
if (swap)
raw_outsw_swapw((u16 *)data_addr, (u16 *)pad, 1);
raw_outsw_swapw(data_addr, (u16 *)pad, 1);
else
raw_outsw((u16 *)data_addr, (u16 *)pad, 1);
raw_outsw(data_addr, (u16 *)pad, 1);
}
words++;
}

@ -1394,6 +1394,14 @@ static int sata_fsl_init_controller(struct ata_host *host)
return 0;
}

static void sata_fsl_host_stop(struct ata_host *host)
{
struct sata_fsl_host_priv *host_priv = host->private_data;

iounmap(host_priv->hcr_base);
kfree(host_priv);
}

/*
* scsi mid-layer and libata interface structures
*/

@ -1426,6 +1434,8 @@ static struct ata_port_operations sata_fsl_ops = {
.port_start = sata_fsl_port_start,
.port_stop = sata_fsl_port_stop,

.host_stop = sata_fsl_host_stop,

.pmp_attach = sata_fsl_pmp_attach,
.pmp_detach = sata_fsl_pmp_detach,
};

@ -1480,9 +1490,9 @@ static int sata_fsl_probe(struct platform_device *ofdev)
host_priv->ssr_base = ssr_base;
host_priv->csr_base = csr_base;

irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
if (!irq) {
dev_err(&ofdev->dev, "invalid irq from platform\n");
irq = platform_get_irq(ofdev, 0);
if (irq < 0) {
retval = irq;
goto error_exit_with_cleanup;
}
host_priv->irq = irq;

@ -1557,10 +1567,6 @@ static int sata_fsl_remove(struct platform_device *ofdev)

ata_host_detach(host);

irq_dispose_mapping(host_priv->irq);
iounmap(host_priv->hcr_base);
kfree(host_priv);

return 0;
}

@ -2103,7 +2103,7 @@ static int loop_control_remove(int idx)
int ret;

if (idx < 0) {
pr_warn("deleting an unspecified loop device is not supported.\n");
pr_warn_once("deleting an unspecified loop device is not supported.\n");
return -EINVAL;
}

@ -316,7 +316,7 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
struct request *req = bd->rq;
struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
unsigned long flags;
unsigned int num;
int num;
int qid = hctx->queue_num;
bool notify = false;
blk_status_t status;

@ -1049,7 +1049,6 @@ static struct virtio_driver virtio_blk = {
.feature_table_size = ARRAY_SIZE(features),
.feature_table_legacy = features_legacy,
.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
.suppress_used_validation = true,
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,

@ -1853,12 +1853,14 @@ static const struct block_device_operations zram_devops = {
.owner = THIS_MODULE
};

#ifdef CONFIG_ZRAM_WRITEBACK
static const struct block_device_operations zram_wb_devops = {
.open = zram_open,
.submit_bio = zram_submit_bio,
.swap_slot_free_notify = zram_slot_free_notify,
.owner = THIS_MODULE
};
#endif

static DEVICE_ATTR_WO(compact);
static DEVICE_ATTR_RW(disksize);

@ -281,7 +281,7 @@ agp_ioc_init(void __iomem *ioc_regs)
return 0;
}

static int
static int __init
lba_find_capability(int cap)
{
struct _parisc_agp_info *info = &parisc_agp_info;

@ -366,7 +366,7 @@ fail:
return error;
}

static int
static int __init
find_quicksilver(struct device *dev, void *data)
{
struct parisc_device **lba = data;

@ -378,7 +378,7 @@ find_quicksilver(struct device *dev, void *data)
return 0;
}

static int
static int __init
parisc_agp_init(void)
{
extern struct sba_device *sba_list;

@ -191,6 +191,8 @@ struct ipmi_user {
struct work_struct remove_work;
};

static struct workqueue_struct *remove_work_wq;

static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
__acquires(user->release_barrier)
{

@ -1297,7 +1299,7 @@ static void free_user(struct kref *ref)
struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);

/* SRCU cleanup must happen in task context. */
schedule_work(&user->remove_work);
queue_work(remove_work_wq, &user->remove_work);
}

static void _ipmi_destroy_user(struct ipmi_user *user)

@ -3918,9 +3920,11 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
/* We didn't find a user, deliver an error response. */
ipmi_inc_stat(intf, unhandled_commands);

msg->data[0] = ((netfn + 1) << 2) | (msg->rsp[4] & 0x3);
msg->data[1] = msg->rsp[2];
msg->data[2] = msg->rsp[4] & ~0x3;
msg->data[0] = (netfn + 1) << 2;
msg->data[0] |= msg->rsp[2] & 0x3; /* rqLUN */
msg->data[1] = msg->rsp[1]; /* Addr */
msg->data[2] = msg->rsp[2] & ~0x3; /* rqSeq */
msg->data[2] |= msg->rsp[0] & 0x3; /* rsLUN */
msg->data[3] = cmd;
msg->data[4] = IPMI_INVALID_CMD_COMPLETION_CODE;
msg->data_size = 5;

@ -4455,13 +4459,24 @@ return_unspecified:
msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
msg->rsp_size = 3;
} else if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
/* commands must have at least 3 bytes, responses 4. */
if (is_cmd && (msg->rsp_size < 3)) {
/* commands must have at least 4 bytes, responses 5. */
if (is_cmd && (msg->rsp_size < 4)) {
ipmi_inc_stat(intf, invalid_commands);
goto out;
}
if (!is_cmd && (msg->rsp_size < 4))
goto return_unspecified;
if (!is_cmd && (msg->rsp_size < 5)) {
ipmi_inc_stat(intf, invalid_ipmb_responses);
/* Construct a valid error response. */
msg->rsp[0] = msg->data[0] & 0xfc; /* NetFN */
msg->rsp[0] |= (1 << 2); /* Make it a response */
msg->rsp[0] |= msg->data[2] & 3; /* rqLUN */
msg->rsp[1] = msg->data[1]; /* Addr */
msg->rsp[2] = msg->data[2] & 0xfc; /* rqSeq */
msg->rsp[2] |= msg->data[0] & 0x3; /* rsLUN */
msg->rsp[3] = msg->data[3]; /* Cmd */
msg->rsp[4] = IPMI_ERR_UNSPECIFIED;
msg->rsp_size = 5;
}
} else if ((msg->data_size >= 2)
&& (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
&& (msg->data[1] == IPMI_SEND_MSG_CMD)

@ -5031,6 +5046,7 @@ struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
if (rv) {
rv->done = free_smi_msg;
rv->user_data = NULL;
rv->type = IPMI_SMI_MSG_TYPE_NORMAL;
atomic_inc(&smi_msg_inuse_count);
}
return rv;

@ -5383,6 +5399,13 @@ static int ipmi_init_msghandler(void)

atomic_notifier_chain_register(&panic_notifier_list, &panic_block);

remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
if (!remove_work_wq) {
pr_err("unable to create ipmi-msghandler-remove-wq workqueue");
rv = -ENOMEM;
goto out;
}

initialized = true;

out:

@ -5408,6 +5431,8 @@ static void __exit cleanup_ipmi(void)
int count;

if (initialized) {
destroy_workqueue(remove_work_wq);

atomic_notifier_chain_unregister(&panic_notifier_list,
&panic_block);
@ -1004,10 +1004,9 @@ static struct kobj_type ktype_cpufreq = {
.release = cpufreq_sysfs_release,
};

static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu,
struct device *dev)
{
struct device *dev = get_cpu_device(cpu);

if (unlikely(!dev))
return;

@ -1296,8 +1295,9 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)

if (policy->max_freq_req) {
/*
* CPUFREQ_CREATE_POLICY notification is sent only after
* successfully adding max_freq_req request.
* Remove max_freq_req after sending CPUFREQ_REMOVE_POLICY
* notification, since CPUFREQ_CREATE_POLICY notification was
* sent after adding max_freq_req earlier.
*/
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_REMOVE_POLICY, policy);

@ -1391,7 +1391,7 @@ static int cpufreq_online(unsigned int cpu)
if (new_policy) {
for_each_cpu(j, policy->related_cpus) {
per_cpu(cpufreq_cpu_data, j) = policy;
add_cpu_dev_symlink(policy, j);
add_cpu_dev_symlink(policy, j, get_cpu_device(j));
}

policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),

@ -1565,7 +1565,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
/* Create sysfs link on CPU registration */
policy = per_cpu(cpufreq_cpu_data, cpu);
if (policy)
add_cpu_dev_symlink(policy, cpu);
add_cpu_dev_symlink(policy, cpu, dev);

return 0;
}

@ -338,6 +338,8 @@ static void intel_pstste_sched_itmt_work_fn(struct work_struct *work)

static DECLARE_WORK(sched_itmt_work, intel_pstste_sched_itmt_work_fn);

#define CPPC_MAX_PERF U8_MAX

static void intel_pstate_set_itmt_prio(int cpu)
{
struct cppc_perf_caps cppc_perf;

@ -348,6 +350,14 @@ static void intel_pstate_set_itmt_prio(int cpu)
if (ret)
return;

/*
* On some systems with overclocking enabled, CPPC.highest_perf is hardcoded to 0xff.
* In this case we can't use CPPC.highest_perf to enable ITMT, so
* look at MSR_HWP_CAPABILITIES bits [8:0] to decide instead.
*/
if (cppc_perf.highest_perf == CPPC_MAX_PERF)
cppc_perf.highest_perf = HWP_HIGHEST_PERF(READ_ONCE(all_cpu_data[cpu]->hwp_cap_cached));

/*
* The priorities can be set regardless of whether or not
* sched_set_itmt_support(true) has been called and it is valid to

@ -1006,6 +1016,12 @@ static void intel_pstate_hwp_offline(struct cpudata *cpu)
*/
value &= ~GENMASK_ULL(31, 24);
value |= HWP_ENERGY_PERF_PREFERENCE(cpu->epp_cached);
/*
* However, make sure that EPP will be set to "performance" when
* the CPU is brought back online again and the "performance"
* scaling algorithm is still in effect.
*/
cpu->epp_policy = CPUFREQ_POLICY_UNKNOWN;
}

/*

@ -2353,6 +2369,7 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
X86_MATCH(BROADWELL_D, core_funcs),
X86_MATCH(BROADWELL_X, core_funcs),
X86_MATCH(SKYLAKE_X, core_funcs),
X86_MATCH(ICELAKE_X, core_funcs),
{}
};

@ -290,7 +290,7 @@ static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
int i;

table = &buffer->sg_table;
for_each_sg(table->sgl, sg, table->nents, i) {
for_each_sgtable_sg(table, sg, i) {
struct page *page = sg_page(sg);

__free_pages(page, compound_order(page));
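for_each_sgtable_sg() iterates table->orig_nents, the number of entries the table was built with, while the open-coded for_each_sg() it replaces used table->nents, which DMA mapping may shrink by coalescing entries. A CPU-side free path has to visit every original entry or it leaks pages. The two forms side by side (sketch):

    /* mapped view: nents can be smaller after dma_map_sgtable() */
    for_each_sg(table->sgl, sg, table->nents, i)
            ;

    /* CPU view: walks all orig_nents entries */
    for_each_sgtable_sg(table, sg, i)
            __free_pages(sg_page(sg), compound_order(sg_page(sg)));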
@ -34,6 +34,12 @@ struct scmi_msg_resp_base_attributes {
__le16 reserved;
};

struct scmi_msg_resp_base_discover_agent {
__le32 agent_id;
u8 name[SCMI_MAX_STR_SIZE];
};

struct scmi_msg_base_error_notify {
__le32 event_control;
#define BASE_TP_NOTIFY_ALL BIT(0)

@ -225,18 +231,21 @@ static int scmi_base_discover_agent_get(const struct scmi_protocol_handle *ph,
int id, char *name)
{
int ret;
struct scmi_msg_resp_base_discover_agent *agent_info;
struct scmi_xfer *t;

ret = ph->xops->xfer_get_init(ph, BASE_DISCOVER_AGENT,
sizeof(__le32), SCMI_MAX_STR_SIZE, &t);
sizeof(__le32), sizeof(*agent_info), &t);
if (ret)
return ret;

put_unaligned_le32(id, t->tx.buf);

ret = ph->xops->do_xfer(ph, t);
if (!ret)
strlcpy(name, t->rx.buf, SCMI_MAX_STR_SIZE);
if (!ret) {
agent_info = t->rx.buf;
strlcpy(name, agent_info->name, SCMI_MAX_STR_SIZE);
}

ph->xops->xfer_put(ph, t);

@ -138,9 +138,7 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
scmi_pd_data->domains = domains;
scmi_pd_data->num_domains = num_domains;

of_genpd_add_provider_onecell(np, scmi_pd_data);

return 0;
return of_genpd_add_provider_onecell(np, scmi_pd_data);
}

static const struct scmi_device_id scmi_id_table[] = {

@ -637,7 +637,7 @@ static int scmi_sensor_config_get(const struct scmi_protocol_handle *ph,
if (ret)
return ret;

put_unaligned_le32(cpu_to_le32(sensor_id), t->tx.buf);
put_unaligned_le32(sensor_id, t->tx.buf);
ret = ph->xops->do_xfer(ph, t);
if (!ret) {
struct sensors_info *si = ph->get_priv(ph);

@ -82,7 +82,8 @@ static bool scmi_vio_have_vq_rx(struct virtio_device *vdev)
}

static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch,
struct scmi_vio_msg *msg)
struct scmi_vio_msg *msg,
struct device *dev)
{
struct scatterlist sg_in;
int rc;

@ -94,8 +95,7 @@ static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch,

rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC);
if (rc)
dev_err_once(vioch->cinfo->dev,
"failed to add to virtqueue (%d)\n", rc);
dev_err_once(dev, "failed to add to virtqueue (%d)\n", rc);
else
virtqueue_kick(vioch->vqueue);

@ -108,7 +108,7 @@ static void scmi_finalize_message(struct scmi_vio_channel *vioch,
struct scmi_vio_msg *msg)
{
if (vioch->is_rx) {
scmi_vio_feed_vq_rx(vioch, msg);
scmi_vio_feed_vq_rx(vioch, msg, vioch->cinfo->dev);
} else {
/* Here IRQs are assumed to be already disabled by the caller */
spin_lock(&vioch->lock);

@ -269,7 +269,7 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
list_add_tail(&msg->list, &vioch->free_list);
spin_unlock_irqrestore(&vioch->lock, flags);
} else {
scmi_vio_feed_vq_rx(vioch, msg, cinfo->dev);
}
}

@ -156,7 +156,7 @@ static int scmi_voltage_descriptors_get(const struct scmi_protocol_handle *ph,
int cnt;

cmd->domain_id = cpu_to_le32(v->id);
cmd->level_index = desc_index;
cmd->level_index = cpu_to_le32(desc_index);
ret = ph->xops->do_xfer(ph, tl);
if (ret)
break;
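The two SCMI endianness fixes above are mirror images of each other. put_unaligned_le32() performs the CPU-to-little-endian conversion internally, so wrapping its argument in cpu_to_le32() byte-swaps twice on big-endian hosts; a plain __le32 struct member, on the other hand, stores raw bytes and does need an explicit conversion. In short:

    put_unaligned_le32(val, buf);            /* converts internally: pass a CPU value */
    cmd->level_index = cpu_to_le32(val);     /* __le32 member: convert explicitly */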
@ -50,7 +50,7 @@ static int __init smccc_soc_init(void)
arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
ARM_SMCCC_ARCH_SOC_ID, &res);

if (res.a0 == SMCCC_RET_NOT_SUPPORTED) {
if ((int)res.a0 == SMCCC_RET_NOT_SUPPORTED) {
pr_info("ARCH_SOC_ID not implemented, skipping ....\n");
return 0;
}
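The cast matters because SMCCC return codes are negative 32-bit values while res.a0 is an unsigned long: on a 64-bit host, firmware reporting NOT_SUPPORTED can leave a 32-bit -1 (0xffffffff) in a0, which never equals the sign-extended ~0UL that comparing directly against the -1 macro produces. A sketch of the mismatch:

    unsigned long a0 = 0xffffffffUL;        /* 32-bit -1 as seen in a0 */

    (a0 == SMCCC_RET_NOT_SUPPORTED);        /* false: -1 promotes to ~0UL */
    ((int)a0 == SMCCC_RET_NOT_SUPPORTED);   /* true: compared as 32-bit */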
@ -646,12 +646,6 @@ kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
if (IS_ERR(gobj))
return PTR_ERR(gobj);

/* Import takes an extra reference on the dmabuf. Drop it now to
* avoid leaking it. We only need the one reference in
* kgd_mem->dmabuf.
*/
dma_buf_put(mem->dmabuf);

*bo = gem_to_amdgpu_bo(gobj);
(*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE;
(*bo)->parent = amdgpu_bo_ref(mem->bo);

@ -1402,7 +1396,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
struct sg_table *sg = NULL;
uint64_t user_addr = 0;
struct amdgpu_bo *bo;
struct drm_gem_object *gobj;
struct drm_gem_object *gobj = NULL;
u32 domain, alloc_domain;
u64 alloc_flags;
int ret;

@ -1512,14 +1506,16 @@ allocate_init_user_pages_failed:
remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
drm_vma_node_revoke(&gobj->vma_node, drm_priv);
err_node_allow:
drm_gem_object_put(gobj);
/* Don't unreserve system mem limit twice */
goto err_reserve_limit;
err_bo_create:
unreserve_mem_limit(adev, size, alloc_domain, !!sg);
err_reserve_limit:
mutex_destroy(&(*mem)->lock);
kfree(*mem);
if (gobj)
drm_gem_object_put(gobj);
else
kfree(*mem);
err:
if (sg) {
sg_free_table(sg);

@ -1569,6 +1569,18 @@ void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
WREG32(adev->bios_scratch_reg_offset + 3, tmp);
}

void amdgpu_atombios_scratch_regs_set_backlight_level(struct amdgpu_device *adev,
u32 backlight_level)
{
u32 tmp = RREG32(adev->bios_scratch_reg_offset + 2);

tmp &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK;
tmp |= (backlight_level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT) &
ATOM_S2_CURRENT_BL_LEVEL_MASK;

WREG32(adev->bios_scratch_reg_offset + 2, tmp);
}

bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev)
{
u32 tmp = RREG32(adev->bios_scratch_reg_offset + 7);

@ -185,6 +185,8 @@ bool amdgpu_atombios_has_gpu_virtualization_table(struct amdgpu_device *adev);
void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock);
void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
bool hung);
void amdgpu_atombios_scratch_regs_set_backlight_level(struct amdgpu_device *adev,
u32 backlight_level);
bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev);

void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);

@ -3833,7 +3833,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
/* disable all interrupts */
amdgpu_irq_disable_all(adev);
if (adev->mode_info.mode_config_initialized){
if (!amdgpu_device_has_dc_support(adev))
if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
drm_helper_force_disable_all(adev_to_drm(adev));
else
drm_atomic_helper_shutdown(adev_to_drm(adev));

@ -4289,6 +4289,8 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
{
int r;

amdgpu_amdkfd_pre_reset(adev);

if (from_hypervisor)
r = amdgpu_virt_request_full_gpu(adev, true);
else

@ -5031,7 +5033,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,

cancel_delayed_work_sync(&tmp_adev->delayed_init_work);

amdgpu_amdkfd_pre_reset(tmp_adev);
if (!amdgpu_sriov_vf(tmp_adev))
amdgpu_amdkfd_pre_reset(tmp_adev);

/*
* Mark these ASICs to be reset as untracked first
@ -5089,7 +5092,7 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */

tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter));
/* Actual ASIC resets if needed.*/
/* TODO Implement XGMI hive reset logic for SRIOV */
/* Host driver will handle XGMI hive reset for SRIOV */
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_device_reset_sriov(adev, job ? false : true);
if (r)

@ -5130,7 +5133,7 @@ skip_hw_reset:
drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
}

if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
}

@ -5151,7 +5154,7 @@ skip_sched_resume:
list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
/* unlock kfd: SRIOV would do it separately */
if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
amdgpu_amdkfd_post_reset(tmp_adev);
amdgpu_amdkfd_post_reset(tmp_adev);

/* kfd_post_reset will do nothing if kfd device is not initialized,
* need to bring up kfd here if it was not initialized before
Some files were not shown because too many files have changed in this diff.