Merge tag 's390-5.18-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Vasily Gorbik:

 - Raise minimum supported machine generation to z10, which comes with
   various cleanups and code simplifications (usercopy/spectre
   mitigation/etc).

 - Rework extables and get rid of anonymous out-of-line fixups.

 - Page table helpers cleanup. Add set_pXd()/set_pte() helper functions.
   Convert pte_val()/pXd_val() macros to functions (a condensed sketch
   of the pattern follows this list).

 - Optimize kretprobe handling by avoiding extra kprobe on
   __kretprobe_trampoline.

 - Add support for CEX8 crypto cards.

 - Allow triggering an AP bus rescan by writing to /sys/bus/ap/scans.

 - Add CONFIG_EXPOLINE_EXTERN option to build the kernel without COMDAT
   group sections, which simplifies kpatch support.

 - Always use the packed stack layout and extend kernel unwinder tests.

 - Add sanity checks for ftrace code patching.

 - Add s390dbf debug log for the vfio_ap device driver.

 - Various virtual vs physical address confusion fixes.

 - Various small fixes and improvements all over the code.
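
A condensed sketch of the page-table helper pattern that recurs in the
pgtable.h hunks below; pte_t, pgprot_t, __pte(), pgprot_val() and
WRITE_ONCE() come from the usual kernel headers, and this illustrates
the pattern rather than reproducing the full patch:

    /* before: pte_val() was an lvalue macro, written through directly:
     *    #define pte_val(x)    ((x).pte)
     *    pte_val(*ptep) = _PAGE_INVALID;
     */

    /* after: a read-only accessor ... */
    static inline unsigned long pte_val(pte_t pte)
    {
            return pte.pte;
    }

    /* ... an explicit store helper that guarantees a single store ... */
    static inline void set_pte(pte_t *ptep, pte_t pte)
    {
            WRITE_ONCE(*ptep, pte);
    }

    /* ... and bit helpers instead of open-coded ops on pte_val() */
    static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
    {
            return __pte(pte_val(pte) | pgprot_val(prot));
    }

    /* usage: set_pte(ptep, __pte(_PAGE_INVALID)); */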

* tag 's390-5.18-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (69 commits)
  s390/test_unwind: add kretprobe tests
  s390/kprobes: Avoid additional kprobe in kretprobe handling
  s390: convert ".insn" encoding to instruction names
  s390: assume stckf is always present
  s390/nospec: move to single register thunks
  s390: raise minimum supported machine generation to z10
  s390/uaccess: Add copy_from/to_user_key functions
  s390/nospec: align and size extern thunks
  s390/nospec: add an option to use thunk-extern
  s390/nospec: generate single register thunks if possible
  s390/pci: make zpci_set_irq()/zpci_clear_irq() static
  s390: remove unused expoline to BC instructions
  s390/irq: use assignment instead of cast
  s390/traps: get rid of magic cast for per code
  s390/traps: get rid of magic cast for program interruption code
  s390/signal: fix typo in comments
  s390/asm-offsets: remove unused defines
  s390/test_unwind: avoid build warning with W=1
  s390: remove .fixup section
  s390/bpf: encode register within extable entry
  ...
Commit d710d370c4 by Linus Torvalds, 2022-03-25 10:01:34 -07:00
125 changed files with 1844 additions and 1439 deletions


@ -17019,9 +17019,7 @@ L: linux-s390@vger.kernel.org
S: Supported
W: http://www.ibm.com/developerworks/linux/linux390/
F: Documentation/s390/vfio-ap.rst
F: drivers/s390/crypto/vfio_ap_drv.c
F: drivers/s390/crypto/vfio_ap_ops.c
F: drivers/s390/crypto/vfio_ap_private.h
F: drivers/s390/crypto/vfio_ap*
S390 VFIO-CCW DRIVER
M: Eric Farman <farman@linux.ibm.com>


@ -122,7 +122,6 @@ config S390
select ARCH_WANT_IPC_PARSE_VERSION
select BUILDTIME_TABLE_SORT
select CLONE_BACKWARDS2
select CPU_NO_EFFICIENT_FFS if !HAVE_MARCH_Z9_109_FEATURES
select DMA_OPS if PCI
select DYNAMIC_FTRACE if FUNCTION_TRACER
select GENERIC_ALLOCATOR
@ -157,7 +156,7 @@ config S390
select HAVE_DYNAMIC_FTRACE_WITH_ARGS
select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
select HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_EBPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES
select HAVE_EBPF_JIT if HAVE_MARCH_Z196_FEATURES
select HAVE_EFFICIENT_UNALIGNED_ACCESS
select HAVE_FAST_GUP
select HAVE_FENTRY
@ -232,20 +231,8 @@ source "kernel/livepatch/Kconfig"
menu "Processor type and features"
config HAVE_MARCH_Z900_FEATURES
def_bool n
config HAVE_MARCH_Z990_FEATURES
def_bool n
select HAVE_MARCH_Z900_FEATURES
config HAVE_MARCH_Z9_109_FEATURES
def_bool n
select HAVE_MARCH_Z990_FEATURES
config HAVE_MARCH_Z10_FEATURES
def_bool n
select HAVE_MARCH_Z9_109_FEATURES
config HAVE_MARCH_Z196_FEATURES
def_bool n
@ -271,41 +258,13 @@ choice
prompt "Processor type"
default MARCH_Z196
config MARCH_Z900
bool "IBM zSeries model z800 and z900"
select HAVE_MARCH_Z900_FEATURES
depends on $(cc-option,-march=z900)
help
Select this to enable optimizations for model z800/z900 (2064 and
2066 series). This will enable some optimizations that are not
available on older ESA/390 (31 Bit) only CPUs.
config MARCH_Z990
bool "IBM zSeries model z890 and z990"
select HAVE_MARCH_Z990_FEATURES
depends on $(cc-option,-march=z990)
help
Select this to enable optimizations for model z890/z990 (2084 and
2086 series). The kernel will be slightly faster but will not work
on older machines.
config MARCH_Z9_109
bool "IBM System z9"
select HAVE_MARCH_Z9_109_FEATURES
depends on $(cc-option,-march=z9-109)
help
Select this to enable optimizations for IBM System z9 (2094 and
2096 series). The kernel will be slightly faster but will not work
on older machines.
config MARCH_Z10
bool "IBM System z10"
select HAVE_MARCH_Z10_FEATURES
depends on $(cc-option,-march=z10)
help
Select this to enable optimizations for IBM System z10 (2097 and
2098 series). The kernel will be slightly faster but will not work
on older machines.
Select this to enable optimizations for IBM System z10 (2097 and 2098
series). This is the oldest machine generation currently supported.
config MARCH_Z196
bool "IBM zEnterprise 114 and 196"
@ -354,15 +313,6 @@ config MARCH_Z15
endchoice
config MARCH_Z900_TUNE
def_bool TUNE_Z900 || MARCH_Z900 && TUNE_DEFAULT
config MARCH_Z990_TUNE
def_bool TUNE_Z990 || MARCH_Z990 && TUNE_DEFAULT
config MARCH_Z9_109_TUNE
def_bool TUNE_Z9_109 || MARCH_Z9_109 && TUNE_DEFAULT
config MARCH_Z10_TUNE
def_bool TUNE_Z10 || MARCH_Z10 && TUNE_DEFAULT
@ -398,21 +348,8 @@ config TUNE_DEFAULT
Tune the generated code for the target processor for which the kernel
will be compiled.
config TUNE_Z900
bool "IBM zSeries model z800 and z900"
depends on $(cc-option,-mtune=z900)
config TUNE_Z990
bool "IBM zSeries model z890 and z990"
depends on $(cc-option,-mtune=z990)
config TUNE_Z9_109
bool "IBM System z9"
depends on $(cc-option,-mtune=z9-109)
config TUNE_Z10
bool "IBM System z10"
depends on $(cc-option,-mtune=z10)
config TUNE_Z196
bool "IBM zEnterprise 114 and 196"
@ -587,6 +524,7 @@ config KERNEL_NOBP
config EXPOLINE
def_bool n
depends on $(cc-option,-mindirect-branch=thunk)
prompt "Avoid speculative indirect branches in the kernel"
help
Compile the kernel with the expoline compiler options to guard
@ -597,6 +535,19 @@ config EXPOLINE
If unsure, say N.
config EXPOLINE_EXTERN
def_bool n
depends on EXPOLINE
depends on CC_IS_GCC && GCC_VERSION >= 110200
depends on $(success,$(srctree)/arch/s390/tools/gcc-thunk-extern.sh $(CC))
prompt "Generate expolines as extern functions."
help
This option is required for some tooling like kpatch. The kernel is
compiled with -mindirect-branch=thunk-extern and requires a newer
compiler.
If unsure, say N.
choice
prompt "Expoline default"
depends on EXPOLINE
@ -658,20 +609,6 @@ config MAX_PHYSMEM_BITS
Increasing the number of bits also increases the kernel image size.
By default 46 bits (64TB) are supported.
config PACK_STACK
def_bool y
prompt "Pack kernel stack"
help
This option enables the compiler option -mkernel-backchain if it
is available. If the option is available the compiler supports
the new stack layout which dramatically reduces the minimum stack
frame size. With an old compiler a non-leaf function needs a
minimum of 96 bytes on 31 bit and 160 bytes on 64 bit. With
-mkernel-backchain the minimum size drops to 16 byte on 31 bit
and 24 byte on 64 bit.
Say Y if you are unsure.
config CHECK_STACK
def_bool y
depends on !VMAP_STACK


@ -21,7 +21,7 @@ endif
aflags_dwarf := -Wa,-gdwarf-2
KBUILD_AFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -D__ASSEMBLY__
KBUILD_AFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),$(aflags_dwarf))
KBUILD_CFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -O2
KBUILD_CFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -O2 -mpacked-stack
KBUILD_CFLAGS_DECOMPRESSOR += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY
KBUILD_CFLAGS_DECOMPRESSOR += -fno-delete-null-pointer-checks -msoft-float -mbackchain
KBUILD_CFLAGS_DECOMPRESSOR += -fno-asynchronous-unwind-tables
@ -36,9 +36,6 @@ CHECKFLAGS += -D__s390__ -D__s390x__
export LD_BFD
mflags-$(CONFIG_MARCH_Z900) := -march=z900
mflags-$(CONFIG_MARCH_Z990) := -march=z990
mflags-$(CONFIG_MARCH_Z9_109) := -march=z9-109
mflags-$(CONFIG_MARCH_Z10) := -march=z10
mflags-$(CONFIG_MARCH_Z196) := -march=z196
mflags-$(CONFIG_MARCH_ZEC12) := -march=zEC12
@ -51,9 +48,6 @@ export CC_FLAGS_MARCH := $(mflags-y)
aflags-y += $(mflags-y)
cflags-y += $(mflags-y)
cflags-$(CONFIG_MARCH_Z900_TUNE) += -mtune=z900
cflags-$(CONFIG_MARCH_Z990_TUNE) += -mtune=z990
cflags-$(CONFIG_MARCH_Z9_109_TUNE) += -mtune=z9-109
cflags-$(CONFIG_MARCH_Z10_TUNE) += -mtune=z10
cflags-$(CONFIG_MARCH_Z196_TUNE) += -mtune=z196
cflags-$(CONFIG_MARCH_ZEC12_TUNE) += -mtune=zEC12
@ -68,11 +62,6 @@ cflags-y += -Wa,-I$(srctree)/arch/$(ARCH)/include
#
cflags-$(CONFIG_FRAME_POINTER) += -fno-optimize-sibling-calls
ifneq ($(call cc-option,-mpacked-stack -mbackchain -msoft-float),)
cflags-$(CONFIG_PACK_STACK) += -mpacked-stack -D__PACK_STACK
aflags-$(CONFIG_PACK_STACK) += -D__PACK_STACK
endif
KBUILD_AFLAGS_DECOMPRESSOR += $(aflags-y)
KBUILD_CFLAGS_DECOMPRESSOR += $(cflags-y)
@ -86,14 +75,18 @@ ifneq ($(call cc-option,-mstack-size=8192 -mstack-guard=128),)
endif
ifdef CONFIG_EXPOLINE
ifneq ($(call cc-option,$(CC_FLAGS_MARCH) -mindirect-branch=thunk),)
ifdef CONFIG_EXPOLINE_EXTERN
KBUILD_LDFLAGS_MODULE += arch/s390/lib/expoline.o
CC_FLAGS_EXPOLINE := -mindirect-branch=thunk-extern
CC_FLAGS_EXPOLINE += -mfunction-return=thunk-extern
else
CC_FLAGS_EXPOLINE := -mindirect-branch=thunk
CC_FLAGS_EXPOLINE += -mfunction-return=thunk
CC_FLAGS_EXPOLINE += -mindirect-branch-table
export CC_FLAGS_EXPOLINE
cflags-y += $(CC_FLAGS_EXPOLINE) -DCC_USING_EXPOLINE
aflags-y += -DCC_USING_EXPOLINE
endif
CC_FLAGS_EXPOLINE += -mindirect-branch-table
export CC_FLAGS_EXPOLINE
cflags-y += $(CC_FLAGS_EXPOLINE) -DCC_USING_EXPOLINE
aflags-y += -DCC_USING_EXPOLINE
endif
ifdef CONFIG_FUNCTION_TRACER
@ -111,7 +104,7 @@ endif
# Test CFI features of binutils
cfi := $(call as-instr,.cfi_startproc\n.cfi_val_offset 15$(comma)-160\n.cfi_endproc,-DCONFIG_AS_CFI_VAL_OFFSET=1)
KBUILD_CFLAGS += -mbackchain -msoft-float $(cflags-y)
KBUILD_CFLAGS += -mpacked-stack -mbackchain -msoft-float $(cflags-y)
KBUILD_CFLAGS += -pipe -Wno-sign-compare
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables $(cfi)
KBUILD_AFLAGS += $(aflags-y) $(cfi)


@ -5,7 +5,6 @@
* Author(s): Hartmut Penner <hp@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Rob van der Heij <rvdhei@iae.nl>
* Heiko Carstens <heiko.carstens@de.ibm.com>
*
* There are 5 different IPL methods
* 1) load the image directly into ram at address 0 and do an PSW restart


@ -312,7 +312,7 @@ ENTRY(chacha20_vx_4x)
VPERM XC0,XC0,XC0,BEPERM
VPERM XD0,XD0,XD0,BEPERM
.insn rilu,0xc20e00000000,LEN,0x40 # clgfi LEN,0x40
clgfi LEN,0x40
jl .Ltail_4x
VLM XT0,XT3,0,INP,0
@ -339,7 +339,7 @@ ENTRY(chacha20_vx_4x)
VPERM XC0,XC0,XC0,BEPERM
VPERM XD0,XD0,XD0,BEPERM
.insn rilu,0xc20e00000000,LEN,0x40 # clgfi LEN,0x40
clgfi LEN,0x40
jl .Ltail_4x
VLM XT0,XT3,0,INP,0
@ -366,7 +366,7 @@ ENTRY(chacha20_vx_4x)
VPERM XC0,XC0,XC0,BEPERM
VPERM XD0,XD0,XD0,BEPERM
.insn rilu,0xc20e00000000,LEN,0x40 # clgfi LEN,0x40
clgfi LEN,0x40
jl .Ltail_4x
VLM XT0,XT3,0,INP,0
@ -472,7 +472,7 @@ ENDPROC(chacha20_vx_4x)
#define T3 %v30
ENTRY(chacha20_vx)
.insn rilu,0xc20e00000000,LEN,256 # clgfi LEN,256
clgfi LEN,256
jle chacha20_vx_4x
stmg %r6,%r7,6*8(SP)
@ -725,7 +725,7 @@ ENTRY(chacha20_vx)
VPERM C0,C0,C0,BEPERM
VPERM D0,D0,D0,BEPERM
.insn rilu,0xc20e00000000,LEN,0x40 # clgfi LEN,0x40
clgfi LEN,0x40
jl .Ltail_vx
VAF D2,D2,T2 # +K[3]+2
@ -754,7 +754,7 @@ ENTRY(chacha20_vx)
VPERM C0,C1,C1,BEPERM
VPERM D0,D1,D1,BEPERM
.insn rilu,0xc20e00000000,LEN,0x40 # clgfi LEN,0x40
clgfi LEN,0x40
jl .Ltail_vx
VLM A1,D1,0,INP,0
@ -780,7 +780,7 @@ ENTRY(chacha20_vx)
VPERM C0,C2,C2,BEPERM
VPERM D0,D2,D2,BEPERM
.insn rilu,0xc20e00000000,LEN,0x40 # clgfi LEN,0x40
clgfi LEN,0x40
jl .Ltail_vx
VLM A1,D1,0,INP,0
@ -807,7 +807,7 @@ ENTRY(chacha20_vx)
VPERM C0,C3,C3,BEPERM
VPERM D0,D3,D3,BEPERM
.insn rilu,0xc20e00000000,LEN,0x40 # clgfi LEN,0x40
clgfi LEN,0x40
jl .Ltail_vx
VAF D3,D2,T1 # K[3]+4
@ -837,7 +837,7 @@ ENTRY(chacha20_vx)
VPERM C0,C4,C4,BEPERM
VPERM D0,D4,D4,BEPERM
.insn rilu,0xc20e00000000,LEN,0x40 # clgfi LEN,0x40
clgfi LEN,0x40
jl .Ltail_vx
VLM A1,D1,0,INP,0
@ -864,7 +864,7 @@ ENTRY(chacha20_vx)
VPERM C0,C5,C5,BEPERM
VPERM D0,D5,D5,BEPERM
.insn rilu,0xc20e00000000,LEN,0x40 # clgfi LEN,0x40
clgfi LEN,0x40
jl .Ltail_vx
VLM A1,D1,0,INP,0


@ -10,6 +10,7 @@
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <asm/extable.h>
#include <asm/diag.h>
#include <asm/ebcdic.h>
#include <asm/timex.h>


@ -13,6 +13,7 @@
#define _ASM_S390_AP_H_
#include <linux/io.h>
#include <asm/asm-extable.h>
/**
* The ap_qid_t identifier of an ap queue.


@ -0,0 +1,53 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_EXTABLE_H
#define __ASM_EXTABLE_H
#include <linux/stringify.h>
#include <asm/asm-const.h>
#define EX_TYPE_NONE 0
#define EX_TYPE_FIXUP 1
#define EX_TYPE_BPF 2
#define EX_TYPE_UACCESS 3
#define __EX_TABLE(_section, _fault, _target, _type) \
stringify_in_c(.section _section,"a";) \
stringify_in_c(.align 4;) \
stringify_in_c(.long (_fault) - .;) \
stringify_in_c(.long (_target) - .;) \
stringify_in_c(.short (_type);) \
stringify_in_c(.short 0;) \
stringify_in_c(.previous)
#define __EX_TABLE_UA(_section, _fault, _target, _type, _reg) \
stringify_in_c(.section _section,"a";) \
stringify_in_c(.align 4;) \
stringify_in_c(.long (_fault) - .;) \
stringify_in_c(.long (_target) - .;) \
stringify_in_c(.short (_type);) \
stringify_in_c(.macro extable_reg reg;) \
stringify_in_c(.set found, 0;) \
stringify_in_c(.set regnr, 0;) \
stringify_in_c(.irp rs,r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,r13,r14,r15;) \
stringify_in_c(.ifc "\reg", "%%\rs";) \
stringify_in_c(.set found, 1;) \
stringify_in_c(.short regnr;) \
stringify_in_c(.endif;) \
stringify_in_c(.set regnr, regnr+1;) \
stringify_in_c(.endr;) \
stringify_in_c(.ifne (found != 1);) \
stringify_in_c(.error "extable_reg: bad register argument";) \
stringify_in_c(.endif;) \
stringify_in_c(.endm;) \
stringify_in_c(extable_reg _reg;) \
stringify_in_c(.purgem extable_reg;) \
stringify_in_c(.previous)
#define EX_TABLE(_fault, _target) \
__EX_TABLE(__ex_table, _fault, _target, EX_TYPE_FIXUP)
#define EX_TABLE_AMODE31(_fault, _target) \
__EX_TABLE(.amode31.ex_table, _fault, _target, EX_TYPE_FIXUP)
#define EX_TABLE_UA(_fault, _target, _reg) \
__EX_TABLE_UA(__ex_table, _fault, _target, EX_TYPE_UACCESS, _reg)
#endif /* __ASM_EXTABLE_H */
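
The entries these macros emit are self-relative: the two .long fields
hold 32-bit offsets from their own storage locations, which keeps the
table position-independent. A minimal sketch of how such an entry is
resolved, mirroring the asm/extable.h hunk further down (kernel context
assumed):

    struct exception_table_entry {
            int insn, fixup;  /* offsets relative to each field itself */
            short type, data; /* EX_TYPE_* plus extra info, e.g. a register */
    };

    static inline unsigned long extable_fixup(const struct exception_table_entry *x)
    {
            return (unsigned long)&x->fixup + x->fixup;
    }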


@ -256,8 +256,6 @@ static inline bool test_bit_inv(unsigned long nr,
return test_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
/**
* __flogr - find leftmost one
* @word - The word to search
@ -376,16 +374,6 @@ static inline int fls(unsigned int word)
return fls64(word);
}
#else /* CONFIG_HAVE_MARCH_Z9_109_FEATURES */
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>
#endif /* CONFIG_HAVE_MARCH_Z9_109_FEATURES */
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/sched.h>


@ -10,6 +10,7 @@
#define _ASM_S390_CPU_MF_H
#include <linux/errno.h>
#include <asm/asm-extable.h>
#include <asm/facility.h>
asm(".include \"asm/cpu_mf-insn.h\"\n");
@ -159,7 +160,7 @@ struct hws_trailer_entry {
/* Load program parameter */
static inline void lpp(void *pp)
{
asm volatile(".insn s,0xb2800000,0(%0)\n":: "a" (pp) : "memory");
asm volatile("lpp 0(%0)\n" :: "a" (pp) : "memory");
}
/* Query counter information */
@ -168,7 +169,7 @@ static inline int qctri(struct cpumf_ctr_info *info)
int rc = -EINVAL;
asm volatile (
"0: .insn s,0xb28e0000,%1\n"
"0: qctri %1\n"
"1: lhi %0,0\n"
"2:\n"
EX_TABLE(1b, 2b)
@ -182,7 +183,7 @@ static inline int lcctl(u64 ctl)
int cc;
asm volatile (
" .insn s,0xb2840000,%1\n"
" lcctl %1\n"
" ipm %0\n"
" srl %0,28\n"
: "=d" (cc) : "Q" (ctl) : "cc");
@ -196,7 +197,7 @@ static inline int __ecctr(u64 ctr, u64 *content)
int cc;
asm volatile (
" .insn rre,0xb2e40000,%0,%2\n"
" ecctr %0,%2\n"
" ipm %1\n"
" srl %1,28\n"
: "=d" (_content), "=d" (cc) : "d" (ctr) : "cc");
@ -246,7 +247,7 @@ static inline int qsi(struct hws_qsi_info_block *info)
int cc = 1;
asm volatile(
"0: .insn s,0xb2860000,%1\n"
"0: qsi %1\n"
"1: lhi %0,0\n"
"2:\n"
EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
@ -261,7 +262,7 @@ static inline int lsctl(struct hws_lsctl_request_block *req)
cc = 1;
asm volatile(
"0: .insn s,0xb2870000,0(%1)\n"
"0: lsctl 0(%1)\n"
"1: ipm %0\n"
" srl %0,28\n"
"2:\n"


@ -5,7 +5,6 @@
* Author(s): Ingo Adlung <adlung@de.ibm.com>,
* Martin Schwidefsky <schwidefsky@de.ibm.com>,
* Cornelia Huck <cornelia.huck@de.ibm.com>,
* Heiko Carstens <heiko.carstens@de.ibm.com>,
*/
#ifndef _ASM_S390_CRW_H


@ -11,6 +11,7 @@
#include <linux/if_ether.h>
#include <linux/percpu.h>
#include <asm/asm-extable.h>
enum diag_stat_enum {
DIAG_STAT_X008,


@ -25,7 +25,7 @@
struct exception_table_entry
{
int insn, fixup;
long handler;
short type, data;
};
extern struct exception_table_entry *__start_amode31_ex_table;
@ -38,28 +38,6 @@ static inline unsigned long extable_fixup(const struct exception_table_entry *x)
return (unsigned long)&x->fixup + x->fixup;
}
typedef bool (*ex_handler_t)(const struct exception_table_entry *,
struct pt_regs *);
static inline ex_handler_t
ex_fixup_handler(const struct exception_table_entry *x)
{
if (likely(!x->handler))
return NULL;
return (ex_handler_t)((unsigned long)&x->handler + x->handler);
}
static inline bool ex_handle(const struct exception_table_entry *x,
struct pt_regs *regs)
{
ex_handler_t handler = ex_fixup_handler(x);
if (unlikely(handler))
return handler(x, regs);
regs->psw.addr = extable_fixup(x);
return true;
}
#define ARCH_HAS_RELATIVE_EXTABLE
static inline void swap_ex_entry_fixup(struct exception_table_entry *a,
@ -69,13 +47,26 @@ static inline void swap_ex_entry_fixup(struct exception_table_entry *a,
{
a->fixup = b->fixup + delta;
b->fixup = tmp.fixup - delta;
a->handler = b->handler;
if (a->handler)
a->handler += delta;
b->handler = tmp.handler;
if (b->handler)
b->handler -= delta;
a->type = b->type;
b->type = tmp.type;
a->data = b->data;
b->data = tmp.data;
}
#define swap_ex_entry_fixup swap_ex_entry_fixup
#ifdef CONFIG_BPF_JIT
bool ex_handler_bpf(const struct exception_table_entry *ex, struct pt_regs *regs);
#else /* !CONFIG_BPF_JIT */
static inline bool ex_handler_bpf(const struct exception_table_entry *ex, struct pt_regs *regs)
{
return false;
}
#endif /* CONFIG_BPF_JIT */
bool fixup_exception(struct pt_regs *regs);
#endif


@ -45,6 +45,7 @@
#define _ASM_S390_FPU_API_H
#include <linux/preempt.h>
#include <asm/asm-extable.h>
void save_fpu_regs(void);
void load_fpu_regs(void);


@ -4,6 +4,7 @@
#include <linux/uaccess.h>
#include <linux/futex.h>
#include <asm/asm-extable.h>
#include <asm/mmu_context.h>
#include <asm/errno.h>


@ -45,9 +45,9 @@ static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned long sz)
{
if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
pte_val(*ptep) = _REGION3_ENTRY_EMPTY;
set_pte(ptep, __pte(_REGION3_ENTRY_EMPTY));
else
pte_val(*ptep) = _SEGMENT_ENTRY_EMPTY;
set_pte(ptep, __pte(_SEGMENT_ENTRY_EMPTY));
}
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,


@ -81,8 +81,13 @@ static __always_inline void inc_irq_stat(enum interruption_class irq)
}
struct ext_code {
unsigned short subcode;
unsigned short code;
union {
struct {
unsigned short subcode;
unsigned short code;
};
unsigned int int_code;
};
};
typedef void (*ext_int_handler_t)(struct ext_code, unsigned int, unsigned long);
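
With the anonymous union above, the combined 32-bit interruption code
can be read and written in one go; per the "use assignment instead of
cast" commit in the shortlog, a handler can likely drop its pointer
cast along these lines (a sketch; the regs->int_code source field is an
assumption, not shown on this page):

    static void handle_ext_irq_sketch(struct pt_regs *regs)
    {
            struct ext_code ext_code;

            /* before: ext_code = *(struct ext_code *) &regs->int_code; */
            ext_code.int_code = regs->int_code;

            if (ext_code.code == 0x1004)    /* e.g. clock comparator */
                    ; /* ... dispatch on the subcode ... */
    }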


@ -71,6 +71,7 @@ struct kprobe_ctlblk {
void arch_remove_kprobe(struct kprobe *p);
void __kretprobe_trampoline(void);
void trampoline_probe_handler(struct pt_regs *regs);
int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
int kprobe_exceptions_notify(struct notifier_block *self,


@ -2,27 +2,9 @@
#ifndef __ASM_LINKAGE_H
#define __ASM_LINKAGE_H
#include <asm/asm-const.h>
#include <linux/stringify.h>
#define __ALIGN .align 16, 0x07
#define __ALIGN_STR __stringify(__ALIGN)
/*
* Helper macro for exception table entries
*/
#define __EX_TABLE(_section, _fault, _target) \
stringify_in_c(.section _section,"a";) \
stringify_in_c(.align 8;) \
stringify_in_c(.long (_fault) - .;) \
stringify_in_c(.long (_target) - .;) \
stringify_in_c(.quad 0;) \
stringify_in_c(.previous)
#define EX_TABLE(_fault, _target) \
__EX_TABLE(__ex_table, _fault, _target)
#define EX_TABLE_AMODE31(_fault, _target) \
__EX_TABLE(.amode31.ex_table, _fault, _target)
#endif


@ -34,12 +34,22 @@ struct lowcore {
__u32 ext_int_code_addr;
};
__u32 svc_int_code; /* 0x0088 */
__u16 pgm_ilc; /* 0x008c */
__u16 pgm_code; /* 0x008e */
union {
struct {
__u16 pgm_ilc; /* 0x008c */
__u16 pgm_code; /* 0x008e */
};
__u32 pgm_int_code;
};
__u32 data_exc_code; /* 0x0090 */
__u16 mon_class_num; /* 0x0094 */
__u8 per_code; /* 0x0096 */
__u8 per_atmid; /* 0x0097 */
union {
struct {
__u8 per_code; /* 0x0096 */
__u8 per_atmid; /* 0x0097 */
};
__u16 per_code_combined;
};
__u64 per_address; /* 0x0098 */
__u8 exc_access_id; /* 0x00a0 */
__u8 per_access_id; /* 0x00a1 */
@ -153,11 +163,9 @@ struct lowcore {
__u64 gmap; /* 0x03d0 */
__u8 pad_0x03d8[0x0400-0x03d8]; /* 0x03d8 */
/* br %r1 trampoline */
__u16 br_r1_trampoline; /* 0x0400 */
__u32 return_lpswe; /* 0x0402 */
__u32 return_mcck_lpswe; /* 0x0406 */
__u8 pad_0x040a[0x0e00-0x040a]; /* 0x040a */
__u32 return_lpswe; /* 0x0400 */
__u32 return_mcck_lpswe; /* 0x0404 */
__u8 pad_0x040a[0x0e00-0x0408]; /* 0x0408 */
/*
* 0xe00 contains the address of the IPL Parameter Information


@ -4,6 +4,7 @@
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <asm/asm-extable.h>
typedef struct {
spinlock_t lock;


@ -6,7 +6,6 @@
* Author(s): Ingo Adlung <adlung@de.ibm.com>,
* Martin Schwidefsky <schwidefsky@de.ibm.com>,
* Cornelia Huck <cornelia.huck@de.ibm.com>,
* Heiko Carstens <heiko.carstens@de.ibm.com>,
*/
#ifndef _ASM_S390_NMI_H


@ -10,15 +10,18 @@
#ifdef CC_USING_EXPOLINE
_LC_BR_R1 = __LC_BR_R1
/*
* The expoline macros are used to create thunks in the same format
* as gcc generates them. The 'comdat' section flag makes sure that
* the various thunks are merged into a single copy.
*/
.macro __THUNK_PROLOG_NAME name
#ifdef CONFIG_EXPOLINE_EXTERN
.pushsection .text,"ax",@progbits
.align 16,0x07
#else
.pushsection .text.\name,"axG",@progbits,\name,comdat
#endif
.globl \name
.hidden \name
.type \name,@function
@ -26,37 +29,49 @@ _LC_BR_R1 = __LC_BR_R1
CFI_STARTPROC
.endm
.macro __THUNK_EPILOG
.macro __THUNK_EPILOG_NAME name
CFI_ENDPROC
#ifdef CONFIG_EXPOLINE_EXTERN
.size \name, .-\name
#endif
.popsection
.endm
.macro __THUNK_PROLOG_BR r1,r2
__THUNK_PROLOG_NAME __s390_indirect_jump_r\r2\()use_r\r1
.macro __THUNK_PROLOG_BR r1
__THUNK_PROLOG_NAME __s390_indirect_jump_r\r1
.endm
.macro __THUNK_PROLOG_BC d0,r1,r2
__THUNK_PROLOG_NAME __s390_indirect_branch_\d0\()_\r2\()use_\r1
.macro __THUNK_EPILOG_BR r1
__THUNK_EPILOG_NAME __s390_indirect_jump_r\r1
.endm
.macro __THUNK_BR r1,r2
jg __s390_indirect_jump_r\r2\()use_r\r1
.macro __THUNK_BR r1
jg __s390_indirect_jump_r\r1
.endm
.macro __THUNK_BC d0,r1,r2
jg __s390_indirect_branch_\d0\()_\r2\()use_\r1
.macro __THUNK_BRASL r1,r2
brasl \r1,__s390_indirect_jump_r\r2
.endm
.macro __THUNK_BRASL r1,r2,r3
brasl \r1,__s390_indirect_jump_r\r3\()use_r\r2
.endm
.macro __DECODE_RR expand,reg,ruse
.macro __DECODE_R expand,reg
.set __decode_fail,1
.irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
.ifc \reg,%r\r1
\expand \r1
.set __decode_fail,0
.endif
.endr
.if __decode_fail == 1
.error "__DECODE_R failed"
.endif
.endm
.macro __DECODE_RR expand,rsave,rtarget
.set __decode_fail,1
.irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
.ifc \rsave,%r\r1
.irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
.ifc \ruse,%r\r2
.ifc \rtarget,%r\r2
\expand \r1,\r2
.set __decode_fail,0
.endif
@ -68,125 +83,47 @@ _LC_BR_R1 = __LC_BR_R1
.endif
.endm
.macro __DECODE_RRR expand,rsave,rtarget,ruse
.set __decode_fail,1
.irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
.ifc \rsave,%r\r1
.irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
.ifc \rtarget,%r\r2
.irp r3,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
.ifc \ruse,%r\r3
\expand \r1,\r2,\r3
.set __decode_fail,0
.endif
.endr
.endif
.endr
.endif
.endr
.if __decode_fail == 1
.error "__DECODE_RRR failed"
.endif
.endm
.macro __DECODE_DRR expand,disp,reg,ruse
.set __decode_fail,1
.irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
.ifc \reg,%r\r1
.irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
.ifc \ruse,%r\r2
\expand \disp,\r1,\r2
.set __decode_fail,0
.endif
.endr
.endif
.endr
.if __decode_fail == 1
.error "__DECODE_DRR failed"
.endif
.endm
.macro __THUNK_EX_BR reg,ruse
# Be very careful when adding instructions to this macro!
# The ALTERNATIVE replacement code has a .+10 which targets
# the "br \reg" after the code has been patched.
#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
.macro __THUNK_EX_BR reg
exrl 0,555f
j .
#else
.ifc \reg,%r1
ALTERNATIVE "ex %r0,_LC_BR_R1", ".insn ril,0xc60000000000,0,.+10", 35
j .
.else
larl \ruse,555f
ex 0,0(\ruse)
j .
.endif
#endif
555: br \reg
.endm
.macro __THUNK_EX_BC disp,reg,ruse
#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
exrl 0,556f
j .
#ifdef CONFIG_EXPOLINE_EXTERN
.macro GEN_BR_THUNK reg
.endm
.macro GEN_BR_THUNK_EXTERN reg
#else
larl \ruse,556f
ex 0,0(\ruse)
j .
.macro GEN_BR_THUNK reg
#endif
556: b \disp(\reg)
__DECODE_R __THUNK_PROLOG_BR,\reg
__THUNK_EX_BR \reg
__DECODE_R __THUNK_EPILOG_BR,\reg
.endm
.macro GEN_BR_THUNK reg,ruse=%r1
__DECODE_RR __THUNK_PROLOG_BR,\reg,\ruse
__THUNK_EX_BR \reg,\ruse
__THUNK_EPILOG
.endm
.macro GEN_B_THUNK disp,reg,ruse=%r1
__DECODE_DRR __THUNK_PROLOG_BC,\disp,\reg,\ruse
__THUNK_EX_BC \disp,\reg,\ruse
__THUNK_EPILOG
.endm
.macro BR_EX reg,ruse=%r1
557: __DECODE_RR __THUNK_BR,\reg,\ruse
.macro BR_EX reg
557: __DECODE_R __THUNK_BR,\reg
.pushsection .s390_indirect_branches,"a",@progbits
.long 557b-.
.popsection
.endm
.macro B_EX disp,reg,ruse=%r1
558: __DECODE_DRR __THUNK_BC,\disp,\reg,\ruse
.pushsection .s390_indirect_branches,"a",@progbits
.long 558b-.
.popsection
.endm
.macro BASR_EX rsave,rtarget,ruse=%r1
559: __DECODE_RRR __THUNK_BRASL,\rsave,\rtarget,\ruse
.macro BASR_EX rsave,rtarget
559: __DECODE_RR __THUNK_BRASL,\rsave,\rtarget
.pushsection .s390_indirect_branches,"a",@progbits
.long 559b-.
.popsection
.endm
#else
.macro GEN_BR_THUNK reg,ruse=%r1
.macro GEN_BR_THUNK reg
.endm
.macro GEN_B_THUNK disp,reg,ruse=%r1
.endm
.macro BR_EX reg,ruse=%r1
.macro BR_EX reg
br \reg
.endm
.macro B_EX disp,reg,ruse=%r1
b \disp(\reg)
.endm
.macro BASR_EX rsave,rtarget,ruse=%r1
.macro BASR_EX rsave,rtarget
basr \rsave,\rtarget
.endm
#endif /* CC_USING_EXPOLINE */


@ -39,7 +39,7 @@ u32 os_info_csum(struct os_info *os_info);
#ifdef CONFIG_CRASH_DUMP
void *os_info_old_entry(int nr, unsigned long *size);
int copy_oldmem_kernel(void *dst, void *src, size_t count);
int copy_oldmem_kernel(void *dst, unsigned long src, size_t count);
#else
static inline void *os_info_old_entry(int nr, unsigned long *size)
{


@ -92,11 +92,31 @@ typedef pte_t *pgtable_t;
#define pgprot_val(x) ((x).pgprot)
#define pgste_val(x) ((x).pgste)
#define pte_val(x) ((x).pte)
#define pmd_val(x) ((x).pmd)
#define pud_val(x) ((x).pud)
#define p4d_val(x) ((x).p4d)
#define pgd_val(x) ((x).pgd)
static inline unsigned long pte_val(pte_t pte)
{
return pte.pte;
}
static inline unsigned long pmd_val(pmd_t pmd)
{
return pmd.pmd;
}
static inline unsigned long pud_val(pud_t pud)
{
return pud.pud;
}
static inline unsigned long p4d_val(p4d_t p4d)
{
return p4d.p4d;
}
static inline unsigned long pgd_val(pgd_t pgd)
{
return pgd.pgd;
}
#define __pgste(x) ((pgste_t) { (x) } )
#define __pte(x) ((pte_t) { (x) } )


@ -283,9 +283,6 @@ int zpci_dma_exit_device(struct zpci_dev *zdev);
int __init zpci_irq_init(void);
void __init zpci_irq_exit(void);
int zpci_set_irq(struct zpci_dev *zdev);
int zpci_clear_irq(struct zpci_dev *zdev);
/* FMB */
int zpci_fmb_enable_device(struct zpci_dev *);
int zpci_fmb_disable_device(struct zpci_dev *);


@ -103,17 +103,17 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
{
pgd_val(*pgd) = _REGION1_ENTRY | __pa(p4d);
set_pgd(pgd, __pgd(_REGION1_ENTRY | __pa(p4d)));
}
static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
p4d_val(*p4d) = _REGION2_ENTRY | __pa(pud);
set_p4d(p4d, __p4d(_REGION2_ENTRY | __pa(pud)));
}
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
set_pud(pud, __pud(_REGION3_ENTRY | __pa(pmd)));
}
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
@ -129,7 +129,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
static inline void pmd_populate(struct mm_struct *mm,
pmd_t *pmd, pgtable_t pte)
{
pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
set_pmd(pmd, __pmd(_SEGMENT_ENTRY | __pa(pte)));
}
#define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte)


@ -538,6 +538,36 @@ static inline int mm_alloc_pgste(struct mm_struct *mm)
return 0;
}
static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
return __pte(pte_val(pte) & ~pgprot_val(prot));
}
static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
return __pte(pte_val(pte) | pgprot_val(prot));
}
static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot)
{
return __pmd(pmd_val(pmd) & ~pgprot_val(prot));
}
static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
{
return __pmd(pmd_val(pmd) | pgprot_val(prot));
}
static inline pud_t clear_pud_bit(pud_t pud, pgprot_t prot)
{
return __pud(pud_val(pud) & ~pgprot_val(prot));
}
static inline pud_t set_pud_bit(pud_t pud, pgprot_t prot)
{
return __pud(pud_val(pud) | pgprot_val(prot));
}
/*
* In the case that a guest uses storage keys
* faults should no longer be backed by zero pages
@ -570,7 +600,7 @@ static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new
unsigned long address = (unsigned long)ptr | 1;
asm volatile(
" .insn rre,0xb98a0000,%[r1],%[address]"
" cspg %[r1],%[address]"
: [r1] "+&d" (r1.pair), "+m" (*ptr)
: [address] "d" (address)
: "cc");
@ -804,15 +834,13 @@ static inline int pte_soft_dirty(pte_t pte)
static inline pte_t pte_mksoft_dirty(pte_t pte)
{
pte_val(pte) |= _PAGE_SOFT_DIRTY;
return pte;
return set_pte_bit(pte, __pgprot(_PAGE_SOFT_DIRTY));
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty
static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
return pte;
return clear_pte_bit(pte, __pgprot(_PAGE_SOFT_DIRTY));
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty
@ -823,14 +851,12 @@ static inline int pmd_soft_dirty(pmd_t pmd)
static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
return pmd;
return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_SOFT_DIRTY));
}
static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
return pmd;
return clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_SOFT_DIRTY));
}
/*
@ -881,32 +907,57 @@ static inline pgprot_t pte_pgprot(pte_t pte)
* pgd/pmd/pte modification functions
*/
static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
WRITE_ONCE(*pgdp, pgd);
}
static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
WRITE_ONCE(*p4dp, p4d);
}
static inline void set_pud(pud_t *pudp, pud_t pud)
{
WRITE_ONCE(*pudp, pud);
}
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
WRITE_ONCE(*pmdp, pmd);
}
static inline void set_pte(pte_t *ptep, pte_t pte)
{
WRITE_ONCE(*ptep, pte);
}
static inline void pgd_clear(pgd_t *pgd)
{
if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
pgd_val(*pgd) = _REGION1_ENTRY_EMPTY;
set_pgd(pgd, __pgd(_REGION1_ENTRY_EMPTY));
}
static inline void p4d_clear(p4d_t *p4d)
{
if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
p4d_val(*p4d) = _REGION2_ENTRY_EMPTY;
set_p4d(p4d, __p4d(_REGION2_ENTRY_EMPTY));
}
static inline void pud_clear(pud_t *pud)
{
if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
pud_val(*pud) = _REGION3_ENTRY_EMPTY;
set_pud(pud, __pud(_REGION3_ENTRY_EMPTY));
}
static inline void pmd_clear(pmd_t *pmdp)
{
pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
pte_val(*ptep) = _PAGE_INVALID;
set_pte(ptep, __pte(_PAGE_INVALID));
}
/*
@ -915,79 +966,74 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
*/
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
pte_val(pte) &= _PAGE_CHG_MASK;
pte_val(pte) |= pgprot_val(newprot);
pte = clear_pte_bit(pte, __pgprot(~_PAGE_CHG_MASK));
pte = set_pte_bit(pte, newprot);
/*
* newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
* has the invalid bit set, clear it again for readable, young pages
*/
if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
pte_val(pte) &= ~_PAGE_INVALID;
pte = clear_pte_bit(pte, __pgprot(_PAGE_INVALID));
/*
* newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
* protection bit set, clear it again for writable, dirty pages
*/
if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
pte_val(pte) &= ~_PAGE_PROTECT;
pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT));
return pte;
}
static inline pte_t pte_wrprotect(pte_t pte)
{
pte_val(pte) &= ~_PAGE_WRITE;
pte_val(pte) |= _PAGE_PROTECT;
return pte;
pte = clear_pte_bit(pte, __pgprot(_PAGE_WRITE));
return set_pte_bit(pte, __pgprot(_PAGE_PROTECT));
}
static inline pte_t pte_mkwrite(pte_t pte)
{
pte_val(pte) |= _PAGE_WRITE;
pte = set_pte_bit(pte, __pgprot(_PAGE_WRITE));
if (pte_val(pte) & _PAGE_DIRTY)
pte_val(pte) &= ~_PAGE_PROTECT;
pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT));
return pte;
}
static inline pte_t pte_mkclean(pte_t pte)
{
pte_val(pte) &= ~_PAGE_DIRTY;
pte_val(pte) |= _PAGE_PROTECT;
return pte;
pte = clear_pte_bit(pte, __pgprot(_PAGE_DIRTY));
return set_pte_bit(pte, __pgprot(_PAGE_PROTECT));
}
static inline pte_t pte_mkdirty(pte_t pte)
{
pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY;
pte = set_pte_bit(pte, __pgprot(_PAGE_DIRTY | _PAGE_SOFT_DIRTY));
if (pte_val(pte) & _PAGE_WRITE)
pte_val(pte) &= ~_PAGE_PROTECT;
pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT));
return pte;
}
static inline pte_t pte_mkold(pte_t pte)
{
pte_val(pte) &= ~_PAGE_YOUNG;
pte_val(pte) |= _PAGE_INVALID;
return pte;
pte = clear_pte_bit(pte, __pgprot(_PAGE_YOUNG));
return set_pte_bit(pte, __pgprot(_PAGE_INVALID));
}
static inline pte_t pte_mkyoung(pte_t pte)
{
pte_val(pte) |= _PAGE_YOUNG;
pte = set_pte_bit(pte, __pgprot(_PAGE_YOUNG));
if (pte_val(pte) & _PAGE_READ)
pte_val(pte) &= ~_PAGE_INVALID;
pte = clear_pte_bit(pte, __pgprot(_PAGE_INVALID));
return pte;
}
static inline pte_t pte_mkspecial(pte_t pte)
{
pte_val(pte) |= _PAGE_SPECIAL;
return pte;
return set_pte_bit(pte, __pgprot(_PAGE_SPECIAL));
}
#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
pte_val(pte) |= _PAGE_LARGE;
return pte;
return set_pte_bit(pte, __pgprot(_PAGE_LARGE));
}
#endif
@ -1006,7 +1052,7 @@ static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
if (__builtin_constant_p(opt) && opt == 0) {
/* Invalidation + TLB flush for the pte */
asm volatile(
" .insn rrf,0xb2210000,%[r1],%[r2],0,%[m4]"
" ipte %[r1],%[r2],0,%[m4]"
: "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
[m4] "i" (local));
return;
@ -1015,7 +1061,7 @@ static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
/* Invalidate ptes with options + TLB flush of the ptes */
opt = opt | (asce & _ASCE_ORIGIN);
asm volatile(
" .insn rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
" ipte %[r1],%[r2],%[r3],%[m4]"
: [r2] "+a" (address), [r3] "+a" (opt)
: [r1] "a" (pto), [m4] "i" (local) : "memory");
}
@ -1028,7 +1074,7 @@ static __always_inline void __ptep_ipte_range(unsigned long address, int nr,
/* Invalidate a range of ptes + TLB flush of the ptes */
do {
asm volatile(
" .insn rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
" ipte %[r1],%[r2],%[r3],%[m4]"
: [r2] "+a" (address), [r3] "+a" (nr)
: [r1] "a" (pto), [m4] "i" (local) : "memory");
} while (nr != 255);
@ -1114,7 +1160,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
if (full) {
res = *ptep;
*ptep = __pte(_PAGE_INVALID);
set_pte(ptep, __pte(_PAGE_INVALID));
} else {
res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}
@ -1198,11 +1244,11 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t entry)
{
if (pte_present(entry))
pte_val(entry) &= ~_PAGE_UNUSED;
entry = clear_pte_bit(entry, __pgprot(_PAGE_UNUSED));
if (mm_has_pgste(mm))
ptep_set_pte_at(mm, addr, ptep, entry);
else
*ptep = entry;
set_pte(ptep, entry);
}
/*
@ -1213,9 +1259,9 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
pte_t __pte;
pte_val(__pte) = physpage | pgprot_val(pgprot);
__pte = __pte(physpage | pgprot_val(pgprot));
if (!MACHINE_HAS_NX)
pte_val(__pte) &= ~_PAGE_NOEXEC;
__pte = clear_pte_bit(__pte, __pgprot(_PAGE_NOEXEC));
return pte_mkyoung(__pte);
}
@ -1355,61 +1401,57 @@ static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
return pmd;
pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_WRITE));
return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
}
static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_WRITE));
if (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)
pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
return pmd;
}
static inline pmd_t pmd_mkclean(pmd_t pmd)
{
pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
return pmd;
pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_DIRTY));
return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
}
static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_SOFT_DIRTY;
pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_SOFT_DIRTY));
if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
return pmd;
}
static inline pud_t pud_wrprotect(pud_t pud)
{
pud_val(pud) &= ~_REGION3_ENTRY_WRITE;
pud_val(pud) |= _REGION_ENTRY_PROTECT;
return pud;
pud = clear_pud_bit(pud, __pgprot(_REGION3_ENTRY_WRITE));
return set_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
}
static inline pud_t pud_mkwrite(pud_t pud)
{
pud_val(pud) |= _REGION3_ENTRY_WRITE;
pud = set_pud_bit(pud, __pgprot(_REGION3_ENTRY_WRITE));
if (pud_val(pud) & _REGION3_ENTRY_DIRTY)
pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
pud = clear_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
return pud;
}
static inline pud_t pud_mkclean(pud_t pud)
{
pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
pud_val(pud) |= _REGION_ENTRY_PROTECT;
return pud;
pud = clear_pud_bit(pud, __pgprot(_REGION3_ENTRY_DIRTY));
return set_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
}
static inline pud_t pud_mkdirty(pud_t pud)
{
pud_val(pud) |= _REGION3_ENTRY_DIRTY | _REGION3_ENTRY_SOFT_DIRTY;
pud = set_pud_bit(pud, __pgprot(_REGION3_ENTRY_DIRTY | _REGION3_ENTRY_SOFT_DIRTY));
if (pud_val(pud) & _REGION3_ENTRY_WRITE)
pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
pud = clear_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
return pud;
}
@ -1433,37 +1475,39 @@ static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG));
if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
return pmd;
}
static inline pmd_t pmd_mkold(pmd_t pmd)
{
pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
return pmd;
pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG));
return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
}
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
pmd_val(pmd) |= massage_pgprot_pmd(newprot);
unsigned long mask;
mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
mask |= _SEGMENT_ENTRY_DIRTY;
mask |= _SEGMENT_ENTRY_YOUNG;
mask |= _SEGMENT_ENTRY_LARGE;
mask |= _SEGMENT_ENTRY_SOFT_DIRTY;
pmd = __pmd(pmd_val(pmd) & mask);
pmd = set_pmd_bit(pmd, __pgprot(massage_pgprot_pmd(newprot)));
if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
return pmd;
}
static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
pmd_t __pmd;
pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
return __pmd;
return __pmd(physpage + massage_pgprot_pmd(pgprot));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
@ -1491,7 +1535,7 @@ static __always_inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
if (__builtin_constant_p(opt) && opt == 0) {
/* flush without guest asce */
asm volatile(
" .insn rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
" idte %[r1],0,%[r2],%[m4]"
: "+m" (*pmdp)
: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)),
[m4] "i" (local)
@ -1499,7 +1543,7 @@ static __always_inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
} else {
/* flush with guest asce */
asm volatile(
" .insn rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
" idte %[r1],%[r3],%[r2],%[m4]"
: "+m" (*pmdp)
: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt),
[r3] "a" (asce), [m4] "i" (local)
@ -1518,7 +1562,7 @@ static __always_inline void __pudp_idte(unsigned long addr, pud_t *pudp,
if (__builtin_constant_p(opt) && opt == 0) {
/* flush without guest asce */
asm volatile(
" .insn rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
" idte %[r1],0,%[r2],%[m4]"
: "+m" (*pudp)
: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)),
[m4] "i" (local)
@ -1526,7 +1570,7 @@ static __always_inline void __pudp_idte(unsigned long addr, pud_t *pudp,
} else {
/* flush with guest asce */
asm volatile(
" .insn rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
" idte %[r1],%[r3],%[r2],%[m4]"
: "+m" (*pudp)
: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt),
[r3] "a" (asce), [m4] "i" (local)
@ -1585,16 +1629,15 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t entry)
{
if (!MACHINE_HAS_NX)
pmd_val(entry) &= ~_SEGMENT_ENTRY_NOEXEC;
*pmdp = entry;
entry = clear_pmd_bit(entry, __pgprot(_SEGMENT_ENTRY_NOEXEC));
set_pmd(pmdp, entry);
}
static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
return pmd;
pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_LARGE));
pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG));
return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
}
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
@ -1611,7 +1654,7 @@ static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
{
if (full) {
pmd_t pmd = *pmdp;
*pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
return pmd;
}
return pmdp_xchg_lazy(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
@ -1690,12 +1733,12 @@ static inline int has_transparent_hugepage(void)
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
pte_t pte;
unsigned long pteval;
pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
return pte;
pteval = _PAGE_INVALID | _PAGE_PROTECT;
pteval |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
pteval |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
return __pte(pteval);
}
static inline unsigned long __swp_type(swp_entry_t entry)


@ -225,8 +225,7 @@ static inline unsigned long __ecag(unsigned int asi, unsigned char parm)
{
unsigned long val;
asm volatile(".insn rsy,0xeb000000004c,%0,0,0(%1)" /* ecag */
: "=d" (val) : "a" (asi << 8 | parm));
asm volatile("ecag %0,0,0(%1)" : "=d" (val) : "a" (asi << 8 | parm));
return val;
}
@ -313,11 +312,11 @@ static __always_inline void __noreturn disabled_wait(void)
* Basic Program Check Handler.
*/
extern void s390_base_pgm_handler(void);
extern void (*s390_base_pgm_handler_fn)(void);
extern void (*s390_base_pgm_handler_fn)(struct pt_regs *regs);
#define ARCH_LOW_ADDRESS_LIMIT 0x7fffffffUL
extern int memcpy_real(void *, void *, size_t);
extern int memcpy_real(void *, unsigned long, size_t);
extern void memcpy_absolute(void *, void *, size_t);
#define mem_assign_absolute(dest, val) do { \


@ -1,7 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright IBM Corp. 2007
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
#ifndef _ASM_S390_SCLP_H


@ -3,7 +3,6 @@
* Copyright IBM Corp. 1999, 2012
* Author(s): Denis Joseph Barrow,
* Martin Schwidefsky <schwidefsky@de.ibm.com>,
* Heiko Carstens <heiko.carstens@de.ibm.com>,
*/
#ifndef __ASM_SMP_H
#define __ASM_SMP_H


@ -36,22 +36,14 @@ static inline bool on_stack(struct stack_info *info,
/*
* Stack layout of a C stack frame.
* Kernel uses the packed stack layout (-mpacked-stack).
*/
#ifndef __PACK_STACK
struct stack_frame {
unsigned long back_chain;
unsigned long empty1[5];
unsigned long gprs[10];
unsigned int empty2[8];
};
#else
struct stack_frame {
unsigned long empty1[5];
unsigned int empty2[8];
unsigned long gprs[10];
unsigned long back_chain;
};
#endif
/*
* Unlike current_stack_pointer() which simply returns current value of %r15
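
Since -mpacked-stack is now used unconditionally (see the Kconfig and
Makefile hunks above), back_chain always sits at the end of struct
stack_frame. One step of a back-chain walk then looks roughly like this
(a sketch only, not the kernel's unwinder; it assumes sp points at a
valid frame):

    static unsigned long backchain_step(unsigned long sp)
    {
            struct stack_frame *sf = (struct stack_frame *)sp;

            return READ_ONCE(sf->back_chain); /* 0 terminates the chain */
    }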


@ -148,7 +148,7 @@ struct ptff_qui {
asm volatile( \
" lgr 0,%[reg0]\n" \
" lgr 1,%[reg1]\n" \
" .insn e,0x0104\n" \
" ptff\n" \
" ipm %[rc]\n" \
" srl %[rc],28\n" \
: [rc] "=&d" (rc), "+m" (*(struct addrtype *)reg1) \
@ -187,14 +187,10 @@ static inline unsigned long get_tod_clock(void)
static inline unsigned long get_tod_clock_fast(void)
{
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
unsigned long clk;
asm volatile("stckf %0" : "=Q" (clk) : : "cc");
return clk;
#else
return get_tod_clock();
#endif
}
static inline cycles_t get_cycles(void)


@ -25,9 +25,7 @@ static inline void __tlb_flush_idte(unsigned long asce)
if (MACHINE_HAS_TLB_GUEST)
opt |= IDTE_GUEST_ASCE;
/* Global TLB flush for the mm */
asm volatile(
" .insn rrf,0xb98e0000,0,%0,%1,0"
: : "a" (opt), "a" (asce) : "cc");
asm volatile("idte 0,%1,%0" : : "a" (opt), "a" (asce) : "cc");
}
/*


@ -13,6 +13,7 @@
/*
* User space memory access functions
*/
#include <asm/asm-extable.h>
#include <asm/processor.h>
#include <asm/ctl_reg.h>
#include <asm/extable.h>
@ -79,8 +80,6 @@ union oac {
};
};
#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
#define __put_get_user_asm(to, from, size, oac_spec) \
({ \
int __rc; \
@ -90,14 +89,10 @@ union oac {
"0: mvcos %[_to],%[_from],%[_size]\n" \
"1: xr %[rc],%[rc]\n" \
"2:\n" \
".pushsection .fixup, \"ax\"\n" \
"3: lhi %[rc],%[retval]\n" \
" jg 2b\n" \
".popsection\n" \
EX_TABLE(0b,3b) EX_TABLE(1b,3b) \
EX_TABLE_UA(0b,2b,%[rc]) EX_TABLE_UA(1b,2b,%[rc]) \
: [rc] "=&d" (__rc), [_to] "+Q" (*(to)) \
: [_size] "d" (size), [_from] "Q" (*(from)), \
[retval] "K" (-EFAULT), [spec] "d" (oac_spec.val) \
[spec] "d" (oac_spec.val) \
: "cc", "0"); \
__rc; \
})
@ -178,22 +173,6 @@ static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsign
return rc;
}
#else /* CONFIG_HAVE_MARCH_Z10_FEATURES */
static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
size = raw_copy_to_user(ptr, x, size);
return size ? -EFAULT : 0;
}
static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
size = raw_copy_from_user(x, ptr, size);
return size ? -EFAULT : 0;
}
#endif /* CONFIG_HAVE_MARCH_Z10_FEATURES */
/*
* These are the main single-value transfer routines. They automatically
* use the right size if we just have the right pointer type.
@ -289,7 +268,7 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo
return __clear_user(to, n);
}
int copy_to_user_real(void __user *dest, void *src, unsigned long count);
int copy_to_user_real(void __user *dest, unsigned long src, unsigned long count);
void *s390_kernel_write(void *dst, const void *src, size_t size);
int __noreturn __put_kernel_bad(void);
@ -302,13 +281,9 @@ int __noreturn __put_kernel_bad(void);
"0: " insn " %2,%1\n" \
"1: xr %0,%0\n" \
"2:\n" \
".pushsection .fixup, \"ax\"\n" \
"3: lhi %0,%3\n" \
" jg 2b\n" \
".popsection\n" \
EX_TABLE(0b,3b) EX_TABLE(1b,3b) \
EX_TABLE_UA(0b,2b,%0) EX_TABLE_UA(1b,2b,%0) \
: "=d" (__rc), "+Q" (*(to)) \
: "d" (val), "K" (-EFAULT) \
: "d" (val) \
: "cc"); \
__rc; \
})
@ -349,13 +324,9 @@ int __noreturn __get_kernel_bad(void);
"0: " insn " %1,%2\n" \
"1: xr %0,%0\n" \
"2:\n" \
".pushsection .fixup, \"ax\"\n" \
"3: lhi %0,%3\n" \
" jg 2b\n" \
".popsection\n" \
EX_TABLE(0b,3b) EX_TABLE(1b,3b) \
EX_TABLE_UA(0b,2b,%0) EX_TABLE_UA(1b,2b,%0) \
: "=d" (__rc), "+d" (val) \
: "Q" (*(from)), "K" (-EFAULT) \
: "Q" (*(from)) \
: "cc"); \
__rc; \
})


@ -288,7 +288,7 @@ struct zcrypt_device_matrix_ext {
* 0x08: CEX3A
* 0x0a: CEX4
* 0x0b: CEX5
* 0x0c: CEX6 and CEX7
* 0x0c: CEX6, CEX7 or CEX8
* 0x0d: device is disabled
*
* ZCRYPT_QDEPTH_MASK


@ -57,7 +57,9 @@ obj-$(CONFIG_COMPAT) += $(compat-obj-y)
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_KPROBES) += kprobes_insn_page.o
obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
obj-$(CONFIG_KPROBES) += mcount.o
obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o
obj-$(CONFIG_FUNCTION_TRACER) += mcount.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_UPROBES) += uprobes.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o


@ -50,9 +50,7 @@ int main(void)
BLANK();
/* idle data offsets */
OFFSET(__CLOCK_IDLE_ENTER, s390_idle_data, clock_idle_enter);
OFFSET(__CLOCK_IDLE_EXIT, s390_idle_data, clock_idle_exit);
OFFSET(__TIMER_IDLE_ENTER, s390_idle_data, timer_idle_enter);
OFFSET(__TIMER_IDLE_EXIT, s390_idle_data, timer_idle_exit);
OFFSET(__MT_CYCLES_ENTER, s390_idle_data, mt_cycles_enter);
BLANK();
/* hardware defined lowcore locations 0x000 - 0x1ff */
@ -123,14 +121,12 @@ int main(void)
OFFSET(__LC_USER_ASCE, lowcore, user_asce);
OFFSET(__LC_LPP, lowcore, lpp);
OFFSET(__LC_CURRENT_PID, lowcore, current_pid);
OFFSET(__LC_PERCPU_OFFSET, lowcore, percpu_offset);
OFFSET(__LC_MACHINE_FLAGS, lowcore, machine_flags);
OFFSET(__LC_PREEMPT_COUNT, lowcore, preempt_count);
OFFSET(__LC_GMAP, lowcore, gmap);
OFFSET(__LC_BR_R1, lowcore, br_r1_trampoline);
OFFSET(__LC_LAST_BREAK, lowcore, last_break);
/* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */
OFFSET(__LC_DUMP_REIPL, lowcore, ipib);
OFFSET(__LC_VMCORE_INFO, lowcore, vmcore_info);
OFFSET(__LC_OS_INFO, lowcore, os_info);
/* hardware defined lowcore locations 0x1000 - 0x18ff */
OFFSET(__LC_MCESAD, lowcore, mcesad);
OFFSET(__LC_EXT_PARAMS2, lowcore, ext_params2);


@ -3,8 +3,7 @@
* arch/s390/kernel/base.S
*
* Copyright IBM Corp. 2006, 2007
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
* Michael Holzheu <holzheu@de.ibm.com>
* Author(s): Michael Holzheu <holzheu@de.ibm.com>
*/
#include <linux/linkage.h>
@ -15,18 +14,28 @@
GEN_BR_THUNK %r9
GEN_BR_THUNK %r14
__PT_R0 = __PT_GPRS
__PT_R8 = __PT_GPRS + 64
ENTRY(s390_base_pgm_handler)
stmg %r0,%r15,__LC_SAVE_AREA_SYNC
basr %r13,0
0: aghi %r15,-STACK_FRAME_OVERHEAD
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
aghi %r15,-(STACK_FRAME_OVERHEAD+__PT_SIZE)
la %r11,STACK_FRAME_OVERHEAD(%r15)
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
stmg %r0,%r7,__PT_R0(%r11)
mvc __PT_PSW(16,%r11),__LC_PGM_OLD_PSW
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
lgr %r2,%r11
larl %r1,s390_base_pgm_handler_fn
lg %r9,0(%r1)
ltgr %r9,%r9
jz 1f
BASR_EX %r14,%r9
lmg %r0,%r15,__LC_SAVE_AREA_SYNC
lpswe __LC_PGM_OLD_PSW
1: lpswe disabled_wait_psw-0b(%r13)
mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
lpswe __LC_RETURN_PSW
1: larl %r13,disabled_wait_psw
lpswe 0(%r13)
ENDPROC(s390_base_pgm_handler)
.align 8
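
The rework above builds a struct pt_regs on the stack and passes it to
s390_base_pgm_handler_fn, so C-side handlers can use the generic fixup
machinery; the early.c hunk at the end of this page consumes it likely
along these lines (a sketch; disabled_wait() is declared in the
processor.h hunk above):

    static void early_pgm_check_handler(struct pt_regs *regs)
    {
            if (!fixup_exception(regs))
                    disabled_wait();
    }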


@ -3,7 +3,6 @@
* Extract CPU cache information and expose them via sysfs.
*
* Copyright IBM Corp. 2012
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
#include <linux/seq_file.h>
@ -71,8 +70,6 @@ void show_cacheinfo(struct seq_file *m)
struct cacheinfo *cache;
int idx;
if (!test_facility(34))
return;
this_cpu_ci = get_cpu_cacheinfo(cpumask_any(cpu_online_mask));
for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) {
cache = this_cpu_ci->info_list + idx;
@ -132,8 +129,6 @@ int init_cache_level(unsigned int cpu)
union cache_topology ct;
enum cache_type ctype;
if (!test_facility(34))
return -EOPNOTSUPP;
if (!this_cpu_ci)
return -EINVAL;
ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
@ -157,8 +152,6 @@ int populate_cache_leaves(unsigned int cpu)
union cache_topology ct;
enum cache_type ctype;
if (!test_facility(34))
return -EOPNOTSUPP;
ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
for (idx = 0, level = 0; level < this_cpu_ci->num_levels &&
idx < this_cpu_ci->num_leaves; idx++, level++) {


@ -89,7 +89,7 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
_sigregs32 user_sregs;
int i;
/* Alwys make any pending restarted system call return -EINTR */
/* Always make any pending restarted system call return -EINTR */
current->restart_block.fn = do_no_restart_syscall;
if (__copy_from_user(&user_sregs, &sregs->regs, sizeof(user_sregs)))


@ -132,28 +132,27 @@ static inline void *load_real_addr(void *addr)
/*
* Copy memory of the old, dumped system to a kernel space virtual address
*/
int copy_oldmem_kernel(void *dst, void *src, size_t count)
int copy_oldmem_kernel(void *dst, unsigned long src, size_t count)
{
unsigned long from, len;
unsigned long len;
void *ra;
int rc;
while (count) {
from = __pa(src);
if (!oldmem_data.start && from < sclp.hsa_size) {
if (!oldmem_data.start && src < sclp.hsa_size) {
/* Copy from zfcp/nvme dump HSA area */
len = min(count, sclp.hsa_size - from);
rc = memcpy_hsa_kernel(dst, from, len);
len = min(count, sclp.hsa_size - src);
rc = memcpy_hsa_kernel(dst, src, len);
if (rc)
return rc;
} else {
/* Check for swapped kdump oldmem areas */
if (oldmem_data.start && from - oldmem_data.start < oldmem_data.size) {
from -= oldmem_data.start;
len = min(count, oldmem_data.size - from);
} else if (oldmem_data.start && from < oldmem_data.size) {
len = min(count, oldmem_data.size - from);
from += oldmem_data.start;
if (oldmem_data.start && src - oldmem_data.start < oldmem_data.size) {
src -= oldmem_data.start;
len = min(count, oldmem_data.size - src);
} else if (oldmem_data.start && src < oldmem_data.size) {
len = min(count, oldmem_data.size - src);
src += oldmem_data.start;
} else {
len = count;
}
@@ -163,7 +162,7 @@ int copy_oldmem_kernel(void *dst, void *src, size_t count)
} else {
ra = dst;
}
if (memcpy_real(ra, (void *) from, len))
if (memcpy_real(ra, src, len))
return -EFAULT;
}
dst += len;
@@ -176,31 +175,30 @@ int copy_oldmem_kernel(void *dst, void *src, size_t count)
/*
* Copy memory of the old, dumped system to a user space virtual address
*/
static int copy_oldmem_user(void __user *dst, void *src, size_t count)
static int copy_oldmem_user(void __user *dst, unsigned long src, size_t count)
{
unsigned long from, len;
unsigned long len;
int rc;
while (count) {
from = __pa(src);
if (!oldmem_data.start && from < sclp.hsa_size) {
if (!oldmem_data.start && src < sclp.hsa_size) {
/* Copy from zfcp/nvme dump HSA area */
len = min(count, sclp.hsa_size - from);
rc = memcpy_hsa_user(dst, from, len);
len = min(count, sclp.hsa_size - src);
rc = memcpy_hsa_user(dst, src, len);
if (rc)
return rc;
} else {
/* Check for swapped kdump oldmem areas */
if (oldmem_data.start && from - oldmem_data.start < oldmem_data.size) {
from -= oldmem_data.start;
len = min(count, oldmem_data.size - from);
} else if (oldmem_data.start && from < oldmem_data.size) {
len = min(count, oldmem_data.size - from);
from += oldmem_data.start;
if (oldmem_data.start && src - oldmem_data.start < oldmem_data.size) {
src -= oldmem_data.start;
len = min(count, oldmem_data.size - src);
} else if (oldmem_data.start && src < oldmem_data.size) {
len = min(count, oldmem_data.size - src);
src += oldmem_data.start;
} else {
len = count;
}
rc = copy_to_user_real(dst, (void *) from, count);
rc = copy_to_user_real(dst, src, count);
if (rc)
return rc;
}
@@ -217,12 +215,12 @@ static int copy_oldmem_user(void __user *dst, void *src, size_t count)
ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
unsigned long offset, int userbuf)
{
void *src;
unsigned long src;
int rc;
if (!csize)
return 0;
src = (void *) (pfn << PAGE_SHIFT) + offset;
src = pfn_to_phys(pfn) + offset;
if (userbuf)
rc = copy_oldmem_user((void __force __user *) buf, src, csize);
else
@@ -429,10 +427,10 @@ static void *nt_prpsinfo(void *ptr)
static void *get_vmcoreinfo_old(unsigned long *size)
{
char nt_name[11], *vmcoreinfo;
unsigned long addr;
Elf64_Nhdr note;
void *addr;
if (copy_oldmem_kernel(&addr, &S390_lowcore.vmcore_info, sizeof(addr)))
if (copy_oldmem_kernel(&addr, __LC_VMCORE_INFO, sizeof(addr)))
return NULL;
memset(nt_name, 0, sizeof(nt_name));
if (copy_oldmem_kernel(&note, addr, sizeof(note)))
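Note on the conversion above: copy_oldmem_kernel() now takes its source as a physical address, so callers pass lowcore offsets such as __LC_VMCORE_INFO or pfn-derived addresses directly instead of casting them to pointers. A minimal sketch of a caller under the new prototype; the wrapper name is illustrative, not part of this series:

	/*
	 * Sketch only: read a value from the dumped system at a given
	 * physical address via the new copy_oldmem_kernel() prototype.
	 */
	static int read_oldmem_ulong(unsigned long paddr, unsigned long *val)
	{
		/* src is a physical address now, no (void *) cast needed */
		return copy_oldmem_kernel(val, paddr, sizeof(*val));
	}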


@@ -11,6 +11,7 @@
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <asm/asm-extable.h>
#include <asm/diag.h>
#include <asm/trace/diag.h>
#include <asm/sections.h>


@@ -2,7 +2,6 @@
/*
* Copyright IBM Corp. 2007, 2009
* Author(s): Hongjie Yang <hongjie@us.ibm.com>,
* Heiko Carstens <heiko.carstens@de.ibm.com>
*/
#define KMSG_COMPONENT "setup"
@@ -18,6 +17,7 @@
#include <linux/pfn.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <asm/asm-extable.h>
#include <asm/diag.h>
#include <asm/ebcdic.h>
#include <asm/ipl.h>
@@ -149,22 +149,10 @@ static __init void setup_topology(void)
topology_max_mnest = max_mnest;
}
static void early_pgm_check_handler(void)
static void early_pgm_check_handler(struct pt_regs *regs)
{
const struct exception_table_entry *fixup;
unsigned long cr0, cr0_new;
unsigned long addr;
addr = S390_lowcore.program_old_psw.addr;
fixup = s390_search_extables(addr);
if (!fixup)
if (!fixup_exception(regs))
disabled_wait();
/* Disable low address protection before storing into lowcore. */
__ctl_store(cr0, 0, 0);
cr0_new = cr0 & ~(1UL << 28);
__ctl_load(cr0_new, 0, 0);
S390_lowcore.program_old_psw.addr = extable_fixup(fixup);
__ctl_load(cr0, 0, 0);
}
static noinline __init void setup_lowcore_early(void)
@@ -294,6 +282,11 @@ static void __init check_image_bootable(void)
disabled_wait();
}
static void __init sort_amode31_extable(void)
{
sort_extable(__start_amode31_ex_table, __stop_amode31_ex_table);
}
void __init startup_init(void)
{
sclp_early_adjust_va();
@@ -302,6 +295,7 @@ void __init startup_init(void)
time_early_init();
init_kernel_storage_key();
lockdep_off();
sort_amode31_extable();
setup_lowcore_early();
setup_facility_list();
detect_machine_type();


@@ -6,11 +6,11 @@
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
* Hartmut Penner (hp@de.ibm.com),
* Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
* Heiko Carstens <heiko.carstens@de.ibm.com>
*/
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/asm-extable.h>
#include <asm/alternative-asm.h>
#include <asm/processor.h>
#include <asm/cache.h>
@@ -98,11 +98,6 @@ _LPP_OFFSET = __LC_LPP
#endif
.endm
.macro STCK savearea
ALTERNATIVE ".insn s,0xb2050000,\savearea", \
".insn s,0xb27c0000,\savearea", 25
.endm
/*
* The TSTMSK macro generates a test-under-mask instruction by
* calculating the memory offset for the specified mask value.
@@ -191,7 +186,6 @@ _LPP_OFFSET = __LC_LPP
#endif
GEN_BR_THUNK %r14
GEN_BR_THUNK %r14,%r13
.section .kprobes.text, "ax"
.Ldummy:
@@ -232,7 +226,7 @@ ENTRY(__switch_to)
aghi %r3,__TASK_pid
mvc __LC_CURRENT_PID(4,%r0),0(%r3) # store pid of next
lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
ALTERNATIVE "", "lpp _LPP_OFFSET", 40
BR_EX %r14
ENDPROC(__switch_to)
@@ -443,7 +437,7 @@ ENDPROC(pgm_check_handler)
*/
.macro INT_HANDLER name,lc_old_psw,handler
ENTRY(\name)
STCK __LC_INT_CLOCK
stckf __LC_INT_CLOCK
stpt __LC_SYS_ENTER_TIMER
STBEAR __LC_LAST_BREAK
BPOFF
@@ -515,7 +509,7 @@ ENTRY(psw_idle)
.Lpsw_idle_stcctm:
oi __LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
BPON
STCK __CLOCK_IDLE_ENTER(%r2)
stckf __CLOCK_IDLE_ENTER(%r2)
stpt __TIMER_IDLE_ENTER(%r2)
lpswe __SF_EMPTY(%r15)
.globl psw_idle_exit
@@ -527,7 +521,7 @@ ENDPROC(psw_idle)
* Machine check handler routines
*/
ENTRY(mcck_int_handler)
STCK __LC_MCCK_CLOCK
stckf __LC_MCCK_CLOCK
BPOFF
la %r1,4095 # validate r1
spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # validate cpu timer
@@ -654,7 +648,7 @@ ENTRY(mcck_int_handler)
ENDPROC(mcck_int_handler)
ENTRY(restart_int_handler)
ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
ALTERNATIVE "", "lpp _LPP_OFFSET", 40
stg %r15,__LC_SAVE_AREA_RESTART
TSTMSK __LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4
jz 0f


@@ -5,6 +5,7 @@
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/signal.h>
#include <asm/extable.h>
#include <asm/ptrace.h>
#include <asm/idle.h>


@@ -4,8 +4,7 @@
*
* Copyright IBM Corp. 2009,2014
*
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <linux/moduleloader.h>
@@ -59,20 +58,11 @@ asm(
);
#ifdef CONFIG_EXPOLINE
asm(
" .align 16\n"
"ftrace_shared_hotpatch_trampoline_ex:\n"
" lmg %r0,%r1,2(%r1)\n"
" ex %r0," __stringify(__LC_BR_R1) "(%r0)\n"
" j .\n"
"ftrace_shared_hotpatch_trampoline_ex_end:\n"
);
asm(
" .align 16\n"
"ftrace_shared_hotpatch_trampoline_exrl:\n"
" lmg %r0,%r1,2(%r1)\n"
" .insn ril,0xc60000000000,%r0,0f\n" /* exrl */
" exrl %r0,0f\n"
" j .\n"
"0: br %r1\n"
"ftrace_shared_hotpatch_trampoline_exrl_end:\n"
@@ -91,12 +81,8 @@ static const char *ftrace_shared_hotpatch_trampoline(const char **end)
tend = ftrace_shared_hotpatch_trampoline_br_end;
#ifdef CONFIG_EXPOLINE
if (!nospec_disable) {
tstart = ftrace_shared_hotpatch_trampoline_ex;
tend = ftrace_shared_hotpatch_trampoline_ex_end;
if (test_facility(35)) { /* exrl */
tstart = ftrace_shared_hotpatch_trampoline_exrl;
tend = ftrace_shared_hotpatch_trampoline_exrl_end;
}
tstart = ftrace_shared_hotpatch_trampoline_exrl;
tend = ftrace_shared_hotpatch_trampoline_exrl_end;
}
#endif /* CONFIG_EXPOLINE */
if (end)
@@ -194,25 +180,26 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
return 0;
}
static void brcl_disable(void *brcl)
static int ftrace_patch_branch_mask(void *addr, u16 expected, bool enable)
{
u8 op = 0x04; /* set mask field to zero */
u16 old;
u8 op;
s390_kernel_write((char *)brcl + 1, &op, sizeof(op));
if (get_kernel_nofault(old, addr))
return -EFAULT;
if (old != expected)
return -EINVAL;
/* set mask field to all ones or zeroes */
op = enable ? 0xf4 : 0x04;
s390_kernel_write((char *)addr + 1, &op, sizeof(op));
return 0;
}
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long addr)
{
brcl_disable((void *)rec->ip);
return 0;
}
static void brcl_enable(void *brcl)
{
u8 op = 0xf4; /* set mask field to all ones */
s390_kernel_write((char *)brcl + 1, &op, sizeof(op));
/* Expect brcl 0xf,... */
return ftrace_patch_branch_mask((void *)rec->ip, 0xc0f4, false);
}
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
@@ -223,8 +210,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
if (IS_ERR(trampoline))
return PTR_ERR(trampoline);
s390_kernel_write(&trampoline->interceptor, &addr, sizeof(addr));
brcl_enable((void *)rec->ip);
return 0;
/* Expect brcl 0x0,... */
return ftrace_patch_branch_mask((void *)rec->ip, 0xc004, true);
}
int ftrace_update_ftrace_func(ftrace_func_t func)
@@ -297,14 +284,24 @@ NOKPROBE_SYMBOL(prepare_ftrace_return);
*/
int ftrace_enable_ftrace_graph_caller(void)
{
brcl_disable(ftrace_graph_caller);
int rc;
/* Expect brc 0xf,... */
rc = ftrace_patch_branch_mask(ftrace_graph_caller, 0xa7f4, false);
if (rc)
return rc;
text_poke_sync_lock();
return 0;
}
int ftrace_disable_ftrace_graph_caller(void)
{
brcl_enable(ftrace_graph_caller);
int rc;
/* Expect brc 0x0,... */
rc = ftrace_patch_branch_mask(ftrace_graph_caller, 0xa704, true);
if (rc)
return rc;
text_poke_sync_lock();
return 0;
}
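The ftrace_patch_branch_mask() helper above makes every patch site self-checking: the current halfword is read with get_kernel_nofault() and compared against the expected opcode/mask pair before the mask nibble is rewritten. A hypothetical caller that arms and disarms a site known to hold a brcl; the function and the patch address are illustrative, not from this series:

	/* Toggle a site containing "brcl 0,<target>" (first halfword 0xc004). */
	static int toggle_brcl_site(unsigned long ip)
	{
		int rc;

		/* brcl 0 (0xc004) -> brcl 0xf: the branch becomes taken */
		rc = ftrace_patch_branch_mask((void *)ip, 0xc004, true);
		if (rc)
			return rc;
		/* brcl 0xf (0xc0f4) -> brcl 0: back to a nop */
		return ftrace_patch_branch_mask((void *)ip, 0xc0f4, false);
	}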


@@ -16,8 +16,6 @@ extern struct ftrace_hotpatch_trampoline __ftrace_hotpatch_trampolines_start[];
extern struct ftrace_hotpatch_trampoline __ftrace_hotpatch_trampolines_end[];
extern const char ftrace_shared_hotpatch_trampoline_br[];
extern const char ftrace_shared_hotpatch_trampoline_br_end[];
extern const char ftrace_shared_hotpatch_trampoline_ex[];
extern const char ftrace_shared_hotpatch_trampoline_ex_end[];
extern const char ftrace_shared_hotpatch_trampoline_exrl[];
extern const char ftrace_shared_hotpatch_trampoline_exrl_end[];
extern const char ftrace_plt_template[];


@@ -5,7 +5,6 @@
* Author(s): Hartmut Penner <hp@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Rob van der Heij <rvdhei@iae.nl>
* Heiko Carstens <heiko.carstens@de.ibm.com>
*
*/


@@ -4,7 +4,6 @@
*
* Copyright IBM Corp. 2005, 2012
* Author(s): Michael Holzheu <holzheu@de.ibm.com>
* Heiko Carstens <heiko.carstens@de.ibm.com>
* Volker Sameske <sameske@de.ibm.com>
*/
@@ -20,6 +19,7 @@
#include <linux/gfp.h>
#include <linux/crash_dump.h>
#include <linux/debug_locks.h>
#include <asm/asm-extable.h>
#include <asm/diag.h>
#include <asm/ipl.h>
#include <asm/smp.h>


@@ -342,7 +342,7 @@ static irqreturn_t do_ext_interrupt(int irq, void *dummy)
struct ext_int_info *p;
int index;
ext_code = *(struct ext_code *) &regs->int_code;
ext_code.int_code = regs->int_code;
if (ext_code.code != EXT_IRQ_CLK_COMP)
set_cpu_flag(CIF_NOHZ_DELAY);
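The plain assignment above works because struct ext_code carries an anonymous union overlaying the subcode/code pair with a single 32-bit member; the sketch below is the approximate shape, the authoritative definition lives in the s390 irq headers, not in this diff:

	/* Approximate layout assumed by "ext_code.int_code = regs->int_code" */
	struct ext_code {
		union {
			struct {
				unsigned short subcode;
				unsigned short code;
			};
			unsigned int int_code;
		};
	};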


@@ -372,33 +372,26 @@ static int kprobe_handler(struct pt_regs *regs)
}
NOKPROBE_SYMBOL(kprobe_handler);
/*
* Function return probe trampoline:
* - init_kprobes() establishes a probepoint here
* - When the probed function returns, this probe
* causes the handlers to fire
*/
static void __used kretprobe_trampoline_holder(void)
void arch_kretprobe_fixup_return(struct pt_regs *regs,
kprobe_opcode_t *correct_ret_addr)
{
asm volatile(".global __kretprobe_trampoline\n"
"__kretprobe_trampoline: bcr 0,0\n");
/* Replace fake return address with real one. */
regs->gprs[14] = (unsigned long)correct_ret_addr;
}
NOKPROBE_SYMBOL(arch_kretprobe_fixup_return);
/*
* Called when the probe at kretprobe trampoline is hit
* Called from __kretprobe_trampoline
*/
static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
void trampoline_probe_handler(struct pt_regs *regs)
{
regs->psw.addr = __kretprobe_trampoline_handler(regs, NULL);
/*
* By returning a non-zero value, we are telling
* kprobe_handler() that we don't want the post_handler
* to run (and have re-enabled preemption)
*/
return 1;
kretprobe_trampoline_handler(regs, NULL);
}
NOKPROBE_SYMBOL(trampoline_probe_handler);
/* assembler function that handles the kretprobes must not be probed itself */
NOKPROBE_SYMBOL(__kretprobe_trampoline);
/*
* Called after single-stepping. p->addr is the address of the
* instruction whose first byte has been replaced by the "breakpoint"
@@ -465,7 +458,6 @@ static int kprobe_trap_handler(struct pt_regs *regs, int trapnr)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
struct kprobe *p = kprobe_running();
const struct exception_table_entry *entry;
switch(kcb->kprobe_status) {
case KPROBE_HIT_SS:
@@ -487,10 +479,8 @@ static int kprobe_trap_handler(struct pt_regs *regs, int trapnr)
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
entry = s390_search_extables(regs->psw.addr);
if (entry && ex_handle(entry, regs))
if (fixup_exception(regs))
return 1;
/*
* fixup_exception() could not handle it,
* Let do_page_fault() fix it.
@@ -554,18 +544,13 @@ int kprobe_exceptions_notify(struct notifier_block *self,
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);
static struct kprobe trampoline = {
.addr = (kprobe_opcode_t *) &__kretprobe_trampoline,
.pre_handler = trampoline_probe_handler
};
int __init arch_init_kprobes(void)
{
return register_kprobe(&trampoline);
return 0;
}
int arch_trampoline_kprobe(struct kprobe *p)
{
return p->addr == (kprobe_opcode_t *) &__kretprobe_trampoline;
return 0;
}
NOKPROBE_SYMBOL(arch_trampoline_kprobe);
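Nothing changes for kretprobe users: registration looks the same, the return path simply no longer takes an extra breakpoint trap on __kretprobe_trampoline. An illustrative registration, not taken from this patch; the probed symbol is an example:

	#include <linux/kprobes.h>

	static int my_ret_handler(struct kretprobe_instance *ri,
				  struct pt_regs *regs)
	{
		pr_info("probed function returned to %pS\n",
			(void *)ri->ret_addr);
		return 0;
	}

	static struct kretprobe my_rp = {
		.kp.symbol_name	= "kernel_clone",	/* example target */
		.handler	= my_ret_handler,
	};

	/* register_kretprobe(&my_rp) / unregister_kretprobe(&my_rp) as usual */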


@@ -88,8 +88,7 @@ static void lgr_stsi_2_2_2(struct lgr_info *lgr_info)
if (stsi(si, 2, 2, 2))
return;
cpascii(lgr_info->name, si->name, sizeof(si->name));
memcpy(&lgr_info->lpar_number, &si->lpar_number,
sizeof(lgr_info->lpar_number));
lgr_info->lpar_number = si->lpar_number;
}
/*


@@ -3,7 +3,6 @@
* Copyright IBM Corp. 2005, 2011
*
* Author(s): Rolf Adelsberger,
* Heiko Carstens <heiko.carstens@de.ibm.com>
* Michael Holzheu <holzheu@linux.vnet.ibm.com>
*/


@@ -2,8 +2,6 @@
/*
* Copyright IBM Corp. 2008, 2009
*
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
*
*/
#include <linux/linkage.h>
@@ -13,6 +11,18 @@
#include <asm/ptrace.h>
#include <asm/export.h>
#define STACK_FRAME_SIZE (STACK_FRAME_OVERHEAD + __PT_SIZE)
#define STACK_PTREGS (STACK_FRAME_OVERHEAD)
#define STACK_PTREGS_GPRS (STACK_PTREGS + __PT_GPRS)
#define STACK_PTREGS_PSW (STACK_PTREGS + __PT_PSW)
#define STACK_PTREGS_ORIG_GPR2 (STACK_PTREGS + __PT_ORIG_GPR2)
#define STACK_PTREGS_FLAGS (STACK_PTREGS + __PT_FLAGS)
/* packed stack: allocate just enough for r14, r15 and backchain */
#define TRACED_FUNC_FRAME_SIZE 24
#ifdef CONFIG_FUNCTION_TRACER
GEN_BR_THUNK %r1
GEN_BR_THUNK %r14
@@ -22,26 +32,14 @@ ENTRY(ftrace_stub)
BR_EX %r14
ENDPROC(ftrace_stub)
#define STACK_FRAME_SIZE (STACK_FRAME_OVERHEAD + __PT_SIZE)
#define STACK_PTREGS (STACK_FRAME_OVERHEAD)
#define STACK_PTREGS_GPRS (STACK_PTREGS + __PT_GPRS)
#define STACK_PTREGS_PSW (STACK_PTREGS + __PT_PSW)
#define STACK_PTREGS_ORIG_GPR2 (STACK_PTREGS + __PT_ORIG_GPR2)
#define STACK_PTREGS_FLAGS (STACK_PTREGS + __PT_FLAGS)
#ifdef __PACK_STACK
/* allocate just enough for r14, r15 and backchain */
#define TRACED_FUNC_FRAME_SIZE 24
#else
#define TRACED_FUNC_FRAME_SIZE STACK_FRAME_OVERHEAD
#endif
.macro ftrace_regs_entry, allregs=0
stg %r14,(__SF_GPRS+8*8)(%r15) # save traced function caller
.if \allregs == 1
lghi %r14,0 # save condition code
ipm %r14 # don't put any instructions
sllg %r14,%r14,16 # clobbering CC before this point
# save psw mask
# don't put any instructions clobbering CC before this point
epsw %r1,%r14
risbg %r14,%r1,0,31,32
.endif
lgr %r1,%r15
@@ -57,13 +55,7 @@ ENDPROC(ftrace_stub)
.if \allregs == 1
stg %r14,(STACK_PTREGS_PSW)(%r15)
stosm (STACK_PTREGS_PSW)(%r15),0
#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
mvghi STACK_PTREGS_FLAGS(%r15),_PIF_FTRACE_FULL_REGS
#else
lghi %r14,_PIF_FTRACE_FULL_REGS
stg %r14,STACK_PTREGS_FLAGS(%r15)
#endif
.else
xc STACK_PTREGS_FLAGS(8,%r15),STACK_PTREGS_FLAGS(%r15)
.endif
@@ -141,3 +133,35 @@ SYM_FUNC_START(return_to_handler)
SYM_FUNC_END(return_to_handler)
#endif
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_KPROBES
SYM_FUNC_START(__kretprobe_trampoline)
stg %r14,(__SF_GPRS+8*8)(%r15)
lay %r15,-STACK_FRAME_SIZE(%r15)
stmg %r0,%r14,STACK_PTREGS_GPRS(%r15)
# store original stack pointer in backchain and pt_regs
lay %r7,STACK_FRAME_SIZE(%r15)
stg %r7,__SF_BACKCHAIN(%r15)
stg %r7,STACK_PTREGS_GPRS+(15*8)(%r15)
# store full psw
epsw %r2,%r3
risbg %r3,%r2,0,31,32
stg %r3,STACK_PTREGS_PSW(%r15)
larl %r1,__kretprobe_trampoline
stg %r1,STACK_PTREGS_PSW+8(%r15)
lay %r2,STACK_PTREGS(%r15)
brasl %r14,trampoline_probe_handler
mvc __SF_EMPTY(16,%r7),STACK_PTREGS_PSW(%r15)
lmg %r0,%r15,STACK_PTREGS_GPRS(%r15)
lpswe __SF_EMPTY(%r15)
SYM_FUNC_END(__kretprobe_trampoline)
#endif /* CONFIG_KPROBES */


@@ -517,15 +517,9 @@ int module_finalize(const Elf_Ehdr *hdr,
ij = me->core_layout.base + me->arch.plt_offset +
me->arch.plt_size - PLT_ENTRY_SIZE;
if (test_facility(35)) {
ij[0] = 0xc6000000; /* exrl %r0,.+10 */
ij[1] = 0x0005a7f4; /* j . */
ij[2] = 0x000007f1; /* br %r1 */
} else {
ij[0] = 0x44000000 | (unsigned int)
offsetof(struct lowcore, br_r1_trampoline);
ij[1] = 0xa7f40000; /* j . */
}
ij[0] = 0xc6000000; /* exrl %r0,.+10 */
ij[1] = 0x0005a7f4; /* j . */
ij[2] = 0x000007f1; /* br %r1 */
}
secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
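For readability, the three 32-bit words written above decode as follows; instruction boundaries do not line up with the word boundaries, which is why the per-word comments in the diff look odd:

	/*
	 * Byte stream: c6 00 00 00 00 05 | a7 f4 00 00 | 07 f1
	 *
	 *	exrl	%r0,0f	# c6 00 00000005: execute the br 10 bytes ahead
	 *	j	.	# a7f4 0000: never reached, stalls speculation
	 * 0:	br	%r1	# 07f1: the executed indirect branch
	 *
	 * This is the standard exrl-based expoline, now emitted
	 * unconditionally since facility 35 (exrl) is guaranteed on z10
	 * and later machines.
	 */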


@@ -6,7 +6,6 @@
* Author(s): Ingo Adlung <adlung@de.ibm.com>,
* Martin Schwidefsky <schwidefsky@de.ibm.com>,
* Cornelia Huck <cornelia.huck@de.ibm.com>,
* Heiko Carstens <heiko.carstens@de.ibm.com>,
*/
#include <linux/kernel_stat.h>


@@ -105,6 +105,7 @@ static void __init_or_module __nospec_revert(s32 *start, s32 *end)
s32 *epo;
/* Second part of the instruction replace is always a nop */
memcpy(insnbuf + 2, branch, sizeof(branch));
for (epo = start; epo < end; epo++) {
instr = (u8 *) epo + *epo;
if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x04)
@@ -117,42 +118,20 @@ static void __init_or_module __nospec_revert(s32 *start, s32 *end)
if (thunk[0] == 0xc6 && thunk[1] == 0x00)
/* exrl %r0,<target-br> */
br = thunk + (*(int *)(thunk + 2)) * 2;
else if (thunk[0] == 0xc0 && (thunk[1] & 0x0f) == 0x00 &&
thunk[6] == 0x44 && thunk[7] == 0x00 &&
(thunk[8] & 0x0f) == 0x00 && thunk[9] == 0x00 &&
(thunk[1] & 0xf0) == (thunk[8] & 0xf0))
/* larl %rx,<target br> + ex %r0,0(%rx) */
br = thunk + (*(int *)(thunk + 2)) * 2;
else
continue;
/* Check for unconditional branch 0x07f? or 0x47f???? */
if ((br[0] & 0xbf) != 0x07 || (br[1] & 0xf0) != 0xf0)
if (br[0] != 0x07 || (br[1] & 0xf0) != 0xf0)
continue;
memcpy(insnbuf + 2, branch, sizeof(branch));
switch (type) {
case BRCL_EXPOLINE:
/* brcl to thunk, replace with br + nop */
insnbuf[0] = br[0];
insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
if (br[0] == 0x47) {
/* brcl to b, replace with bc + nopr */
insnbuf[2] = br[2];
insnbuf[3] = br[3];
} else {
/* brcl to br, replace with bcr + nop */
}
break;
case BRASL_EXPOLINE:
/* brasl to thunk, replace with basr + nop */
insnbuf[0] = 0x0d;
insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
if (br[0] == 0x47) {
/* brasl to b, replace with bas + nopr */
insnbuf[0] = 0x4d;
insnbuf[2] = br[2];
insnbuf[3] = br[3];
} else {
/* brasl to br, replace with basr + nop */
insnbuf[0] = 0x0d;
}
break;
}


@@ -15,6 +15,7 @@
#include <asm/checksum.h>
#include <asm/lowcore.h>
#include <asm/os_info.h>
#include <asm/asm-offsets.h>
/*
* OS info structure has to be page aligned
@@ -45,7 +46,7 @@ void os_info_crashkernel_add(unsigned long base, unsigned long size)
*/
void os_info_entry_add(int nr, void *ptr, u64 size)
{
os_info.entry[nr].addr = (u64)(unsigned long)ptr;
os_info.entry[nr].addr = __pa(ptr);
os_info.entry[nr].size = size;
os_info.entry[nr].csum = (__force u32)csum_partial(ptr, size, 0);
os_info.csum = os_info_csum(&os_info);
@@ -62,7 +63,7 @@ void __init os_info_init(void)
os_info.version_minor = OS_INFO_VERSION_MINOR;
os_info.magic = OS_INFO_MAGIC;
os_info.csum = os_info_csum(&os_info);
mem_assign_absolute(S390_lowcore.os_info, (unsigned long) ptr);
mem_assign_absolute(S390_lowcore.os_info, __pa(ptr));
}
#ifdef CONFIG_CRASH_DUMP
@@ -90,7 +91,7 @@ static void os_info_old_alloc(int nr, int align)
goto fail;
}
buf_align = PTR_ALIGN(buf, align);
if (copy_oldmem_kernel(buf_align, (void *) addr, size)) {
if (copy_oldmem_kernel(buf_align, addr, size)) {
msg = "copy failed";
goto fail_free;
}
@@ -123,15 +124,14 @@ static void os_info_old_init(void)
return;
if (!oldmem_data.start)
goto fail;
if (copy_oldmem_kernel(&addr, &S390_lowcore.os_info, sizeof(addr)))
if (copy_oldmem_kernel(&addr, __LC_OS_INFO, sizeof(addr)))
goto fail;
if (addr == 0 || addr % PAGE_SIZE)
goto fail;
os_info_old = kzalloc(sizeof(*os_info_old), GFP_KERNEL);
if (!os_info_old)
goto fail;
if (copy_oldmem_kernel(os_info_old, (void *) addr,
sizeof(*os_info_old)))
if (copy_oldmem_kernel(os_info_old, addr, sizeof(*os_info_old)))
goto fail_free;
if (os_info_old->magic != OS_INFO_MAGIC)
goto fail_free;


@@ -1451,6 +1451,8 @@ static size_t cfdiag_maxsize(struct cpumf_ctr_info *info)
/* Get the CPU speed, try sampling facility first and CPU attributes second. */
static void cfdiag_get_cpu_speed(void)
{
unsigned long mhz;
if (cpum_sf_avail()) { /* Sampling facility first */
struct hws_qsi_info_block si;
@@ -1464,12 +1466,9 @@ static void cfdiag_get_cpu_speed(void)
/* Fallback: CPU speed extract static part. Used in case
* CPU Measurement Sampling Facility is turned off.
*/
if (test_facility(34)) {
unsigned long mhz = __ecag(ECAG_CPU_ATTRIBUTE, 0);
if (mhz != -1UL)
cfdiag_cpu_speed = mhz & 0xffffffff;
}
mhz = __ecag(ECAG_CPU_ATTRIBUTE, 0);
if (mhz != -1UL)
cfdiag_cpu_speed = mhz & 0xffffffff;
}
static int cfset_init(void)


@@ -172,8 +172,7 @@ static void show_cpu_summary(struct seq_file *m, void *v)
static int __init setup_hwcaps(void)
{
/* instructions named N3, "backported" to esa-mode */
if (test_facility(0))
elf_hwcap |= HWCAP_ESAN3;
elf_hwcap |= HWCAP_ESAN3;
/* z/Architecture mode active */
elf_hwcap |= HWCAP_ZARCH;
@@ -191,8 +190,7 @@ static int __init setup_hwcaps(void)
elf_hwcap |= HWCAP_LDISP;
/* extended-immediate */
if (test_facility(21))
elf_hwcap |= HWCAP_EIMM;
elf_hwcap |= HWCAP_EIMM;
/* extended-translation facility 3 enhancement */
if (test_facility(22) && test_facility(30))
@@ -262,21 +260,7 @@ static int __init setup_elf_platform(void)
get_cpu_id(&cpu_id);
add_device_randomness(&cpu_id, sizeof(cpu_id));
switch (cpu_id.machine) {
case 0x2064:
case 0x2066:
default: /* Use "z900" as default for 64 bit kernels. */
strcpy(elf_platform, "z900");
break;
case 0x2084:
case 0x2086:
strcpy(elf_platform, "z990");
break;
case 0x2094:
case 0x2096:
strcpy(elf_platform, "z9-109");
break;
case 0x2097:
case 0x2098:
default: /* Use "z10" as default. */
strcpy(elf_platform, "z10");
break;
case 0x2817:


@@ -147,38 +147,36 @@ void ptrace_disable(struct task_struct *task)
static inline unsigned long __peek_user_per(struct task_struct *child,
addr_t addr)
{
struct per_struct_kernel *dummy = NULL;
if (addr == (addr_t) &dummy->cr9)
if (addr == offsetof(struct per_struct_kernel, cr9))
/* Control bits of the active per set. */
return test_thread_flag(TIF_SINGLE_STEP) ?
PER_EVENT_IFETCH : child->thread.per_user.control;
else if (addr == (addr_t) &dummy->cr10)
else if (addr == offsetof(struct per_struct_kernel, cr10))
/* Start address of the active per set. */
return test_thread_flag(TIF_SINGLE_STEP) ?
0 : child->thread.per_user.start;
else if (addr == (addr_t) &dummy->cr11)
else if (addr == offsetof(struct per_struct_kernel, cr11))
/* End address of the active per set. */
return test_thread_flag(TIF_SINGLE_STEP) ?
-1UL : child->thread.per_user.end;
else if (addr == (addr_t) &dummy->bits)
else if (addr == offsetof(struct per_struct_kernel, bits))
/* Single-step bit. */
return test_thread_flag(TIF_SINGLE_STEP) ?
(1UL << (BITS_PER_LONG - 1)) : 0;
else if (addr == (addr_t) &dummy->starting_addr)
else if (addr == offsetof(struct per_struct_kernel, starting_addr))
/* Start address of the user specified per set. */
return child->thread.per_user.start;
else if (addr == (addr_t) &dummy->ending_addr)
else if (addr == offsetof(struct per_struct_kernel, ending_addr))
/* End address of the user specified per set. */
return child->thread.per_user.end;
else if (addr == (addr_t) &dummy->perc_atmid)
else if (addr == offsetof(struct per_struct_kernel, perc_atmid))
/* PER code, ATMID and AI of the last PER trap */
return (unsigned long)
child->thread.per_event.cause << (BITS_PER_LONG - 16);
else if (addr == (addr_t) &dummy->address)
else if (addr == offsetof(struct per_struct_kernel, address))
/* Address of the last PER trap */
return child->thread.per_event.address;
else if (addr == (addr_t) &dummy->access_id)
else if (addr == offsetof(struct per_struct_kernel, access_id))
/* Access id of the last PER trap */
return (unsigned long)
child->thread.per_event.paid << (BITS_PER_LONG - 8);
@@ -196,61 +194,60 @@ static inline unsigned long __peek_user_per(struct task_struct *child,
*/
static unsigned long __peek_user(struct task_struct *child, addr_t addr)
{
struct user *dummy = NULL;
addr_t offset, tmp;
if (addr < (addr_t) &dummy->regs.acrs) {
if (addr < offsetof(struct user, regs.acrs)) {
/*
* psw and gprs are stored on the stack
*/
tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
if (addr == (addr_t) &dummy->regs.psw.mask) {
if (addr == offsetof(struct user, regs.psw.mask)) {
/* Return a clean psw mask. */
tmp &= PSW_MASK_USER | PSW_MASK_RI;
tmp |= PSW_USER_BITS;
}
} else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
} else if (addr < offsetof(struct user, regs.orig_gpr2)) {
/*
* access registers are stored in the thread structure
*/
offset = addr - (addr_t) &dummy->regs.acrs;
offset = addr - offsetof(struct user, regs.acrs);
/*
* Very special case: old & broken 64 bit gdb reading
* from acrs[15]. Result is a 64 bit value. Read the
* 32 bit acrs[15] value and shift it by 32. Sick...
*/
if (addr == (addr_t) &dummy->regs.acrs[15])
if (addr == offsetof(struct user, regs.acrs[15]))
tmp = ((unsigned long) child->thread.acrs[15]) << 32;
else
tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);
} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
} else if (addr == offsetof(struct user, regs.orig_gpr2)) {
/*
* orig_gpr2 is stored on the kernel stack
*/
tmp = (addr_t) task_pt_regs(child)->orig_gpr2;
} else if (addr < (addr_t) &dummy->regs.fp_regs) {
} else if (addr < offsetof(struct user, regs.fp_regs)) {
/*
* prevent reads of padding hole between
* orig_gpr2 and fp_regs on s390.
*/
tmp = 0;
} else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
} else if (addr == offsetof(struct user, regs.fp_regs.fpc)) {
/*
* floating point control reg. is in the thread structure
*/
tmp = child->thread.fpu.fpc;
tmp <<= BITS_PER_LONG - 32;
} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
} else if (addr < offsetof(struct user, regs.fp_regs) + sizeof(s390_fp_regs)) {
/*
* floating point regs. are either in child->thread.fpu
* or the child->thread.fpu.vxrs array
*/
offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
offset = addr - offsetof(struct user, regs.fp_regs.fprs);
if (MACHINE_HAS_VX)
tmp = *(addr_t *)
((addr_t) child->thread.fpu.vxrs + 2*offset);
@@ -258,11 +255,11 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
tmp = *(addr_t *)
((addr_t) child->thread.fpu.fprs + offset);
} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
} else if (addr < offsetof(struct user, regs.per_info) + sizeof(per_struct)) {
/*
* Handle access to the per_info structure.
*/
addr -= (addr_t) &dummy->regs.per_info;
addr -= offsetof(struct user, regs.per_info);
tmp = __peek_user_per(child, addr);
} else
@@ -281,8 +278,8 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
* an alignment of 4. Programmers from hell...
*/
mask = __ADDR_MASK;
if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
if (addr >= offsetof(struct user, regs.acrs) &&
addr < offsetof(struct user, regs.orig_gpr2))
mask = 3;
if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
return -EIO;
@@ -294,8 +291,6 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
static inline void __poke_user_per(struct task_struct *child,
addr_t addr, addr_t data)
{
struct per_struct_kernel *dummy = NULL;
/*
* There are only three fields in the per_info struct that the
* debugger user can write to.
@@ -308,14 +303,14 @@ static inline void __poke_user_per(struct task_struct *child,
* addresses are used only if single stepping is not in effect.
* Writes to any other field in per_info are ignored.
*/
if (addr == (addr_t) &dummy->cr9)
if (addr == offsetof(struct per_struct_kernel, cr9))
/* PER event mask of the user specified per set. */
child->thread.per_user.control =
data & (PER_EVENT_MASK | PER_CONTROL_MASK);
else if (addr == (addr_t) &dummy->starting_addr)
else if (addr == offsetof(struct per_struct_kernel, starting_addr))
/* Starting address of the user specified per set. */
child->thread.per_user.start = data;
else if (addr == (addr_t) &dummy->ending_addr)
else if (addr == offsetof(struct per_struct_kernel, ending_addr))
/* Ending address of the user specified per set. */
child->thread.per_user.end = data;
}
@@ -328,16 +323,15 @@ static inline void __poke_user_per(struct task_struct *child,
*/
static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
struct user *dummy = NULL;
addr_t offset;
if (addr < (addr_t) &dummy->regs.acrs) {
if (addr < offsetof(struct user, regs.acrs)) {
struct pt_regs *regs = task_pt_regs(child);
/*
* psw and gprs are stored on the stack
*/
if (addr == (addr_t) &dummy->regs.psw.mask) {
if (addr == offsetof(struct user, regs.psw.mask)) {
unsigned long mask = PSW_MASK_USER;
mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
@@ -359,36 +353,36 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
regs->int_code = 0x20000 | (data & 0xffff);
}
*(addr_t *)((addr_t) &regs->psw + addr) = data;
} else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
} else if (addr < offsetof(struct user, regs.orig_gpr2)) {
/*
* access registers are stored in the thread structure
*/
offset = addr - (addr_t) &dummy->regs.acrs;
offset = addr - offsetof(struct user, regs.acrs);
/*
* Very special case: old & broken 64 bit gdb writing
* to acrs[15] with a 64 bit value. Ignore the lower
* half of the value and write the upper 32 bit to
* acrs[15]. Sick...
*/
if (addr == (addr_t) &dummy->regs.acrs[15])
if (addr == offsetof(struct user, regs.acrs[15]))
child->thread.acrs[15] = (unsigned int) (data >> 32);
else
*(addr_t *)((addr_t) &child->thread.acrs + offset) = data;
} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
} else if (addr == offsetof(struct user, regs.orig_gpr2)) {
/*
* orig_gpr2 is stored on the kernel stack
*/
task_pt_regs(child)->orig_gpr2 = data;
} else if (addr < (addr_t) &dummy->regs.fp_regs) {
} else if (addr < offsetof(struct user, regs.fp_regs)) {
/*
* prevent writes of padding hole between
* orig_gpr2 and fp_regs on s390.
*/
return 0;
} else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
} else if (addr == offsetof(struct user, regs.fp_regs.fpc)) {
/*
* floating point control reg. is in the thread structure
*/
@@ -397,12 +391,12 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
return -EINVAL;
child->thread.fpu.fpc = data >> (BITS_PER_LONG - 32);
} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
} else if (addr < offsetof(struct user, regs.fp_regs) + sizeof(s390_fp_regs)) {
/*
* floating point regs. are either in child->thread.fpu
* or the child->thread.fpu.vxrs array
*/
offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
offset = addr - offsetof(struct user, regs.fp_regs.fprs);
if (MACHINE_HAS_VX)
*(addr_t *)((addr_t)
child->thread.fpu.vxrs + 2*offset) = data;
@@ -410,11 +404,11 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
*(addr_t *)((addr_t)
child->thread.fpu.fprs + offset) = data;
} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
} else if (addr < offsetof(struct user, regs.per_info) + sizeof(per_struct)) {
/*
* Handle access to the per_info structure.
*/
addr -= (addr_t) &dummy->regs.per_info;
addr -= offsetof(struct user, regs.per_info);
__poke_user_per(child, addr, data);
}
@@ -431,8 +425,8 @@ static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
* an alignment of 4. Programmers from hell indeed...
*/
mask = __ADDR_MASK;
if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
if (addr >= offsetof(struct user, regs.acrs) &&
addr < offsetof(struct user, regs.orig_gpr2))
mask = 3;
if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
return -EIO;
@@ -540,37 +534,35 @@ long arch_ptrace(struct task_struct *child, long request,
static inline __u32 __peek_user_per_compat(struct task_struct *child,
addr_t addr)
{
struct compat_per_struct_kernel *dummy32 = NULL;
if (addr == (addr_t) &dummy32->cr9)
if (addr == offsetof(struct compat_per_struct_kernel, cr9))
/* Control bits of the active per set. */
return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
PER_EVENT_IFETCH : child->thread.per_user.control;
else if (addr == (addr_t) &dummy32->cr10)
else if (addr == offsetof(struct compat_per_struct_kernel, cr10))
/* Start address of the active per set. */
return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
0 : child->thread.per_user.start;
else if (addr == (addr_t) &dummy32->cr11)
else if (addr == offsetof(struct compat_per_struct_kernel, cr11))
/* End address of the active per set. */
return test_thread_flag(TIF_SINGLE_STEP) ?
PSW32_ADDR_INSN : child->thread.per_user.end;
else if (addr == (addr_t) &dummy32->bits)
else if (addr == offsetof(struct compat_per_struct_kernel, bits))
/* Single-step bit. */
return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
0x80000000 : 0;
else if (addr == (addr_t) &dummy32->starting_addr)
else if (addr == offsetof(struct compat_per_struct_kernel, starting_addr))
/* Start address of the user specified per set. */
return (__u32) child->thread.per_user.start;
else if (addr == (addr_t) &dummy32->ending_addr)
else if (addr == offsetof(struct compat_per_struct_kernel, ending_addr))
/* End address of the user specified per set. */
return (__u32) child->thread.per_user.end;
else if (addr == (addr_t) &dummy32->perc_atmid)
else if (addr == offsetof(struct compat_per_struct_kernel, perc_atmid))
/* PER code, ATMID and AI of the last PER trap */
return (__u32) child->thread.per_event.cause << 16;
else if (addr == (addr_t) &dummy32->address)
else if (addr == offsetof(struct compat_per_struct_kernel, address))
/* Address of the last PER trap */
return (__u32) child->thread.per_event.address;
else if (addr == (addr_t) &dummy32->access_id)
else if (addr == offsetof(struct compat_per_struct_kernel, access_id))
/* Access id of the last PER trap */
return (__u32) child->thread.per_event.paid << 24;
return 0;
@@ -581,21 +573,20 @@ static inline __u32 __peek_user_per_compat(struct task_struct *child,
*/
static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
{
struct compat_user *dummy32 = NULL;
addr_t offset;
__u32 tmp;
if (addr < (addr_t) &dummy32->regs.acrs) {
if (addr < offsetof(struct compat_user, regs.acrs)) {
struct pt_regs *regs = task_pt_regs(child);
/*
* psw and gprs are stored on the stack
*/
if (addr == (addr_t) &dummy32->regs.psw.mask) {
if (addr == offsetof(struct compat_user, regs.psw.mask)) {
/* Fake a 31 bit psw mask. */
tmp = (__u32)(regs->psw.mask >> 32);
tmp &= PSW32_MASK_USER | PSW32_MASK_RI;
tmp |= PSW32_USER_BITS;
} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
} else if (addr == offsetof(struct compat_user, regs.psw.addr)) {
/* Fake a 31 bit psw address. */
tmp = (__u32) regs->psw.addr |
(__u32)(regs->psw.mask & PSW_MASK_BA);
@@ -603,38 +594,38 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
/* gpr 0-15 */
tmp = *(__u32 *)((addr_t) &regs->psw + addr*2 + 4);
}
} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
} else if (addr < offsetof(struct compat_user, regs.orig_gpr2)) {
/*
* access registers are stored in the thread structure
*/
offset = addr - (addr_t) &dummy32->regs.acrs;
offset = addr - offsetof(struct compat_user, regs.acrs);
tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);
} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
} else if (addr == offsetof(struct compat_user, regs.orig_gpr2)) {
/*
* orig_gpr2 is stored on the kernel stack
*/
tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);
} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
} else if (addr < offsetof(struct compat_user, regs.fp_regs)) {
/*
* prevent reads of padding hole between
* orig_gpr2 and fp_regs on s390.
*/
tmp = 0;
} else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
} else if (addr == offsetof(struct compat_user, regs.fp_regs.fpc)) {
/*
* floating point control reg. is in the thread structure
*/
tmp = child->thread.fpu.fpc;
} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
} else if (addr < offsetof(struct compat_user, regs.fp_regs) + sizeof(s390_fp_regs)) {
/*
* floating point regs. are either in child->thread.fpu
* or the child->thread.fpu.vxrs array
*/
offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
offset = addr - offsetof(struct compat_user, regs.fp_regs.fprs);
if (MACHINE_HAS_VX)
tmp = *(__u32 *)
((addr_t) child->thread.fpu.vxrs + 2*offset);
@@ -642,11 +633,11 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
tmp = *(__u32 *)
((addr_t) child->thread.fpu.fprs + offset);
} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
} else if (addr < offsetof(struct compat_user, regs.per_info) + sizeof(struct compat_per_struct_kernel)) {
/*
* Handle access to the per_info structure.
*/
addr -= (addr_t) &dummy32->regs.per_info;
addr -= offsetof(struct compat_user, regs.per_info);
tmp = __peek_user_per_compat(child, addr);
} else
@@ -673,16 +664,14 @@ static int peek_user_compat(struct task_struct *child,
static inline void __poke_user_per_compat(struct task_struct *child,
addr_t addr, __u32 data)
{
struct compat_per_struct_kernel *dummy32 = NULL;
if (addr == (addr_t) &dummy32->cr9)
if (addr == offsetof(struct compat_per_struct_kernel, cr9))
/* PER event mask of the user specified per set. */
child->thread.per_user.control =
data & (PER_EVENT_MASK | PER_CONTROL_MASK);
else if (addr == (addr_t) &dummy32->starting_addr)
else if (addr == offsetof(struct compat_per_struct_kernel, starting_addr))
/* Starting address of the user specified per set. */
child->thread.per_user.start = data;
else if (addr == (addr_t) &dummy32->ending_addr)
else if (addr == offsetof(struct compat_per_struct_kernel, ending_addr))
/* Ending address of the user specified per set. */
child->thread.per_user.end = data;
}
@@ -693,16 +682,15 @@ static inline void __poke_user_per_compat(struct task_struct *child,
static int __poke_user_compat(struct task_struct *child,
addr_t addr, addr_t data)
{
struct compat_user *dummy32 = NULL;
__u32 tmp = (__u32) data;
addr_t offset;
if (addr < (addr_t) &dummy32->regs.acrs) {
if (addr < offsetof(struct compat_user, regs.acrs)) {
struct pt_regs *regs = task_pt_regs(child);
/*
* psw, gprs, acrs and orig_gpr2 are stored on the stack
*/
if (addr == (addr_t) &dummy32->regs.psw.mask) {
if (addr == offsetof(struct compat_user, regs.psw.mask)) {
__u32 mask = PSW32_MASK_USER;
mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
@@ -716,7 +704,7 @@ static int __poke_user_compat(struct task_struct *child,
regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
(regs->psw.mask & PSW_MASK_BA) |
(__u64)(tmp & mask) << 32;
} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
} else if (addr == offsetof(struct compat_user, regs.psw.addr)) {
/* Build a 64 bit psw address from 31 bit address. */
regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN;
/* Transfer 31 bit amode bit to psw mask. */
@@ -732,27 +720,27 @@ static int __poke_user_compat(struct task_struct *child,
/* gpr 0-15 */
*(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
}
} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
} else if (addr < offsetof(struct compat_user, regs.orig_gpr2)) {
/*
* access registers are stored in the thread structure
*/
offset = addr - (addr_t) &dummy32->regs.acrs;
offset = addr - offsetof(struct compat_user, regs.acrs);
*(__u32*)((addr_t) &child->thread.acrs + offset) = tmp;
} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
} else if (addr == offsetof(struct compat_user, regs.orig_gpr2)) {
/*
* orig_gpr2 is stored on the kernel stack
*/
*(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;
} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
} else if (addr < offsetof(struct compat_user, regs.fp_regs)) {
/*
* prevent writes of padding hole between
* orig_gpr2 and fp_regs on s390.
*/
return 0;
} else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
} else if (addr == offsetof(struct compat_user, regs.fp_regs.fpc)) {
/*
* floating point control reg. is in the thread structure
*/
@@ -760,12 +748,12 @@ static int __poke_user_compat(struct task_struct *child,
return -EINVAL;
child->thread.fpu.fpc = data;
} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
} else if (addr < offsetof(struct compat_user, regs.fp_regs) + sizeof(s390_fp_regs)) {
/*
* floating point regs. are either in child->thread.fpu
* or the child->thread.fpu.vxrs array
*/
offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
offset = addr - offsetof(struct compat_user, regs.fp_regs.fprs);
if (MACHINE_HAS_VX)
*(__u32 *)((addr_t)
child->thread.fpu.vxrs + 2*offset) = tmp;
@@ -773,11 +761,11 @@ static int __poke_user_compat(struct task_struct *child,
*(__u32 *)((addr_t)
child->thread.fpu.fprs + offset) = tmp;
} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
} else if (addr < offsetof(struct compat_user, regs.per_info) + sizeof(struct compat_per_struct_kernel)) {
/*
* Handle access to the per_info structure.
*/
addr -= (addr_t) &dummy32->regs.per_info;
addr -= offsetof(struct compat_user, regs.per_info);
__poke_user_per_compat(child, addr, data);
}
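All of the ptrace conversions above follow one pattern: member offsets formerly computed by dereferencing a NULL dummy pointer, which is undefined behavior even though it happened to work, are now taken with offsetof(). A userspace toy showing the equivalence; this is not kernel code:

	#include <stddef.h>
	#include <stdio.h>

	struct example {
		long a;
		long b;
	};

	int main(void)
	{
		/* old idiom, as removed above: UB via a NULL dummy pointer */
		struct example *dummy = NULL;
		unsigned long off_ub = (unsigned long)&dummy->b;

		/* new idiom: well defined and self-documenting */
		unsigned long off_ok = offsetof(struct example, b);

		printf("%lu %lu\n", off_ub, off_ok);	/* prints "8 8" on LP64 */
		return 0;
	}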


@@ -2,8 +2,7 @@
/*
* Copyright IBM Corp. 2005
*
* Author(s): Rolf Adelsberger,
* Heiko Carstens <heiko.carstens@de.ibm.com>
* Author(s): Rolf Adelsberger
*
*/


@@ -490,7 +490,6 @@ static void __init setup_lowcore_dat_off(void)
lc->spinlock_lockval = arch_spin_lockval(0);
lc->spinlock_index = 0;
arch_spin_lock_setup(0);
lc->br_r1_trampoline = 0x07f1; /* br %r1 */
lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
lc->preempt_count = PREEMPT_DISABLED;


@@ -141,7 +141,7 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
{
_sigregs user_sregs;
/* Alwys make any pending restarted system call return -EINTR */
/* Always make any pending restarted system call return -EINTR */
current->restart_block.fn = do_no_restart_syscall;
if (__copy_from_user(&user_sregs, sregs, sizeof(user_sregs)))


@@ -5,7 +5,6 @@
* Copyright IBM Corp. 1999, 2012
* Author(s): Denis Joseph Barrow,
* Martin Schwidefsky <schwidefsky@de.ibm.com>,
* Heiko Carstens <heiko.carstens@de.ibm.com>,
*
* based on other smp stuff by
* (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net>
@@ -208,7 +207,6 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
lc->cpu_nr = cpu;
lc->spinlock_lockval = arch_spin_lockval(cpu);
lc->spinlock_index = 0;
lc->br_r1_trampoline = 0x07f1; /* br %r1 */
lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
lc->preempt_count = PREEMPT_DISABLED;
@@ -671,7 +669,7 @@ static __init void smp_save_cpu_regs(struct save_area *sa, u16 addr,
bool is_boot_cpu, void *regs)
{
if (is_boot_cpu)
copy_oldmem_kernel(regs, (void *) __LC_FPREGS_SAVE_AREA, 512);
copy_oldmem_kernel(regs, __LC_FPREGS_SAVE_AREA, 512);
else
__pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, __pa(regs));
save_area_add_regs(sa, regs);
@@ -1253,7 +1251,7 @@ static __always_inline void set_new_lowcore(struct lowcore *lc)
src.odd = sizeof(S390_lowcore);
dst.even = (unsigned long) lc;
dst.odd = sizeof(*lc);
pfx = (unsigned long) lc;
pfx = __pa(lc);
asm volatile(
" mvcl %[dst],%[src]\n"
@@ -1293,8 +1291,8 @@ static int __init smp_reinit_ipl_cpu(void)
local_irq_restore(flags);
free_pages(lc_ipl->async_stack - STACK_INIT_OFFSET, THREAD_SIZE_ORDER);
memblock_free_late(lc_ipl->mcck_stack - STACK_INIT_OFFSET, THREAD_SIZE);
memblock_free_late((unsigned long) lc_ipl, sizeof(*lc_ipl));
memblock_free_late(__pa(lc_ipl->mcck_stack - STACK_INIT_OFFSET), THREAD_SIZE);
memblock_free_late(__pa(lc_ipl), sizeof(*lc_ipl));
return 0;
}


@@ -3,7 +3,6 @@
* Stack trace management functions
*
* Copyright IBM Corp. 2006
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
#include <linux/stacktrace.h>


@@ -14,6 +14,7 @@
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <asm/asm-extable.h>
#include <asm/ebcdic.h>
#include <asm/debug.h>
#include <asm/sysinfo.h>


@@ -6,6 +6,7 @@
*/
#include <linux/linkage.h>
#include <asm/asm-extable.h>
#include <asm/errno.h>
#include <asm/sigp.h>


@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2007, 2011
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
#define KMSG_COMPONENT "cpu"


@@ -27,6 +27,7 @@
#include <linux/uaccess.h>
#include <linux/cpu.h>
#include <linux/entry-common.h>
#include <asm/asm-extable.h>
#include <asm/fpu/api.h>
#include <asm/vtime.h>
#include "entry.h"
@@ -53,9 +54,7 @@ void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str)
force_sig_fault(si_signo, si_code, get_trap_ip(regs));
report_user_fault(regs, si_signo, 0);
} else {
const struct exception_table_entry *fixup;
fixup = s390_search_extables(regs->psw.addr);
if (!fixup || !ex_handle(fixup, regs))
if (!fixup_exception(regs))
die(regs, str);
}
}
@@ -244,16 +243,12 @@ static void space_switch_exception(struct pt_regs *regs)
static void monitor_event_exception(struct pt_regs *regs)
{
const struct exception_table_entry *fixup;
if (user_mode(regs))
return;
switch (report_bug(regs->psw.addr - (regs->int_code >> 16), regs)) {
case BUG_TRAP_TYPE_NONE:
fixup = s390_search_extables(regs->psw.addr);
if (fixup)
ex_handle(fixup, regs);
fixup_exception(regs);
break;
case BUG_TRAP_TYPE_WARN:
break;
@@ -291,7 +286,6 @@ static void __init test_monitor_call(void)
void __init trap_init(void)
{
sort_extable(__start_amode31_ex_table, __stop_amode31_ex_table);
local_mcck_enable();
test_monitor_call();
}
@@ -303,7 +297,7 @@ void noinstr __do_pgm_check(struct pt_regs *regs)
unsigned int trapnr;
irqentry_state_t state;
regs->int_code = *(u32 *)&S390_lowcore.pgm_ilc;
regs->int_code = S390_lowcore.pgm_int_code;
regs->int_parm_long = S390_lowcore.trans_exc_code;
state = irqentry_enter(regs);
@@ -328,7 +322,7 @@ void noinstr __do_pgm_check(struct pt_regs *regs)
set_thread_flag(TIF_PER_TRAP);
ev->address = S390_lowcore.per_address;
ev->cause = *(u16 *)&S390_lowcore.per_code;
ev->cause = S390_lowcore.per_code_combined;
ev->paid = S390_lowcore.per_access_id;
} else {
/* PER event in kernel is kprobes */


@@ -177,9 +177,7 @@ static void adjust_psw_addr(psw_t *psw, unsigned long len)
__typeof__(*(ptr)) input; \
int __rc = 0; \
\
if (!test_facility(34)) \
__rc = EMU_ILLEGAL_OP; \
else if ((u64 __force)ptr & mask) \
if ((u64 __force)ptr & mask) \
__rc = EMU_SPECIFICATION; \
else if (get_user(input, ptr)) \
__rc = EMU_ADDRESSING; \
@@ -194,9 +192,7 @@ static void adjust_psw_addr(psw_t *psw, unsigned long len)
__typeof__(ptr) __ptr = (ptr); \
int __rc = 0; \
\
if (!test_facility(34)) \
__rc = EMU_ILLEGAL_OP; \
else if ((u64 __force)__ptr & mask) \
if ((u64 __force)__ptr & mask) \
__rc = EMU_SPECIFICATION; \
else if (put_user(*(input), __ptr)) \
__rc = EMU_ADDRESSING; \
@@ -213,9 +209,7 @@ static void adjust_psw_addr(psw_t *psw, unsigned long len)
__typeof__(*(ptr)) input; \
int __rc = 0; \
\
if (!test_facility(34)) \
__rc = EMU_ILLEGAL_OP; \
else if ((u64 __force)ptr & mask) \
if ((u64 __force)ptr & mask) \
__rc = EMU_SPECIFICATION; \
else if (get_user(input, ptr)) \
__rc = EMU_ADDRESSING; \
@@ -327,10 +321,6 @@ static void handle_insn_ril(struct arch_uprobe *auprobe, struct pt_regs *regs)
break;
case 0xc6:
switch (insn->opc1) {
case 0x02: /* pfdrl */
if (!test_facility(34))
rc = EMU_ILLEGAL_OP;
break;
case 0x04: /* cghrl */
rc = emu_cmp_ril(regs, (s16 __user *)uptr, &rx->s64);
break;


@@ -49,7 +49,6 @@ SECTIONS
SOFTIRQENTRY_TEXT
FTRACE_HOTPATCH_TRAMPOLINES_TEXT
*(.text.*_indirect_*)
*(.fixup)
*(.gnu.warning)
. = ALIGN(PAGE_SIZE);
_etext = .; /* End of text section */


@@ -128,13 +128,12 @@ static int do_account_vtime(struct task_struct *tsk)
timer = S390_lowcore.last_update_timer;
clock = S390_lowcore.last_update_clock;
/* Use STORE CLOCK by default, STORE CLOCK FAST if available. */
alternative_io("stpt %0\n .insn s,0xb2050000,%1\n",
"stpt %0\n .insn s,0xb27c0000,%1\n",
25,
ASM_OUTPUT2("=Q" (S390_lowcore.last_update_timer),
"=Q" (S390_lowcore.last_update_clock)),
ASM_NO_INPUT_CLOBBER("cc"));
asm volatile(
" stpt %0\n" /* Store current cpu timer value */
" stckf %1" /* Store current tod clock value */
: "=Q" (S390_lowcore.last_update_timer),
"=Q" (S390_lowcore.last_update_clock)
: : "cc");
clock = S390_lowcore.last_update_clock - clock;
timer -= S390_lowcore.last_update_timer;


@@ -6,7 +6,6 @@
*
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Christian Borntraeger <borntraeger@de.ibm.com>
* Heiko Carstens <heiko.carstens@de.ibm.com>
* Christian Ehrhardt <ehrhardt@de.ibm.com>
* Jason J. Herne <jjherne@us.ibm.com>
*/


@@ -7,6 +7,7 @@ lib-y += delay.o string.o uaccess.o find.o spinlock.o
obj-y += mem.o xor.o
lib-$(CONFIG_KPROBES) += probes.o
lib-$(CONFIG_UPROBES) += probes.o
obj-$(CONFIG_EXPOLINE_EXTERN) += expoline.o
obj-$(CONFIG_S390_KPROBES_SANITY_TEST) += test_kprobes_s390.o
test_kprobes_s390-objs += test_kprobes_asm.o test_kprobes.o


@@ -4,7 +4,6 @@
*
* Copyright IBM Corp. 1999, 2008
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
* Heiko Carstens <heiko.carstens@de.ibm.com>,
*/
#include <linux/processor.h>

arch/s390/lib/expoline.S (new file)

@@ -0,0 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <asm/nospec-insn.h>
#include <linux/linkage.h>
.macro GEN_ALL_BR_THUNK_EXTERN
.irp r1,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
GEN_BR_THUNK_EXTERN %r\r1
.endr
.endm
GEN_ALL_BR_THUNK_EXTERN
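Each GEN_BR_THUNK_EXTERN above presumably expands to one globally visible exrl-based thunk per register, following the established __s390_indirect_jump_r<N> naming scheme; the sketch below is an assumption, the real macro lives in asm/nospec-insn.h, not in this diff:

	# assumed expansion for %r1 (sketch, not from this diff)
		.globl	__s390_indirect_jump_r1
	__s390_indirect_jump_r1:
		exrl	%r0,0f
		j	.
	0:	br	%r1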


@@ -8,6 +8,7 @@
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/kthread.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/slab.h>
@@ -16,10 +17,14 @@
#include <linux/wait.h>
#include <asm/irq.h>
struct kunit *current_test;
static struct kunit *current_test;
#define BT_BUF_SIZE (PAGE_SIZE * 4)
static bool force_bt;
module_param_named(backtrace, force_bt, bool, 0444);
MODULE_PARM_DESC(backtrace, "print backtraces for all tests");
/*
* To avoid printk line limit split backtrace by lines
*/
@@ -98,7 +103,7 @@ static noinline int test_unwind(struct task_struct *task, struct pt_regs *regs,
kunit_err(current_test, "Maximum number of frames exceeded\n");
ret = -EINVAL;
}
if (ret)
if (ret || force_bt)
print_backtrace(bt);
kfree(bt);
return ret;
@@ -124,7 +129,10 @@ static struct unwindme *unwindme;
#define UWM_CALLER 0x8 /* Unwind starting from caller. */
#define UWM_SWITCH_STACK 0x10 /* Use call_on_stack. */
#define UWM_IRQ 0x20 /* Unwind from irq context. */
#define UWM_PGM 0x40 /* Unwind from program check handler. */
#define UWM_PGM 0x40 /* Unwind from program check handler */
#define UWM_KPROBE_ON_FTRACE 0x80 /* Unwind from kprobe handler called via ftrace. */
#define UWM_FTRACE 0x100 /* Unwind from ftrace handler. */
#define UWM_KRETPROBE 0x200 /* Unwind kretprobe handlers. */
static __always_inline unsigned long get_psw_addr(void)
{
@@ -136,8 +144,56 @@ static __always_inline unsigned long get_psw_addr(void)
return psw_addr;
}
#ifdef CONFIG_KPROBES
static int pgm_pre_handler(struct kprobe *p, struct pt_regs *regs)
static int kretprobe_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
struct unwindme *u = unwindme;
u->ret = test_unwind(NULL, (u->flags & UWM_REGS) ? regs : NULL,
(u->flags & UWM_SP) ? u->sp : 0);
return 0;
}
static noinline notrace void test_unwind_kretprobed_func(void)
{
asm volatile(" nop\n");
}
static noinline void test_unwind_kretprobed_func_caller(void)
{
test_unwind_kretprobed_func();
}
static int test_unwind_kretprobe(struct unwindme *u)
{
int ret;
struct kretprobe my_kretprobe;
if (!IS_ENABLED(CONFIG_KPROBES))
kunit_skip(current_test, "requires CONFIG_KPROBES");
u->ret = -1; /* make sure kprobe is called */
unwindme = u;
memset(&my_kretprobe, 0, sizeof(my_kretprobe));
my_kretprobe.handler = kretprobe_ret_handler;
my_kretprobe.maxactive = 1;
my_kretprobe.kp.addr = (kprobe_opcode_t *)test_unwind_kretprobed_func;
ret = register_kretprobe(&my_kretprobe);
if (ret < 0) {
kunit_err(current_test, "register_kretprobe failed %d\n", ret);
return -EINVAL;
}
test_unwind_kretprobed_func_caller();
unregister_kretprobe(&my_kretprobe);
unwindme = NULL;
return u->ret;
}
static int kprobe_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
struct unwindme *u = unwindme;
@@ -145,8 +201,97 @@ static int pgm_pre_handler(struct kprobe *p, struct pt_regs *regs)
(u->flags & UWM_SP) ? u->sp : 0);
return 0;
}
extern const char test_unwind_kprobed_insn[];
static noinline void test_unwind_kprobed_func(void)
{
asm volatile(
" nopr %%r7\n"
"test_unwind_kprobed_insn:\n"
" nopr %%r7\n"
:);
}
static int test_unwind_kprobe(struct unwindme *u)
{
struct kprobe kp;
int ret;
if (!IS_ENABLED(CONFIG_KPROBES))
kunit_skip(current_test, "requires CONFIG_KPROBES");
if (!IS_ENABLED(CONFIG_KPROBES_ON_FTRACE) && u->flags & UWM_KPROBE_ON_FTRACE)
kunit_skip(current_test, "requires CONFIG_KPROBES_ON_FTRACE");
u->ret = -1; /* make sure kprobe is called */
unwindme = u;
memset(&kp, 0, sizeof(kp));
kp.pre_handler = kprobe_pre_handler;
kp.addr = u->flags & UWM_KPROBE_ON_FTRACE ?
(kprobe_opcode_t *)test_unwind_kprobed_func :
(kprobe_opcode_t *)test_unwind_kprobed_insn;
ret = register_kprobe(&kp);
if (ret < 0) {
kunit_err(current_test, "register_kprobe failed %d\n", ret);
return -EINVAL;
}
test_unwind_kprobed_func();
unregister_kprobe(&kp);
unwindme = NULL;
return u->ret;
}
static void notrace __used test_unwind_ftrace_handler(unsigned long ip,
unsigned long parent_ip,
struct ftrace_ops *fops,
struct ftrace_regs *fregs)
{
struct unwindme *u = (struct unwindme *)fregs->regs.gprs[2];
u->ret = test_unwind(NULL, (u->flags & UWM_REGS) ? &fregs->regs : NULL,
(u->flags & UWM_SP) ? u->sp : 0);
}
static noinline int test_unwind_ftraced_func(struct unwindme *u)
{
return READ_ONCE(u)->ret;
}
static int test_unwind_ftrace(struct unwindme *u)
{
int ret;
#ifdef CONFIG_DYNAMIC_FTRACE
struct ftrace_ops *fops;
fops = kunit_kzalloc(current_test, sizeof(*fops), GFP_KERNEL);
fops->func = test_unwind_ftrace_handler;
fops->flags = FTRACE_OPS_FL_DYNAMIC |
FTRACE_OPS_FL_RECURSION |
FTRACE_OPS_FL_SAVE_REGS |
FTRACE_OPS_FL_PERMANENT;
#else
kunit_skip(current_test, "requires CONFIG_DYNAMIC_FTRACE");
#endif
ret = ftrace_set_filter_ip(fops, (unsigned long)test_unwind_ftraced_func, 0, 0);
if (ret) {
kunit_err(current_test, "failed to set ftrace filter (%d)\n", ret);
return -1;
}
ret = register_ftrace_function(fops);
if (!ret) {
ret = test_unwind_ftraced_func(u);
unregister_ftrace_function(fops);
} else {
kunit_err(current_test, "failed to register ftrace handler (%d)\n", ret);
}
ftrace_set_filter_ip(fops, (unsigned long)test_unwind_ftraced_func, 1, 0);
return ret;
}
/* This function may or may not appear in the backtrace. */
static noinline int unwindme_func4(struct unwindme *u)
{
@@ -157,35 +302,12 @@ static noinline int unwindme_func4(struct unwindme *u)
wait_event(u->task_wq, kthread_should_park());
kthread_parkme();
return 0;
#ifdef CONFIG_KPROBES
} else if (u->flags & UWM_PGM) {
struct kprobe kp;
int ret;
unwindme = u;
memset(&kp, 0, sizeof(kp));
kp.symbol_name = "do_report_trap";
kp.pre_handler = pgm_pre_handler;
ret = register_kprobe(&kp);
if (ret < 0) {
kunit_err(current_test, "register_kprobe failed %d\n", ret);
return -EINVAL;
}
/*
* Trigger operation exception; use insn notation to bypass
* llvm's integrated assembler sanity checks.
*/
asm volatile(
" .insn e,0x0000\n" /* illegal opcode */
"0: nopr %%r7\n"
EX_TABLE(0b, 0b)
:);
unregister_kprobe(&kp);
unwindme = NULL;
return u->ret;
#endif
} else if (u->flags & (UWM_PGM | UWM_KPROBE_ON_FTRACE)) {
return test_unwind_kprobe(u);
} else if (u->flags & (UWM_KRETPROBE)) {
return test_unwind_kretprobe(u);
} else if (u->flags & UWM_FTRACE) {
return test_unwind_ftrace(u);
} else {
struct pt_regs regs;
@@ -255,7 +377,7 @@ static int test_unwind_irq(struct unwindme *u)
}
/* Spawns a task and passes it to test_unwind(). */
static int test_unwind_task(struct kunit *test, struct unwindme *u)
static int test_unwind_task(struct unwindme *u)
{
struct task_struct *task;
int ret;
@@ -270,7 +392,7 @@ static int test_unwind_task(struct kunit *test, struct unwindme *u)
*/
task = kthread_run(unwindme_func1, u, "%s", __func__);
if (IS_ERR(task)) {
kunit_err(test, "kthread_run() failed\n");
kunit_err(current_test, "kthread_run() failed\n");
return PTR_ERR(task);
}
/*
@@ -293,49 +415,43 @@ struct test_params {
/*
* Create required parameter list for tests
*/
#define TEST_WITH_FLAGS(f) { .flags = f, .name = #f }
static const struct test_params param_list[] = {
{.flags = UWM_DEFAULT, .name = "UWM_DEFAULT"},
{.flags = UWM_SP, .name = "UWM_SP"},
{.flags = UWM_REGS, .name = "UWM_REGS"},
{.flags = UWM_SWITCH_STACK,
.name = "UWM_SWITCH_STACK"},
{.flags = UWM_SP | UWM_REGS,
.name = "UWM_SP | UWM_REGS"},
{.flags = UWM_CALLER | UWM_SP,
.name = "WM_CALLER | UWM_SP"},
{.flags = UWM_CALLER | UWM_SP | UWM_REGS,
.name = "UWM_CALLER | UWM_SP | UWM_REGS"},
{.flags = UWM_CALLER | UWM_SP | UWM_REGS | UWM_SWITCH_STACK,
.name = "UWM_CALLER | UWM_SP | UWM_REGS | UWM_SWITCH_STACK"},
{.flags = UWM_THREAD, .name = "UWM_THREAD"},
{.flags = UWM_THREAD | UWM_SP,
.name = "UWM_THREAD | UWM_SP"},
{.flags = UWM_THREAD | UWM_CALLER | UWM_SP,
.name = "UWM_THREAD | UWM_CALLER | UWM_SP"},
{.flags = UWM_IRQ, .name = "UWM_IRQ"},
{.flags = UWM_IRQ | UWM_SWITCH_STACK,
.name = "UWM_IRQ | UWM_SWITCH_STACK"},
{.flags = UWM_IRQ | UWM_SP,
.name = "UWM_IRQ | UWM_SP"},
{.flags = UWM_IRQ | UWM_REGS,
.name = "UWM_IRQ | UWM_REGS"},
{.flags = UWM_IRQ | UWM_SP | UWM_REGS,
.name = "UWM_IRQ | UWM_SP | UWM_REGS"},
{.flags = UWM_IRQ | UWM_CALLER | UWM_SP,
.name = "UWM_IRQ | UWM_CALLER | UWM_SP"},
{.flags = UWM_IRQ | UWM_CALLER | UWM_SP | UWM_REGS,
.name = "UWM_IRQ | UWM_CALLER | UWM_SP | UWM_REGS"},
{.flags = UWM_IRQ | UWM_CALLER | UWM_SP | UWM_REGS | UWM_SWITCH_STACK,
.name = "UWM_IRQ | UWM_CALLER | UWM_SP | UWM_REGS | UWM_SWITCH_STACK"},
#ifdef CONFIG_KPROBES
{.flags = UWM_PGM, .name = "UWM_PGM"},
{.flags = UWM_PGM | UWM_SP,
.name = "UWM_PGM | UWM_SP"},
{.flags = UWM_PGM | UWM_REGS,
.name = "UWM_PGM | UWM_REGS"},
{.flags = UWM_PGM | UWM_SP | UWM_REGS,
.name = "UWM_PGM | UWM_SP | UWM_REGS"},
#endif
TEST_WITH_FLAGS(UWM_DEFAULT),
TEST_WITH_FLAGS(UWM_SP),
TEST_WITH_FLAGS(UWM_REGS),
TEST_WITH_FLAGS(UWM_SWITCH_STACK),
TEST_WITH_FLAGS(UWM_SP | UWM_REGS),
TEST_WITH_FLAGS(UWM_CALLER | UWM_SP),
TEST_WITH_FLAGS(UWM_CALLER | UWM_SP | UWM_REGS),
TEST_WITH_FLAGS(UWM_CALLER | UWM_SP | UWM_REGS | UWM_SWITCH_STACK),
TEST_WITH_FLAGS(UWM_THREAD),
TEST_WITH_FLAGS(UWM_THREAD | UWM_SP),
TEST_WITH_FLAGS(UWM_THREAD | UWM_CALLER | UWM_SP),
TEST_WITH_FLAGS(UWM_IRQ),
TEST_WITH_FLAGS(UWM_IRQ | UWM_SWITCH_STACK),
TEST_WITH_FLAGS(UWM_IRQ | UWM_SP),
TEST_WITH_FLAGS(UWM_IRQ | UWM_REGS),
TEST_WITH_FLAGS(UWM_IRQ | UWM_SP | UWM_REGS),
TEST_WITH_FLAGS(UWM_IRQ | UWM_CALLER | UWM_SP),
TEST_WITH_FLAGS(UWM_IRQ | UWM_CALLER | UWM_SP | UWM_REGS),
TEST_WITH_FLAGS(UWM_IRQ | UWM_CALLER | UWM_SP | UWM_REGS | UWM_SWITCH_STACK),
TEST_WITH_FLAGS(UWM_PGM),
TEST_WITH_FLAGS(UWM_PGM | UWM_SP),
TEST_WITH_FLAGS(UWM_PGM | UWM_REGS),
TEST_WITH_FLAGS(UWM_PGM | UWM_SP | UWM_REGS),
TEST_WITH_FLAGS(UWM_KPROBE_ON_FTRACE),
TEST_WITH_FLAGS(UWM_KPROBE_ON_FTRACE | UWM_SP),
TEST_WITH_FLAGS(UWM_KPROBE_ON_FTRACE | UWM_REGS),
TEST_WITH_FLAGS(UWM_KPROBE_ON_FTRACE | UWM_SP | UWM_REGS),
TEST_WITH_FLAGS(UWM_FTRACE),
TEST_WITH_FLAGS(UWM_FTRACE | UWM_SP),
TEST_WITH_FLAGS(UWM_FTRACE | UWM_REGS),
TEST_WITH_FLAGS(UWM_FTRACE | UWM_SP | UWM_REGS),
TEST_WITH_FLAGS(UWM_KRETPROBE),
TEST_WITH_FLAGS(UWM_KRETPROBE | UWM_SP),
TEST_WITH_FLAGS(UWM_KRETPROBE | UWM_REGS),
TEST_WITH_FLAGS(UWM_KRETPROBE | UWM_SP | UWM_REGS),
};
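
Since TEST_WITH_FLAGS() stringifies the flags, the flag combination and its KUnit case name can no longer drift apart (the old table had exactly such a drift in "WM_CALLER | UWM_SP"). A plausible way this table feeds KUnit's parameterized runner, sketched here with an assumed descriptor callback, would be:

static void unwind_param_desc(const struct test_params *params, char *desc)
{
	strscpy(desc, params->name, KUNIT_PARAM_DESC_SIZE);
}

/* Generates unwind_gen_params() for use with KUNIT_CASE_PARAM(). */
KUNIT_ARRAY_PARAM(unwind, param_list, unwind_param_desc);
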
/*
@@ -360,7 +476,7 @@ static void test_unwind_flags(struct kunit *test)
params = (const struct test_params *)test->param_value;
u.flags = params->flags;
if (u.flags & UWM_THREAD)
KUNIT_EXPECT_EQ(test, 0, test_unwind_task(test, &u));
KUNIT_EXPECT_EQ(test, 0, test_unwind_task(&u));
else if (u.flags & UWM_IRQ)
KUNIT_EXPECT_EQ(test, 0, test_unwind_irq(&u));
else


@@ -8,13 +8,10 @@
* Gerald Schaefer (gerald.schaefer@de.ibm.com)
*/
#include <linux/jump_label.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>
#include <asm/asm-extable.h>
#ifdef CONFIG_DEBUG_ENTRY
void debug_user_asce(int exit)
@@ -34,32 +31,8 @@ void debug_user_asce(int exit)
}
#endif /*CONFIG_DEBUG_ENTRY */
#ifndef CONFIG_HAVE_MARCH_Z10_FEATURES
static DEFINE_STATIC_KEY_FALSE(have_mvcos);
static int __init uaccess_init(void)
{
if (test_facility(27))
static_branch_enable(&have_mvcos);
return 0;
}
early_initcall(uaccess_init);
static inline int copy_with_mvcos(void)
{
if (static_branch_likely(&have_mvcos))
return 1;
return 0;
}
#else
static inline int copy_with_mvcos(void)
{
return 1;
}
#endif
static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
unsigned long size, unsigned long key)
static unsigned long raw_copy_from_user_key(void *to, const void __user *from,
unsigned long size, unsigned long key)
{
unsigned long tmp1, tmp2;
union oac spec = {
@@ -72,7 +45,7 @@ static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr
tmp1 = -4096UL;
asm volatile(
" lr 0,%[spec]\n"
"0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
"0: mvcos 0(%2),0(%1),%0\n"
"6: jz 4f\n"
"1: algr %0,%3\n"
" slgr %1,%3\n"
@@ -83,61 +56,18 @@ static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr
" slgr %4,%1\n"
" clgr %0,%4\n" /* copy crosses next page boundary? */
" jnh 5f\n"
"3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
"3: mvcos 0(%2),0(%1),%4\n"
"7: slgr %0,%4\n"
" j 5f\n"
"4: slgr %0,%0\n"
"5:\n"
EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
: "+a" (size), "+a" (from), "+a" (to), "+a" (tmp1), "=a" (tmp2)
: [spec] "d" (spec.val)
: "cc", "memory", "0");
return size;
}
static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
unsigned long size, unsigned long key)
{
unsigned long tmp1, tmp2;
tmp1 = -256UL;
asm volatile(
" sacf 0\n"
"0: mvcp 0(%0,%2),0(%1),%[key]\n"
"7: jz 5f\n"
"1: algr %0,%3\n"
" la %1,256(%1)\n"
" la %2,256(%2)\n"
"2: mvcp 0(%0,%2),0(%1),%[key]\n"
"8: jnz 1b\n"
" j 5f\n"
"3: la %4,255(%1)\n" /* %4 = ptr + 255 */
" lghi %3,-4096\n"
" nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
" slgr %4,%1\n"
" clgr %0,%4\n" /* copy crosses next page boundary? */
" jnh 6f\n"
"4: mvcp 0(%4,%2),0(%1),%[key]\n"
"9: slgr %0,%4\n"
" j 6f\n"
"5: slgr %0,%0\n"
"6: sacf 768\n"
EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
: [key] "d" (key << 4)
: "cc", "memory");
return size;
}
static unsigned long raw_copy_from_user_key(void *to, const void __user *from,
unsigned long n, unsigned long key)
{
if (copy_with_mvcos())
return copy_from_user_mvcos(to, from, n, key);
return copy_from_user_mvcp(to, from, n, key);
}
unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
return raw_copy_from_user_key(to, from, n, 0);
@@ -160,8 +90,8 @@ unsigned long _copy_from_user_key(void *to, const void __user *from,
}
EXPORT_SYMBOL(_copy_from_user_key);
static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
unsigned long size, unsigned long key)
static unsigned long raw_copy_to_user_key(void __user *to, const void *from,
unsigned long size, unsigned long key)
{
unsigned long tmp1, tmp2;
union oac spec = {
@@ -174,7 +104,7 @@ static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
tmp1 = -4096UL;
asm volatile(
" lr 0,%[spec]\n"
"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
"0: mvcos 0(%1),0(%2),%0\n"
"6: jz 4f\n"
"1: algr %0,%3\n"
" slgr %1,%3\n"
@@ -185,61 +115,18 @@ static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
" slgr %4,%1\n"
" clgr %0,%4\n" /* copy crosses next page boundary? */
" jnh 5f\n"
"3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n"
"3: mvcos 0(%1),0(%2),%4\n"
"7: slgr %0,%4\n"
" j 5f\n"
"4: slgr %0,%0\n"
"5:\n"
EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
: "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
: [spec] "d" (spec.val)
: "cc", "memory", "0");
return size;
}
static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
unsigned long size, unsigned long key)
{
unsigned long tmp1, tmp2;
tmp1 = -256UL;
asm volatile(
" sacf 0\n"
"0: mvcs 0(%0,%1),0(%2),%[key]\n"
"7: jz 5f\n"
"1: algr %0,%3\n"
" la %1,256(%1)\n"
" la %2,256(%2)\n"
"2: mvcs 0(%0,%1),0(%2),%[key]\n"
"8: jnz 1b\n"
" j 5f\n"
"3: la %4,255(%1)\n" /* %4 = ptr + 255 */
" lghi %3,-4096\n"
" nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
" slgr %4,%1\n"
" clgr %0,%4\n" /* copy crosses next page boundary? */
" jnh 6f\n"
"4: mvcs 0(%4,%1),0(%2),%[key]\n"
"9: slgr %0,%4\n"
" j 6f\n"
"5: slgr %0,%0\n"
"6: sacf 768\n"
EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
: [key] "d" (key << 4)
: "cc", "memory");
return size;
}
static unsigned long raw_copy_to_user_key(void __user *to, const void *from,
unsigned long n, unsigned long key)
{
if (copy_with_mvcos())
return copy_to_user_mvcos(to, from, n, key);
return copy_to_user_mvcs(to, from, n, key);
}
unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
return raw_copy_to_user_key(to, from, n, 0);
@@ -257,7 +144,7 @@ unsigned long _copy_to_user_key(void __user *to, const void *from,
}
EXPORT_SYMBOL(_copy_to_user_key);
static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size)
unsigned long __clear_user(void __user *to, unsigned long size)
{
unsigned long tmp1, tmp2;
union oac spec = {
@@ -268,7 +155,7 @@ static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size
tmp1 = -4096UL;
asm volatile(
" lr 0,%[spec]\n"
"0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n"
"0: mvcos 0(%1),0(%4),%0\n"
" jz 4f\n"
"1: algr %0,%2\n"
" slgr %1,%2\n"
@@ -278,7 +165,7 @@ static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size
" slgr %3,%1\n"
" clgr %0,%3\n" /* copy crosses next page boundary? */
" jnh 5f\n"
"3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n"
"3: mvcos 0(%1),0(%4),%3\n"
" slgr %0,%3\n"
" j 5f\n"
"4: slgr %0,%0\n"
@@ -289,46 +176,4 @@ static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size
: "cc", "memory", "0");
return size;
}
static inline unsigned long clear_user_xc(void __user *to, unsigned long size)
{
unsigned long tmp1, tmp2;
asm volatile(
" sacf 256\n"
" aghi %0,-1\n"
" jo 5f\n"
" bras %3,3f\n"
" xc 0(1,%1),0(%1)\n"
"0: aghi %0,257\n"
" la %2,255(%1)\n" /* %2 = ptr + 255 */
" srl %2,12\n"
" sll %2,12\n" /* %2 = (ptr + 255) & -4096 */
" slgr %2,%1\n"
" clgr %0,%2\n" /* clear crosses next page boundary? */
" jnh 5f\n"
" aghi %2,-1\n"
"1: ex %2,0(%3)\n"
" aghi %2,1\n"
" slgr %0,%2\n"
" j 5f\n"
"2: xc 0(256,%1),0(%1)\n"
" la %1,256(%1)\n"
"3: aghi %0,-256\n"
" jnm 2b\n"
"4: ex %0,0(%3)\n"
"5: slgr %0,%0\n"
"6: sacf 768\n"
EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
: "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
: : "cc", "memory");
return size;
}
unsigned long __clear_user(void __user *to, unsigned long size)
{
if (copy_with_mvcos())
return clear_user_mvcos(to, size);
return clear_user_xc(to, size);
}
EXPORT_SYMBOL(__clear_user);
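
All three remaining primitives are now unconditionally built on mvcos, which takes an operand-access-control word in register 0 to select address space and key checking per operand. For reference, the oac union loaded via "lr 0,%[spec]" is shaped roughly as follows (assumed to live in the uaccess header; shown only as an illustration):

union oac {
	unsigned int val;
	struct {
		struct {
			unsigned short key : 4;	/* access key, operand 1 */
			unsigned short     : 4;
			unsigned short as  : 2;	/* address space, operand 1 */
			unsigned short     : 4;
			unsigned short k   : 1;	/* enable key checking */
			unsigned short a   : 1;	/* enable as specification */
		} oac1;
		struct {
			unsigned short key : 4;	/* same layout, operand 2 */
			unsigned short     : 4;
			unsigned short as  : 2;
			unsigned short     : 4;
			unsigned short k   : 1;
			unsigned short a   : 1;
		} oac2;
	};
};

Dropping the mvcp/mvcs and xc fallbacks is a direct consequence of the z10 baseline: facility 27 (mvcos) is part of the z10 architecture level set, so the have_mvcos static key had nothing left to guard.
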


@@ -4,7 +4,7 @@
#
obj-y := init.o fault.o extmem.o mmap.o vmem.o maccess.o
obj-y += page-states.o pageattr.o pgtable.o pgalloc.o
obj-y += page-states.o pageattr.o pgtable.o pgalloc.o extable.o
obj-$(CONFIG_CMM) += cmm.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o

arch/s390/mm/extable.c (new file)

@@ -0,0 +1,50 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/extable.h>
#include <linux/errno.h>
#include <linux/panic.h>
#include <asm/asm-extable.h>
#include <asm/extable.h>
const struct exception_table_entry *s390_search_extables(unsigned long addr)
{
const struct exception_table_entry *fixup;
size_t num;
fixup = search_exception_tables(addr);
if (fixup)
return fixup;
num = __stop_amode31_ex_table - __start_amode31_ex_table;
return search_extable(__start_amode31_ex_table, num, addr);
}
static bool ex_handler_fixup(const struct exception_table_entry *ex, struct pt_regs *regs)
{
regs->psw.addr = extable_fixup(ex);
return true;
}
static bool ex_handler_uaccess(const struct exception_table_entry *ex, struct pt_regs *regs)
{
regs->gprs[ex->data] = -EFAULT;
regs->psw.addr = extable_fixup(ex);
return true;
}
bool fixup_exception(struct pt_regs *regs)
{
const struct exception_table_entry *ex;
ex = s390_search_extables(instruction_pointer(regs));
if (!ex)
return false;
switch (ex->type) {
case EX_TYPE_FIXUP:
return ex_handler_fixup(ex, regs);
case EX_TYPE_BPF:
return ex_handler_bpf(ex, regs);
case EX_TYPE_UACCESS:
return ex_handler_uaccess(ex, regs);
}
panic("invalid exception table entry");
}
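
fixup_exception() now dispatches on a type tag plus a handler-specific data field (for EX_TYPE_UACCESS, the number of the register to poison with -EFAULT). The entry layout this relies on is roughly the following sketch; the authoritative definition lives in the extable headers, not in this hunk:

#define EX_TYPE_NONE	0
#define EX_TYPE_FIXUP	1
#define EX_TYPE_BPF	2
#define EX_TYPE_UACCESS	3

struct exception_table_entry {
	int insn;	/* faulting instruction, PC-relative offset */
	int fixup;	/* fixup target, PC-relative offset */
	short type;	/* EX_TYPE_*, switched on above */
	short data;	/* handler-specific, e.g. a register number */
};

Encoding the fixup behaviour in the entry itself is what lets the series delete the anonymous out-of-line fixup stubs mentioned in the pull summary.
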


@@ -32,6 +32,7 @@
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <linux/kfence.h>
#include <asm/asm-extable.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/gmap.h>
@@ -227,27 +228,10 @@ static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
}
const struct exception_table_entry *s390_search_extables(unsigned long addr)
{
const struct exception_table_entry *fixup;
fixup = search_extable(__start_amode31_ex_table,
__stop_amode31_ex_table - __start_amode31_ex_table,
addr);
if (!fixup)
fixup = search_exception_tables(addr);
return fixup;
}
static noinline void do_no_context(struct pt_regs *regs)
{
const struct exception_table_entry *fixup;
/* Are we prepared to handle this kernel fault? */
fixup = s390_search_extables(regs->psw.addr);
if (fixup && ex_handle(fixup, regs))
if (fixup_exception(regs))
return;
/*
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.


@@ -974,18 +974,18 @@ static int gmap_protect_pmd(struct gmap *gmap, unsigned long gaddr,
return -EAGAIN;
if (prot == PROT_NONE && !pmd_i) {
pmd_val(new) |= _SEGMENT_ENTRY_INVALID;
new = set_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_INVALID));
gmap_pmdp_xchg(gmap, pmdp, new, gaddr);
}
if (prot == PROT_READ && !pmd_p) {
pmd_val(new) &= ~_SEGMENT_ENTRY_INVALID;
pmd_val(new) |= _SEGMENT_ENTRY_PROTECT;
new = clear_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_INVALID));
new = set_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_PROTECT));
gmap_pmdp_xchg(gmap, pmdp, new, gaddr);
}
if (bits & GMAP_NOTIFY_MPROT)
pmd_val(*pmdp) |= _SEGMENT_ENTRY_GMAP_IN;
set_pmd(pmdp, set_pmd_bit(*pmdp, __pgprot(_SEGMENT_ENTRY_GMAP_IN)));
/* Shadow GMAP protection needs split PMDs */
if (bits & GMAP_NOTIFY_SHADOW)
@@ -1151,7 +1151,7 @@ int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
address = pte_val(pte) & PAGE_MASK;
address += gaddr & ~PAGE_MASK;
*val = *(unsigned long *) address;
pte_val(*ptep) |= _PAGE_YOUNG;
set_pte(ptep, set_pte_bit(*ptep, __pgprot(_PAGE_YOUNG)));
/* Do *NOT* clear the _PAGE_INVALID bit! */
rc = 0;
}
@@ -1278,7 +1278,7 @@ static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
static inline void gmap_idte_one(unsigned long asce, unsigned long vaddr)
{
asm volatile(
" .insn rrf,0xb98e0000,%0,%1,0,0"
" idte %0,0,%1"
: : "a" (asce), "a" (vaddr) : "cc", "memory");
}
@@ -2275,7 +2275,7 @@ EXPORT_SYMBOL_GPL(ptep_notify);
static void pmdp_notify_gmap(struct gmap *gmap, pmd_t *pmdp,
unsigned long gaddr)
{
pmd_val(*pmdp) &= ~_SEGMENT_ENTRY_GMAP_IN;
set_pmd(pmdp, clear_pmd_bit(*pmdp, __pgprot(_SEGMENT_ENTRY_GMAP_IN)));
gmap_call_notifier(gmap, gaddr, gaddr + HPAGE_SIZE - 1);
}
@@ -2294,7 +2294,7 @@ static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *pmdp, pmd_t new,
{
gaddr &= HPAGE_MASK;
pmdp_notify_gmap(gmap, pmdp, gaddr);
pmd_val(new) &= ~_SEGMENT_ENTRY_GMAP_IN;
new = clear_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_GMAP_IN));
if (MACHINE_HAS_TLB_GUEST)
__pmdp_idte(gaddr, (pmd_t *)pmdp, IDTE_GUEST_ASCE, gmap->asce,
IDTE_GLOBAL);
@@ -2302,7 +2302,7 @@ static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *pmdp, pmd_t new,
__pmdp_idte(gaddr, (pmd_t *)pmdp, 0, 0, IDTE_GLOBAL);
else
__pmdp_csp(pmdp);
*pmdp = new;
set_pmd(pmdp, new);
}
static void gmap_pmdp_clear(struct mm_struct *mm, unsigned long vmaddr,
@@ -2324,7 +2324,7 @@ static void gmap_pmdp_clear(struct mm_struct *mm, unsigned long vmaddr,
_SEGMENT_ENTRY_GMAP_UC));
if (purge)
__pmdp_csp(pmdp);
pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}
spin_unlock(&gmap->guest_table_lock);
}
@@ -2447,7 +2447,7 @@ static bool gmap_test_and_clear_dirty_pmd(struct gmap *gmap, pmd_t *pmdp,
return false;
/* Clear UC indication and reset protection */
pmd_val(*pmdp) &= ~_SEGMENT_ENTRY_GMAP_UC;
set_pmd(pmdp, clear_pmd_bit(*pmdp, __pgprot(_SEGMENT_ENTRY_GMAP_UC)));
gmap_protect_pmd(gmap, gaddr, pmdp, PROT_READ, 0);
return true;
}
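
Every store in this file now goes through set_pte()/set_pmd() and the bit helpers instead of poking pmd_val(*pmdp) as an lvalue, which is what allows pte_val()/pXd_val() to become plain functions. The helpers are not part of this hunk; conceptually they reduce to (sketch):

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;	/* single, greppable store to the entry */
}

static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	return __pmd(pmd_val(pmd) | pgprot_val(prot));
}

static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	return __pmd(pmd_val(pmd) & ~pgprot_val(prot));
}

Funnelling all page-table writes through one helper per level gives a single place to add ordering or instrumentation later.
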


@@ -73,8 +73,8 @@ static inline unsigned long __pte_to_rste(pte_t pte)
static inline pte_t __rste_to_pte(unsigned long rste)
{
unsigned long pteval;
int present;
pte_t pte;
if ((rste & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
present = pud_present(__pud(rste));
@@ -102,29 +102,21 @@ static inline pte_t __rste_to_pte(unsigned long rste)
* u unused, l large
*/
if (present) {
pte_val(pte) = rste & _SEGMENT_ENTRY_ORIGIN_LARGE;
pte_val(pte) |= _PAGE_LARGE | _PAGE_PRESENT;
pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_READ,
_PAGE_READ);
pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_WRITE,
_PAGE_WRITE);
pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_INVALID,
_PAGE_INVALID);
pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_PROTECT,
_PAGE_PROTECT);
pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_DIRTY,
_PAGE_DIRTY);
pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_YOUNG,
_PAGE_YOUNG);
pteval = rste & _SEGMENT_ENTRY_ORIGIN_LARGE;
pteval |= _PAGE_LARGE | _PAGE_PRESENT;
pteval |= move_set_bit(rste, _SEGMENT_ENTRY_READ, _PAGE_READ);
pteval |= move_set_bit(rste, _SEGMENT_ENTRY_WRITE, _PAGE_WRITE);
pteval |= move_set_bit(rste, _SEGMENT_ENTRY_INVALID, _PAGE_INVALID);
pteval |= move_set_bit(rste, _SEGMENT_ENTRY_PROTECT, _PAGE_PROTECT);
pteval |= move_set_bit(rste, _SEGMENT_ENTRY_DIRTY, _PAGE_DIRTY);
pteval |= move_set_bit(rste, _SEGMENT_ENTRY_YOUNG, _PAGE_YOUNG);
#ifdef CONFIG_MEM_SOFT_DIRTY
pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_SOFT_DIRTY,
_PAGE_SOFT_DIRTY);
pteval |= move_set_bit(rste, _SEGMENT_ENTRY_SOFT_DIRTY, _PAGE_SOFT_DIRTY);
#endif
pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_NOEXEC,
_PAGE_NOEXEC);
pteval |= move_set_bit(rste, _SEGMENT_ENTRY_NOEXEC, _PAGE_NOEXEC);
} else
pte_val(pte) = _PAGE_INVALID;
return pte;
pteval = _PAGE_INVALID;
return __pte(pteval);
}
static void clear_huge_pte_skeys(struct mm_struct *mm, unsigned long rste)
@@ -168,7 +160,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
rste |= _SEGMENT_ENTRY_LARGE;
clear_huge_pte_skeys(mm, rste);
pte_val(*ptep) = rste;
set_pte(ptep, __pte(rste));
}
pte_t huge_ptep_get(pte_t *ptep)


@@ -175,7 +175,7 @@ static void __init kasan_early_pgtable_populate(unsigned long address,
page = kasan_early_alloc_segment();
memset(page, 0, _SEGMENT_SIZE);
}
pmd_val(*pm_dir) = __pa(page) | sgt_prot;
set_pmd(pm_dir, __pmd(__pa(page) | sgt_prot));
address = (address + PMD_SIZE) & PMD_MASK;
continue;
}
@@ -194,16 +194,16 @@ static void __init kasan_early_pgtable_populate(unsigned long address,
switch (mode) {
case POPULATE_ONE2ONE:
page = (void *)address;
pte_val(*pt_dir) = __pa(page) | pgt_prot;
set_pte(pt_dir, __pte(__pa(page) | pgt_prot));
break;
case POPULATE_MAP:
page = kasan_early_alloc_pages(0);
memset(page, 0, PAGE_SIZE);
pte_val(*pt_dir) = __pa(page) | pgt_prot;
set_pte(pt_dir, __pte(__pa(page) | pgt_prot));
break;
case POPULATE_ZERO_SHADOW:
page = kasan_early_shadow_page;
pte_val(*pt_dir) = __pa(page) | pgt_prot_zero;
set_pte(pt_dir, __pte(__pa(page) | pgt_prot_zero));
break;
case POPULATE_SHALLOW:
/* should never happen */


@@ -4,8 +4,6 @@
*
* Copyright IBM Corp. 2009, 2015
*
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
*
*/
#include <linux/uaccess.h>
@@ -14,6 +12,7 @@
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <asm/asm-extable.h>
#include <asm/ctl_reg.h>
#include <asm/io.h>
#include <asm/stacktrace.h>
@@ -123,7 +122,7 @@ static unsigned long __no_sanitize_address _memcpy_real(unsigned long dest,
/*
* Copy memory in real mode (kernel to kernel)
*/
int memcpy_real(void *dest, void *src, size_t count)
int memcpy_real(void *dest, unsigned long src, size_t count)
{
unsigned long _dest = (unsigned long)dest;
unsigned long _src = (unsigned long)src;
@@ -175,7 +174,7 @@ void memcpy_absolute(void *dest, void *src, size_t count)
/*
* Copy memory from kernel (real) to user (virtual)
*/
int copy_to_user_real(void __user *dest, void *src, unsigned long count)
int copy_to_user_real(void __user *dest, unsigned long src, unsigned long count)
{
int offs = 0, size, rc;
char *buf;
@@ -201,15 +200,15 @@ out:
/*
* Check if physical address is within prefix or zero page
*/
static int is_swapped(unsigned long addr)
static int is_swapped(phys_addr_t addr)
{
unsigned long lc;
phys_addr_t lc;
int cpu;
if (addr < sizeof(struct lowcore))
return 1;
for_each_online_cpu(cpu) {
lc = (unsigned long) lowcore_ptr[cpu];
lc = virt_to_phys(lowcore_ptr[cpu]);
if (addr > lc + sizeof(struct lowcore) - 1 || addr < lc)
continue;
return 1;
@@ -225,7 +224,8 @@ static int is_swapped(unsigned long addr)
*/
void *xlate_dev_mem_ptr(phys_addr_t addr)
{
void *bounce = (void *) addr;
void *ptr = phys_to_virt(addr);
void *bounce = ptr;
unsigned long size;
cpus_read_lock();
@@ -234,7 +234,7 @@ void *xlate_dev_mem_ptr(phys_addr_t addr)
size = PAGE_SIZE - (addr & ~PAGE_MASK);
bounce = (void *) __get_free_page(GFP_ATOMIC);
if (bounce)
memcpy_absolute(bounce, (void *) addr, size);
memcpy_absolute(bounce, ptr, size);
}
preempt_enable();
cpus_read_unlock();
@@ -244,8 +244,8 @@ void *xlate_dev_mem_ptr(phys_addr_t addr)
/*
* Free converted buffer for /dev/mem access (if necessary)
*/
void unxlate_dev_mem_ptr(phys_addr_t addr, void *buf)
void unxlate_dev_mem_ptr(phys_addr_t addr, void *ptr)
{
if ((void *) addr != buf)
free_page((unsigned long) buf);
if (addr != virt_to_phys(ptr))
free_page((unsigned long)ptr);
}


@@ -14,6 +14,7 @@
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <asm/asm-extable.h>
#include <asm/facility.h>
#include <asm/page-states.h>


@@ -98,9 +98,9 @@ static int walk_pte_level(pmd_t *pmdp, unsigned long addr, unsigned long end,
else if (flags & SET_MEMORY_RW)
new = pte_mkwrite(pte_mkdirty(new));
if (flags & SET_MEMORY_NX)
pte_val(new) |= _PAGE_NOEXEC;
new = set_pte_bit(new, __pgprot(_PAGE_NOEXEC));
else if (flags & SET_MEMORY_X)
pte_val(new) &= ~_PAGE_NOEXEC;
new = clear_pte_bit(new, __pgprot(_PAGE_NOEXEC));
pgt_set((unsigned long *)ptep, pte_val(new), addr, CRDTE_DTT_PAGE);
ptep++;
addr += PAGE_SIZE;
@@ -127,11 +127,11 @@ static int split_pmd_page(pmd_t *pmdp, unsigned long addr)
prot &= ~_PAGE_NOEXEC;
ptep = pt_dir;
for (i = 0; i < PTRS_PER_PTE; i++) {
pte_val(*ptep) = pte_addr | prot;
set_pte(ptep, __pte(pte_addr | prot));
pte_addr += PAGE_SIZE;
ptep++;
}
pmd_val(new) = __pa(pt_dir) | _SEGMENT_ENTRY;
new = __pmd(__pa(pt_dir) | _SEGMENT_ENTRY);
pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT);
update_page_count(PG_DIRECT_MAP_4K, PTRS_PER_PTE);
update_page_count(PG_DIRECT_MAP_1M, -1);
@@ -148,9 +148,9 @@ static void modify_pmd_page(pmd_t *pmdp, unsigned long addr,
else if (flags & SET_MEMORY_RW)
new = pmd_mkwrite(pmd_mkdirty(new));
if (flags & SET_MEMORY_NX)
pmd_val(new) |= _SEGMENT_ENTRY_NOEXEC;
new = set_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_NOEXEC));
else if (flags & SET_MEMORY_X)
pmd_val(new) &= ~_SEGMENT_ENTRY_NOEXEC;
new = clear_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_NOEXEC));
pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT);
}
@@ -208,11 +208,11 @@ static int split_pud_page(pud_t *pudp, unsigned long addr)
prot &= ~_SEGMENT_ENTRY_NOEXEC;
pmdp = pm_dir;
for (i = 0; i < PTRS_PER_PMD; i++) {
pmd_val(*pmdp) = pmd_addr | prot;
set_pmd(pmdp, __pmd(pmd_addr | prot));
pmd_addr += PMD_SIZE;
pmdp++;
}
pud_val(new) = __pa(pm_dir) | _REGION3_ENTRY;
new = __pud(__pa(pm_dir) | _REGION3_ENTRY);
pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
update_page_count(PG_DIRECT_MAP_1M, PTRS_PER_PMD);
update_page_count(PG_DIRECT_MAP_2G, -1);
@@ -229,9 +229,9 @@ static void modify_pud_page(pud_t *pudp, unsigned long addr,
else if (flags & SET_MEMORY_RW)
new = pud_mkwrite(pud_mkdirty(new));
if (flags & SET_MEMORY_NX)
pud_val(new) |= _REGION_ENTRY_NOEXEC;
new = set_pud_bit(new, __pgprot(_REGION_ENTRY_NOEXEC));
else if (flags & SET_MEMORY_X)
pud_val(new) &= ~_REGION_ENTRY_NOEXEC;
new = clear_pud_bit(new, __pgprot(_REGION_ENTRY_NOEXEC));
pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
}
@@ -347,23 +347,24 @@ static void ipte_range(pte_t *pte, unsigned long address, int nr)
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
unsigned long address;
pte_t *ptep, pte;
int nr, i, j;
pte_t *pte;
for (i = 0; i < numpages;) {
address = (unsigned long)page_to_virt(page + i);
pte = virt_to_kpte(address);
nr = (unsigned long)pte >> ilog2(sizeof(long));
ptep = virt_to_kpte(address);
nr = (unsigned long)ptep >> ilog2(sizeof(long));
nr = PTRS_PER_PTE - (nr & (PTRS_PER_PTE - 1));
nr = min(numpages - i, nr);
if (enable) {
for (j = 0; j < nr; j++) {
pte_val(*pte) &= ~_PAGE_INVALID;
pte = clear_pte_bit(*ptep, __pgprot(_PAGE_INVALID));
set_pte(ptep, pte);
address += PAGE_SIZE;
pte++;
ptep++;
}
} else {
ipte_range(pte, address, nr);
ipte_range(ptep, address, nr);
}
i += nr;
}

View File

@@ -53,17 +53,17 @@ __initcall(page_table_register_sysctl);
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
struct page *page = alloc_pages(GFP_KERNEL, 2);
struct page *page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
if (!page)
return NULL;
arch_set_page_dat(page, 2);
arch_set_page_dat(page, CRST_ALLOC_ORDER);
return (unsigned long *) page_to_virt(page);
}
void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
free_pages((unsigned long) table, 2);
free_pages((unsigned long)table, CRST_ALLOC_ORDER);
}
static void __crst_table_upgrade(void *arg)
@@ -403,7 +403,7 @@ void __tlb_remove_table(void *_table)
switch (half) {
case 0x00U: /* pmd, pud, or p4d */
free_pages((unsigned long) table, 2);
free_pages((unsigned long)table, CRST_ALLOC_ORDER);
return;
case 0x01U: /* lower 2K of a 4K page table */
case 0x02U: /* higher 2K of a 4K page table */
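
The magic order 2 becomes a named constant. A region or segment (CRST) table on s390 is four pages, so the definition presumably amounts to (assumption, from the pgalloc header):

#define CRST_ALLOC_ORDER	2	/* 2^2 pages = 16K per CRST table */
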


@@ -115,7 +115,7 @@ static inline pte_t ptep_flush_lazy(struct mm_struct *mm,
atomic_inc(&mm->context.flush_count);
if (cpumask_equal(&mm->context.cpu_attach_mask,
cpumask_of(smp_processor_id()))) {
pte_val(*ptep) |= _PAGE_INVALID;
set_pte(ptep, set_pte_bit(*ptep, __pgprot(_PAGE_INVALID)));
mm->context.flush_mm = 1;
} else
ptep_ipte_global(mm, addr, ptep, nodat);
@@ -224,15 +224,15 @@ static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
* Without enhanced suppression-on-protection force
* the dirty bit on for all writable ptes.
*/
pte_val(entry) |= _PAGE_DIRTY;
pte_val(entry) &= ~_PAGE_PROTECT;
entry = set_pte_bit(entry, __pgprot(_PAGE_DIRTY));
entry = clear_pte_bit(entry, __pgprot(_PAGE_PROTECT));
}
if (!(pte_val(entry) & _PAGE_PROTECT))
/* This pte allows write access, set user-dirty */
pgste_val(pgste) |= PGSTE_UC_BIT;
}
#endif
*ptep = entry;
set_pte(ptep, entry);
return pgste;
}
@@ -275,12 +275,12 @@ static inline pte_t ptep_xchg_commit(struct mm_struct *mm,
pgste = pgste_update_all(old, pgste, mm);
if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
_PGSTE_GPS_USAGE_UNUSED)
pte_val(old) |= _PAGE_UNUSED;
old = set_pte_bit(old, __pgprot(_PAGE_UNUSED));
}
pgste = pgste_set_pte(ptep, pgste, new);
pgste_set_unlock(ptep, pgste);
} else {
*ptep = new;
set_pte(ptep, new);
}
return old;
}
@@ -345,14 +345,14 @@ void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
struct mm_struct *mm = vma->vm_mm;
if (!MACHINE_HAS_NX)
pte_val(pte) &= ~_PAGE_NOEXEC;
pte = clear_pte_bit(pte, __pgprot(_PAGE_NOEXEC));
if (mm_has_pgste(mm)) {
pgste = pgste_get(ptep);
pgste_set_key(ptep, pgste, pte, mm);
pgste = pgste_set_pte(ptep, pgste, pte);
pgste_set_unlock(ptep, pgste);
} else {
*ptep = pte;
set_pte(ptep, pte);
}
preempt_enable();
}
@@ -417,7 +417,7 @@ static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
atomic_inc(&mm->context.flush_count);
if (cpumask_equal(&mm->context.cpu_attach_mask,
cpumask_of(smp_processor_id()))) {
pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
set_pmd(pmdp, set_pmd_bit(*pmdp, __pgprot(_SEGMENT_ENTRY_INVALID)));
mm->context.flush_mm = 1;
if (mm_has_pgste(mm))
gmap_pmdp_invalidate(mm, addr);
@@ -469,7 +469,7 @@ pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
preempt_disable();
old = pmdp_flush_direct(mm, addr, pmdp);
*pmdp = new;
set_pmd(pmdp, new);
preempt_enable();
return old;
}
@@ -482,7 +482,7 @@ pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr,
preempt_disable();
old = pmdp_flush_lazy(mm, addr, pmdp);
*pmdp = new;
set_pmd(pmdp, new);
preempt_enable();
return old;
}
@@ -539,7 +539,7 @@ pud_t pudp_xchg_direct(struct mm_struct *mm, unsigned long addr,
preempt_disable();
old = pudp_flush_direct(mm, addr, pudp);
*pudp = new;
set_pud(pudp, new);
preempt_enable();
return old;
}
@@ -579,9 +579,9 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
list_del(lh);
}
ptep = (pte_t *) pgtable;
pte_val(*ptep) = _PAGE_INVALID;
set_pte(ptep, __pte(_PAGE_INVALID));
ptep++;
pte_val(*ptep) = _PAGE_INVALID;
set_pte(ptep, __pte(_PAGE_INVALID));
return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -646,12 +646,12 @@ int ptep_force_prot(struct mm_struct *mm, unsigned long addr,
if (prot == PROT_NONE && !pte_i) {
ptep_flush_direct(mm, addr, ptep, nodat);
pgste = pgste_update_all(entry, pgste, mm);
pte_val(entry) |= _PAGE_INVALID;
entry = set_pte_bit(entry, __pgprot(_PAGE_INVALID));
}
if (prot == PROT_READ && !pte_p) {
ptep_flush_direct(mm, addr, ptep, nodat);
pte_val(entry) &= ~_PAGE_INVALID;
pte_val(entry) |= _PAGE_PROTECT;
entry = clear_pte_bit(entry, __pgprot(_PAGE_INVALID));
entry = set_pte_bit(entry, __pgprot(_PAGE_PROTECT));
}
pgste_val(pgste) |= bit;
pgste = pgste_set_pte(ptep, pgste, entry);
@@ -675,8 +675,8 @@ int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
!(pte_val(pte) & _PAGE_PROTECT))) {
pgste_val(spgste) |= PGSTE_VSIE_BIT;
tpgste = pgste_get_lock(tptep);
pte_val(tpte) = (pte_val(spte) & PAGE_MASK) |
(pte_val(pte) & _PAGE_PROTECT);
tpte = __pte((pte_val(spte) & PAGE_MASK) |
(pte_val(pte) & _PAGE_PROTECT));
/* don't touch the storage key - it belongs to parent pgste */
tpgste = pgste_set_pte(tptep, tpgste, tpte);
pgste_set_unlock(tptep, tpgste);
@@ -773,10 +773,10 @@ bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long addr,
nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
ptep_ipte_global(mm, addr, ptep, nodat);
if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
pte_val(pte) |= _PAGE_PROTECT;
pte = set_pte_bit(pte, __pgprot(_PAGE_PROTECT));
else
pte_val(pte) |= _PAGE_INVALID;
*ptep = pte;
pte = set_pte_bit(pte, __pgprot(_PAGE_INVALID));
set_pte(ptep, pte);
}
pgste_set_unlock(ptep, pgste);
return dirty;


@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2006
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
#include <linux/memory_hotplug.h>
@@ -175,9 +174,9 @@ static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr,
if (!new_page)
goto out;
pte_val(*pte) = __pa(new_page) | prot;
set_pte(pte, __pte(__pa(new_page) | prot));
} else {
pte_val(*pte) = __pa(addr) | prot;
set_pte(pte, __pte(__pa(addr) | prot));
}
} else {
continue;
@@ -243,7 +242,7 @@ static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
IS_ALIGNED(next, PMD_SIZE) &&
MACHINE_HAS_EDAT1 && addr && direct &&
!debug_pagealloc_enabled()) {
pmd_val(*pmd) = __pa(addr) | prot;
set_pmd(pmd, __pmd(__pa(addr) | prot));
pages++;
continue;
} else if (!direct && MACHINE_HAS_EDAT1) {
@@ -258,7 +257,7 @@ static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
*/
new_page = vmemmap_alloc_block(PMD_SIZE, NUMA_NO_NODE);
if (new_page) {
pmd_val(*pmd) = __pa(new_page) | prot;
set_pmd(pmd, __pmd(__pa(new_page) | prot));
if (!IS_ALIGNED(addr, PMD_SIZE) ||
!IS_ALIGNED(next, PMD_SIZE)) {
vmemmap_use_new_sub_pmd(addr, next);
@@ -339,7 +338,7 @@ static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
IS_ALIGNED(next, PUD_SIZE) &&
MACHINE_HAS_EDAT2 && addr && direct &&
!debug_pagealloc_enabled()) {
pud_val(*pud) = __pa(addr) | prot;
set_pud(pud, __pud(__pa(addr) | prot));
pages++;
continue;
}
@@ -585,13 +584,9 @@ void __init vmem_map_init(void)
__set_memory(__stext_amode31, (__etext_amode31 - __stext_amode31) >> PAGE_SHIFT,
SET_MEMORY_RO | SET_MEMORY_X);
if (nospec_uses_trampoline() || !static_key_enabled(&cpu_has_bear)) {
/*
* Lowcore must be executable for LPSWE
* and expoline trampoline branch instructions.
*/
/* lowcore must be executable for LPSWE */
if (!static_key_enabled(&cpu_has_bear))
set_memory_x(0, 1);
}
pr_info("Write protected kernel read-only data: %luk\n",
(unsigned long)(__end_rodata - _stext) >> 10);


@@ -7,7 +7,6 @@
* - HAVE_MARCH_Z196_FEATURES: laal, laalg
* - HAVE_MARCH_Z10_FEATURES: msfi, cgrj, clgrj
* - HAVE_MARCH_Z9_109_FEATURES: alfi, llilf, clfi, oilf, nilf
* - PACK_STACK
* - 64BIT
*
* Copyright IBM Corp. 2012,2015
@@ -26,6 +25,7 @@
#include <linux/mm.h>
#include <linux/kernel.h>
#include <asm/cacheflush.h>
#include <asm/extable.h>
#include <asm/dis.h>
#include <asm/facility.h>
#include <asm/nospec-branch.h>
@@ -570,15 +570,8 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
if (nospec_uses_trampoline()) {
jit->r14_thunk_ip = jit->prg;
/* Generate __s390_indirect_jump_r14 thunk */
if (test_facility(35)) {
/* exrl %r0,.+10 */
EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
} else {
/* larl %r1,.+14 */
EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
/* ex 0,0(%r1) */
EMIT4_DISP(0x44000000, REG_0, REG_1, 0);
}
/* exrl %r0,.+10 */
EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
/* j . */
EMIT4_PCREL(0xa7f40000, 0);
}
@@ -589,20 +582,12 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
(is_first_pass(jit) || (jit->seen & SEEN_FUNC))) {
jit->r1_thunk_ip = jit->prg;
/* Generate __s390_indirect_jump_r1 thunk */
if (test_facility(35)) {
/* exrl %r0,.+10 */
EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
/* j . */
EMIT4_PCREL(0xa7f40000, 0);
/* br %r1 */
_EMIT2(0x07f1);
} else {
/* ex 0,S390_lowcore.br_r1_trampoline */
EMIT4_DISP(0x44000000, REG_0, REG_0,
offsetof(struct lowcore, br_r1_trampoline));
/* j . */
EMIT4_PCREL(0xa7f40000, 0);
}
/* exrl %r0,.+10 */
EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
/* j . */
EMIT4_PCREL(0xa7f40000, 0);
/* br %r1 */
_EMIT2(0x07f1);
}
}
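
With z10 as the floor, the execute-extensions facility (bit 35) is always present, so only the exrl flavour of the expoline remains and the test_facility(35) fallbacks are deleted. Spelled out, the emitted r1 thunk corresponds to the following inline-asm illustration (hypothetical function name, shown only to make the instruction sequence readable):

static void __used s390_indirect_jump_r1_sketch(void)
{
	asm volatile(
		"	exrl	%%r0,0f\n"	/* execute the br below in place */
		"	j	.\n"		/* pin straight-line speculation */
		"0:	br	%%r1\n"		/* only ever reached via exrl */
		: : : "memory");
}
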
@@ -622,19 +607,10 @@ static int get_probe_mem_regno(const u8 *insn)
return insn[1] >> 4;
}
static bool ex_handler_bpf(const struct exception_table_entry *x,
struct pt_regs *regs)
bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
{
int regno;
u8 *insn;
regs->psw.addr = extable_fixup(x);
insn = (u8 *)__rewind_psw(regs->psw, regs->int_code >> 16);
regno = get_probe_mem_regno(insn);
if (WARN_ON_ONCE(regno < 0))
/* JIT bug - unexpected instruction. */
return false;
regs->gprs[regno] = 0;
regs->gprs[x->data] = 0;
return true;
}
@@ -642,16 +618,17 @@ static int bpf_jit_probe_mem(struct bpf_jit *jit, struct bpf_prog *fp,
int probe_prg, int nop_prg)
{
struct exception_table_entry *ex;
int reg, prg;
s64 delta;
u8 *insn;
int prg;
int i;
if (!fp->aux->extable)
/* Do nothing during early JIT passes. */
return 0;
insn = jit->prg_buf + probe_prg;
if (WARN_ON_ONCE(get_probe_mem_regno(insn) < 0))
reg = get_probe_mem_regno(insn);
if (WARN_ON_ONCE(reg < 0))
/* JIT bug - unexpected probe instruction. */
return -1;
if (WARN_ON_ONCE(probe_prg + insn_length(*insn) != nop_prg))
@@ -678,7 +655,8 @@ static int bpf_jit_probe_mem(struct bpf_jit *jit, struct bpf_prog *fp,
/* JIT bug - landing pad and extable must be close. */
return -1;
ex->fixup = delta;
ex->handler = (u8 *)ex_handler_bpf - (u8 *)&ex->handler;
ex->type = EX_TYPE_BPF;
ex->data = reg;
jit->excnt++;
}
return 0;


@@ -17,6 +17,7 @@
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <asm/asm-extable.h>
#include <asm/pci_debug.h>
#include <asm/pci_clp.h>
#include <asm/clp.h>


@@ -9,6 +9,7 @@
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/jump_label.h>
#include <asm/asm-extable.h>
#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_debug.h>


@@ -99,7 +99,7 @@ static int zpci_clear_directed_irq(struct zpci_dev *zdev)
}
/* Register adapter interruptions */
int zpci_set_irq(struct zpci_dev *zdev)
static int zpci_set_irq(struct zpci_dev *zdev)
{
int rc;
@@ -115,7 +115,7 @@ int zpci_set_irq(struct zpci_dev *zdev)
}
/* Clear adapter interruptions */
int zpci_clear_irq(struct zpci_dev *zdev)
static int zpci_clear_irq(struct zpci_dev *zdev)
{
int rc;


@@ -11,6 +11,7 @@
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <asm/asm-extable.h>
#include <asm/pci_io.h>
#include <asm/pci_debug.h>


@@ -0,0 +1,24 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
# Borrowed from gcc: gcc/testsuite/gcc.target/s390/nobp-section-type-conflict.c
# Checks that we don't get error: section type conflict with put_page.
cat << "END" | $@ -x c - -fno-PIE -march=z10 -mindirect-branch=thunk-extern -mfunction-return=thunk-extern -mindirect-branch-table -O2 -c -o /dev/null
int a;
int b (void);
void c (int);
static void
put_page (void)
{
if (b ())
c (a);
}
__attribute__ ((__section__ (".init.text"), __cold__)) void
d (void)
{
put_page ();
put_page ();
}
END


@@ -27,24 +27,16 @@ static struct facility_def facility_defs[] = {
*/
.name = "FACILITIES_ALS",
.bits = (int[]){
#ifdef CONFIG_HAVE_MARCH_Z900_FEATURES
0, /* N3 instructions */
1, /* z/Arch mode installed */
#endif
#ifdef CONFIG_HAVE_MARCH_Z990_FEATURES
18, /* long displacement facility */
#endif
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
21, /* extended-immediate facility */
25, /* store clock fast */
#endif
#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
27, /* mvcos */
32, /* compare and swap and store */
33, /* compare and swap and store 2 */
34, /* general instructions extension */
35, /* execute extensions */
#endif
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
45, /* fast-BCR, etc. */
#endif
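
Everything removed from the list is implied by the z10 architecture level set, so the #ifdefs collapse. Facilities above the baseline are still probed at runtime; for illustration (hypothetical helper, not from this diff):

static inline bool machine_has_vx(void)
{
	return test_facility(129);	/* facility 129: vector facility */
}
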


@@ -19,7 +19,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <asm/asm-extable.h>
#include <asm/dasd.h>
#include <asm/debug.h>
#include <asm/diag.h>

[Diff truncated: some files were not shown because too many files changed.]