H extension definitions, shared by the KVM and RISC-V trees.
Merge tag 'for-riscv' of https://git.kernel.org/pub/scm/virt/kvm/kvm.git into for-next

H extension definitions, shared by the KVM and RISC-V trees.

* tag 'for-riscv' of ssh://gitolite.kernel.org/pub/scm/virt/kvm/kvm: (301 commits)
  RISC-V: Add hypervisor extension related CSR defines
  KVM: selftests: Ensure all migrations are performed when test is affined
  KVM: x86: Swap order of CPUID entry "index" vs. "significant flag" checks
  ptp: Fix ptp_kvm_getcrosststamp issue for x86 ptp_kvm
  x86/kvmclock: Move this_cpu_pvti into kvmclock.h
  KVM: s390: Function documentation fixes
  selftests: KVM: Don't clobber XMM register when read
  KVM: VMX: Fix a TSX_CTRL_CPUID_CLEAR field mask issue
  selftests: KVM: Explicitly use movq to read xmm registers
  selftests: KVM: Call ucall_init when setting up in rseq_test
  KVM: Remove tlbs_dirty
  KVM: X86: Synchronize the shadow pagetable before link it
  KVM: X86: Fix missed remote tlb flush in rmap_write_protect()
  KVM: x86: nSVM: don't copy virt_ext from vmcb12
  KVM: x86: nSVM: test eax for 4K alignment for GP errata workaround
  KVM: x86: selftests: test simultaneous uses of V_IRQ from L1 and L0
  KVM: x86: nSVM: restore int_vector in svm_clear_vintr
  kvm: x86: Add AMD PMU MSRs to msrs_to_save_all[]
  KVM: x86: nVMX: re-evaluate emulation_required on nested VM exit
  KVM: x86: nVMX: don't fail nested VM entry on invalid guest state if !from_vmentry
  ...
commit 73698660f1
@@ -259,7 +259,7 @@ Configuring the kernel
Compiling the kernel
--------------------

- Make sure you have at least gcc 4.9 available.
- Make sure you have at least gcc 5.1 available.
For more information, refer to :ref:`Documentation/process/changes.rst <changes>`.

Please note that you can still run a.out user programs with this kernel.
@@ -54,7 +54,7 @@ properties:
- const: toradex,apalis_t30
- const: nvidia,tegra30
- items:
- const: toradex,apalis_t30-eval-v1.1
- const: toradex,apalis_t30-v1.1-eval
- const: toradex,apalis_t30-eval
- const: toradex,apalis_t30-v1.1
- const: toradex,apalis_t30
@@ -9,7 +9,7 @@ function block.

All DISP device tree nodes must be siblings to the central MMSYS_CONFIG node.
For a description of the MMSYS_CONFIG binding, see
Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt.
Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.yaml.

DISP function blocks
====================
@@ -19,7 +19,9 @@ properties:
- const: allwinner,sun8i-v3s-emac
- const: allwinner,sun50i-a64-emac
- items:
- const: allwinner,sun50i-h6-emac
- enum:
- allwinner,sun20i-d1-emac
- allwinner,sun50i-h6-emac
- const: allwinner,sun50i-a64-emac

reg:
@@ -0,0 +1,89 @@
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/ufs/samsung,exynos-ufs.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Samsung SoC series UFS host controller Device Tree Bindings

maintainers:
- Alim Akhtar <alim.akhtar@samsung.com>

description: |
Each Samsung UFS host controller instance should have its own node.
This binding define Samsung specific binding other then what is used
in the common ufshcd bindings
[1] Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt

properties:

compatible:
enum:
- samsung,exynos7-ufs

reg:
items:
- description: HCI register
- description: vendor specific register
- description: unipro register
- description: UFS protector register

reg-names:
items:
- const: hci
- const: vs_hci
- const: unipro
- const: ufsp

clocks:
items:
- description: ufs link core clock
- description: unipro main clock

clock-names:
items:
- const: core_clk
- const: sclk_unipro_main

interrupts:
maxItems: 1

phys:
maxItems: 1

phy-names:
const: ufs-phy

required:
- compatible
- reg
- interrupts
- phys
- phy-names
- clocks
- clock-names

additionalProperties: false

examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/exynos7-clk.h>

ufs: ufs@15570000 {
compatible = "samsung,exynos7-ufs";
reg = <0x15570000 0x100>,
<0x15570100 0x100>,
<0x15571000 0x200>,
<0x15572000 0x300>;
reg-names = "hci", "vs_hci", "unipro", "ufsp";
interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clock_fsys1 ACLK_UFS20_LINK>,
<&clock_fsys1 SCLK_UFSUNIPRO20_USER>;
clock-names = "core_clk", "sclk_unipro_main";
pinctrl-names = "default";
pinctrl-0 = <&ufs_rst_n &ufs_refclk_out>;
phys = <&ufs_phy>;
phy-names = "ufs-phy";
};
...
@@ -296,7 +296,7 @@ not available.
Device Tree bindings and board design
=====================================

This section references ``Documentation/devicetree/bindings/net/dsa/sja1105.txt``
This section references ``Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml``
and aims to showcase some potential switch caveats.

RMII PHY role and out-of-band signaling
@@ -29,7 +29,7 @@ you probably needn't concern yourself with pcmciautils.
====================== =============== ========================================
Program                Minimal version Command to check the version
====================== =============== ========================================
GNU C                  4.9             gcc --version
GNU C                  5.1             gcc --version
Clang/LLVM (optional)  10.0.1          clang --version
GNU make               3.81            make --version
binutils               2.23            ld -v
@@ -223,7 +223,7 @@ Linux内核5.x版本 <http://kernel.org/>
编译内核
---------

- 确保您至少有gcc 4.9可用。
- 确保您至少有gcc 5.1可用。
有关更多信息,请参阅 :ref:`Documentation/process/changes.rst <changes>` 。

请注意,您仍然可以使用此内核运行a.out用户程序。
@@ -226,7 +226,7 @@ Linux內核5.x版本 <http://kernel.org/>
編譯內核
---------

- 確保您至少有gcc 4.9可用。
- 確保您至少有gcc 5.1可用。
有關更多信息,請參閱 :ref:`Documentation/process/changes.rst <changes>` 。

請注意,您仍然可以使用此內核運行a.out用戶程序。
@@ -14342,7 +14342,8 @@ F: Documentation/devicetree/bindings/pci/intel,ixp4xx-pci.yaml
F: drivers/pci/controller/pci-ixp4xx.c

PCI DRIVER FOR INTEL VOLUME MANAGEMENT DEVICE (VMD)
M: Jonathan Derrick <jonathan.derrick@intel.com>
M: Nirmal Patel <nirmal.patel@linux.intel.com>
R: Jonathan Derrick <jonathan.derrick@linux.dev>
L: linux-pci@vger.kernel.org
S: Supported
F: drivers/pci/controller/vmd.c
Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 15
SUBLEVEL = 0
EXTRAVERSION = -rc1
EXTRAVERSION = -rc2
NAME = Opossums on Parade

# *DOCUMENTATION*
@@ -849,12 +849,6 @@ endif

DEBUG_CFLAGS :=

# Workaround for GCC versions < 5.0
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61801
ifdef CONFIG_CC_IS_GCC
DEBUG_CFLAGS += $(call cc-ifversion, -lt, 0500, $(call cc-option, -fno-var-tracking-assignments))
endif

ifdef CONFIG_DEBUG_INFO

ifdef CONFIG_DEBUG_INFO_SPLIT
@@ -20,7 +20,7 @@ config ALPHA
select NEED_SG_DMA_LENGTH
select VIRT_TO_BUS
select GENERIC_IRQ_PROBE
select GENERIC_PCI_IOMAP if PCI
select GENERIC_PCI_IOMAP
select AUTO_IRQ_AFFINITY if SMP
select GENERIC_IRQ_SHOW
select ARCH_WANT_IPC_PARSE_VERSION
@@ -199,7 +199,6 @@ config ALPHA_EIGER

config ALPHA_JENSEN
bool "Jensen"
depends on BROKEN
select HAVE_EISA
help
DEC PC 150 AXP (aka Jensen): This is a very old Digital system - one
@@ -16,3 +16,4 @@ extern void __divlu(void);
extern void __remlu(void);
extern void __divqu(void);
extern void __remqu(void);
extern unsigned long __udiv_qrnnd(unsigned long *, unsigned long, unsigned long , unsigned long);
@@ -60,7 +60,7 @@ extern inline void set_hae(unsigned long new_hae)
* Change virtual addresses to physical addresses and vv.
*/
#ifdef USE_48_BIT_KSEG
static inline unsigned long virt_to_phys(void *address)
static inline unsigned long virt_to_phys(volatile void *address)
{
return (unsigned long)address - IDENT_ADDR;
}
@@ -70,7 +70,7 @@ static inline void * phys_to_virt(unsigned long address)
return (void *) (address + IDENT_ADDR);
}
#else
static inline unsigned long virt_to_phys(void *address)
static inline unsigned long virt_to_phys(volatile void *address)
{
unsigned long phys = (unsigned long)address;

@@ -106,7 +106,7 @@ static inline void * phys_to_virt(unsigned long address)
extern unsigned long __direct_map_base;
extern unsigned long __direct_map_size;

static inline unsigned long __deprecated virt_to_bus(void *address)
static inline unsigned long __deprecated virt_to_bus(volatile void *address)
{
unsigned long phys = virt_to_phys(address);
unsigned long bus = phys + __direct_map_base;
@@ -111,18 +111,18 @@ __EXTERN_INLINE void jensen_set_hae(unsigned long addr)
* convinced that I need one of the newer machines.
*/

static inline unsigned int jensen_local_inb(unsigned long addr)
__EXTERN_INLINE unsigned int jensen_local_inb(unsigned long addr)
{
return 0xff & *(vuip)((addr << 9) + EISA_VL82C106);
}

static inline void jensen_local_outb(u8 b, unsigned long addr)
__EXTERN_INLINE void jensen_local_outb(u8 b, unsigned long addr)
{
*(vuip)((addr << 9) + EISA_VL82C106) = b;
mb();
}

static inline unsigned int jensen_bus_inb(unsigned long addr)
__EXTERN_INLINE unsigned int jensen_bus_inb(unsigned long addr)
{
long result;

@@ -131,7 +131,7 @@ static inline unsigned int jensen_bus_inb(unsigned long addr)
return __kernel_extbl(result, addr & 3);
}

static inline void jensen_bus_outb(u8 b, unsigned long addr)
__EXTERN_INLINE void jensen_bus_outb(u8 b, unsigned long addr)
{
jensen_set_hae(0);
*(vuip)((addr << 7) + EISA_IO + 0x00) = b * 0x01010101;
arch/alpha/include/asm/setup.h (new file)
@@ -0,0 +1,43 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __ALPHA_SETUP_H
#define __ALPHA_SETUP_H

#include <uapi/asm/setup.h>

/*
* We leave one page for the initial stack page, and one page for
* the initial process structure. Also, the console eats 3 MB for
* the initial bootloader (one of which we can reclaim later).
*/
#define BOOT_PCB 0x20000000
#define BOOT_ADDR 0x20000000
/* Remove when official MILO sources have ELF support: */
#define BOOT_SIZE (16*1024)

#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
#define KERNEL_START_PHYS 0x300000 /* Old bootloaders hardcoded this. */
#else
#define KERNEL_START_PHYS 0x1000000 /* required: Wildfire/Titan/Marvel */
#endif

#define KERNEL_START (PAGE_OFFSET+KERNEL_START_PHYS)
#define SWAPPER_PGD KERNEL_START
#define INIT_STACK (PAGE_OFFSET+KERNEL_START_PHYS+0x02000)
#define EMPTY_PGT (PAGE_OFFSET+KERNEL_START_PHYS+0x04000)
#define EMPTY_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x08000)
#define ZERO_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x0A000)

#define START_ADDR (PAGE_OFFSET+KERNEL_START_PHYS+0x10000)

/*
* This is setup by the secondary bootstrap loader. Because
* the zero page is zeroed out as soon as the vm system is
* initialized, we need to copy things out into a more permanent
* place.
*/
#define PARAM ZERO_PGE
#define COMMAND_LINE ((char *)(absolute_pointer(PARAM + 0x0000)))
#define INITRD_START (*(unsigned long *) (PARAM+0x100))
#define INITRD_SIZE (*(unsigned long *) (PARAM+0x108))

#endif
@@ -1,43 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef __ALPHA_SETUP_H
#define __ALPHA_SETUP_H
#ifndef _UAPI__ALPHA_SETUP_H
#define _UAPI__ALPHA_SETUP_H

#define COMMAND_LINE_SIZE 256

/*
* We leave one page for the initial stack page, and one page for
* the initial process structure. Also, the console eats 3 MB for
* the initial bootloader (one of which we can reclaim later).
*/
#define BOOT_PCB 0x20000000
#define BOOT_ADDR 0x20000000
/* Remove when official MILO sources have ELF support: */
#define BOOT_SIZE (16*1024)

#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
#define KERNEL_START_PHYS 0x300000 /* Old bootloaders hardcoded this. */
#else
#define KERNEL_START_PHYS 0x1000000 /* required: Wildfire/Titan/Marvel */
#endif

#define KERNEL_START (PAGE_OFFSET+KERNEL_START_PHYS)
#define SWAPPER_PGD KERNEL_START
#define INIT_STACK (PAGE_OFFSET+KERNEL_START_PHYS+0x02000)
#define EMPTY_PGT (PAGE_OFFSET+KERNEL_START_PHYS+0x04000)
#define EMPTY_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x08000)
#define ZERO_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x0A000)

#define START_ADDR (PAGE_OFFSET+KERNEL_START_PHYS+0x10000)

/*
* This is setup by the secondary bootstrap loader. Because
* the zero page is zeroed out as soon as the vm system is
* initialized, we need to copy things out into a more permanent
* place.
*/
#define PARAM ZERO_PGE
#define COMMAND_LINE ((char*)(PARAM + 0x0000))
#define INITRD_START (*(unsigned long *) (PARAM+0x100))
#define INITRD_SIZE (*(unsigned long *) (PARAM+0x108))

#endif
#endif /* _UAPI__ALPHA_SETUP_H */
@ -7,6 +7,11 @@
|
||||
*
|
||||
* Code supporting the Jensen.
|
||||
*/
|
||||
#define __EXTERN_INLINE
|
||||
#include <asm/io.h>
|
||||
#include <asm/jensen.h>
|
||||
#undef __EXTERN_INLINE
|
||||
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/types.h>
|
||||
@ -17,11 +22,6 @@
|
||||
|
||||
#include <asm/ptrace.h>
|
||||
|
||||
#define __EXTERN_INLINE inline
|
||||
#include <asm/io.h>
|
||||
#include <asm/jensen.h>
|
||||
#undef __EXTERN_INLINE
|
||||
|
||||
#include <asm/dma.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/mmu_context.h>
|
||||
|
@ -14,6 +14,7 @@ ev6-$(CONFIG_ALPHA_EV6) := ev6-
|
||||
ev67-$(CONFIG_ALPHA_EV67) := ev67-
|
||||
|
||||
lib-y = __divqu.o __remqu.o __divlu.o __remlu.o \
|
||||
udiv-qrnnd.o \
|
||||
udelay.o \
|
||||
$(ev6-y)memset.o \
|
||||
$(ev6-y)memcpy.o \
|
||||
|
@ -25,6 +25,7 @@
|
||||
# along with GCC; see the file COPYING. If not, write to the
|
||||
# Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
|
||||
# MA 02111-1307, USA.
|
||||
#include <asm/export.h>
|
||||
|
||||
.set noreorder
|
||||
.set noat
|
||||
@ -161,3 +162,4 @@ $Odd:
|
||||
ret $31,($26),1
|
||||
|
||||
.end __udiv_qrnnd
|
||||
EXPORT_SYMBOL(__udiv_qrnnd)
|
@ -7,4 +7,4 @@ ccflags-y := -w
|
||||
|
||||
obj-$(CONFIG_MATHEMU) += math-emu.o
|
||||
|
||||
math-emu-objs := math.o qrnnd.o
|
||||
math-emu-objs := math.o
|
||||
|
@ -403,5 +403,3 @@ alpha_fp_emul_imprecise (struct pt_regs *regs, unsigned long write_mask)
|
||||
egress:
|
||||
return si_code;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(__udiv_qrnnd);
|
||||
|
@ -628,7 +628,6 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
|
||||
uprobe_notify_resume(regs);
|
||||
} else {
|
||||
tracehook_notify_resume(regs);
|
||||
rseq_handle_notify_resume(NULL, regs);
|
||||
}
|
||||
}
|
||||
local_irq_disable();
|
||||
|
@ -86,7 +86,7 @@ config ARM64
|
||||
select ARCH_SUPPORTS_LTO_CLANG_THIN
|
||||
select ARCH_SUPPORTS_CFI_CLANG
|
||||
select ARCH_SUPPORTS_ATOMIC_RMW
|
||||
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && (GCC_VERSION >= 50000 || CC_IS_CLANG)
|
||||
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
|
||||
select ARCH_SUPPORTS_NUMA_BALANCING
|
||||
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT
|
||||
select ARCH_WANT_DEFAULT_BPF_JIT
|
||||
|
@ -513,7 +513,7 @@ size_t sve_state_size(struct task_struct const *task)
|
||||
void sve_alloc(struct task_struct *task)
|
||||
{
|
||||
if (task->thread.sve_state) {
|
||||
memset(task->thread.sve_state, 0, sve_state_size(current));
|
||||
memset(task->thread.sve_state, 0, sve_state_size(task));
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -18,7 +18,6 @@
|
||||
#include <linux/mman.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/nospec.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/stddef.h>
|
||||
#include <linux/sysctl.h>
|
||||
#include <linux/unistd.h>
|
||||
@ -58,7 +57,7 @@
|
||||
|
||||
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
|
||||
#include <linux/stackprotector.h>
|
||||
unsigned long __stack_chk_guard __read_mostly;
|
||||
unsigned long __stack_chk_guard __ro_after_init;
|
||||
EXPORT_SYMBOL(__stack_chk_guard);
|
||||
#endif
|
||||
|
||||
|
@ -940,10 +940,8 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
|
||||
if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
|
||||
do_signal(regs);
|
||||
|
||||
if (thread_flags & _TIF_NOTIFY_RESUME) {
|
||||
if (thread_flags & _TIF_NOTIFY_RESUME)
|
||||
tracehook_notify_resume(regs);
|
||||
rseq_handle_notify_resume(NULL, regs);
|
||||
}
|
||||
|
||||
if (thread_flags & _TIF_FOREIGN_FPSTATE)
|
||||
fpsimd_restore_current_state();
|
||||
|
@ -54,7 +54,7 @@ $(obj)/kvm_nvhe.tmp.o: $(obj)/hyp.lds $(addprefix $(obj)/,$(hyp-obj)) FORCE
|
||||
# runtime. Because the hypervisor is part of the kernel binary, relocations
|
||||
# produce a kernel VA. We enumerate relocations targeting hyp at build time
|
||||
# and convert the kernel VAs at those positions to hyp VAs.
|
||||
$(obj)/hyp-reloc.S: $(obj)/kvm_nvhe.tmp.o $(obj)/gen-hyprel
|
||||
$(obj)/hyp-reloc.S: $(obj)/kvm_nvhe.tmp.o $(obj)/gen-hyprel FORCE
|
||||
$(call if_changed,hyprel)
|
||||
|
||||
# 5) Compile hyp-reloc.S and link it into the existing partially linked object.
|
||||
|
@ -50,9 +50,6 @@ static struct perf_guest_info_callbacks kvm_guest_cbs = {
|
||||
|
||||
int kvm_perf_init(void)
|
||||
{
|
||||
if (kvm_pmu_probe_pmuver() != ID_AA64DFR0_PMUVER_IMP_DEF && !is_protected_kvm_enabled())
|
||||
static_branch_enable(&kvm_arm_pmu_available);
|
||||
|
||||
return perf_register_guest_info_callbacks(&kvm_guest_cbs);
|
||||
}
|
||||
|
||||
|
@ -740,7 +740,14 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
|
||||
kvm_pmu_create_perf_event(vcpu, select_idx);
|
||||
}
|
||||
|
||||
int kvm_pmu_probe_pmuver(void)
|
||||
void kvm_host_pmu_init(struct arm_pmu *pmu)
|
||||
{
|
||||
if (pmu->pmuver != 0 && pmu->pmuver != ID_AA64DFR0_PMUVER_IMP_DEF &&
|
||||
!kvm_arm_support_pmu_v3() && !is_protected_kvm_enabled())
|
||||
static_branch_enable(&kvm_arm_pmu_available);
|
||||
}
|
||||
|
||||
static int kvm_pmu_probe_pmuver(void)
|
||||
{
|
||||
struct perf_event_attr attr = { };
|
||||
struct perf_event *event;
|
||||
|
@ -260,8 +260,6 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
|
||||
if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
|
||||
do_signal(regs);
|
||||
|
||||
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
|
||||
if (thread_info_flags & _TIF_NOTIFY_RESUME)
|
||||
tracehook_notify_resume(regs);
|
||||
rseq_handle_notify_resume(NULL, regs);
|
||||
}
|
||||
}
|
||||
|
@ -17,21 +17,21 @@
|
||||
* two accesses to memory, which may be undesirable for some devices.
|
||||
*/
|
||||
#define in_8(addr) \
|
||||
({ u8 __v = (*(__force volatile u8 *) (addr)); __v; })
|
||||
({ u8 __v = (*(__force volatile u8 *) (unsigned long)(addr)); __v; })
|
||||
#define in_be16(addr) \
|
||||
({ u16 __v = (*(__force volatile u16 *) (addr)); __v; })
|
||||
({ u16 __v = (*(__force volatile u16 *) (unsigned long)(addr)); __v; })
|
||||
#define in_be32(addr) \
|
||||
({ u32 __v = (*(__force volatile u32 *) (addr)); __v; })
|
||||
({ u32 __v = (*(__force volatile u32 *) (unsigned long)(addr)); __v; })
|
||||
#define in_le16(addr) \
|
||||
({ u16 __v = le16_to_cpu(*(__force volatile __le16 *) (addr)); __v; })
|
||||
({ u16 __v = le16_to_cpu(*(__force volatile __le16 *) (unsigned long)(addr)); __v; })
|
||||
#define in_le32(addr) \
|
||||
({ u32 __v = le32_to_cpu(*(__force volatile __le32 *) (addr)); __v; })
|
||||
({ u32 __v = le32_to_cpu(*(__force volatile __le32 *) (unsigned long)(addr)); __v; })
|
||||
|
||||
#define out_8(addr,b) (void)((*(__force volatile u8 *) (addr)) = (b))
|
||||
#define out_be16(addr,w) (void)((*(__force volatile u16 *) (addr)) = (w))
|
||||
#define out_be32(addr,l) (void)((*(__force volatile u32 *) (addr)) = (l))
|
||||
#define out_le16(addr,w) (void)((*(__force volatile __le16 *) (addr)) = cpu_to_le16(w))
|
||||
#define out_le32(addr,l) (void)((*(__force volatile __le32 *) (addr)) = cpu_to_le32(l))
|
||||
#define out_8(addr,b) (void)((*(__force volatile u8 *) (unsigned long)(addr)) = (b))
|
||||
#define out_be16(addr,w) (void)((*(__force volatile u16 *) (unsigned long)(addr)) = (w))
|
||||
#define out_be32(addr,l) (void)((*(__force volatile u32 *) (unsigned long)(addr)) = (l))
|
||||
#define out_le16(addr,w) (void)((*(__force volatile __le16 *) (unsigned long)(addr)) = cpu_to_le16(w))
|
||||
#define out_le32(addr,l) (void)((*(__force volatile __le32 *) (unsigned long)(addr)) = cpu_to_le32(l))
|
||||
|
||||
#define raw_inb in_8
|
||||
#define raw_inw in_be16
|
||||
|
@ -171,7 +171,6 @@ static int bcd2int (unsigned char b)
|
||||
|
||||
int mvme147_hwclk(int op, struct rtc_time *t)
|
||||
{
|
||||
#warning check me!
|
||||
if (!op) {
|
||||
m147_rtc->ctrl = RTC_READ;
|
||||
t->tm_year = bcd2int (m147_rtc->bcd_year);
|
||||
@ -183,6 +182,9 @@ int mvme147_hwclk(int op, struct rtc_time *t)
|
||||
m147_rtc->ctrl = 0;
|
||||
if (t->tm_year < 70)
|
||||
t->tm_year += 100;
|
||||
} else {
|
||||
/* FIXME Setting the time is not yet supported */
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -436,7 +436,6 @@ int bcd2int (unsigned char b)
|
||||
|
||||
int mvme16x_hwclk(int op, struct rtc_time *t)
|
||||
{
|
||||
#warning check me!
|
||||
if (!op) {
|
||||
rtc->ctrl = RTC_READ;
|
||||
t->tm_year = bcd2int (rtc->bcd_year);
|
||||
@ -448,6 +447,9 @@ int mvme16x_hwclk(int op, struct rtc_time *t)
|
||||
rtc->ctrl = 0;
|
||||
if (t->tm_year < 70)
|
||||
t->tm_year += 100;
|
||||
} else {
|
||||
/* FIXME Setting the time is not yet supported */
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -906,10 +906,8 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
|
||||
if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
|
||||
do_signal(regs);
|
||||
|
||||
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
|
||||
if (thread_info_flags & _TIF_NOTIFY_RESUME)
|
||||
tracehook_notify_resume(regs);
|
||||
rseq_handle_notify_resume(NULL, regs);
|
||||
}
|
||||
|
||||
user_enter();
|
||||
}
|
||||
|
@ -184,7 +184,7 @@ extern int npmem_ranges;
|
||||
#include <asm-generic/getorder.h>
|
||||
#include <asm/pdc.h>
|
||||
|
||||
#define PAGE0 ((struct zeropage *)__PAGE_OFFSET)
|
||||
#define PAGE0 ((struct zeropage *)absolute_pointer(__PAGE_OFFSET))
|
||||
|
||||
/* DEFINITION OF THE ZERO-PAGE (PAG0) */
|
||||
/* based on work by Jason Eckhardt (jason@equator.com) */
|
||||
|
@ -513,12 +513,15 @@ void ioport_unmap(void __iomem *addr)
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PCI
|
||||
void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
|
||||
{
|
||||
if (!INDIRECT_ADDR(addr)) {
|
||||
iounmap(addr);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(pci_iounmap);
|
||||
#endif
|
||||
|
||||
EXPORT_SYMBOL(ioread8);
|
||||
EXPORT_SYMBOL(ioread16);
|
||||
@ -544,4 +547,3 @@ EXPORT_SYMBOL(iowrite16_rep);
|
||||
EXPORT_SYMBOL(iowrite32_rep);
|
||||
EXPORT_SYMBOL(ioport_map);
|
||||
EXPORT_SYMBOL(ioport_unmap);
|
||||
EXPORT_SYMBOL(pci_iounmap);
|
||||
|
@ -35,7 +35,6 @@ endif
|
||||
BOOTCFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
|
||||
-fno-strict-aliasing -O2 -msoft-float -mno-altivec -mno-vsx \
|
||||
-pipe -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \
|
||||
-include $(srctree)/include/linux/compiler_attributes.h \
|
||||
$(LINUXINCLUDE)
|
||||
|
||||
ifdef CONFIG_PPC64_BOOT_WRAPPER
|
||||
@ -70,6 +69,7 @@ ifeq ($(call cc-option-yn, -fstack-protector),y)
|
||||
BOOTCFLAGS += -fno-stack-protector
|
||||
endif
|
||||
|
||||
BOOTCFLAGS += -include $(srctree)/include/linux/compiler_attributes.h
|
||||
BOOTCFLAGS += -I$(objtree)/$(obj) -I$(srctree)/$(obj)
|
||||
|
||||
DTC_FLAGS ?= -p 1024
|
||||
|
@ -12,16 +12,6 @@
|
||||
# define ASM_CONST(x) __ASM_CONST(x)
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Inline assembly memory constraint
|
||||
*
|
||||
* GCC 4.9 doesn't properly handle pre update memory constraint "m<>"
|
||||
*
|
||||
*/
|
||||
#if defined(GCC_VERSION) && GCC_VERSION < 50000
|
||||
#define UPD_CONSTR ""
|
||||
#else
|
||||
#define UPD_CONSTR "<>"
|
||||
#endif
|
||||
|
||||
#endif /* _ASM_POWERPC_ASM_CONST_H */
|
||||
|
@ -18,6 +18,7 @@
|
||||
#include <asm/switch_to.h>
|
||||
#include <asm/syscall.h>
|
||||
#include <asm/time.h>
|
||||
#include <asm/tm.h>
|
||||
#include <asm/unistd.h>
|
||||
|
||||
#if defined(CONFIG_PPC_ADV_DEBUG_REGS) && defined(CONFIG_PPC32)
|
||||
@ -136,6 +137,48 @@ notrace long system_call_exception(long r3, long r4, long r5,
|
||||
*/
|
||||
irq_soft_mask_regs_set_state(regs, IRQS_ENABLED);
|
||||
|
||||
/*
|
||||
* If system call is called with TM active, set _TIF_RESTOREALL to
|
||||
* prevent RFSCV being used to return to userspace, because POWER9
|
||||
* TM implementation has problems with this instruction returning to
|
||||
* transactional state. Final register values are not relevant because
|
||||
* the transaction will be aborted upon return anyway. Or in the case
|
||||
* of unsupported_scv SIGILL fault, the return state does not much
|
||||
* matter because it's an edge case.
|
||||
*/
|
||||
if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
|
||||
unlikely(MSR_TM_TRANSACTIONAL(regs->msr)))
|
||||
current_thread_info()->flags |= _TIF_RESTOREALL;
|
||||
|
||||
/*
|
||||
* If the system call was made with a transaction active, doom it and
|
||||
* return without performing the system call. Unless it was an
|
||||
* unsupported scv vector, in which case it's treated like an illegal
|
||||
* instruction.
|
||||
*/
|
||||
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
||||
if (unlikely(MSR_TM_TRANSACTIONAL(regs->msr)) &&
|
||||
!trap_is_unsupported_scv(regs)) {
|
||||
/* Enable TM in the kernel, and disable EE (for scv) */
|
||||
hard_irq_disable();
|
||||
mtmsr(mfmsr() | MSR_TM);
|
||||
|
||||
/* tabort, this dooms the transaction, nothing else */
|
||||
asm volatile(".long 0x7c00071d | ((%0) << 16)"
|
||||
:: "r"(TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT));
|
||||
|
||||
/*
|
||||
* Userspace will never see the return value. Execution will
|
||||
* resume after the tbegin. of the aborted transaction with the
|
||||
* checkpointed register state. A context switch could occur
|
||||
* or signal delivered to the process before resuming the
|
||||
* doomed transaction context, but that should all be handled
|
||||
* as expected.
|
||||
*/
|
||||
return -ENOSYS;
|
||||
}
|
||||
#endif // CONFIG_PPC_TRANSACTIONAL_MEM
|
||||
|
||||
local_irq_enable();
|
||||
|
||||
if (unlikely(current_thread_info()->flags & _TIF_SYSCALL_DOTRACE)) {
|
||||
|
@ -12,7 +12,6 @@
|
||||
#include <asm/mmu.h>
|
||||
#include <asm/ppc_asm.h>
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/tm.h>
|
||||
|
||||
.section ".toc","aw"
|
||||
SYS_CALL_TABLE:
|
||||
@ -55,12 +54,6 @@ COMPAT_SYS_CALL_TABLE:
|
||||
.globl system_call_vectored_\name
|
||||
system_call_vectored_\name:
|
||||
_ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
|
||||
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
||||
BEGIN_FTR_SECTION
|
||||
extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
|
||||
bne tabort_syscall
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_TM)
|
||||
#endif
|
||||
SCV_INTERRUPT_TO_KERNEL
|
||||
mr r10,r1
|
||||
ld r1,PACAKSAVE(r13)
|
||||
@ -247,12 +240,6 @@ _ASM_NOKPROBE_SYMBOL(system_call_common_real)
|
||||
.globl system_call_common
|
||||
system_call_common:
|
||||
_ASM_NOKPROBE_SYMBOL(system_call_common)
|
||||
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
||||
BEGIN_FTR_SECTION
|
||||
extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
|
||||
bne tabort_syscall
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_TM)
|
||||
#endif
|
||||
mr r10,r1
|
||||
ld r1,PACAKSAVE(r13)
|
||||
std r10,0(r1)
|
||||
@ -425,34 +412,6 @@ SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b)
|
||||
RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
||||
tabort_syscall:
|
||||
_ASM_NOKPROBE_SYMBOL(tabort_syscall)
|
||||
/* Firstly we need to enable TM in the kernel */
|
||||
mfmsr r10
|
||||
li r9, 1
|
||||
rldimi r10, r9, MSR_TM_LG, 63-MSR_TM_LG
|
||||
mtmsrd r10, 0
|
||||
|
||||
/* tabort, this dooms the transaction, nothing else */
|
||||
li r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
|
||||
TABORT(R9)
|
||||
|
||||
/*
|
||||
* Return directly to userspace. We have corrupted user register state,
|
||||
* but userspace will never see that register state. Execution will
|
||||
* resume after the tbegin of the aborted transaction with the
|
||||
* checkpointed register state.
|
||||
*/
|
||||
li r9, MSR_RI
|
||||
andc r10, r10, r9
|
||||
mtmsrd r10, 1
|
||||
mtspr SPRN_SRR0, r11
|
||||
mtspr SPRN_SRR1, r12
|
||||
RFI_TO_USER
|
||||
b . /* prevent speculative execution */
|
||||
#endif
|
||||
|
||||
/*
|
||||
* If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
|
||||
* touched, no exit work created, then this can be used.
|
||||
|
@ -249,6 +249,7 @@ void machine_check_queue_event(void)
|
||||
{
|
||||
int index;
|
||||
struct machine_check_event evt;
|
||||
unsigned long msr;
|
||||
|
||||
if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
|
||||
return;
|
||||
@ -262,8 +263,20 @@ void machine_check_queue_event(void)
|
||||
memcpy(&local_paca->mce_info->mce_event_queue[index],
|
||||
&evt, sizeof(evt));
|
||||
|
||||
/* Queue irq work to process this event later. */
|
||||
/*
|
||||
* Queue irq work to process this event later. Before
|
||||
* queuing the work enable translation for non radix LPAR,
|
||||
* as irq_work_queue may try to access memory outside RMO
|
||||
* region.
|
||||
*/
|
||||
if (!radix_enabled() && firmware_has_feature(FW_FEATURE_LPAR)) {
|
||||
msr = mfmsr();
|
||||
mtmsr(msr | MSR_IR | MSR_DR);
|
||||
irq_work_queue(&mce_event_process_work);
|
||||
mtmsr(msr);
|
||||
} else {
|
||||
irq_work_queue(&mce_event_process_work);
|
||||
}
|
||||
}
|
||||
|
||||
void mce_common_process_ue(struct pt_regs *regs,
|
||||
|
@ -293,10 +293,8 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
|
||||
do_signal(current);
|
||||
}
|
||||
|
||||
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
|
||||
if (thread_info_flags & _TIF_NOTIFY_RESUME)
|
||||
tracehook_notify_resume(regs);
|
||||
rseq_handle_notify_resume(NULL, regs);
|
||||
}
|
||||
}
|
||||
|
||||
static unsigned long get_tm_stackpointer(struct task_struct *tsk)
|
||||
|
@ -2536,7 +2536,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
|
||||
/* The following code handles the fake_suspend = 1 case */
|
||||
mflr r0
|
||||
std r0, PPC_LR_STKOFF(r1)
|
||||
stdu r1, -PPC_MIN_STKFRM(r1)
|
||||
stdu r1, -TM_FRAME_SIZE(r1)
|
||||
|
||||
/* Turn on TM. */
|
||||
mfmsr r8
|
||||
@ -2551,10 +2551,42 @@ BEGIN_FTR_SECTION
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
|
||||
nop
|
||||
|
||||
/*
|
||||
* It's possible that treclaim. may modify registers, if we have lost
|
||||
* track of fake-suspend state in the guest due to it using rfscv.
|
||||
* Save and restore registers in case this occurs.
|
||||
*/
|
||||
mfspr r3, SPRN_DSCR
|
||||
mfspr r4, SPRN_XER
|
||||
mfspr r5, SPRN_AMR
|
||||
/* SPRN_TAR would need to be saved here if the kernel ever used it */
|
||||
mfcr r12
|
||||
SAVE_NVGPRS(r1)
|
||||
SAVE_GPR(2, r1)
|
||||
SAVE_GPR(3, r1)
|
||||
SAVE_GPR(4, r1)
|
||||
SAVE_GPR(5, r1)
|
||||
stw r12, 8(r1)
|
||||
std r1, HSTATE_HOST_R1(r13)
|
||||
|
||||
/* We have to treclaim here because that's the only way to do S->N */
|
||||
li r3, TM_CAUSE_KVM_RESCHED
|
||||
TRECLAIM(R3)
|
||||
|
||||
GET_PACA(r13)
|
||||
ld r1, HSTATE_HOST_R1(r13)
|
||||
REST_GPR(2, r1)
|
||||
REST_GPR(3, r1)
|
||||
REST_GPR(4, r1)
|
||||
REST_GPR(5, r1)
|
||||
lwz r12, 8(r1)
|
||||
REST_NVGPRS(r1)
|
||||
mtspr SPRN_DSCR, r3
|
||||
mtspr SPRN_XER, r4
|
||||
mtspr SPRN_AMR, r5
|
||||
mtcr r12
|
||||
HMT_MEDIUM
|
||||
|
||||
/*
|
||||
* We were in fake suspend, so we are not going to save the
|
||||
* register state as the guest checkpointed state (since
|
||||
@ -2582,7 +2614,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
|
||||
std r5, VCPU_TFHAR(r9)
|
||||
std r6, VCPU_TFIAR(r9)
|
||||
|
||||
addi r1, r1, PPC_MIN_STKFRM
|
||||
addi r1, r1, TM_FRAME_SIZE
|
||||
ld r0, PPC_LR_STKOFF(r1)
|
||||
mtlr r0
|
||||
blr
|
||||
|
@ -348,9 +348,9 @@ static int xics_host_map(struct irq_domain *domain, unsigned int virq,
|
||||
if (xics_ics->check(xics_ics, hwirq))
|
||||
return -EINVAL;
|
||||
|
||||
/* No chip data for the XICS domain */
|
||||
/* Let the ICS be the chip data for the XICS domain. For ICS native */
|
||||
irq_domain_set_info(domain, virq, hwirq, xics_ics->chip,
|
||||
NULL, handle_fasteoi_irq, NULL, NULL);
|
||||
xics_ics, handle_fasteoi_irq, NULL, NULL);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -237,7 +237,7 @@ config ARCH_RV32I
|
||||
config ARCH_RV64I
|
||||
bool "RV64I"
|
||||
select 64BIT
|
||||
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && GCC_VERSION >= 50000
|
||||
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
|
||||
select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && MMU && $(cc-option,-fpatchable-function-entry=8)
|
||||
select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
|
||||
select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
|
||||
|
@ -58,22 +58,32 @@
|
||||
|
||||
/* Interrupt causes (minus the high bit) */
|
||||
#define IRQ_S_SOFT 1
|
||||
#define IRQ_VS_SOFT 2
|
||||
#define IRQ_M_SOFT 3
|
||||
#define IRQ_S_TIMER 5
|
||||
#define IRQ_VS_TIMER 6
|
||||
#define IRQ_M_TIMER 7
|
||||
#define IRQ_S_EXT 9
|
||||
#define IRQ_VS_EXT 10
|
||||
#define IRQ_M_EXT 11
|
||||
|
||||
/* Exception causes */
|
||||
#define EXC_INST_MISALIGNED 0
|
||||
#define EXC_INST_ACCESS 1
|
||||
#define EXC_INST_ILLEGAL 2
|
||||
#define EXC_BREAKPOINT 3
|
||||
#define EXC_LOAD_ACCESS 5
|
||||
#define EXC_STORE_ACCESS 7
|
||||
#define EXC_SYSCALL 8
|
||||
#define EXC_HYPERVISOR_SYSCALL 9
|
||||
#define EXC_SUPERVISOR_SYSCALL 10
|
||||
#define EXC_INST_PAGE_FAULT 12
|
||||
#define EXC_LOAD_PAGE_FAULT 13
|
||||
#define EXC_STORE_PAGE_FAULT 15
|
||||
#define EXC_INST_GUEST_PAGE_FAULT 20
|
||||
#define EXC_LOAD_GUEST_PAGE_FAULT 21
|
||||
#define EXC_VIRTUAL_INST_FAULT 22
|
||||
#define EXC_STORE_GUEST_PAGE_FAULT 23
|
||||
|
||||
/* PMP configuration */
|
||||
#define PMP_R 0x01
|
||||
@ -85,6 +95,58 @@
|
||||
#define PMP_A_NAPOT 0x18
|
||||
#define PMP_L 0x80
|
||||
|
||||
/* HSTATUS flags */
|
||||
#ifdef CONFIG_64BIT
|
||||
#define HSTATUS_VSXL _AC(0x300000000, UL)
|
||||
#define HSTATUS_VSXL_SHIFT 32
|
||||
#endif
|
||||
#define HSTATUS_VTSR _AC(0x00400000, UL)
|
||||
#define HSTATUS_VTW _AC(0x00200000, UL)
|
||||
#define HSTATUS_VTVM _AC(0x00100000, UL)
|
||||
#define HSTATUS_VGEIN _AC(0x0003f000, UL)
|
||||
#define HSTATUS_VGEIN_SHIFT 12
|
||||
#define HSTATUS_HU _AC(0x00000200, UL)
|
||||
#define HSTATUS_SPVP _AC(0x00000100, UL)
|
||||
#define HSTATUS_SPV _AC(0x00000080, UL)
|
||||
#define HSTATUS_GVA _AC(0x00000040, UL)
|
||||
#define HSTATUS_VSBE _AC(0x00000020, UL)
|
||||
|
||||
/* HGATP flags */
|
||||
#define HGATP_MODE_OFF _AC(0, UL)
|
||||
#define HGATP_MODE_SV32X4 _AC(1, UL)
|
||||
#define HGATP_MODE_SV39X4 _AC(8, UL)
|
||||
#define HGATP_MODE_SV48X4 _AC(9, UL)
|
||||
|
||||
#define HGATP32_MODE_SHIFT 31
|
||||
#define HGATP32_VMID_SHIFT 22
|
||||
#define HGATP32_VMID_MASK _AC(0x1FC00000, UL)
|
||||
#define HGATP32_PPN _AC(0x003FFFFF, UL)
|
||||
|
||||
#define HGATP64_MODE_SHIFT 60
|
||||
#define HGATP64_VMID_SHIFT 44
|
||||
#define HGATP64_VMID_MASK _AC(0x03FFF00000000000, UL)
|
||||
#define HGATP64_PPN _AC(0x00000FFFFFFFFFFF, UL)
|
||||
|
||||
#define HGATP_PAGE_SHIFT 12
|
||||
|
||||
#ifdef CONFIG_64BIT
|
||||
#define HGATP_PPN HGATP64_PPN
|
||||
#define HGATP_VMID_SHIFT HGATP64_VMID_SHIFT
|
||||
#define HGATP_VMID_MASK HGATP64_VMID_MASK
|
||||
#define HGATP_MODE_SHIFT HGATP64_MODE_SHIFT
|
||||
#else
|
||||
#define HGATP_PPN HGATP32_PPN
|
||||
#define HGATP_VMID_SHIFT HGATP32_VMID_SHIFT
|
||||
#define HGATP_VMID_MASK HGATP32_VMID_MASK
|
||||
#define HGATP_MODE_SHIFT HGATP32_MODE_SHIFT
|
||||
#endif
|
||||
|
||||
/* VSIP & HVIP relation */
|
||||
#define VSIP_TO_HVIP_SHIFT (IRQ_VS_SOFT - IRQ_S_SOFT)
|
||||
#define VSIP_VALID_MASK ((_AC(1, UL) << IRQ_S_SOFT) | \
|
||||
(_AC(1, UL) << IRQ_S_TIMER) | \
|
||||
(_AC(1, UL) << IRQ_S_EXT))
|
||||
|
||||
/* symbolic CSR names: */
|
||||
#define CSR_CYCLE 0xc00
|
||||
#define CSR_TIME 0xc01
|
||||
@ -104,6 +166,31 @@
|
||||
#define CSR_SIP 0x144
|
||||
#define CSR_SATP 0x180
|
||||
|
||||
#define CSR_VSSTATUS 0x200
|
||||
#define CSR_VSIE 0x204
|
||||
#define CSR_VSTVEC 0x205
|
||||
#define CSR_VSSCRATCH 0x240
|
||||
#define CSR_VSEPC 0x241
|
||||
#define CSR_VSCAUSE 0x242
|
||||
#define CSR_VSTVAL 0x243
|
||||
#define CSR_VSIP 0x244
|
||||
#define CSR_VSATP 0x280
|
||||
|
||||
#define CSR_HSTATUS 0x600
|
||||
#define CSR_HEDELEG 0x602
|
||||
#define CSR_HIDELEG 0x603
|
||||
#define CSR_HIE 0x604
|
||||
#define CSR_HTIMEDELTA 0x605
|
||||
#define CSR_HCOUNTEREN 0x606
|
||||
#define CSR_HGEIE 0x607
|
||||
#define CSR_HTIMEDELTAH 0x615
|
||||
#define CSR_HTVAL 0x643
|
||||
#define CSR_HIP 0x644
|
||||
#define CSR_HVIP 0x645
|
||||
#define CSR_HTINST 0x64a
|
||||
#define CSR_HGATP 0x680
|
||||
#define CSR_HGEIP 0xe12
|
||||
|
||||
#define CSR_MSTATUS 0x300
|
||||
#define CSR_MISA 0x301
|
||||
#define CSR_MIE 0x304
|
||||
|
@ -685,16 +685,6 @@ config STACK_GUARD
|
||||
The minimum size for the stack guard should be 256 for 31 bit and
|
||||
512 for 64 bit.
|
||||
|
||||
config WARN_DYNAMIC_STACK
|
||||
def_bool n
|
||||
prompt "Emit compiler warnings for function with dynamic stack usage"
|
||||
help
|
||||
This option enables the compiler option -mwarn-dynamicstack. If the
|
||||
compiler supports this options generates warnings for functions
|
||||
that dynamically allocate stack space using alloca.
|
||||
|
||||
Say N if you are unsure.
|
||||
|
||||
endmenu
|
||||
|
||||
menu "I/O subsystem"
|
||||
|
@ -85,13 +85,6 @@ cflags-$(CONFIG_CHECK_STACK) += -mstack-guard=$(CONFIG_STACK_GUARD)
|
||||
endif
|
||||
endif
|
||||
|
||||
ifdef CONFIG_WARN_DYNAMIC_STACK
|
||||
ifneq ($(call cc-option,-mwarn-dynamicstack),)
|
||||
KBUILD_CFLAGS += -mwarn-dynamicstack
|
||||
KBUILD_CFLAGS_DECOMPRESSOR += -mwarn-dynamicstack
|
||||
endif
|
||||
endif
|
||||
|
||||
ifdef CONFIG_EXPOLINE
|
||||
ifneq ($(call cc-option,$(CC_FLAGS_MARCH) -mindirect-branch=thunk),)
|
||||
CC_FLAGS_EXPOLINE := -mindirect-branch=thunk
|
||||
|
@ -10,6 +10,7 @@ CONFIG_BPF_JIT=y
|
||||
CONFIG_BPF_JIT_ALWAYS_ON=y
|
||||
CONFIG_BPF_LSM=y
|
||||
CONFIG_PREEMPT=y
|
||||
CONFIG_SCHED_CORE=y
|
||||
CONFIG_BSD_PROCESS_ACCT=y
|
||||
CONFIG_BSD_PROCESS_ACCT_V3=y
|
||||
CONFIG_TASKSTATS=y
|
||||
@ -503,6 +504,7 @@ CONFIG_NLMON=m
|
||||
# CONFIG_NET_VENDOR_HUAWEI is not set
|
||||
# CONFIG_NET_VENDOR_INTEL is not set
|
||||
# CONFIG_NET_VENDOR_MICROSOFT is not set
|
||||
# CONFIG_NET_VENDOR_LITEX is not set
|
||||
# CONFIG_NET_VENDOR_MARVELL is not set
|
||||
CONFIG_MLX4_EN=m
|
||||
CONFIG_MLX5_CORE=m
|
||||
@ -661,7 +663,6 @@ CONFIG_NFSD_V3_ACL=y
|
||||
CONFIG_NFSD_V4=y
|
||||
CONFIG_NFSD_V4_SECURITY_LABEL=y
|
||||
CONFIG_CIFS=m
|
||||
CONFIG_CIFS_WEAK_PW_HASH=y
|
||||
CONFIG_CIFS_UPCALL=y
|
||||
CONFIG_CIFS_XATTR=y
|
||||
CONFIG_CIFS_POSIX=y
|
||||
@ -720,6 +721,8 @@ CONFIG_CRYPTO_XCBC=m
|
||||
CONFIG_CRYPTO_VMAC=m
|
||||
CONFIG_CRYPTO_CRC32=m
|
||||
CONFIG_CRYPTO_BLAKE2S=m
|
||||
CONFIG_CRYPTO_MD4=m
|
||||
CONFIG_CRYPTO_MD5=y
|
||||
CONFIG_CRYPTO_MICHAEL_MIC=m
|
||||
CONFIG_CRYPTO_RMD160=m
|
||||
CONFIG_CRYPTO_SHA3=m
|
||||
@ -774,7 +777,6 @@ CONFIG_RANDOM32_SELFTEST=y
|
||||
CONFIG_DMA_CMA=y
|
||||
CONFIG_CMA_SIZE_MBYTES=0
|
||||
CONFIG_DMA_API_DEBUG=y
|
||||
CONFIG_STRING_SELFTEST=y
|
||||
CONFIG_PRINTK_TIME=y
|
||||
CONFIG_DYNAMIC_DEBUG=y
|
||||
CONFIG_DEBUG_INFO=y
|
||||
@ -853,12 +855,12 @@ CONFIG_FAIL_FUNCTION=y
|
||||
CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
|
||||
CONFIG_LKDTM=m
|
||||
CONFIG_TEST_MIN_HEAP=y
|
||||
CONFIG_TEST_SORT=y
|
||||
CONFIG_KPROBES_SANITY_TEST=y
|
||||
CONFIG_RBTREE_TEST=y
|
||||
CONFIG_INTERVAL_TREE_TEST=m
|
||||
CONFIG_PERCPU_TEST=m
|
||||
CONFIG_ATOMIC64_SELFTEST=y
|
||||
CONFIG_STRING_SELFTEST=y
|
||||
CONFIG_TEST_BITOPS=m
|
||||
CONFIG_TEST_BPF=m
|
||||
CONFIG_TEST_LIVEPATCH=m
|
||||
|
@ -8,6 +8,7 @@ CONFIG_BPF_SYSCALL=y
|
||||
CONFIG_BPF_JIT=y
|
||||
CONFIG_BPF_JIT_ALWAYS_ON=y
|
||||
CONFIG_BPF_LSM=y
|
||||
CONFIG_SCHED_CORE=y
|
||||
CONFIG_BSD_PROCESS_ACCT=y
|
||||
CONFIG_BSD_PROCESS_ACCT_V3=y
|
||||
CONFIG_TASKSTATS=y
|
||||
@ -494,6 +495,7 @@ CONFIG_NLMON=m
|
||||
# CONFIG_NET_VENDOR_HUAWEI is not set
|
||||
# CONFIG_NET_VENDOR_INTEL is not set
|
||||
# CONFIG_NET_VENDOR_MICROSOFT is not set
|
||||
# CONFIG_NET_VENDOR_LITEX is not set
|
||||
# CONFIG_NET_VENDOR_MARVELL is not set
|
||||
CONFIG_MLX4_EN=m
|
||||
CONFIG_MLX5_CORE=m
|
||||
@ -648,7 +650,6 @@ CONFIG_NFSD_V3_ACL=y
|
||||
CONFIG_NFSD_V4=y
|
||||
CONFIG_NFSD_V4_SECURITY_LABEL=y
|
||||
CONFIG_CIFS=m
|
||||
CONFIG_CIFS_WEAK_PW_HASH=y
|
||||
CONFIG_CIFS_UPCALL=y
|
||||
CONFIG_CIFS_XATTR=y
|
||||
CONFIG_CIFS_POSIX=y
|
||||
@ -708,6 +709,8 @@ CONFIG_CRYPTO_XCBC=m
|
||||
CONFIG_CRYPTO_VMAC=m
|
||||
CONFIG_CRYPTO_CRC32=m
|
||||
CONFIG_CRYPTO_BLAKE2S=m
|
||||
CONFIG_CRYPTO_MD4=m
|
||||
CONFIG_CRYPTO_MD5=y
|
||||
CONFIG_CRYPTO_MICHAEL_MIC=m
|
||||
CONFIG_CRYPTO_RMD160=m
|
||||
CONFIG_CRYPTO_SHA3=m
|
||||
|
@ -894,6 +894,11 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
|
||||
|
||||
/**
|
||||
* guest_translate_address - translate guest logical into guest absolute address
|
||||
* @vcpu: virtual cpu
|
||||
* @gva: Guest virtual address
|
||||
* @ar: Access register
|
||||
* @gpa: Guest physical address
|
||||
* @mode: Translation access mode
|
||||
*
|
||||
* Parameter semantics are the same as the ones from guest_translate.
|
||||
* The memory contents at the guest address are not changed.
|
||||
@ -934,6 +939,11 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
|
||||
|
||||
/**
|
||||
* check_gva_range - test a range of guest virtual addresses for accessibility
|
||||
* @vcpu: virtual cpu
|
||||
* @gva: Guest virtual address
|
||||
* @ar: Access register
|
||||
* @length: Length of test range
|
||||
* @mode: Translation access mode
|
||||
*/
|
||||
int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
|
||||
unsigned long length, enum gacc_mode mode)
|
||||
@ -956,6 +966,7 @@ int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
|
||||
|
||||
/**
|
||||
* kvm_s390_check_low_addr_prot_real - check for low-address protection
|
||||
* @vcpu: virtual cpu
|
||||
* @gra: Guest real address
|
||||
*
|
||||
* Checks whether an address is subject to low-address protection and set
|
||||
@ -979,6 +990,7 @@ int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
|
||||
* @pgt: pointer to the beginning of the page table for the given address if
|
||||
* successful (return value 0), or to the first invalid DAT entry in
|
||||
* case of exceptions (return value > 0)
|
||||
* @dat_protection: referenced memory is write protected
|
||||
* @fake: pgt references contiguous guest memory block, not a pgtable
|
||||
*/
|
||||
static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
|
||||
|
@ -269,6 +269,7 @@ static int handle_prog(struct kvm_vcpu *vcpu)
|
||||
|
||||
/**
|
||||
* handle_external_interrupt - used for external interruption interceptions
|
||||
* @vcpu: virtual cpu
|
||||
*
|
||||
* This interception only occurs if the CPUSTAT_EXT_INT bit was set, or if
|
||||
* the new PSW does not have external interrupts disabled. In the first case,
|
||||
@ -315,7 +316,8 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu)
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle MOVE PAGE partial execution interception.
|
||||
* handle_mvpg_pei - Handle MOVE PAGE partial execution interception.
|
||||
* @vcpu: virtual cpu
|
||||
*
|
||||
* This interception can only happen for guests with DAT disabled and
|
||||
* addresses that are currently not mapped in the host. Thus we try to
|
||||
|
@ -419,13 +419,13 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
|
||||
static void __set_cpu_idle(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
|
||||
set_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
|
||||
set_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
|
||||
}
|
||||
|
||||
static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
|
||||
clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
|
||||
clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
|
||||
}
|
||||
|
||||
static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
|
||||
|
@ -4066,7 +4066,7 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu)
|
||||
kvm_s390_patch_guest_per_regs(vcpu);
|
||||
}
|
||||
|
||||
clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.gisa_int.kicked_mask);
|
||||
clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
|
||||
|
||||
vcpu->arch.sie_block->icptcode = 0;
|
||||
cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
|
||||
|
@ -79,7 +79,7 @@ static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
|
||||
|
||||
static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return test_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
|
||||
return test_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
|
||||
}
|
||||
|
||||
static inline int kvm_is_ucontrol(struct kvm *kvm)
|
||||
|
@ -159,7 +159,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
|
||||
|
||||
mmap_read_lock(current->mm);
|
||||
ret = -EINVAL;
|
||||
vma = find_vma(current->mm, mmio_addr);
|
||||
vma = vma_lookup(current->mm, mmio_addr);
|
||||
if (!vma)
|
||||
goto out_unlock_mmap;
|
||||
if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
|
||||
@ -298,7 +298,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
|
||||
|
||||
mmap_read_lock(current->mm);
|
||||
ret = -EINVAL;
|
||||
vma = find_vma(current->mm, mmio_addr);
|
||||
vma = vma_lookup(current->mm, mmio_addr);
|
||||
if (!vma)
|
||||
goto out_unlock_mmap;
|
||||
if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
|
||||
|
@ -80,30 +80,30 @@ $(obj)/vmlinux.bin.xz: $(obj)/vmlinux.bin FORCE
|
||||
$(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE
|
||||
$(call if_changed,lzo)
|
||||
|
||||
$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2
|
||||
$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2 FORCE
|
||||
$(call if_changed,uimage,bzip2)
|
||||
|
||||
$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz
|
||||
$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE
|
||||
$(call if_changed,uimage,gzip)
|
||||
|
||||
$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma
|
||||
$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE
|
||||
$(call if_changed,uimage,lzma)
|
||||
|
||||
$(obj)/uImage.xz: $(obj)/vmlinux.bin.xz
|
||||
$(obj)/uImage.xz: $(obj)/vmlinux.bin.xz FORCE
|
||||
$(call if_changed,uimage,xz)
|
||||
|
||||
$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo
|
||||
$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo FORCE
|
||||
$(call if_changed,uimage,lzo)
|
||||
|
||||
$(obj)/uImage.bin: $(obj)/vmlinux.bin
|
||||
$(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE
|
||||
$(call if_changed,uimage,none)
|
||||
|
||||
OBJCOPYFLAGS_vmlinux.srec := -I binary -O srec
|
||||
$(obj)/vmlinux.srec: $(obj)/compressed/vmlinux
|
||||
$(obj)/vmlinux.srec: $(obj)/compressed/vmlinux FORCE
|
||||
$(call if_changed,objcopy)
|
||||
|
||||
OBJCOPYFLAGS_uImage.srec := -I binary -O srec
|
||||
$(obj)/uImage.srec: $(obj)/uImage
|
||||
$(obj)/uImage.srec: $(obj)/uImage FORCE
|
||||
$(call if_changed,objcopy)
|
||||
|
||||
$(obj)/uImage: $(obj)/uImage.$(suffix-y)
|
||||
|
@ -356,7 +356,9 @@ err_nomem:
|
||||
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
|
||||
dma_addr_t dma_addr, unsigned long attrs)
|
||||
{
|
||||
if (!sparc_dma_free_resource(cpu_addr, PAGE_ALIGN(size)))
|
||||
size = PAGE_ALIGN(size);
|
||||
|
||||
if (!sparc_dma_free_resource(cpu_addr, size))
|
||||
return;
|
||||
|
||||
dma_make_coherent(dma_addr, size);
|
||||
|
@ -39,6 +39,7 @@ struct mdesc_hdr {
|
||||
u32 node_sz; /* node block size */
|
||||
u32 name_sz; /* name block size */
|
||||
u32 data_sz; /* data block size */
|
||||
char data[];
|
||||
} __attribute__((aligned(16)));
|
||||
|
||||
struct mdesc_elem {
|
||||
@ -612,7 +613,7 @@ EXPORT_SYMBOL(mdesc_get_node_info);
|
||||
|
||||
static struct mdesc_elem *node_block(struct mdesc_hdr *mdesc)
|
||||
{
|
||||
return (struct mdesc_elem *) (mdesc + 1);
|
||||
return (struct mdesc_elem *) mdesc->data;
|
||||
}
|
||||
|
||||
static void *name_block(struct mdesc_hdr *mdesc)
|
||||
|
@ -339,6 +339,11 @@ config NEED_PER_CPU_PAGE_FIRST_CHUNK
|
||||
config ARCH_HIBERNATION_POSSIBLE
|
||||
def_bool y
|
||||
|
||||
config ARCH_NR_GPIO
|
||||
int
|
||||
default 1024 if X86_64
|
||||
default 512
|
||||
|
||||
config ARCH_SUSPEND_POSSIBLE
|
||||
def_bool y
|
||||
|
||||
|
@ -4,6 +4,12 @@
|
||||
|
||||
tune = $(call cc-option,-mtune=$(1),$(2))
|
||||
|
||||
ifdef CONFIG_CC_IS_CLANG
|
||||
align := -falign-functions=0 $(call cc-option,-falign-jumps=0) $(call cc-option,-falign-loops=0)
|
||||
else
|
||||
align := -falign-functions=0 -falign-jumps=0 -falign-loops=0
|
||||
endif
|
||||
|
||||
cflags-$(CONFIG_M486SX) += -march=i486
|
||||
cflags-$(CONFIG_M486) += -march=i486
|
||||
cflags-$(CONFIG_M586) += -march=i586
|
||||
@ -19,11 +25,11 @@ cflags-$(CONFIG_MK6) += -march=k6
|
||||
# They make zero difference whatsosever to performance at this time.
|
||||
cflags-$(CONFIG_MK7) += -march=athlon
|
||||
cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8,-march=athlon)
|
||||
cflags-$(CONFIG_MCRUSOE) += -march=i686 -falign-functions=0 -falign-jumps=0 -falign-loops=0
|
||||
cflags-$(CONFIG_MEFFICEON) += -march=i686 $(call tune,pentium3) -falign-functions=0 -falign-jumps=0 -falign-loops=0
|
||||
cflags-$(CONFIG_MCRUSOE) += -march=i686 $(align)
|
||||
cflags-$(CONFIG_MEFFICEON) += -march=i686 $(call tune,pentium3) $(align)
|
||||
cflags-$(CONFIG_MWINCHIPC6) += $(call cc-option,-march=winchip-c6,-march=i586)
|
||||
cflags-$(CONFIG_MWINCHIP3D) += $(call cc-option,-march=winchip2,-march=i586)
|
||||
cflags-$(CONFIG_MCYRIXIII) += $(call cc-option,-march=c3,-march=i486) -falign-functions=0 -falign-jumps=0 -falign-loops=0
|
||||
cflags-$(CONFIG_MCYRIXIII) += $(call cc-option,-march=c3,-march=i486) $(align)
|
||||
cflags-$(CONFIG_MVIAC3_2) += $(call cc-option,-march=c3-2,-march=i686)
|
||||
cflags-$(CONFIG_MVIAC7) += -march=i686
|
||||
cflags-$(CONFIG_MCORE2) += -march=i686 $(call tune,core2)
|
||||
|
@@ -99,7 +99,8 @@ static void hv_apic_eoi_write(u32 reg, u32 val)
/*
 * IPI implementation on Hyper-V.
 */
static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector,
		bool exclude_self)
{
struct hv_send_ipi_ex **arg;
struct hv_send_ipi_ex *ipi_arg;
@@ -123,6 +124,9 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)

if (!cpumask_equal(mask, cpu_present_mask)) {
ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
if (exclude_self)
nr_bank = cpumask_to_vpset_noself(&(ipi_arg->vp_set), mask);
else
nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask);
}
if (nr_bank < 0)
@@ -138,15 +142,25 @@ ipi_mask_ex_done:
return hv_result_success(status);
}

static bool __send_ipi_mask(const struct cpumask *mask, int vector)
static bool __send_ipi_mask(const struct cpumask *mask, int vector,
		bool exclude_self)
{
int cur_cpu, vcpu;
int cur_cpu, vcpu, this_cpu = smp_processor_id();
struct hv_send_ipi ipi_arg;
u64 status;
unsigned int weight;

trace_hyperv_send_ipi_mask(mask, vector);

if (cpumask_empty(mask))
weight = cpumask_weight(mask);

/*
 * Do nothing if
 *  1. the mask is empty
 *  2. the mask only contains self when exclude_self is true
 */
if (weight == 0 ||
    (exclude_self && weight == 1 && cpumask_test_cpu(this_cpu, mask)))
return true;

if (!hv_hypercall_pg)
@@ -172,6 +186,8 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector)
ipi_arg.cpu_mask = 0;

for_each_cpu(cur_cpu, mask) {
if (exclude_self && cur_cpu == this_cpu)
continue;
vcpu = hv_cpu_number_to_vp_number(cur_cpu);
if (vcpu == VP_INVAL)
return false;
@@ -191,7 +207,7 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector)
return hv_result_success(status);

do_ex_hypercall:
return __send_ipi_mask_ex(mask, vector);
return __send_ipi_mask_ex(mask, vector, exclude_self);
}

static bool __send_ipi_one(int cpu, int vector)
@@ -208,7 +224,7 @@ static bool __send_ipi_one(int cpu, int vector)
return false;

if (vp >= 64)
return __send_ipi_mask_ex(cpumask_of(cpu), vector);
return __send_ipi_mask_ex(cpumask_of(cpu), vector, false);

status = hv_do_fast_hypercall16(HVCALL_SEND_IPI, vector, BIT_ULL(vp));
return hv_result_success(status);
@@ -222,20 +238,13 @@ static void hv_send_ipi(int cpu, int vector)

static void hv_send_ipi_mask(const struct cpumask *mask, int vector)
{
if (!__send_ipi_mask(mask, vector))
if (!__send_ipi_mask(mask, vector, false))
orig_apic.send_IPI_mask(mask, vector);
}

static void hv_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
{
unsigned int this_cpu = smp_processor_id();
struct cpumask new_mask;
const struct cpumask *local_mask;

cpumask_copy(&new_mask, mask);
cpumask_clear_cpu(this_cpu, &new_mask);
local_mask = &new_mask;
if (!__send_ipi_mask(local_mask, vector))
if (!__send_ipi_mask(mask, vector, true))
orig_apic.send_IPI_mask_allbutself(mask, vector);
}

@@ -246,7 +255,7 @@ static void hv_send_ipi_allbutself(int vector)

static void hv_send_ipi_all(int vector)
{
if (!__send_ipi_mask(cpu_online_mask, vector))
if (!__send_ipi_mask(cpu_online_mask, vector, false))
orig_apic.send_IPI_all(vector);
}

@@ -46,7 +46,7 @@ struct kvm_page_track_notifier_node {
struct kvm_page_track_notifier_node *node);
};

void kvm_page_track_init(struct kvm *kvm);
int kvm_page_track_init(struct kvm *kvm);
void kvm_page_track_cleanup(struct kvm *kvm);

void kvm_page_track_free_memslot(struct kvm_memory_slot *slot);

@@ -2,6 +2,20 @@
#ifndef _ASM_X86_KVM_CLOCK_H
#define _ASM_X86_KVM_CLOCK_H

#include <linux/percpu.h>

extern struct clocksource kvm_clock;

DECLARE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);

static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
{
return &this_cpu_read(hv_clock_per_cpu)->pvti;
}

static inline struct pvclock_vsyscall_time_info *this_cpu_hvclock(void)
{
return this_cpu_read(hv_clock_per_cpu);
}

#endif /* _ASM_X86_KVM_CLOCK_H */

@@ -301,8 +301,8 @@ do { \
unsigned int __gu_low, __gu_high; \
const unsigned int __user *__gu_ptr; \
__gu_ptr = (const void __user *)(ptr); \
__get_user_asm(__gu_low, ptr, "l", "=r", label); \
__get_user_asm(__gu_high, ptr+1, "l", "=r", label); \
__get_user_asm(__gu_low, __gu_ptr, "l", "=r", label); \
__get_user_asm(__gu_high, __gu_ptr+1, "l", "=r", label); \
(x) = ((unsigned long long)__gu_high << 32) | __gu_low; \
} while (0)
#else

@@ -1253,6 +1253,9 @@ static void __mc_scan_banks(struct mce *m, struct pt_regs *regs, struct mce *fin

static void kill_me_now(struct callback_head *ch)
{
struct task_struct *p = container_of(ch, struct task_struct, mce_kill_me);

p->mce_count = 0;
force_sig(SIGBUS);
}

@@ -1262,6 +1265,7 @@ static void kill_me_maybe(struct callback_head *cb)
int flags = MF_ACTION_REQUIRED;
int ret;

p->mce_count = 0;
pr_err("Uncorrected hardware memory error in user-access at %llx", p->mce_addr);

if (!p->mce_ripv)
@@ -1290,8 +1294,12 @@ static void kill_me_maybe(struct callback_head *cb)
}
}

static void queue_task_work(struct mce *m, int kill_current_task)
static void queue_task_work(struct mce *m, char *msg, int kill_current_task)
{
int count = ++current->mce_count;

/* First call, save all the details */
if (count == 1) {
current->mce_addr = m->addr;
current->mce_kflags = m->kflags;
current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
@@ -1301,6 +1309,19 @@ static void queue_task_work(struct mce *m, int kill_current_task)
current->mce_kill_me.func = kill_me_now;
else
current->mce_kill_me.func = kill_me_maybe;
}

/* Ten is likely overkill. Don't expect more than two faults before task_work() */
if (count > 10)
mce_panic("Too many consecutive machine checks while accessing user data", m, msg);

/* Second or later call, make sure page address matches the one from first call */
if (count > 1 && (current->mce_addr >> PAGE_SHIFT) != (m->addr >> PAGE_SHIFT))
mce_panic("Consecutive machine checks to different user pages", m, msg);

/* Do not call task_work_add() more than once */
if (count > 1)
return;

task_work_add(current, &current->mce_kill_me, TWA_RESUME);
}
@@ -1438,7 +1459,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
/* If this triggers there is no way to recover. Die hard. */
BUG_ON(!on_thread_stack() || !user_mode(regs));

queue_task_work(&m, kill_current_task);
queue_task_work(&m, msg, kill_current_task);

} else {
/*
@@ -1456,7 +1477,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
}

if (m.kflags & MCE_IN_KERNEL_COPYIN)
queue_task_work(&m, kill_current_task);
queue_task_work(&m, msg, kill_current_task);
}
out:
mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);

@@ -49,18 +49,9 @@ early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
static struct pvclock_vsyscall_time_info
hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
static struct pvclock_wall_clock wall_clock __bss_decrypted;
static DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
static struct pvclock_vsyscall_time_info *hvclock_mem;

static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
{
return &this_cpu_read(hv_clock_per_cpu)->pvti;
}

static inline struct pvclock_vsyscall_time_info *this_cpu_hvclock(void)
{
return this_cpu_read(hv_clock_per_cpu);
}
DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
EXPORT_PER_CPU_SYMBOL_GPL(hv_clock_per_cpu);

/*
 * The wallclock is the time of day when we booted. Since then, some time may

@@ -135,7 +135,7 @@ static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)

static void __init pcpu_fc_free(void *ptr, size_t size)
{
memblock_free(__pa(ptr), size);
memblock_free_ptr(ptr, size);
}

static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)

@@ -65,8 +65,8 @@ static inline struct kvm_cpuid_entry2 *cpuid_entry2_find(
for (i = 0; i < nent; i++) {
e = &entries[i];

if (e->function == function && (e->index == index ||
    !(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX)))
if (e->function == function &&
    (!(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) || e->index == index))
return e;
}

@@ -4206,7 +4206,7 @@ static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
u64 cr4 = ctxt->ops->get_cr(ctxt, 4);

if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
return emulate_ud(ctxt);
return emulate_gp(ctxt, 0);

return X86EMUL_CONTINUE;
}

@@ -939,7 +939,7 @@ static int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
stimer_init(&hv_vcpu->stimer[i], i);

hv_vcpu->vp_index = kvm_vcpu_get_idx(vcpu);
hv_vcpu->vp_index = vcpu->vcpu_idx;

return 0;
}
@@ -1444,7 +1444,6 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
switch (msr) {
case HV_X64_MSR_VP_INDEX: {
struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
int vcpu_idx = kvm_vcpu_get_idx(vcpu);
u32 new_vp_index = (u32)data;

if (!host || new_vp_index >= KVM_MAX_VCPUS)
@@ -1459,9 +1458,9 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
 * VP index is changing, adjust num_mismatched_vp_indexes if
 * it now matches or no longer matches vcpu_idx.
 */
if (hv_vcpu->vp_index == vcpu_idx)
if (hv_vcpu->vp_index == vcpu->vcpu_idx)
atomic_inc(&hv->num_mismatched_vp_indexes);
else if (new_vp_index == vcpu_idx)
else if (new_vp_index == vcpu->vcpu_idx)
atomic_dec(&hv->num_mismatched_vp_indexes);

hv_vcpu->vp_index = new_vp_index;

@@ -83,7 +83,7 @@ static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

return hv_vcpu ? hv_vcpu->vp_index : kvm_vcpu_get_idx(vcpu);
return hv_vcpu ? hv_vcpu->vp_index : vcpu->vcpu_idx;
}

int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host);

@@ -319,8 +319,8 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
unsigned index;
bool mask_before, mask_after;
union kvm_ioapic_redirect_entry *e;
unsigned long vcpu_bitmap;
int old_remote_irr, old_delivery_status, old_dest_id, old_dest_mode;
DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);

switch (ioapic->ioregsel) {
case IOAPIC_REG_VERSION:
@@ -384,9 +384,9 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
irq.shorthand = APIC_DEST_NOSHORT;
irq.dest_id = e->fields.dest_id;
irq.msi_redir_hint = false;
bitmap_zero(&vcpu_bitmap, 16);
bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
			 &vcpu_bitmap);
			 vcpu_bitmap);
if (old_dest_mode != e->fields.dest_mode ||
    old_dest_id != e->fields.dest_id) {
/*
@@ -399,10 +399,10 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
kvm_lapic_irq_dest_mode(
!!e->fields.dest_mode);
kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
			 &vcpu_bitmap);
			 vcpu_bitmap);
}
kvm_make_scan_ioapic_request_mask(ioapic->kvm,
				  &vcpu_bitmap);
				  vcpu_bitmap);
} else {
kvm_make_scan_ioapic_request(ioapic->kvm);
}

@@ -2027,8 +2027,8 @@ static void mmu_pages_clear_parents(struct mmu_page_path *parents)
} while (!sp->unsync_children);
}

static void mmu_sync_children(struct kvm_vcpu *vcpu,
			      struct kvm_mmu_page *parent)
static int mmu_sync_children(struct kvm_vcpu *vcpu,
			     struct kvm_mmu_page *parent, bool can_yield)
{
int i;
struct kvm_mmu_page *sp;
@@ -2055,12 +2055,18 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu,
}
if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
if (!can_yield) {
kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
return -EINTR;
}

cond_resched_rwlock_write(&vcpu->kvm->mmu_lock);
flush = false;
}
}

kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
return 0;
}

static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
@@ -2146,9 +2152,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
}

if (sp->unsync_children)
kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);

__clear_sp_write_flooding_count(sp);

trace_get_page:
@@ -3684,7 +3687,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
write_lock(&vcpu->kvm->mmu_lock);
kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);

mmu_sync_children(vcpu, sp);
mmu_sync_children(vcpu, sp, true);

kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
write_unlock(&vcpu->kvm->mmu_lock);
@@ -3700,7 +3703,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
if (IS_VALID_PAE_ROOT(root)) {
root &= PT64_BASE_ADDR_MASK;
sp = to_shadow_page(root);
mmu_sync_children(vcpu, sp);
mmu_sync_children(vcpu, sp, true);
}
}

@@ -164,13 +164,13 @@ void kvm_page_track_cleanup(struct kvm *kvm)
cleanup_srcu_struct(&head->track_srcu);
}

void kvm_page_track_init(struct kvm *kvm)
int kvm_page_track_init(struct kvm *kvm)
{
struct kvm_page_track_notifier_head *head;

head = &kvm->arch.track_notifier_head;
init_srcu_struct(&head->track_srcu);
INIT_HLIST_HEAD(&head->track_notifier_list);
return init_srcu_struct(&head->track_srcu);
}

/*

@@ -707,8 +707,27 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
if (!is_shadow_present_pte(*it.sptep)) {
table_gfn = gw->table_gfn[it.level - 2];
access = gw->pt_access[it.level - 2];
sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
		      false, access);
sp = kvm_mmu_get_page(vcpu, table_gfn, addr,
		      it.level-1, false, access);
/*
 * We must synchronize the pagetable before linking it
 * because the guest doesn't need to flush tlb when
 * the gpte is changed from non-present to present.
 * Otherwise, the guest may use the wrong mapping.
 *
 * For PG_LEVEL_4K, kvm_mmu_get_page() has already
 * synchronized it transiently via kvm_sync_page().
 *
 * For higher level pagetable, we synchronize it via
 * the slower mmu_sync_children().  If it needs to
 * break, some progress has been made; return
 * RET_PF_RETRY and retry on the next #PF.
 * KVM_REQ_MMU_SYNC is not necessary but it
 * expedites the process.
 */
if (sp->unsync_children &&
    mmu_sync_children(vcpu, sp, false))
return RET_PF_RETRY;
}

/*
@@ -1047,14 +1066,6 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gpa_t vaddr,
 * Using the cached information from sp->gfns is safe because:
 * - The spte has a reference to the struct page, so the pfn for a given gfn
 *   can't change unless all sptes pointing to it are nuked first.
 *
 * Note:
 *   We should flush all tlbs if spte is dropped even though guest is
 *   responsible for it. Since if we don't, kvm_mmu_notifier_invalidate_page
 *   and kvm_mmu_notifier_invalidate_range_start detect the mapping page isn't
 *   used by guest then tlbs are not flushed, so guest is allowed to access the
 *   freed pages.
 *   And we increase kvm->tlbs_dirty to delay tlbs flush in this case.
 */
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
@@ -1107,13 +1118,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
return 0;

if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
/*
 * Update spte before increasing tlbs_dirty to make
 * sure no tlb flush is lost after spte is zapped; see
 * the comments in kvm_flush_remote_tlbs().
 */
smp_wmb();
vcpu->kvm->tlbs_dirty++;
set_spte_ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
continue;
}

@@ -1128,12 +1133,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)

if (gfn != sp->gfns[i]) {
drop_spte(vcpu->kvm, &sp->spt[i]);
/*
 * The same as above where we are doing
 * prefetch_invalid_gpte().
 */
smp_wmb();
vcpu->kvm->tlbs_dirty++;
set_spte_ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
continue;
}

@@ -545,7 +545,6 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
(svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
(svm->vmcb01.ptr->control.int_ctl & int_ctl_vmcb01_bits);

svm->vmcb->control.virt_ext = svm->nested.ctl.virt_ext;
svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
svm->vmcb->control.int_state = svm->nested.ctl.int_state;
svm->vmcb->control.event_inj = svm->nested.ctl.event_inj;
@@ -579,7 +578,7 @@ static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to
}

int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
			 struct vmcb *vmcb12)
			 struct vmcb *vmcb12, bool from_vmrun)
{
struct vcpu_svm *svm = to_svm(vcpu);
int ret;
@@ -609,13 +608,16 @@ int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
nested_vmcb02_prepare_save(svm, vmcb12);

ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3,
			  nested_npt_enabled(svm), true);
			  nested_npt_enabled(svm), from_vmrun);
if (ret)
return ret;

if (!npt_enabled)
vcpu->arch.mmu->inject_page_fault = svm_inject_page_fault_nested;

if (!from_vmrun)
kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);

svm_set_gif(svm, true);

return 0;
@@ -681,7 +683,7 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu)

svm->nested.nested_run_pending = 1;

if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12))
if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, true))
goto out_exit_err;

if (nested_svm_vmrun_msrpm(svm))

@@ -595,20 +595,12 @@ static int sev_es_sync_vmsa(struct vcpu_svm *svm)
return 0;
}

static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
				    int *error)
{
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
struct sev_data_launch_update_vmsa vmsa;
struct kvm_vcpu *vcpu;
int i, ret;

if (!sev_es_guest(kvm))
return -ENOTTY;

vmsa.reserved = 0;

kvm_for_each_vcpu(i, vcpu, kvm) {
struct vcpu_svm *svm = to_svm(vcpu);
int ret;

/* Perform some pre-encryption checks against the VMSA */
ret = sev_es_sync_vmsa(svm);
@@ -616,22 +608,37 @@ static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
return ret;

/*
 * The LAUNCH_UPDATE_VMSA command will perform in-place
 * encryption of the VMSA memory content (i.e it will write
 * the same memory region with the guest's key), so invalidate
 * it first.
 * The LAUNCH_UPDATE_VMSA command will perform in-place encryption of
 * the VMSA memory content (i.e it will write the same memory region
 * with the guest's key), so invalidate it first.
 */
clflush_cache_range(svm->vmsa, PAGE_SIZE);

vmsa.handle = sev->handle;
vmsa.reserved = 0;
vmsa.handle = to_kvm_svm(kvm)->sev_info.handle;
vmsa.address = __sme_pa(svm->vmsa);
vmsa.len = PAGE_SIZE;
ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa,
		    &argp->error);
return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error);
}

static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
struct kvm_vcpu *vcpu;
int i, ret;

if (!sev_es_guest(kvm))
return -ENOTTY;

kvm_for_each_vcpu(i, vcpu, kvm) {
ret = mutex_lock_killable(&vcpu->mutex);
if (ret)
return ret;

svm->vcpu.arch.guest_state_protected = true;
ret = __sev_launch_update_vmsa(kvm, vcpu, &argp->error);

mutex_unlock(&vcpu->mutex);
if (ret)
return ret;
}

return 0;
@@ -1397,8 +1404,10 @@ static int sev_receive_start(struct kvm *kvm, struct kvm_sev_cmd *argp)

/* Bind ASID to this guest */
ret = sev_bind_asid(kvm, start.handle, error);
if (ret)
if (ret) {
sev_decommission(start.handle);
goto e_free_session;
}

params.handle = start.handle;
if (copy_to_user((void __user *)(uintptr_t)argp->data,
@@ -1464,7 +1473,7 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)

/* Pin guest memory */
guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
			    PAGE_SIZE, &n, 0);
			    PAGE_SIZE, &n, 1);
if (IS_ERR(guest_page)) {
ret = PTR_ERR(guest_page);
goto e_free_trans;
@@ -1501,6 +1510,20 @@ static int sev_receive_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
return sev_issue_cmd(kvm, SEV_CMD_RECEIVE_FINISH, &data, &argp->error);
}

static bool cmd_allowed_from_miror(u32 cmd_id)
{
/*
 * Allow mirrors VM to call KVM_SEV_LAUNCH_UPDATE_VMSA to enable SEV-ES
 * active mirror VMs. Also allow the debugging and status commands.
 */
if (cmd_id == KVM_SEV_LAUNCH_UPDATE_VMSA ||
    cmd_id == KVM_SEV_GUEST_STATUS || cmd_id == KVM_SEV_DBG_DECRYPT ||
    cmd_id == KVM_SEV_DBG_ENCRYPT)
return true;

return false;
}

int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
{
struct kvm_sev_cmd sev_cmd;
@@ -1517,8 +1540,9 @@ int svm_mem_enc_op(struct kvm *kvm, void __user *argp)

mutex_lock(&kvm->lock);

/* enc_context_owner handles all memory enc operations */
if (is_mirroring_enc_context(kvm)) {
/* Only the enc_context_owner handles some memory enc operations. */
if (is_mirroring_enc_context(kvm) &&
    !cmd_allowed_from_miror(sev_cmd.id)) {
r = -EINVAL;
goto out;
}
@@ -1715,8 +1739,7 @@ int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
{
struct file *source_kvm_file;
struct kvm *source_kvm;
struct kvm_sev_info *mirror_sev;
unsigned int asid;
struct kvm_sev_info source_sev, *mirror_sev;
int ret;

source_kvm_file = fget(source_fd);
@@ -1739,7 +1762,8 @@ int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
goto e_source_unlock;
}

asid = to_kvm_svm(source_kvm)->sev_info.asid;
memcpy(&source_sev, &to_kvm_svm(source_kvm)->sev_info,
       sizeof(source_sev));

/*
 * The mirror kvm holds an enc_context_owner ref so its asid can't
@@ -1759,8 +1783,16 @@ int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
/* Set enc_context_owner and copy its encryption context over */
mirror_sev = &to_kvm_svm(kvm)->sev_info;
mirror_sev->enc_context_owner = source_kvm;
mirror_sev->asid = asid;
mirror_sev->active = true;
mirror_sev->asid = source_sev.asid;
mirror_sev->fd = source_sev.fd;
mirror_sev->es_active = source_sev.es_active;
mirror_sev->handle = source_sev.handle;
/*
 * Do not copy ap_jump_table. Since the mirror does not share the same
 * KVM contexts as the original, and they may have different
 * memory-views.
 */

mutex_unlock(&kvm->lock);
return 0;

@@ -1566,6 +1566,8 @@ static void svm_clear_vintr(struct vcpu_svm *svm)

svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl &
			      V_IRQ_INJECTION_BITS_MASK;

svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
}

vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
@@ -2222,6 +2224,10 @@ static int gp_interception(struct kvm_vcpu *vcpu)
if (error_code)
goto reinject;

/* All SVM instructions expect page aligned RAX */
if (svm->vmcb->save.rax & ~PAGE_MASK)
goto reinject;

/* Decode the instruction for usage later */
if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK)
goto reinject;
@@ -4285,7 +4291,9 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
struct kvm_host_map map_save;
int ret;

if (is_guest_mode(vcpu)) {
if (!is_guest_mode(vcpu))
return 0;

/* FED8h - SVM Guest */
put_smstate(u64, smstate, 0x7ed8, 1);
/* FEE0h - SVM Guest VMCB Physical Address */
@@ -4321,7 +4329,6 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
		     &svm->vmcb01.ptr->save);

kvm_vcpu_unmap(vcpu, &map_save, true);
}
return 0;
}

@@ -4329,50 +4336,54 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
{
struct vcpu_svm *svm = to_svm(vcpu);
struct kvm_host_map map, map_save;
int ret = 0;

if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
u64 saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
u64 guest = GET_SMSTATE(u64, smstate, 0x7ed8);
u64 vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0);
u64 saved_efer, vmcb12_gpa;
struct vmcb *vmcb12;
int ret;

if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
return 0;

/* Non-zero if SMI arrived while vCPU was in guest mode. */
if (!GET_SMSTATE(u64, smstate, 0x7ed8))
return 0;

if (guest) {
if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
return 1;

saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
if (!(saved_efer & EFER_SVME))
return 1;

if (kvm_vcpu_map(vcpu,
		 gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL)
vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0);
if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL)
return 1;

ret = 1;
if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save) == -EINVAL)
goto unmap_map;

if (svm_allocate_nested(svm))
return 1;

vmcb12 = map.hva;

nested_load_control_from_vmcb12(svm, &vmcb12->control);

ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12);
kvm_vcpu_unmap(vcpu, &map, true);
goto unmap_save;

/*
 * Restore L1 host state from L1 HSAVE area as VMCB01 was
 * used during SMM (see svm_enter_smm())
 */
if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
		 &map_save) == -EINVAL)
return 1;

svm_copy_vmrun_state(&svm->vmcb01.ptr->save,
		     map_save.hva + 0x400);
svm_copy_vmrun_state(&svm->vmcb01.ptr->save, map_save.hva + 0x400);

/*
 * Enter the nested guest now
 */

vmcb12 = map.hva;
nested_load_control_from_vmcb12(svm, &vmcb12->control);
ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);

unmap_save:
kvm_vcpu_unmap(vcpu, &map_save, true);
}
}

unmap_map:
kvm_vcpu_unmap(vcpu, &map, true);
return ret;
}

@@ -459,7 +459,8 @@ static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}

int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb_gpa, struct vmcb *vmcb12);
int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
			 u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun);
void svm_leave_nested(struct vcpu_svm *svm);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);

@@ -353,14 +353,20 @@ void nested_evmcs_filter_control_msr(u32 msr_index, u64 *pdata)
switch (msr_index) {
case MSR_IA32_VMX_EXIT_CTLS:
case MSR_IA32_VMX_TRUE_EXIT_CTLS:
ctl_high &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
ctl_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL;
break;
case MSR_IA32_VMX_ENTRY_CTLS:
case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
ctl_high &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
ctl_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL;
break;
case MSR_IA32_VMX_PROCBASED_CTLS2:
ctl_high &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
ctl_high &= ~EVMCS1_UNSUPPORTED_2NDEXEC;
break;
case MSR_IA32_VMX_PINBASED_CTLS:
ctl_high &= ~EVMCS1_UNSUPPORTED_PINCTRL;
break;
case MSR_IA32_VMX_VMFUNC:
ctl_low &= ~EVMCS1_UNSUPPORTED_VMFUNC;
break;
}

@@ -2583,8 +2583,13 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 * Guest state is invalid and unrestricted guest is disabled,
 * which means L1 attempted VMEntry to L2 with invalid state.
 * Fail the VMEntry.
 *
 * However when force loading the guest state (SMM exit or
 * loading nested state after migration, it is possible to
 * have invalid guest state now, which will be later fixed by
 * restoring L2 register state
 */
if (CC(!vmx_guest_state_valid(vcpu))) {
if (CC(from_vmentry && !vmx_guest_state_valid(vcpu))) {
*entry_failure_code = ENTRY_FAIL_DEFAULT;
return -EINVAL;
}
@@ -4351,6 +4356,8 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
			vmcs12->vm_exit_msr_load_count))
nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);

to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu);
}

static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
@@ -4899,14 +4906,7 @@ out_vmcs02:
return -ENOMEM;
}

/*
 * Emulate the VMXON instruction.
 * Currently, we just remember that VMX is active, and do not save or even
 * inspect the argument to VMXON (the so-called "VMXON pointer") because we
 * do not currently need to store anything in that guest-allocated memory
 * region. Consequently, VMCLEAR and VMPTRLD also do not verify that the their
 * argument is different from the VMXON pointer (which the spec says they do).
 */
/* Emulate the VMXON instruction. */
static int handle_vmon(struct kvm_vcpu *vcpu)
{
int ret;
@@ -5903,6 +5903,12 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
case EXIT_REASON_VMFUNC:
/* VM functions are emulated through L2->L0 vmexits. */
return true;
case EXIT_REASON_BUS_LOCK:
/*
 * At present, bus lock VM exit is never exposed to L1.
 * Handle L2's bus locks in L0 directly.
 */
return true;
default:
break;
}

@@ -1323,7 +1323,7 @@ static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
vmx_prepare_switch_to_host(to_vmx(vcpu));
}

static bool emulation_required(struct kvm_vcpu *vcpu)
bool vmx_emulation_required(struct kvm_vcpu *vcpu)
{
return emulate_invalid_guest_state && !vmx_guest_state_valid(vcpu);
}
@@ -1367,7 +1367,7 @@ void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
vmcs_writel(GUEST_RFLAGS, rflags);

if ((old_rflags ^ vmx->rflags) & X86_EFLAGS_VM)
vmx->emulation_required = emulation_required(vcpu);
vmx->emulation_required = vmx_emulation_required(vcpu);
}

u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
@@ -1837,10 +1837,11 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
		    &msr_info->data))
return 1;
/*
 * Enlightened VMCS v1 doesn't have certain fields, but buggy
 * Hyper-V versions are still trying to use corresponding
 * features when they are exposed. Filter out the essential
 * minimum.
 * Enlightened VMCS v1 doesn't have certain VMCS fields but
 * instead of just ignoring the features, different Hyper-V
 * versions are either trying to use them and fail or do some
 * sanity checking and refuse to boot. Filter all unsupported
 * features out.
 */
if (!msr_info->host_initiated &&
    vmx->nested.enlightened_vmcs_enabled)
@@ -3077,7 +3078,7 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
}

/* depends on vcpu->arch.cr0 to be set to a new value */
vmx->emulation_required = emulation_required(vcpu);
vmx->emulation_required = vmx_emulation_required(vcpu);
}

static int vmx_get_max_tdp_level(void)
@@ -3330,7 +3331,7 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int
{
__vmx_set_segment(vcpu, var, seg);

to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu);
}

static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
@@ -6621,10 +6622,24 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
    vmx->loaded_vmcs->soft_vnmi_blocked))
vmx->loaded_vmcs->entry_time = ktime_get();

/* Don't enter VMX if guest state is invalid, let the exit handler
   start emulation until we arrive back to a valid state */
if (vmx->emulation_required)
/*
 * Don't enter VMX if guest state is invalid, let the exit handler
 * start emulation until we arrive back to a valid state.  Synthesize a
 * consistency check VM-Exit due to invalid guest state and bail.
 */
if (unlikely(vmx->emulation_required)) {

/* We don't emulate invalid state of a nested guest */
vmx->fail = is_guest_mode(vcpu);

vmx->exit_reason.full = EXIT_REASON_INVALID_STATE;
vmx->exit_reason.failed_vmentry = 1;
kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1);
vmx->exit_qualification = ENTRY_FAIL_DEFAULT;
kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2);
vmx->exit_intr_info = 0;
return EXIT_FASTPATH_NONE;
}

trace_kvm_entry(vcpu);

@@ -6833,7 +6848,7 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
 */
tsx_ctrl = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL);
if (tsx_ctrl)
vmx->guest_uret_msrs[i].mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
tsx_ctrl->mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
}

err = alloc_loaded_vmcs(&vmx->vmcs01);

@@ -248,12 +248,8 @@ struct vcpu_vmx {
 * only loaded into hardware when necessary, e.g. SYSCALL #UDs outside
 * of 64-bit mode or if EFER.SCE=1, thus the SYSCALL MSRs don't need to
 * be loaded into hardware if those conditions aren't met.
 * nr_active_uret_msrs tracks the number of MSRs that need to be loaded
 * into hardware when running the guest.  guest_uret_msrs[] is resorted
 * whenever the number of "active" uret MSRs is modified.
 */
struct vmx_uret_msr guest_uret_msrs[MAX_NR_USER_RETURN_MSRS];
int nr_active_uret_msrs;
bool guest_uret_msrs_loaded;
#ifdef CONFIG_X86_64
u64 msr_host_kernel_gs_base;
@@ -359,6 +355,7 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
			unsigned long fs_base, unsigned long gs_base);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
bool vmx_emulation_required(struct kvm_vcpu *vcpu);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);

@@ -1332,6 +1332,13 @@ static const u32 msrs_to_save_all[] = {
MSR_ARCH_PERFMON_EVENTSEL0 + 12, MSR_ARCH_PERFMON_EVENTSEL0 + 13,
MSR_ARCH_PERFMON_EVENTSEL0 + 14, MSR_ARCH_PERFMON_EVENTSEL0 + 15,
MSR_ARCH_PERFMON_EVENTSEL0 + 16, MSR_ARCH_PERFMON_EVENTSEL0 + 17,

MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3,
MSR_K7_PERFCTR0, MSR_K7_PERFCTR1, MSR_K7_PERFCTR2, MSR_K7_PERFCTR3,
MSR_F15H_PERF_CTL0, MSR_F15H_PERF_CTL1, MSR_F15H_PERF_CTL2,
MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5,
MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2,
MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, MSR_F15H_PERF_CTR5,
};

static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_all)];
@@ -2969,7 +2976,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
			       offsetof(struct compat_vcpu_info, time));
if (vcpu->xen.vcpu_time_info_set)
kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_time_info_cache, 0);
if (v == kvm_get_vcpu(v->kvm, 0))
if (!v->vcpu_idx)
kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
return 0;
}
@@ -7658,6 +7665,13 @@ static void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm)

/* Process a latched INIT or SMI, if any.  */
kvm_make_request(KVM_REQ_EVENT, vcpu);

/*
 * Even if KVM_SET_SREGS2 loaded PDPTRs out of band,
 * on SMM exit we still need to reload them from
 * guest memory
 */
vcpu->arch.pdptrs_from_userspace = false;
}

kvm_mmu_reset_context(vcpu);
@@ -10652,6 +10666,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
int r;

vcpu->arch.last_vmentry_cpu = -1;
vcpu->arch.regs_avail = ~0;
vcpu->arch.regs_dirty = ~0;

if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
@@ -10893,6 +10909,9 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
kvm_rip_write(vcpu, 0xfff0);

vcpu->arch.cr3 = 0;
kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);

/*
 * CR0.CD/NW are set on RESET, preserved on INIT.  Note, some versions
 * of Intel's SDM list CD/NW as being set on INIT, but they contradict
@@ -11139,9 +11158,15 @@ void kvm_arch_free_vm(struct kvm *kvm)

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
int ret;

if (type)
return -EINVAL;

ret = kvm_page_track_init(kvm);
if (ret)
return ret;

INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
@@ -11174,7 +11199,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)

kvm_apicv_init(kvm);
kvm_hv_init_vm(kvm);
kvm_page_track_init(kvm);
kvm_mmu_init_vm(kvm);
kvm_xen_init_vm(kvm);

@@ -1432,18 +1432,18 @@ int kern_addr_valid(unsigned long addr)
return 0;

p4d = p4d_offset(pgd, addr);
if (p4d_none(*p4d))
if (!p4d_present(*p4d))
return 0;

pud = pud_offset(p4d, addr);
if (pud_none(*pud))
if (!pud_present(*pud))
return 0;

if (pud_large(*pud))
return pfn_valid(pud_pfn(*pud));

pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd))
if (!pmd_present(*pmd))
return 0;

if (pmd_large(*pmd))

@@ -49,8 +49,7 @@ static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
p = early_alloc(PMD_SIZE, nid, false);
if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
return;
else if (p)
memblock_free(__pa(p), PMD_SIZE);
memblock_free_ptr(p, PMD_SIZE);
}

p = early_alloc(PAGE_SIZE, nid, true);
@@ -86,8 +85,7 @@ static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
p = early_alloc(PUD_SIZE, nid, false);
if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
return;
else if (p)
memblock_free(__pa(p), PUD_SIZE);
memblock_free_ptr(p, PUD_SIZE);
}

p = early_alloc(PAGE_SIZE, nid, true);

@@ -355,7 +355,7 @@ void __init numa_reset_distance(void)

/* numa_distance could be 1LU marking allocation failure, test cnt */
if (numa_distance_cnt)
memblock_free(__pa(numa_distance), size);
memblock_free_ptr(numa_distance, size);
numa_distance_cnt = 0;
numa_distance = NULL;	/* enable table creation */
}

@@ -517,8 +517,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
}

/* free the copied physical distance table */
if (phys_dist)
memblock_free(__pa(phys_dist), phys_size);
memblock_free_ptr(phys_dist, phys_size);
return;

no_emu:

@@ -583,7 +583,12 @@ int memtype_reserve(u64 start, u64 end, enum page_cache_mode req_type,
int err = 0;

start = sanitize_phys(start);
end = sanitize_phys(end);

/*
 * The end address passed into this function is exclusive, but
 * sanitize_phys() expects an inclusive address.
 */
end = sanitize_phys(end - 1) + 1;
if (start >= end) {
WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__,
     start, end - 1, cattr_name(req_type));

@@ -1214,6 +1214,11 @@ static void __init xen_dom0_set_legacy_features(void)
x86_platform.legacy.rtc = 1;
}

static void __init xen_domu_set_legacy_features(void)
{
x86_platform.legacy.rtc = 0;
}

/* First C function to be called on Xen boot */
asmlinkage __visible void __init xen_start_kernel(void)
{
@@ -1359,6 +1364,8 @@ asmlinkage __visible void __init xen_start_kernel(void)
add_preferred_console("xenboot", 0, NULL);
if (pci_xen)
x86_init.pci.arch_init = pci_xen_init;
x86_platform.set_legacy_features =
		xen_domu_set_legacy_features;
} else {
const struct dom0_vga_console_info *info =
	(void *)((char *)xen_start_info +

@@ -1518,14 +1518,17 @@ static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
if (pinned) {
struct page *page = pfn_to_page(pfn);

if (static_branch_likely(&xen_struct_pages_ready))
pinned = false;
if (static_branch_likely(&xen_struct_pages_ready)) {
pinned = PagePinned(page);
SetPagePinned(page);
}

xen_mc_batch();

__set_pfn_prot(pfn, PAGE_KERNEL_RO);

if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS && !pinned)
__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);

xen_mc_issue(PARAVIRT_LAZY_MMU);

@@ -1182,10 +1182,6 @@ int blkcg_init_queue(struct request_queue *q)
if (preloaded)
radix_tree_preload_end();

ret = blk_iolatency_init(q);
if (ret)
goto err_destroy_all;

ret = blk_ioprio_init(q);
if (ret)
goto err_destroy_all;
@@ -1194,6 +1190,12 @@ int blkcg_init_queue(struct request_queue *q)
if (ret)
goto err_destroy_all;

ret = blk_iolatency_init(q);
if (ret) {
blk_throtl_exit(q);
goto err_destroy_all;
}

return 0;

err_destroy_all:
@@ -1364,10 +1366,14 @@ enomem:
/* alloc failed, nothing's initialized yet, free everything */
spin_lock_irq(&q->queue_lock);
list_for_each_entry(blkg, &q->blkg_list, q_node) {
struct blkcg *blkcg = blkg->blkcg;

spin_lock(&blkcg->lock);
if (blkg->pd[pol->plid]) {
pol->pd_free_fn(blkg->pd[pol->plid]);
blkg->pd[pol->plid] = NULL;
}
spin_unlock(&blkcg->lock);
}
spin_unlock_irq(&q->queue_lock);
ret = -ENOMEM;
@@ -1399,12 +1405,16 @@ void blkcg_deactivate_policy(struct request_queue *q,
__clear_bit(pol->plid, q->blkcg_pols);

list_for_each_entry(blkg, &q->blkg_list, q_node) {
struct blkcg *blkcg = blkg->blkcg;

spin_lock(&blkcg->lock);
if (blkg->pd[pol->plid]) {
if (pol->pd_offline_fn)
pol->pd_offline_fn(blkg->pd[pol->plid]);
pol->pd_free_fn(blkg->pd[pol->plid]);
blkg->pd[pol->plid] = NULL;
}
spin_unlock(&blkcg->lock);
}

spin_unlock_irq(&q->queue_lock);

@@ -426,8 +426,15 @@ EXPORT_SYMBOL(blk_integrity_register);
 */
void blk_integrity_unregister(struct gendisk *disk)
{
struct blk_integrity *bi = &disk->queue->integrity;

if (!bi->profile)
return;

/* ensure all bios are off the integrity workqueue */
blk_flush_integrity();
blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, disk->queue);
memset(&disk->queue->integrity, 0, sizeof(struct blk_integrity));
memset(bi, 0, sizeof(*bi));
}
EXPORT_SYMBOL(blk_integrity_unregister);

@@ -208,7 +208,7 @@ static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,

spin_lock_irqsave(&tags->lock, flags);
rq = tags->rqs[bitnr];
if (!rq || !refcount_inc_not_zero(&rq->ref))
if (!rq || rq->tag != bitnr || !refcount_inc_not_zero(&rq->ref))
rq = NULL;
spin_unlock_irqrestore(&tags->lock, flags);
return rq;

@@ -264,7 +264,7 @@ void __init numa_free_distance(void)
size = numa_distance_cnt * numa_distance_cnt *
	sizeof(numa_distance[0]);

memblock_free(__pa(numa_distance), size);
memblock_free_ptr(numa_distance, size);
numa_distance_cnt = 0;
numa_distance = NULL;
}

@@ -13,6 +13,7 @@
#include <linux/export.h>
#include <linux/rtc.h>
#include <linux/suspend.h>
#include <linux/init.h>

#include <linux/mc146818rtc.h>

@@ -165,6 +166,9 @@ void generate_pm_trace(const void *tracedata, unsigned int user)
const char *file = *(const char **)(tracedata + 2);
unsigned int user_hash_value, file_hash_value;

if (!x86_platform.legacy.rtc)
return;

user_hash_value = user % USERHASH;
file_hash_value = hash_string(lineno, file, FILEHASH);
set_magic_time(user_hash_value, file_hash_value, dev_hash_value);
@@ -267,6 +271,9 @@ static struct notifier_block pm_trace_nb = {

static int __init early_resume_init(void)
{
if (!x86_platform.legacy.rtc)
return 0;

hash_value_early_read = read_magic_time();
register_pm_notifier(&pm_trace_nb);
return 0;
@@ -277,6 +284,9 @@ static int __init late_resume_init(void)
unsigned int val = hash_value_early_read;
unsigned int user, file, dev;

if (!x86_platform.legacy.rtc)
return 0;

user = val % USERHASH;
val = val / USERHASH;
file = val % FILEHASH;

@@ -74,8 +74,8 @@ unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *l
if (count)
return count;

kobject_put(&attr_set->kobj);
mutex_destroy(&attr_set->update_lock);
kobject_put(&attr_set->kobj);
return 0;
}
EXPORT_SYMBOL_GPL(gov_attr_set_put);

@@ -3205,11 +3205,15 @@ static int __init intel_pstate_init(void)
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return -ENODEV;

if (no_load)
return -ENODEV;

id = x86_match_cpu(hwp_support_ids);
if (id) {
bool hwp_forced = intel_pstate_hwp_is_enabled();

if (hwp_forced)
pr_info("HWP enabled by BIOS\n");
else if (no_load)
return -ENODEV;

copy_cpu_funcs(&core_funcs);
/*
 * Avoid enabling HWP for processors without EPP support,
@@ -3219,8 +3223,7 @@ static int __init intel_pstate_init(void)
 * If HWP is enabled already, though, there is no choice but to
 * deal with it.
 */
if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) ||
    intel_pstate_hwp_is_enabled()) {
if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) {
hwp_active++;
hwp_mode_bdw = id->driver_data;
intel_pstate.attr = hwp_cpufreq_attrs;
@@ -3235,7 +3238,11 @@ static int __init intel_pstate_init(void)

goto hwp_cpu_matched;
}
pr_info("HWP not enabled\n");
} else {
if (no_load)
return -ENODEV;

id = x86_match_cpu(intel_pstate_cpu_ids);
if (!id) {
pr_info("CPU model not supported\n");
@@ -3314,10 +3321,9 @@ static int __init intel_pstate_setup(char *str)
else if (!strcmp(str, "passive"))
default_driver = &intel_cpufreq;

if (!strcmp(str, "no_hwp")) {
pr_info("HWP disabled\n");
if (!strcmp(str, "no_hwp"))
no_hwp = 1;
}

if (!strcmp(str, "force"))
force_load = 1;
if (!strcmp(str, "hwp_only"))
Some files were not shown because too many files have changed in this diff.