Linux 3.13-rc5
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.15 (GNU/Linux)

iQEcBAABAgAGBQJSt1TYAAoJEHm+PkMAQRiGvWgH/iWg8TmEWz4KUKuHthuUi0uh
9+2YDN+4k577Xa0obaG24giQeqkCdxMr3oJrS3FO9jeAmaNRHEGhxUPneisb7RtN
9jIVsz5bPa9/DcK9nqtaUQdvA0O5AWTE6AAPmwleTVy+8CygBo987T2uGELnQCSR
FrKCrMePc7Pj0uthMVXrpJ17Ffm/j7jkfRtPhrxHJyngh3jEux+tPbpynH/aKuO2
47unsSQimoFk7n2SPFvNu3E/vZE2xVbu5SH9MCl5yyiCifFjpi3B71LV/FW51lvm
cK8Jzn2BmYDygRFjnZfq4zJHqYJCBHzpjCLWmR37Dm/JS91aGq3+Cg0stDP8UTs=
=+Hcn
-----END PGP SIGNATURE-----

Merge tag 'v3.13-rc5' into next

Linux 3.13-rc5

* tag 'v3.13-rc5': (231 commits)
  Linux 3.13-rc5
  aio: clean up and fix aio_setup_ring page mapping
  aio/migratepages: make aio migrate pages sane
  aio: fix kioctx leak introduced by "aio: Fix a trinity splat"
  Don't set the INITRD_COMPRESS environment variable automatically
  mm: fix build of split ptlock code
  pstore: Don't allow high traffic options on fragile devices
  mm: do not allocate page->ptl dynamically, if spinlock_t fits to long
  mm: page_alloc: revert NUMA aspect of fair allocation policy
  Revert "mm: page_alloc: exclude unreclaimable allocations from zone fairness policy"
  mm: Fix NULL pointer dereference in madvise(MADV_WILLNEED) support
  qla2xxx: Fix scsi_host leak on qlt_lport_register callback failure
  target: Remove extra percpu_ref_init
  arm64: ptrace: avoid using HW_BREAKPOINT_EMPTY for disabled events
  ARC: Allow conditional multiple inclusion of uapi/asm/unistd.h
  target/file: Update hw_max_sectors based on current block_size
  iser-target: Move INIT_WORK setup into isert_create_device_ib_res
  iscsi-target: Fix incorrect np->np_thread NULL assignment
  mm/hugetlb: check for pte NULL pointer in __page_check_address()
  fix build with make 3.80
  ...

Conflicts:
	drivers/usb/phy/Kconfig
commit e90b8417af

 Documentation/module-signing.txt | 240 (new file)
@@ -0,0 +1,240 @@
			==============================
			KERNEL MODULE SIGNING FACILITY
			==============================

CONTENTS

 - Overview.
 - Configuring module signing.
 - Generating signing keys.
 - Public keys in the kernel.
 - Manually signing modules.
 - Signed modules and stripping.
 - Loading signed modules.
 - Non-valid signatures and unsigned modules.
 - Administering/protecting the private key.


========
OVERVIEW
========

The kernel module signing facility cryptographically signs modules during
installation and then checks the signature upon loading the module. This
allows increased kernel security by disallowing the loading of unsigned modules
or modules signed with an invalid key. Module signing increases security by
making it harder to load a malicious module into the kernel. The module
signature checking is done by the kernel so that it is not necessary to have
trusted userspace bits.

This facility uses X.509 ITU-T standard certificates to encode the public keys
involved. The signatures are not themselves encoded in any industry-standard
format. The facility currently only supports the RSA public key encryption
standard (though it is pluggable and permits others to be used). The possible
hash algorithms that can be used are SHA-1, SHA-224, SHA-256, SHA-384, and
SHA-512 (the algorithm is selected by data in the signature).


==========================
CONFIGURING MODULE SIGNING
==========================

The module signing facility is enabled by going to the "Enable Loadable Module
Support" section of the kernel configuration and turning on

	CONFIG_MODULE_SIG	"Module signature verification"

This has a number of options available:

 (1) "Require modules to be validly signed" (CONFIG_MODULE_SIG_FORCE)

     This specifies how the kernel should deal with a module that has a
     signature for which the key is not known or a module that is unsigned.

     If this is off (ie. "permissive"), then modules for which the key is not
     available and modules that are unsigned are permitted, but the kernel will
     be marked as being tainted.

     If this is on (ie. "restrictive"), only modules that have a valid
     signature that can be verified by a public key in the kernel's possession
     will be loaded. All other modules will generate an error.

     Irrespective of the setting here, if the module has a signature block that
     cannot be parsed, it will be rejected out of hand.


 (2) "Automatically sign all modules" (CONFIG_MODULE_SIG_ALL)

     If this is on then modules will be automatically signed during the
     modules_install phase of a build. If this is off, then the modules must
     be signed manually using:

	scripts/sign-file


 (3) "Which hash algorithm should modules be signed with?"

     This presents a choice of which hash algorithm the installation phase will
     sign the modules with:

	CONFIG_MODULE_SIG_SHA1		"Sign modules with SHA-1"
	CONFIG_MODULE_SIG_SHA224	"Sign modules with SHA-224"
	CONFIG_MODULE_SIG_SHA256	"Sign modules with SHA-256"
	CONFIG_MODULE_SIG_SHA384	"Sign modules with SHA-384"
	CONFIG_MODULE_SIG_SHA512	"Sign modules with SHA-512"

     The algorithm selected here will also be built into the kernel (rather
     than being a module) so that modules signed with that algorithm can have
     their signatures checked without causing a dependency loop.

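As a worked example, a .config fragment that enforces signatures and signs
with SHA-256 might look like the following (an illustrative sketch; select
these through the configuration menus rather than editing .config by hand):

	CONFIG_MODULE_SIG=y
	CONFIG_MODULE_SIG_FORCE=y
	CONFIG_MODULE_SIG_ALL=y
	CONFIG_MODULE_SIG_SHA256=y
	CONFIG_MODULE_SIG_HASH="sha256"
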
=======================
GENERATING SIGNING KEYS
=======================

Cryptographic keypairs are required to generate and check signatures. A
private key is used to generate a signature and the corresponding public key is
used to check it. The private key is only needed during the build, after which
it can be deleted or stored securely. The public key gets built into the
kernel so that it can be used to check the signatures as the modules are
loaded.

Under normal conditions, the kernel build will automatically generate a new
keypair using openssl if one does not exist in the files:

	signing_key.priv
	signing_key.x509

during the building of vmlinux (the public part of the key needs to be built
into vmlinux) using parameters in the:

	x509.genkey

file (which is also generated if it does not already exist).

It is strongly recommended that you provide your own x509.genkey file.

Most notably, in the x509.genkey file, the req_distinguished_name section
should be altered from the default:

	[ req_distinguished_name ]
	O = Magrathea
	CN = Glacier signing key
	emailAddress = slartibartfast@magrathea.h2g2

The generated RSA key size can also be set with:

	[ req ]
	default_bits = 4096


It is also possible to manually generate the private/public key files using
the x509.genkey key generation configuration file in the root of the Linux
kernel source tree and the openssl command. The following is an example to
generate the public/private key files:

	openssl req -new -nodes -utf8 -sha256 -days 36500 -batch -x509 \
	   -config x509.genkey -outform DER -out signing_key.x509 \
	   -keyout signing_key.priv

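For reference, a complete x509.genkey along the lines of the one the build
generates might look like this (the distinguished-name values are
illustrative and should be replaced with your own):

	[ req ]
	default_bits = 4096
	distinguished_name = req_distinguished_name
	prompt = no
	string_mask = utf8only
	x509_extensions = myexts

	[ req_distinguished_name ]
	O = Magrathea
	CN = Glacier signing key
	emailAddress = slartibartfast@magrathea.h2g2

	[ myexts ]
	basicConstraints=critical,CA:FALSE
	keyUsage=digitalSignature
	subjectKeyIdentifier=hash
	authorityKeyIdentifier=keyid
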
=========================
PUBLIC KEYS IN THE KERNEL
=========================

The kernel contains a ring of public keys that can be viewed by root. They're
in a keyring called ".system_keyring" that can be seen by:

	[root@deneb ~]# cat /proc/keys
	...
	223c7853 I------     1 perm 1f030000     0     0 keyring   .system_keyring: 1
	302d2d52 I------     1 perm 1f010000     0     0 asymmetri Fedora kernel signing key: d69a84e6bce3d216b979e9505b3e3ef9a7118079: X509.RSA a7118079 []
	...

Beyond the public key generated specifically for module signing, any file
placed in the kernel source root directory or the kernel build root directory
whose name is suffixed with ".x509" will be assumed to be an X.509 public key
and will be added to the keyring.

Further, the architecture code may take public keys from a hardware store and
add those in also (e.g. from the UEFI key database).

Finally, it is possible to add additional public keys by doing:

	keyctl padd asymmetric "" [.system_keyring-ID] <[key-file]

e.g.:

	keyctl padd asymmetric "" 0x223c7853 <my_public_key.x509

Note, however, that the kernel will only permit keys to be added to
.system_keyring _if_ the new key's X.509 wrapper is validly signed by a key
that is already resident in the .system_keyring at the time the key is added.

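Since the keyring ID varies from boot to boot, a small shell sketch (assuming
the /proc/keys column layout shown above) can look it up rather than
hard-coding it:

	keyctl padd asymmetric "" \
	    0x$(awk '$9 == ".system_keyring:" { print $1 }' /proc/keys) \
	    <my_public_key.x509
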
========================
MANUALLY SIGNING MODULES
========================

To manually sign a module, use the scripts/sign-file tool available in
the Linux kernel source tree. The script requires 4 arguments:

	1.  The hash algorithm (e.g., sha256)
	2.  The private key filename
	3.  The public key filename
	4.  The kernel module to be signed

The following is an example to sign a kernel module:

	scripts/sign-file sha512 kernel-signkey.priv \
		kernel-signkey.x509 module.ko

The hash algorithm used does not have to match the one configured, but if it
doesn't, you should make sure that hash algorithm is either built into the
kernel or can be loaded without requiring itself.

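To sign every module in a build tree in one go, a loop such as the following
can be used (paths illustrative):

	find . -name '*.ko' \
	    -exec scripts/sign-file sha512 ./signing_key.priv \
	        ./signing_key.x509 {} \;
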
============================
SIGNED MODULES AND STRIPPING
============================

A signed module has a digital signature simply appended at the end. The string
"~Module signature appended~." at the end of the module's file confirms that a
signature is present but it does not confirm that the signature is valid!

Signed modules are BRITTLE as the signature is outside of the defined ELF
container. Thus they MAY NOT be stripped once the signature is computed and
attached. Note the entire module is the signed payload, including any and all
debug information present at the time of signing.

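The presence of the marker is easy to check by hand; the magic string
(including its trailing newline) occupies the last 28 bytes of a signed
module:

	tail -c 28 module.ko
	~Module signature appended~
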
======================
LOADING SIGNED MODULES
======================

Modules are loaded with insmod, modprobe, init_module() or finit_module(),
exactly as for unsigned modules as no processing is done in userspace. The
signature checking is all done within the kernel.


=========================================
NON-VALID SIGNATURES AND UNSIGNED MODULES
=========================================

If CONFIG_MODULE_SIG_FORCE is enabled or module.sig_enforce=1 is supplied on
the kernel command line, the kernel will only load validly signed modules
for which it has a public key. Otherwise, it will also load modules that are
unsigned. Any module for which the kernel has a key, but which proves to have
a signature mismatch will not be permitted to load.

Any module that has an unparseable signature will be rejected.

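In restrictive mode the failure is visible directly at load time; an
illustrative session (the exact wording comes from the tooling's rendering of
the ENOKEY error):

	# insmod unsigned-module.ko
	insmod: ERROR: could not insert module unsigned-module.ko: Required key not available
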
========================================
ADMINISTERING/PROTECTING THE PRIVATE KEY
========================================

Since the private key is used to sign modules, viruses and malware could use
the private key to sign modules and compromise the operating system. The
private key must be either destroyed or moved to a secure location and not kept
in the root of the kernel source tree.
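
For example (an illustrative precaution, not a prescribed procedure), the key
can be locked down during the build and destroyed once signing is finished:

	chmod 600 signing_key.priv	# readable only by its owner
	shred -u signing_key.priv	# or securely delete it after the build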

@@ -16,8 +16,12 @@ ip_default_ttl - INTEGER
 	Default: 64 (as recommended by RFC1700)
 
 ip_no_pmtu_disc - BOOLEAN
-	Disable Path MTU Discovery.
-	default FALSE
+	Disable Path MTU Discovery. If enabled and a
+	fragmentation-required ICMP is received, the PMTU to this
+	destination will be set to min_pmtu (see below). You will need
+	to raise min_pmtu to the smallest interface MTU on your system
+	manually if you want to avoid locally generated fragments.
+	Default: FALSE
 
 min_pmtu - INTEGER
 	default 552 - minimum discovered Path MTU
 MAINTAINERS | 27

@@ -1008,6 +1008,8 @@ M: Santosh Shilimkar <santosh.shilimkar@ti.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/mach-keystone/
+F:	drivers/clk/keystone/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git
 
 ARM/LOGICPD PXA270 MACHINE SUPPORT
 M:	Lennert Buytenhek <kernel@wantstofly.org>
@@ -3761,9 +3763,11 @@ F: include/uapi/linux/gigaset_dev.h
 
 GPIO SUBSYSTEM
 M:	Linus Walleij <linus.walleij@linaro.org>
-S:	Maintained
+M:	Alexandre Courbot <gnurou@gmail.com>
 L:	linux-gpio@vger.kernel.org
-F:	Documentation/gpio.txt
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
+S:	Maintained
+F:	Documentation/gpio/
 F:	drivers/gpio/
 F:	include/linux/gpio*
 F:	include/asm-generic/gpio.h
@@ -3831,6 +3835,12 @@ T: git git://linuxtv.org/media_tree.git
 S:	Maintained
 F:	drivers/media/usb/gspca/
 
+GUID PARTITION TABLE (GPT)
+M:	Davidlohr Bueso <davidlohr@hp.com>
+L:	linux-efi@vger.kernel.org
+S:	Maintained
+F:	block/partitions/efi.*
+
 STK1160 USB VIDEO CAPTURE DRIVER
 M:	Ezequiel Garcia <elezegarcia@gmail.com>
 L:	linux-media@vger.kernel.org
@@ -5911,12 +5921,21 @@ M: Steffen Klassert <steffen.klassert@secunet.com>
 M:	Herbert Xu <herbert@gondor.apana.org.au>
 M:	"David S. Miller" <davem@davemloft.net>
 L:	netdev@vger.kernel.org
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next.git
 S:	Maintained
 F:	net/xfrm/
 F:	net/key/
 F:	net/ipv4/xfrm*
+F:	net/ipv4/esp4.c
+F:	net/ipv4/ah4.c
+F:	net/ipv4/ipcomp.c
+F:	net/ipv4/ip_vti.c
 F:	net/ipv6/xfrm*
+F:	net/ipv6/esp6.c
+F:	net/ipv6/ah6.c
+F:	net/ipv6/ipcomp6.c
+F:	net/ipv6/ip6_vti.c
 F:	include/uapi/linux/xfrm.h
 F:	include/net/xfrm.h
 
@@ -9571,7 +9590,7 @@ F: drivers/xen/*swiotlb*
 
 XFS FILESYSTEM
 P:	Silicon Graphics Inc
-M:	Dave Chinner <dchinner@fromorbit.com>
+M:	Dave Chinner <david@fromorbit.com>
 M:	Ben Myers <bpm@sgi.com>
 M:	xfs@oss.sgi.com
 L:	xfs@oss.sgi.com
 Makefile | 24

@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 13
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = One Giant Leap for Frogkind
 
 # *DOCUMENTATION*
@@ -732,19 +732,15 @@ export mod_strip_cmd
 # Select initial ramdisk compression format, default is gzip(1).
 # This shall be used by the dracut(8) tool while creating an initramfs image.
 #
-INITRD_COMPRESS=gzip
-ifeq ($(CONFIG_RD_BZIP2), y)
-        INITRD_COMPRESS=bzip2
-else ifeq ($(CONFIG_RD_LZMA), y)
-        INITRD_COMPRESS=lzma
-else ifeq ($(CONFIG_RD_XZ), y)
-        INITRD_COMPRESS=xz
-else ifeq ($(CONFIG_RD_LZO), y)
-        INITRD_COMPRESS=lzo
-else ifeq ($(CONFIG_RD_LZ4), y)
-        INITRD_COMPRESS=lz4
-endif
-export INITRD_COMPRESS
+INITRD_COMPRESS-y                  := gzip
+INITRD_COMPRESS-$(CONFIG_RD_BZIP2) := bzip2
+INITRD_COMPRESS-$(CONFIG_RD_LZMA)  := lzma
+INITRD_COMPRESS-$(CONFIG_RD_XZ)    := xz
+INITRD_COMPRESS-$(CONFIG_RD_LZO)   := lzo
+INITRD_COMPRESS-$(CONFIG_RD_LZ4)   := lz4
+# do not export INITRD_COMPRESS, since we didn't actually
+# choose a sane default compression above.
+# export INITRD_COMPRESS := $(INITRD_COMPRESS-y)
 
 ifdef CONFIG_MODULE_SIG_ALL
 MODSECKEY = ./signing_key.priv
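
(The replacement above uses the common kbuild trick of folding a Kconfig test
into a variable name; a minimal standalone sketch with hypothetical names:

	# CONFIG_USE_XZ is "y" or empty, so exactly one assignment lands in FOO-y:
	FOO-y                := gzip
	FOO-$(CONFIG_USE_XZ) := xz
	# when CONFIG_USE_XZ=y the second line rewrites FOO-y to "xz";
	# otherwise it merely sets the unused variable "FOO-".
)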
@@ -8,7 +8,11 @@
 
 /******** no-legacy-syscalls-ABI *******/
 
-#ifndef _UAPI_ASM_ARC_UNISTD_H
+/*
+ * Non-typical guard macro to enable inclusion twice in ARCH sys.c
+ * That is how the Generic syscall wrapper generator works
+ */
+#if !defined(_UAPI_ASM_ARC_UNISTD_H) || defined(__SYSCALL)
 #define _UAPI_ASM_ARC_UNISTD_H
 
 #define __ARCH_WANT_SYS_EXECVE
@@ -36,4 +40,6 @@ __SYSCALL(__NR_arc_gettls, sys_arc_gettls)
 #define __NR_sysfs (__NR_arch_specific_syscall + 3)
 __SYSCALL(__NR_sysfs, sys_sysfs)
 
+#undef __SYSCALL
+
 #endif
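
(The guard pattern above lets the header be processed a second time when the
includer defines __SYSCALL, so one syscall list can be expanded once for the
number definitions and once to build a call table. A hypothetical
self-contained illustration, not the real ARC header:

	/* example_unistd.h */
	#if !defined(_EXAMPLE_UNISTD_H) || defined(__SYSCALL)
	#define _EXAMPLE_UNISTD_H

	#ifndef __SYSCALL
	#define __SYSCALL(nr, call)	/* first pass: define numbers only */
	#endif

	#define __NR_example	42
	__SYSCALL(__NR_example, sys_example)

	#undef __SYSCALL
	#endif
)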
@@ -87,9 +87,9 @@
 		interrupts = <1 9 0xf04>;
 	};
 
-	gpio0: gpio@ffc40000 {
+	gpio0: gpio@e6050000 {
 		compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-		reg = <0 0xffc40000 0 0x2c>;
+		reg = <0 0xe6050000 0 0x50>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 4 0x4>;
 		#gpio-cells = <2>;
@@ -99,9 +99,9 @@
 		interrupt-controller;
 	};
 
-	gpio1: gpio@ffc41000 {
+	gpio1: gpio@e6051000 {
 		compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-		reg = <0 0xffc41000 0 0x2c>;
+		reg = <0 0xe6051000 0 0x50>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 5 0x4>;
 		#gpio-cells = <2>;
@@ -111,9 +111,9 @@
 		interrupt-controller;
 	};
 
-	gpio2: gpio@ffc42000 {
+	gpio2: gpio@e6052000 {
 		compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-		reg = <0 0xffc42000 0 0x2c>;
+		reg = <0 0xe6052000 0 0x50>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 6 0x4>;
 		#gpio-cells = <2>;
@@ -123,9 +123,9 @@
 		interrupt-controller;
 	};
 
-	gpio3: gpio@ffc43000 {
+	gpio3: gpio@e6053000 {
 		compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-		reg = <0 0xffc43000 0 0x2c>;
+		reg = <0 0xe6053000 0 0x50>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 7 0x4>;
 		#gpio-cells = <2>;
@@ -135,9 +135,9 @@
 		interrupt-controller;
 	};
 
-	gpio4: gpio@ffc44000 {
+	gpio4: gpio@e6054000 {
 		compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-		reg = <0 0xffc44000 0 0x2c>;
+		reg = <0 0xe6054000 0 0x50>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 8 0x4>;
 		#gpio-cells = <2>;
@@ -147,9 +147,9 @@
 		interrupt-controller;
 	};
 
-	gpio5: gpio@ffc45000 {
+	gpio5: gpio@e6055000 {
 		compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-		reg = <0 0xffc45000 0 0x2c>;
+		reg = <0 0xe6055000 0 0x50>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 9 0x4>;
 		#gpio-cells = <2>;
@@ -101,13 +101,51 @@ static const struct omap_dss_hwmod_data omap4_dss_hwmod_data[] __initconst = {
 	{ "dss_hdmi", "omapdss_hdmi", -1 },
 };
 
+static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
+{
+	u32 enable_mask, enable_shift;
+	u32 pipd_mask, pipd_shift;
+	u32 reg;
+
+	if (dsi_id == 0) {
+		enable_mask = OMAP4_DSI1_LANEENABLE_MASK;
+		enable_shift = OMAP4_DSI1_LANEENABLE_SHIFT;
+		pipd_mask = OMAP4_DSI1_PIPD_MASK;
+		pipd_shift = OMAP4_DSI1_PIPD_SHIFT;
+	} else if (dsi_id == 1) {
+		enable_mask = OMAP4_DSI2_LANEENABLE_MASK;
+		enable_shift = OMAP4_DSI2_LANEENABLE_SHIFT;
+		pipd_mask = OMAP4_DSI2_PIPD_MASK;
+		pipd_shift = OMAP4_DSI2_PIPD_SHIFT;
+	} else {
+		return -ENODEV;
+	}
+
+	reg = omap4_ctrl_pad_readl(OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_DSIPHY);
+
+	reg &= ~enable_mask;
+	reg &= ~pipd_mask;
+
+	reg |= (lanes << enable_shift) & enable_mask;
+	reg |= (lanes << pipd_shift) & pipd_mask;
+
+	omap4_ctrl_pad_writel(reg, OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_DSIPHY);
+
+	return 0;
+}
+
 static int omap_dsi_enable_pads(int dsi_id, unsigned lane_mask)
 {
+	if (cpu_is_omap44xx())
+		return omap4_dsi_mux_pads(dsi_id, lane_mask);
+
 	return 0;
 }
 
 static void omap_dsi_disable_pads(int dsi_id, unsigned lane_mask)
 {
+	if (cpu_is_omap44xx())
+		omap4_dsi_mux_pads(dsi_id, 0);
 }
 
 static int omap_dss_set_min_bus_tput(struct device *dev, unsigned long tput)
@@ -8,8 +8,6 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/clk-provider.h>
-#include <linux/irqchip.h>
 #include <linux/of_platform.h>
 
 #include <asm/mach/arch.h>
@@ -48,15 +46,9 @@ static void __init s3c64xx_dt_map_io(void)
 		panic("SoC is not S3C64xx!");
 }
 
-static void __init s3c64xx_dt_init_irq(void)
-{
-	of_clk_init(NULL);
-	samsung_wdt_reset_of_init();
-	irqchip_init();
-};
-
 static void __init s3c64xx_dt_init_machine(void)
 {
+	samsung_wdt_reset_of_init();
 	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 }
 
@@ -79,7 +71,6 @@ DT_MACHINE_START(S3C6400_DT, "Samsung S3C64xx (Flattened Device Tree)")
 	/* Maintainer: Tomasz Figa <tomasz.figa@gmail.com> */
 	.dt_compat	= s3c64xx_dt_compat,
 	.map_io		= s3c64xx_dt_map_io,
-	.init_irq	= s3c64xx_dt_init_irq,
 	.init_machine	= s3c64xx_dt_init_machine,
 	.restart	= s3c64xx_dt_restart,
 MACHINE_END
@@ -245,7 +245,9 @@ static void __init lager_init(void)
 {
 	lager_add_standard_devices();
 
-	phy_register_fixup_for_id("r8a7790-ether-ff:01", lager_ksz8041_fixup);
+	if (IS_ENABLED(CONFIG_PHYLIB))
+		phy_register_fixup_for_id("r8a7790-ether-ff:01",
+					  lager_ksz8041_fixup);
 }
 
 static const char * const lager_boards_compat_dt[] __initconst = {
@@ -96,7 +96,7 @@ static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
 	struct remap_data *info = data;
 	struct page *page = info->pages[info->index++];
 	unsigned long pfn = page_to_pfn(page);
-	pte_t pte = pfn_pte(pfn, info->prot);
+	pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot));
 
 	if (map_foreign_page(pfn, info->fgmfn, info->domid))
 		return -EFAULT;
@@ -224,10 +224,10 @@ static int __init xen_guest_init(void)
 	}
 	if (of_address_to_resource(node, GRANT_TABLE_PHYSADDR, &res))
 		return 0;
-	xen_hvm_resume_frames = res.start >> PAGE_SHIFT;
+	xen_hvm_resume_frames = res.start;
 	xen_events_irq = irq_of_parse_and_map(node, 0);
 	pr_info("Xen %s support found, events_irq=%d gnttab_frame_pfn=%lx\n",
-		version, xen_events_irq, xen_hvm_resume_frames);
+		version, xen_events_irq, (xen_hvm_resume_frames >> PAGE_SHIFT));
 	xen_domain_type = XEN_HVM_DOMAIN;
 
 	xen_setup_features();
@@ -23,25 +23,21 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
 	     unsigned long offset, size_t size, enum dma_data_direction dir,
 	     struct dma_attrs *attrs)
 {
 	__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
 }
 
 static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir,
 		struct dma_attrs *attrs)
 {
 	__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
 }
 
 static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
 	__generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
 }
 
 static inline void xen_dma_sync_single_for_device(struct device *hwdev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
 	__generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
 }
 #endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */
@@ -214,31 +214,29 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
 {
 	int err, len, type, disabled = !ctrl.enabled;
 
-	if (disabled) {
-		len = 0;
-		type = HW_BREAKPOINT_EMPTY;
-	} else {
-		err = arch_bp_generic_fields(ctrl, &len, &type);
-		if (err)
-			return err;
-
-		switch (note_type) {
-		case NT_ARM_HW_BREAK:
-			if ((type & HW_BREAKPOINT_X) != type)
-				return -EINVAL;
-			break;
-		case NT_ARM_HW_WATCH:
-			if ((type & HW_BREAKPOINT_RW) != type)
-				return -EINVAL;
-			break;
-		default:
-			return -EINVAL;
-		}
+	attr->disabled = disabled;
+	if (disabled)
+		return 0;
+
+	err = arch_bp_generic_fields(ctrl, &len, &type);
+	if (err)
+		return err;
+
+	switch (note_type) {
+	case NT_ARM_HW_BREAK:
+		if ((type & HW_BREAKPOINT_X) != type)
+			return -EINVAL;
+		break;
+	case NT_ARM_HW_WATCH:
+		if ((type & HW_BREAKPOINT_RW) != type)
+			return -EINVAL;
+		break;
+	default:
+		return -EINVAL;
 	}
 
 	attr->bp_len	= len;
 	attr->bp_type	= type;
-	attr->disabled	= disabled;
 
 	return 0;
 }
@@ -192,6 +192,10 @@ extern void kvmppc_load_up_vsx(void);
 extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
 extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
 extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
+extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
+				 struct kvm_vcpu *vcpu);
+extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
+				   struct kvmppc_book3s_shadow_vcpu *svcpu);
 
 static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
 {
@@ -79,6 +79,7 @@ struct kvmppc_host_state {
 	ulong vmhandler;
 	ulong scratch0;
 	ulong scratch1;
+	ulong scratch2;
 	u8 in_guest;
 	u8 restore_hid5;
 	u8 napping;
@@ -106,6 +107,7 @@ struct kvmppc_host_state {
 };
 
 struct kvmppc_book3s_shadow_vcpu {
+	bool in_use;
 	ulong gpr[14];
 	u32 cr;
 	u32 xer;
@@ -720,13 +720,13 @@ int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe,
 int64_t opal_pci_poll(uint64_t phb_id);
 int64_t opal_return_cpu(void);
 
-int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, uint64_t *val);
+int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, __be64 *val);
 int64_t opal_xscom_write(uint32_t gcid, uint32_t pcb_addr, uint64_t val);
 
 int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type,
 		       uint32_t addr, uint32_t data, uint32_t sz);
 int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type,
-		      uint32_t addr, uint32_t *data, uint32_t sz);
+		      uint32_t addr, __be32 *data, uint32_t sz);
 int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result);
 int64_t opal_manage_flash(uint8_t op);
 int64_t opal_update_flash(uint64_t blk_list);
@@ -35,7 +35,7 @@ extern void giveup_vsx(struct task_struct *);
 extern void enable_kernel_spe(void);
 extern void giveup_spe(struct task_struct *);
 extern void load_up_spe(struct task_struct *);
-extern void switch_booke_debug_regs(struct thread_struct *new_thread);
+extern void switch_booke_debug_regs(struct debug_reg *new_debug);
 
 #ifndef CONFIG_SMP
 extern void discard_lazy_cpu_state(void);
@@ -576,6 +576,7 @@ int main(void)
 	HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler);
 	HSTATE_FIELD(HSTATE_SCRATCH0, scratch0);
 	HSTATE_FIELD(HSTATE_SCRATCH1, scratch1);
+	HSTATE_FIELD(HSTATE_SCRATCH2, scratch2);
 	HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);
 	HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5);
 	HSTATE_FIELD(HSTATE_NAPPING, napping);
@@ -124,15 +124,15 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
 {
 	unsigned long addr;
-	const u32 *basep, *sizep;
+	const __be32 *basep, *sizep;
 	unsigned int rtas_start = 0, rtas_end = 0;
 
 	basep = of_get_property(rtas.dev, "linux,rtas-base", NULL);
 	sizep = of_get_property(rtas.dev, "rtas-size", NULL);
 
 	if (basep && sizep) {
-		rtas_start = *basep;
-		rtas_end = *basep + *sizep;
+		rtas_start = be32_to_cpup(basep);
+		rtas_end = rtas_start + be32_to_cpup(sizep);
 	}
 
 	for (addr = begin; addr < end; addr += PAGE_SIZE) {
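
(The powerpc conversions in this series all follow one pattern: flattened
device-tree property values are stored big-endian, so on a little-endian
kernel they must be declared as __be32/__be64 and converted on access rather
than dereferenced directly. A minimal sketch of the pattern, with a
hypothetical property name:

	#include <linux/of.h>

	static u32 read_dt_cell(struct device_node *np)
	{
		/* DT cells are big-endian on the wire: annotate, then convert. */
		const __be32 *p = of_get_property(np, "example,cells", NULL);

		return p ? be32_to_cpup(p) : 0;
	}
)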
@@ -339,7 +339,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread)
 #endif
 }
 
-static void prime_debug_regs(struct thread_struct *thread)
+static void prime_debug_regs(struct debug_reg *debug)
 {
 	/*
 	 * We could have inherited MSR_DE from userspace, since
@@ -348,22 +348,22 @@ static void prime_debug_regs(struct thread_struct *thread)
 	 */
 	mtmsr(mfmsr() & ~MSR_DE);
 
-	mtspr(SPRN_IAC1, thread->debug.iac1);
-	mtspr(SPRN_IAC2, thread->debug.iac2);
+	mtspr(SPRN_IAC1, debug->iac1);
+	mtspr(SPRN_IAC2, debug->iac2);
 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
-	mtspr(SPRN_IAC3, thread->debug.iac3);
-	mtspr(SPRN_IAC4, thread->debug.iac4);
+	mtspr(SPRN_IAC3, debug->iac3);
+	mtspr(SPRN_IAC4, debug->iac4);
 #endif
-	mtspr(SPRN_DAC1, thread->debug.dac1);
-	mtspr(SPRN_DAC2, thread->debug.dac2);
+	mtspr(SPRN_DAC1, debug->dac1);
+	mtspr(SPRN_DAC2, debug->dac2);
 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
-	mtspr(SPRN_DVC1, thread->debug.dvc1);
-	mtspr(SPRN_DVC2, thread->debug.dvc2);
+	mtspr(SPRN_DVC1, debug->dvc1);
+	mtspr(SPRN_DVC2, debug->dvc2);
 #endif
-	mtspr(SPRN_DBCR0, thread->debug.dbcr0);
-	mtspr(SPRN_DBCR1, thread->debug.dbcr1);
+	mtspr(SPRN_DBCR0, debug->dbcr0);
+	mtspr(SPRN_DBCR1, debug->dbcr1);
 #ifdef CONFIG_BOOKE
-	mtspr(SPRN_DBCR2, thread->debug.dbcr2);
+	mtspr(SPRN_DBCR2, debug->dbcr2);
 #endif
 }
 /*
@@ -371,11 +371,11 @@ static void prime_debug_regs(struct thread_struct *thread)
  * debug registers, set the debug registers from the values
  * stored in the new thread.
  */
-void switch_booke_debug_regs(struct thread_struct *new_thread)
+void switch_booke_debug_regs(struct debug_reg *new_debug)
 {
 	if ((current->thread.debug.dbcr0 & DBCR0_IDM)
-		|| (new_thread->debug.dbcr0 & DBCR0_IDM))
-			prime_debug_regs(new_thread);
+		|| (new_debug->dbcr0 & DBCR0_IDM))
+			prime_debug_regs(new_debug);
 }
 EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
 #else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
@@ -683,7 +683,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
-	switch_booke_debug_regs(&new->thread);
+	switch_booke_debug_regs(&new->thread.debug);
 #else
 /*
  * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
@@ -1555,7 +1555,7 @@ long arch_ptrace(struct task_struct *child, long request,
 
 			flush_fp_to_thread(child);
 			if (fpidx < (PT_FPSCR - PT_FPR0))
-				memcpy(&tmp, &child->thread.fp_state.fpr,
+				memcpy(&tmp, &child->thread.TS_FPR(fpidx),
 				       sizeof(long));
 			else
 				tmp = child->thread.fp_state.fpscr;
@@ -1588,7 +1588,7 @@ long arch_ptrace(struct task_struct *child, long request,
 
 			flush_fp_to_thread(child);
 			if (fpidx < (PT_FPSCR - PT_FPR0))
-				memcpy(&child->thread.fp_state.fpr, &data,
+				memcpy(&child->thread.TS_FPR(fpidx), &data,
 				       sizeof(long));
 			else
 				child->thread.fp_state.fpscr = data;
@@ -479,7 +479,7 @@ void __init smp_setup_cpu_maps(void)
 	if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR) &&
 	    (dn = of_find_node_by_path("/rtas"))) {
 		int num_addr_cell, num_size_cell, maxcpus;
-		const unsigned int *ireg;
+		const __be32 *ireg;
 
 		num_addr_cell = of_n_addr_cells(dn);
 		num_size_cell = of_n_size_cells(dn);
@@ -489,7 +489,7 @@ void __init smp_setup_cpu_maps(void)
 		if (!ireg)
 			goto out;
 
-		maxcpus = ireg[num_addr_cell + num_size_cell];
+		maxcpus = be32_to_cpup(ireg + num_addr_cell + num_size_cell);
 
 		/* Double maxcpus for processors which have SMT capability */
 		if (cpu_has_feature(CPU_FTR_SMT))
@@ -580,7 +580,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 int cpu_to_core_id(int cpu)
 {
 	struct device_node *np;
-	const int *reg;
+	const __be32 *reg;
 	int id = -1;
 
 	np = of_get_cpu_node(cpu, NULL);
@@ -591,7 +591,7 @@ int cpu_to_core_id(int cpu)
 	if (!reg)
 		goto out;
 
-	id = *reg;
+	id = be32_to_cpup(reg);
 out:
 	of_node_put(np);
 	return id;
@@ -469,11 +469,14 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 		slb_v = vcpu->kvm->arch.vrma_slb_v;
 	}
 
+	preempt_disable();
 	/* Find the HPTE in the hash table */
 	index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
 					 HPTE_V_VALID | HPTE_V_ABSENT);
-	if (index < 0)
+	if (index < 0) {
+		preempt_enable();
 		return -ENOENT;
+	}
 	hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
 	v = hptep[0] & ~HPTE_V_HVLOCK;
 	gr = kvm->arch.revmap[index].guest_rpte;
@@ -481,6 +484,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	/* Unlock the HPTE */
 	asm volatile("lwsync" : : : "memory");
 	hptep[0] = v;
+	preempt_enable();
 
 	gpte->eaddr = eaddr;
 	gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);
@@ -665,6 +669,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			return -EFAULT;
 	} else {
 		page = pages[0];
+		pfn = page_to_pfn(page);
 		if (PageHuge(page)) {
 			page = compound_head(page);
 			pte_size <<= compound_order(page);
@@ -689,7 +694,6 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			}
 			rcu_read_unlock_sched();
 		}
-		pfn = page_to_pfn(page);
 	}
 
 	ret = -EFAULT;
@@ -707,8 +711,14 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M;
 	}
 
-	/* Set the HPTE to point to pfn */
-	r = (r & ~(HPTE_R_PP0 - pte_size)) | (pfn << PAGE_SHIFT);
+	/*
+	 * Set the HPTE to point to pfn.
+	 * Since the pfn is at PAGE_SIZE granularity, make sure we
+	 * don't mask out lower-order bits if psize < PAGE_SIZE.
+	 */
+	if (psize < PAGE_SIZE)
+		psize = PAGE_SIZE;
+	r = (r & ~(HPTE_R_PP0 - psize)) | ((pfn << PAGE_SHIFT) & ~(psize - 1));
 	if (hpte_is_writable(r) && !write_ok)
 		r = hpte_make_readonly(r);
 	ret = RESUME_GUEST;
@@ -131,8 +131,9 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
 static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
+	unsigned long flags;
 
-	spin_lock(&vcpu->arch.tbacct_lock);
+	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
 	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE &&
 	    vc->preempt_tb != TB_NIL) {
 		vc->stolen_tb += mftb() - vc->preempt_tb;
@@ -143,19 +144,20 @@ static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
 		vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
 		vcpu->arch.busy_preempt = TB_NIL;
 	}
-	spin_unlock(&vcpu->arch.tbacct_lock);
+	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
 }
 
 static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
+	unsigned long flags;
 
-	spin_lock(&vcpu->arch.tbacct_lock);
+	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
 	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
 		vc->preempt_tb = mftb();
 	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
 		vcpu->arch.busy_preempt = mftb();
-	spin_unlock(&vcpu->arch.tbacct_lock);
+	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
 }
 
 static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
@@ -486,11 +488,11 @@ static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
 	 */
 	if (vc->vcore_state != VCORE_INACTIVE &&
 	    vc->runner->arch.run_task != current) {
-		spin_lock(&vc->runner->arch.tbacct_lock);
+		spin_lock_irq(&vc->runner->arch.tbacct_lock);
 		p = vc->stolen_tb;
 		if (vc->preempt_tb != TB_NIL)
 			p += now - vc->preempt_tb;
-		spin_unlock(&vc->runner->arch.tbacct_lock);
+		spin_unlock_irq(&vc->runner->arch.tbacct_lock);
 	} else {
 		p = vc->stolen_tb;
 	}
@@ -512,10 +514,10 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
 	core_stolen = vcore_stolen_time(vc, now);
 	stolen = core_stolen - vcpu->arch.stolen_logged;
 	vcpu->arch.stolen_logged = core_stolen;
-	spin_lock(&vcpu->arch.tbacct_lock);
+	spin_lock_irq(&vcpu->arch.tbacct_lock);
 	stolen += vcpu->arch.busy_stolen;
 	vcpu->arch.busy_stolen = 0;
-	spin_unlock(&vcpu->arch.tbacct_lock);
+	spin_unlock_irq(&vcpu->arch.tbacct_lock);
 	if (!dt || !vpa)
 		return;
 	memset(dt, 0, sizeof(struct dtl_entry));
@@ -589,7 +591,9 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 		if (list_empty(&vcpu->kvm->arch.rtas_tokens))
 			return RESUME_HOST;
 
+		idx = srcu_read_lock(&vcpu->kvm->srcu);
 		rc = kvmppc_rtas_hcall(vcpu);
+		srcu_read_unlock(&vcpu->kvm->srcu, idx);
 
 		if (rc == -ENOENT)
 			return RESUME_HOST;
@@ -1115,13 +1119,13 @@ static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
 
 	if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
 		return;
-	spin_lock(&vcpu->arch.tbacct_lock);
+	spin_lock_irq(&vcpu->arch.tbacct_lock);
 	now = mftb();
 	vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
 		vcpu->arch.stolen_logged;
 	vcpu->arch.busy_preempt = now;
 	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
-	spin_unlock(&vcpu->arch.tbacct_lock);
+	spin_unlock_irq(&vcpu->arch.tbacct_lock);
 	--vc->n_runnable;
 	list_del(&vcpu->arch.run_list);
 }
@@ -225,6 +225,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 		is_io = pa & (HPTE_R_I | HPTE_R_W);
 		pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
 		pa &= PAGE_MASK;
+		pa |= gpa & ~PAGE_MASK;
 	} else {
 		/* Translate to host virtual address */
 		hva = __gfn_to_hva_memslot(memslot, gfn);
@@ -238,13 +239,13 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 				ptel = hpte_make_readonly(ptel);
 			is_io = hpte_cache_bits(pte_val(pte));
 			pa = pte_pfn(pte) << PAGE_SHIFT;
-			pa |= hva & (pte_size - 1);
+			pa |= gpa & ~PAGE_MASK;
 		}
 	}
 
 	if (pte_size < psize)
 		return H_PARAMETER;
-	if (pa && pte_size > psize)
-		pa |= gpa & (pte_size - 1);
 
 	ptel &= ~(HPTE_R_PP0 - psize);
 	ptel |= pa;
@@ -749,6 +750,10 @@ static int slb_base_page_shift[4] = {
 	20,	/* 1M, unsupported */
 };
 
+/* When called from virtmode, this func should be protected by
+ * preempt_disable(), otherwise, the holding of HPTE_V_HVLOCK
+ * can trigger deadlock issue.
+ */
 long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
 			      unsigned long valid)
 {
@@ -153,7 +153,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
 13:	b	machine_check_fwnmi
 
-
 /*
  * We come in here when wakened from nap mode on a secondary hw thread.
  * Relocation is off and most register values are lost.
@@ -224,6 +223,11 @@ kvm_start_guest:
 	/* Clear our vcpu pointer so we don't come back in early */
 	li	r0, 0
 	std	r0, HSTATE_KVM_VCPU(r13)
+	/*
+	 * Make sure we clear HSTATE_KVM_VCPU(r13) before incrementing
+	 * the nap_count, because once the increment to nap_count is
+	 * visible we could be given another vcpu.
+	 */
+	lwsync
 	/* Clear any pending IPI - we're an offline thread */
 	ld	r5, HSTATE_XICS_PHYS(r13)
@@ -241,7 +245,6 @@ kvm_start_guest:
 	/* increment the nap count and then go to nap mode */
 	ld	r4, HSTATE_KVM_VCORE(r13)
 	addi	r4, r4, VCORE_NAP_COUNT
-	lwsync				/* make previous updates visible */
 51:	lwarx	r3, 0, r4
 	addi	r3, r3, 1
 	stwcx.	r3, 0, r4
@@ -751,15 +754,14 @@ kvmppc_interrupt_hv:
 	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
 	 * guest R13 saved in SPRN_SCRATCH0
 	 */
-	/* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
-	std	r9, HSTATE_HOST_R2(r13)
+	std	r9, HSTATE_SCRATCH2(r13)
 
 	lbz	r9, HSTATE_IN_GUEST(r13)
 	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
 	beq	kvmppc_bad_host_intr
 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 	cmpwi	r9, KVM_GUEST_MODE_GUEST
-	ld	r9, HSTATE_HOST_R2(r13)
+	ld	r9, HSTATE_SCRATCH2(r13)
 	beq	kvmppc_interrupt_pr
 #endif
 	/* We're now back in the host but in guest MMU context */
@@ -779,7 +781,7 @@ kvmppc_interrupt_hv:
 	std	r6, VCPU_GPR(R6)(r9)
 	std	r7, VCPU_GPR(R7)(r9)
 	std	r8, VCPU_GPR(R8)(r9)
-	ld	r0, HSTATE_HOST_R2(r13)
+	ld	r0, HSTATE_SCRATCH2(r13)
 	std	r0, VCPU_GPR(R9)(r9)
 	std	r10, VCPU_GPR(R10)(r9)
 	std	r11, VCPU_GPR(R11)(r9)
@@ -990,14 +992,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	 */
 	/* Increment the threads-exiting-guest count in the 0xff00
 	   bits of vcore->entry_exit_count */
-	lwsync
 	ld	r5,HSTATE_KVM_VCORE(r13)
 	addi	r6,r5,VCORE_ENTRY_EXIT
 41:	lwarx	r3,0,r6
 	addi	r0,r3,0x100
 	stwcx.	r0,0,r6
 	bne	41b
-	lwsync
+	isync		/* order stwcx. vs. reading napping_threads */
 
 	/*
 	 * At this point we have an interrupt that we have to pass
@@ -1030,6 +1031,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	sld	r0,r0,r4
 	andc.	r3,r3,r0		/* no sense IPI'ing ourselves */
 	beq	43f
+	/* Order entry/exit update vs. IPIs */
+	sync
 	mulli	r4,r4,PACA_SIZE		/* get paca for thread 0 */
 	subf	r6,r4,r13
 42:	andi.	r0,r3,1
@@ -1638,10 +1641,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
 	bge	kvm_cede_exit
 	stwcx.	r4,0,r6
 	bne	31b
-	/* order napping_threads update vs testing entry_exit_count */
-	isync
 	li	r0,1
 	stb	r0,HSTATE_NAPPING(r13)
+	/* order napping_threads update vs testing entry_exit_count */
+	lwsync
 	mr	r4,r3
 	lwz	r7,VCORE_ENTRY_EXIT(r5)
 	cmpwi	r7,0x100
@@ -129,29 +129,32 @@ kvm_start_lightweight:
 	 * R12      = exit handler id
 	 * R13      = PACA
 	 * SVCPU.*  = guest *
+	 * MSR.EE   = 1
 	 *
 	 */
 
+	PPC_LL	r3, GPR4(r1)		/* vcpu pointer */
+
+	/*
+	 * kvmppc_copy_from_svcpu can clobber volatile registers, save
+	 * the exit handler id to the vcpu and restore it from there later.
+	 */
+	stw	r12, VCPU_TRAP(r3)
+
 	/* Transfer reg values from shadow vcpu back to vcpu struct */
 	/* On 64-bit, interrupts are still off at this point */
-	PPC_LL	r3, GPR4(r1)		/* vcpu pointer */
+
 	GET_SHADOW_VCPU(r4)
 	bl	FUNC(kvmppc_copy_from_svcpu)
 	nop
 
 #ifdef CONFIG_PPC_BOOK3S_64
-	/* Re-enable interrupts */
-	ld	r3, HSTATE_HOST_MSR(r13)
-	ori	r3, r3, MSR_EE
-	MTMSR_EERI(r3)
-
 	/*
 	 * Reload kernel SPRG3 value.
 	 * No need to save guest value as usermode can't modify SPRG3.
 	 */
 	ld	r3, PACA_SPRG3(r13)
 	mtspr	SPRN_SPRG3, r3
-
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
 	/* R7 = vcpu */
@@ -177,7 +180,7 @@ kvm_start_lightweight:
 	PPC_STL	r31, VCPU_GPR(R31)(r7)
 
 	/* Pass the exit number as 3rd argument to kvmppc_handle_exit */
-	mr	r5, r12
+	lwz	r5, VCPU_TRAP(r7)
 
 	/* Restore r3 (kvm_run) and r4 (vcpu) */
 	REST_2GPRS(3, r1)
@@ -66,6 +66,7 @@ static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
 	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
+	svcpu->in_use = 0;
 	svcpu_put(svcpu);
 #endif
 	vcpu->cpu = smp_processor_id();
@@ -78,6 +79,9 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_PPC_BOOK3S_64
 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	if (svcpu->in_use) {
+		kvmppc_copy_from_svcpu(vcpu, svcpu);
+	}
 	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
 	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
 	svcpu_put(svcpu);
@@ -110,12 +114,26 @@ void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
 	svcpu->ctr = vcpu->arch.ctr;
 	svcpu->lr  = vcpu->arch.lr;
 	svcpu->pc  = vcpu->arch.pc;
+	svcpu->in_use = true;
 }
 
 /* Copy data touched by real-mode code from shadow vcpu back to vcpu */
 void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
 			    struct kvmppc_book3s_shadow_vcpu *svcpu)
 {
+	/*
+	 * vcpu_put would just call us again because in_use hasn't
+	 * been updated yet.
+	 */
+	preempt_disable();
+
+	/*
+	 * Maybe we were already preempted and synced the svcpu from
+	 * our preempt notifiers. Don't bother touching this svcpu then.
+	 */
+	if (!svcpu->in_use)
+		goto out;
+
 	vcpu->arch.gpr[0] = svcpu->gpr[0];
 	vcpu->arch.gpr[1] = svcpu->gpr[1];
 	vcpu->arch.gpr[2] = svcpu->gpr[2];
@@ -139,6 +157,10 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
 	vcpu->arch.fault_dar   = svcpu->fault_dar;
 	vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
 	vcpu->arch.last_inst   = svcpu->last_inst;
+	svcpu->in_use = false;
+
+out:
+	preempt_enable();
 }
 
 static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
@@ -153,15 +153,11 @@ _GLOBAL(kvmppc_entry_trampoline)
 
 	li	r6, MSR_IR | MSR_DR
 	andc	r6, r5, r6	/* Clear DR and IR in MSR value */
-#ifdef CONFIG_PPC_BOOK3S_32
 	/*
 	 * Set EE in HOST_MSR so that it's enabled when we get into our
-	 * C exit handler function. On 64-bit we delay enabling
-	 * interrupts until we have finished transferring stuff
-	 * to or from the PACA.
+	 * C exit handler function.
 	 */
 	ori	r5, r5, MSR_EE
-#endif
 	mtsrr0	r7
 	mtsrr1	r6
 	RFI
@@ -681,7 +681,7 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
 int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
 	int ret, s;
-	struct thread_struct thread;
+	struct debug_reg debug;
 #ifdef CONFIG_PPC_FPU
 	struct thread_fp_state fp;
 	int fpexc_mode;
@@ -723,9 +723,9 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 #endif
 
 	/* Switch to guest debug context */
-	thread.debug = vcpu->arch.shadow_dbg_reg;
-	switch_booke_debug_regs(&thread);
-	thread.debug = current->thread.debug;
+	debug = vcpu->arch.shadow_dbg_reg;
+	switch_booke_debug_regs(&debug);
+	debug = current->thread.debug;
 	current->thread.debug = vcpu->arch.shadow_dbg_reg;
 
 	kvmppc_fix_ee_before_entry();
@@ -736,8 +736,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	   We also get here with interrupts enabled. */
 
 	/* Switch back to user space debug context */
-	switch_booke_debug_regs(&thread);
-	current->thread.debug = thread.debug;
+	switch_booke_debug_regs(&debug);
+	current->thread.debug = debug;
 
 #ifdef CONFIG_PPC_FPU
 	kvmppc_save_guest_fp(vcpu);
@@ -24,25 +24,25 @@ static int opal_lpc_chip_id = -1;
 static u8 opal_lpc_inb(unsigned long port)
 {
 	int64_t rc;
-	uint32_t data;
+	__be32 data;
 
 	if (opal_lpc_chip_id < 0 || port > 0xffff)
 		return 0xff;
 	rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 1);
-	return rc ? 0xff : data;
+	return rc ? 0xff : be32_to_cpu(data);
 }
 
 static __le16 __opal_lpc_inw(unsigned long port)
 {
 	int64_t rc;
-	uint32_t data;
+	__be32 data;
 
 	if (opal_lpc_chip_id < 0 || port > 0xfffe)
 		return 0xffff;
 	if (port & 1)
 		return (__le16)opal_lpc_inb(port) << 8 | opal_lpc_inb(port + 1);
 	rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 2);
-	return rc ? 0xffff : data;
+	return rc ? 0xffff : be32_to_cpu(data);
 }
 static u16 opal_lpc_inw(unsigned long port)
 {
@@ -52,7 +52,7 @@ static u16 opal_lpc_inw(unsigned long port)
 static __le32 __opal_lpc_inl(unsigned long port)
 {
 	int64_t rc;
-	uint32_t data;
+	__be32 data;
 
 	if (opal_lpc_chip_id < 0 || port > 0xfffc)
 		return 0xffffffff;
@@ -62,7 +62,7 @@ static __le32 __opal_lpc_inl(unsigned long port)
 		       (__le32)opal_lpc_inb(port + 2) <<  8 |
 			       opal_lpc_inb(port + 3);
 	rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 4);
-	return rc ? 0xffffffff : data;
+	return rc ? 0xffffffff : be32_to_cpu(data);
 }
 
 static u32 opal_lpc_inl(unsigned long port)
@@ -96,9 +96,11 @@ static int opal_scom_read(scom_map_t map, u64 reg, u64 *value)
 {
 	struct opal_scom_map *m = map;
 	int64_t rc;
+	__be64 v;
 
 	reg = opal_scom_unmangle(reg);
-	rc = opal_xscom_read(m->chip, m->addr + reg, (uint64_t *)__pa(value));
+	rc = opal_xscom_read(m->chip, m->addr + reg, (__be64 *)__pa(&v));
+	*value = be64_to_cpu(v);
 	return opal_xscom_err_xlate(rc);
 }
@@ -157,7 +157,7 @@ static void parse_ppp_data(struct seq_file *m)
 {
 	struct hvcall_ppp_data ppp_data;
 	struct device_node *root;
-	const int *perf_level;
+	const __be32 *perf_level;
 	int rc;
 
 	rc = h_get_ppp(&ppp_data);
@@ -201,7 +201,7 @@ static void parse_ppp_data(struct seq_file *m)
 	perf_level = of_get_property(root,
 			"ibm,partition-performance-parameters-level",
 				     NULL);
-	if (perf_level && (*perf_level >= 1)) {
+	if (perf_level && (be32_to_cpup(perf_level) >= 1)) {
 		seq_printf(m,
 		    "physical_procs_allocated_to_virtualization=%d\n",
 			   ppp_data.phys_platform_procs);
@@ -435,7 +435,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
 	int partition_potential_processors;
 	int partition_active_processors;
 	struct device_node *rtas_node;
-	const int *lrdrp = NULL;
+	const __be32 *lrdrp = NULL;
 
 	rtas_node = of_find_node_by_path("/rtas");
 	if (rtas_node)
@@ -444,7 +444,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
 	if (lrdrp == NULL) {
 		partition_potential_processors = vdso_data->processorCount;
 	} else {
-		partition_potential_processors = *(lrdrp + 4);
+		partition_potential_processors = be32_to_cpup(lrdrp + 4);
 	}
 	of_node_put(rtas_node);
 
@@ -654,7 +654,7 @@ static int lparcfg_data(struct seq_file *m, void *v)
 	const char *model = "";
 	const char *system_id = "";
 	const char *tmp;
-	const unsigned int *lp_index_ptr;
+	const __be32 *lp_index_ptr;
 	unsigned int lp_index = 0;
 
 	seq_printf(m, "%s %s\n", MODULE_NAME, MODULE_VERS);
@@ -670,7 +670,7 @@ static int lparcfg_data(struct seq_file *m, void *v)
 		lp_index_ptr = of_get_property(rootdn, "ibm,partition-no",
 					NULL);
 		if (lp_index_ptr)
-			lp_index = *lp_index_ptr;
+			lp_index = be32_to_cpup(lp_index_ptr);
 		of_node_put(rootdn);
 	}
 	seq_printf(m, "serial_number=%s\n", system_id);
@@ -130,7 +130,8 @@ static int check_req(struct pci_dev *pdev, int nvec, char *prop_name)
 {
 	struct device_node *dn;
 	struct pci_dn *pdn;
-	const u32 *req_msi;
+	const __be32 *p;
+	u32 req_msi;
 
 	pdn = pci_get_pdn(pdev);
 	if (!pdn)
@@ -138,19 +139,20 @@ static int check_req(struct pci_dev *pdev, int nvec, char *prop_name)
 
 	dn = pdn->node;
 
-	req_msi = of_get_property(dn, prop_name, NULL);
-	if (!req_msi) {
+	p = of_get_property(dn, prop_name, NULL);
+	if (!p) {
 		pr_debug("rtas_msi: No %s on %s\n", prop_name, dn->full_name);
 		return -ENOENT;
 	}
 
-	if (*req_msi < nvec) {
+	req_msi = be32_to_cpup(p);
+	if (req_msi < nvec) {
 		pr_debug("rtas_msi: %s requests < %d MSIs\n", prop_name, nvec);
 
-		if (*req_msi == 0) /* Be paranoid */
+		if (req_msi == 0) /* Be paranoid */
 			return -ENOSPC;
 
-		return *req_msi;
+		return req_msi;
 	}
 
 	return 0;
@@ -171,7 +173,7 @@ static int check_req_msix(struct pci_dev *pdev, int nvec)
 static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total)
 {
 	struct device_node *dn;
-	const u32 *p;
+	const __be32 *p;
 
 	dn = of_node_get(pci_device_to_OF_node(dev));
 	while (dn) {
@@ -179,7 +181,7 @@ static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total)
 		if (p) {
 			pr_debug("rtas_msi: found prop on dn %s\n",
 				dn->full_name);
-			*total = *p;
+			*total = be32_to_cpup(p);
 			return dn;
 		}
 
@@ -232,13 +234,13 @@ struct msi_counts {
 static void *count_non_bridge_devices(struct device_node *dn, void *data)
 {
 	struct msi_counts *counts = data;
-	const u32 *p;
+	const __be32 *p;
 	u32 class;
 
 	pr_debug("rtas_msi: counting %s\n", dn->full_name);
 
 	p = of_get_property(dn, "class-code", NULL);
-	class = p ? *p : 0;
+	class = p ? be32_to_cpup(p) : 0;
 
 	if ((class >> 8) != PCI_CLASS_BRIDGE_PCI)
 		counts->num_devices++;
@@ -249,7 +251,7 @@ static void *count_non_bridge_devices(struct device_node *dn, void *data)
 static void *count_spare_msis(struct device_node *dn, void *data)
 {
 	struct msi_counts *counts = data;
-	const u32 *p;
+	const __be32 *p;
 	int req;
 
 	if (dn == counts->requestor)
@@ -260,11 +262,11 @@ static void *count_spare_msis(struct device_node *dn, void *data)
 		req = 0;
 		p = of_get_property(dn, "ibm,req#msi", NULL);
 		if (p)
-			req = *p;
+			req = be32_to_cpup(p);
 
 		p = of_get_property(dn, "ibm,req#msi-x", NULL);
 		if (p)
-			req = max(req, (int)*p);
+			req = max(req, (int)be32_to_cpup(p));
 	}
 
 	if (req < counts->quota)
@ -43,8 +43,8 @@ static char nvram_buf[NVRW_CNT]; /* assume this is in the first 4GB */
|
||||
static DEFINE_SPINLOCK(nvram_lock);
|
||||
|
||||
struct err_log_info {
|
||||
int error_type;
|
||||
unsigned int seq_num;
|
||||
__be32 error_type;
|
||||
__be32 seq_num;
|
||||
};
|
||||
|
||||
struct nvram_os_partition {
|
||||
@ -79,9 +79,9 @@ static const char *pseries_nvram_os_partitions[] = {
|
||||
};
|
||||
|
||||
struct oops_log_info {
|
||||
u16 version;
|
||||
u16 report_length;
|
||||
u64 timestamp;
|
||||
__be16 version;
|
||||
__be16 report_length;
|
||||
__be64 timestamp;
|
||||
} __attribute__((packed));
|
||||
|
||||
static void oops_to_nvram(struct kmsg_dumper *dumper,
|
||||
@ -291,8 +291,8 @@ int nvram_write_os_partition(struct nvram_os_partition *part, char * buff,
|
||||
length = part->size;
|
||||
}
|
||||
|
||||
info.error_type = err_type;
|
||||
info.seq_num = error_log_cnt;
|
||||
info.error_type = cpu_to_be32(err_type);
|
||||
info.seq_num = cpu_to_be32(error_log_cnt);
|
||||
|
||||
tmp_index = part->index;
|
||||
|
||||
@ -364,8 +364,8 @@ int nvram_read_partition(struct nvram_os_partition *part, char *buff,
|
||||
}
|
||||
|
||||
if (part->os_partition) {
|
||||
*error_log_cnt = info.seq_num;
|
||||
*err_type = info.error_type;
|
||||
*error_log_cnt = be32_to_cpu(info.seq_num);
|
||||
*err_type = be32_to_cpu(info.error_type);
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -529,9 +529,9 @@ static int zip_oops(size_t text_len)
|
||||
pr_err("nvram: logging uncompressed oops/panic report\n");
|
||||
return -1;
|
||||
}
|
||||
oops_hdr->version = OOPS_HDR_VERSION;
|
||||
oops_hdr->report_length = (u16) zipped_len;
|
||||
oops_hdr->timestamp = get_seconds();
|
||||
oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
|
||||
oops_hdr->report_length = cpu_to_be16(zipped_len);
|
||||
oops_hdr->timestamp = cpu_to_be64(get_seconds());
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -574,9 +574,9 @@ static int nvram_pstore_write(enum pstore_type_id type,
|
||||
clobbering_unread_rtas_event())
|
||||
return -1;
|
||||
|
||||
oops_hdr->version = OOPS_HDR_VERSION;
|
||||
oops_hdr->report_length = (u16) size;
|
||||
oops_hdr->timestamp = get_seconds();
|
||||
oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
|
||||
oops_hdr->report_length = cpu_to_be16(size);
|
||||
oops_hdr->timestamp = cpu_to_be64(get_seconds());
|
||||
|
||||
if (compressed)
|
||||
err_type = ERR_TYPE_KERNEL_PANIC_GZ;
|
||||
@ -670,16 +670,16 @@ static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
|
||||
size_t length, hdr_size;
|
||||
|
||||
oops_hdr = (struct oops_log_info *)buff;
|
||||
if (oops_hdr->version < OOPS_HDR_VERSION) {
|
||||
if (be16_to_cpu(oops_hdr->version) < OOPS_HDR_VERSION) {
|
||||
/* Old format oops header had 2-byte record size */
|
||||
hdr_size = sizeof(u16);
|
||||
length = oops_hdr->version;
|
||||
length = be16_to_cpu(oops_hdr->version);
|
||||
time->tv_sec = 0;
|
||||
time->tv_nsec = 0;
|
||||
} else {
|
||||
hdr_size = sizeof(*oops_hdr);
|
||||
length = oops_hdr->report_length;
|
||||
time->tv_sec = oops_hdr->timestamp;
|
||||
length = be16_to_cpu(oops_hdr->report_length);
|
||||
time->tv_sec = be64_to_cpu(oops_hdr->timestamp);
|
||||
time->tv_nsec = 0;
|
||||
}
|
||||
*buf = kmalloc(length, GFP_KERNEL);
|
||||
@ -889,13 +889,13 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
|
||||
kmsg_dump_get_buffer(dumper, false,
|
||||
oops_data, oops_data_sz, &text_len);
|
||||
err_type = ERR_TYPE_KERNEL_PANIC;
|
||||
oops_hdr->version = OOPS_HDR_VERSION;
|
||||
oops_hdr->report_length = (u16) text_len;
|
||||
oops_hdr->timestamp = get_seconds();
|
||||
oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
|
||||
oops_hdr->report_length = cpu_to_be16(text_len);
|
||||
oops_hdr->timestamp = cpu_to_be64(get_seconds());
|
||||
}
|
||||
|
||||
(void) nvram_write_os_partition(&oops_log_partition, oops_buf,
|
||||
(int) (sizeof(*oops_hdr) + oops_hdr->report_length), err_type,
|
||||
(int) (sizeof(*oops_hdr) + text_len), err_type,
|
||||
++oops_count);
|
||||
|
||||
spin_unlock_irqrestore(&lock, flags);
|
||||
|
@@ -113,7 +113,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
{
struct device_node *dn, *pdn;
struct pci_bus *bus;
const uint32_t *pcie_link_speed_stats;
const __be32 *pcie_link_speed_stats;

bus = bridge->bus;

@@ -122,7 +122,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
return 0;

for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) {
pcie_link_speed_stats = (const uint32_t *) of_get_property(pdn,
pcie_link_speed_stats = of_get_property(pdn,
"ibm,pcie-link-speed-stats", NULL);
if (pcie_link_speed_stats)
break;
@@ -135,7 +135,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
return 0;
}

switch (pcie_link_speed_stats[0]) {
switch (be32_to_cpup(pcie_link_speed_stats)) {
case 0x01:
bus->max_bus_speed = PCIE_SPEED_2_5GT;
break;
@@ -147,7 +147,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
break;
}

switch (pcie_link_speed_stats[1]) {
switch (be32_to_cpup(pcie_link_speed_stats)) {
case 0x01:
bus->cur_bus_speed = PCIE_SPEED_2_5GT;
break;

@@ -6,7 +6,7 @@ lib-y = delay.o memmove.o memchr.o \
checksum.o strlen.o div64.o div64-generic.o

# Extracted from libgcc
lib-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \
obj-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \
ashlsi3.o ashrsi3.o ashiftrt.o lshrsi3.o \
udiv_qrnnd.o
@@ -619,7 +619,7 @@ static inline unsigned long pte_present(pte_t pte)
}

#define pte_accessible pte_accessible
static inline unsigned long pte_accessible(pte_t a)
static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
{
return pte_val(a) & _PAGE_VALID;
}
@@ -847,7 +847,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
* SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
* and SUN4V pte layout, so this inline test is fine.
*/
if (likely(mm != &init_mm) && pte_accessible(orig))
if (likely(mm != &init_mm) && pte_accessible(mm, orig))
tlb_batch_add(mm, addr, ptep, orig, fullmm);
}

@@ -26,6 +26,7 @@ config X86
select HAVE_AOUT if X86_32
select HAVE_UNSTABLE_SCHED_CLOCK
select ARCH_SUPPORTS_NUMA_BALANCING
select ARCH_SUPPORTS_INT128 if X86_64
select ARCH_WANTS_PROT_NUMA_PROT_NONE
select HAVE_IDE
select HAVE_OPROFILE

@@ -452,9 +452,16 @@ static inline int pte_present(pte_t a)
}

#define pte_accessible pte_accessible
static inline int pte_accessible(pte_t a)
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
return pte_flags(a) & _PAGE_PRESENT;
if (pte_flags(a) & _PAGE_PRESENT)
return true;

if ((pte_flags(a) & (_PAGE_PROTNONE | _PAGE_NUMA)) &&
mm_tlb_flush_pending(mm))
return true;

return false;
}

static inline int pte_hidden(pte_t pte)

@@ -7,6 +7,12 @@

DECLARE_PER_CPU(int, __preempt_count);

/*
* We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such
* that a decrement hitting 0 means we can and should reschedule.
*/
#define PREEMPT_ENABLED (0 + PREEMPT_NEED_RESCHED)

/*
* We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
* that think a non-zero value indicates we cannot preempt.
@@ -74,6 +80,11 @@ static __always_inline void __preempt_count_sub(int val)
__this_cpu_add_4(__preempt_count, -val);
}

/*
* Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to reschedule
* a decrement which hits zero means we have no preempt_count and should
* reschedule.
*/
static __always_inline bool __preempt_count_dec_and_test(void)
{
GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
@@ -262,11 +262,20 @@ struct cpu_hw_events {
__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

#define EVENT_CONSTRAINT_END \
EVENT_CONSTRAINT(0, 0, 0)
/*
* We define the end marker as having a weight of -1
* to enable blacklisting of events using a counter bitmask
* of zero and thus a weight of zero.
* The end marker has a weight that cannot possibly be
* obtained from counting the bits in the bitmask.
*/
#define EVENT_CONSTRAINT_END { .weight = -1 }

/*
* Check for end marker with weight == -1
*/
#define for_each_event_constraint(e, c) \
for ((e) = (c); (e)->weight; (e)++)
for ((e) = (c); (e)->weight != -1; (e)++)

/*
* Extra registers for specific events.

@@ -83,6 +83,12 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
pte_t pte = gup_get_pte(ptep);
struct page *page;

/* Similar to the PMD case, NUMA hinting must take slow path */
if (pte_numa(pte)) {
pte_unmap(ptep);
return 0;
}

if ((pte_flags(pte) & (mask | _PAGE_SPECIAL)) != mask) {
pte_unmap(ptep);
return 0;
@@ -167,6 +173,13 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
if (pmd_none(pmd) || pmd_trans_splitting(pmd))
return 0;
if (unlikely(pmd_large(pmd))) {
/*
* NUMA hinting faults need to be handled in the GUP
* slowpath for accounting purposes and so that they
* can be serialised against THP migration.
*/
if (pmd_numa(pmd))
return 0;
if (!gup_huge_pmd(pmd, addr, next, write, pages, nr))
return 0;
} else {
@@ -942,6 +942,7 @@ static int erst_clearer(enum pstore_type_id type, u64 id, int count,
static struct pstore_info erst_info = {
.owner = THIS_MODULE,
.name = "erst",
.flags = PSTORE_FLAGS_FRAGILE,
.open = erst_open_pstore,
.close = erst_close_pstore,
.read = erst_reader,

@@ -60,7 +60,7 @@ static int s2mps11_clk_prepare(struct clk_hw *hw)
struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
int ret;

ret = regmap_update_bits(s2mps11->iodev->regmap,
ret = regmap_update_bits(s2mps11->iodev->regmap_pmic,
S2MPS11_REG_RTC_CTRL,
s2mps11->mask, s2mps11->mask);
if (!ret)
@@ -74,7 +74,7 @@ static void s2mps11_clk_unprepare(struct clk_hw *hw)
struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
int ret;

ret = regmap_update_bits(s2mps11->iodev->regmap, S2MPS11_REG_RTC_CTRL,
ret = regmap_update_bits(s2mps11->iodev->regmap_pmic, S2MPS11_REG_RTC_CTRL,
s2mps11->mask, ~s2mps11->mask);

if (!ret)
@@ -174,7 +174,7 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
s2mps11_clk->hw.init = &s2mps11_clks_init[i];
s2mps11_clk->mask = 1 << i;

ret = regmap_read(s2mps11_clk->iodev->regmap,
ret = regmap_read(s2mps11_clk->iodev->regmap_pmic,
S2MPS11_REG_RTC_CTRL, &val);
if (ret < 0)
goto err_reg;

@@ -75,6 +75,7 @@ config CLKSRC_DBX500_PRCMU_SCHED_CLOCK
config CLKSRC_EFM32
bool "Clocksource for Energy Micro's EFM32 SoCs" if !ARCH_EFM32
depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST)
select CLKSRC_MMIO
default ARCH_EFM32
help
Support to use the timers of EFM32 SoCs as clock source and clock

@@ -35,6 +35,5 @@ void __init clocksource_of_init(void)

init_func = match->data;
init_func(np);
of_node_put(np);
}
}

@@ -108,12 +108,11 @@ static void __init add_clocksource(struct device_node *source_timer)

static u64 read_sched_clock(void)
{
return __raw_readl(sched_io_base);
return ~__raw_readl(sched_io_base);
}

static const struct of_device_id sptimer_ids[] __initconst = {
{ .compatible = "picochip,pc3x2-rtc" },
{ .compatible = "snps,dw-apb-timer-sp" },
{ /* Sentinel */ },
};

@@ -151,4 +150,6 @@ static void __init dw_apb_timer_init(struct device_node *timer)
num_called++;
}
CLOCKSOURCE_OF_DECLARE(pc3x2_timer, "picochip,pc3x2-timer", dw_apb_timer_init);
CLOCKSOURCE_OF_DECLARE(apb_timer, "snps,dw-apb-timer-osc", dw_apb_timer_init);
CLOCKSOURCE_OF_DECLARE(apb_timer_osc, "snps,dw-apb-timer-osc", dw_apb_timer_init);
CLOCKSOURCE_OF_DECLARE(apb_timer_sp, "snps,dw-apb-timer-sp", dw_apb_timer_init);
CLOCKSOURCE_OF_DECLARE(apb_timer, "snps,dw-apb-timer", dw_apb_timer_init);

@@ -179,6 +179,9 @@ static void __init sun4i_timer_init(struct device_node *node)
writel(TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M),
timer_base + TIMER_CTL_REG(0));

/* Make sure timer is stopped before playing with interrupts */
sun4i_clkevt_time_stop(0);

ret = setup_irq(irq, &sun4i_timer_irq);
if (ret)
pr_warn("failed to setup irq %d\n", irq);

@@ -255,11 +255,6 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)

ticks_per_jiffy = (timer_clk + HZ / 2) / HZ;

/*
* Set scale and timer for sched_clock.
*/
sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk);

/*
* Setup free-running clocksource timer (interrupts
* disabled).
@@ -270,6 +265,11 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
timer_ctrl_clrset(0, TIMER0_EN | TIMER0_RELOAD_EN |
TIMER0_DIV(TIMER_DIVIDER_SHIFT));

/*
* Set scale and timer for sched_clock.
*/
sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk);

clocksource_mmio_init(timer_base + TIMER0_VAL_OFF,
"armada_370_xp_clocksource",
timer_clk, 300, 32, clocksource_mmio_readl_down);
@@ -62,6 +62,7 @@ config INTEL_IOATDMA
tristate "Intel I/OAT DMA support"
depends on PCI && X86
select DMA_ENGINE
select DMA_ENGINE_RAID
select DCA
help
Enable support for the Intel(R) I/OAT DMA engine present
@@ -112,6 +113,7 @@ config MV_XOR
bool "Marvell XOR engine support"
depends on PLAT_ORION
select DMA_ENGINE
select DMA_ENGINE_RAID
select ASYNC_TX_ENABLE_CHANNEL_SWITCH
---help---
Enable support for the Marvell XOR engine.
@@ -187,6 +189,7 @@ config AMCC_PPC440SPE_ADMA
tristate "AMCC PPC440SPe ADMA support"
depends on 440SPe || 440SP
select DMA_ENGINE
select DMA_ENGINE_RAID
select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
select ASYNC_TX_ENABLE_CHANNEL_SWITCH
help
@@ -352,6 +355,7 @@ config NET_DMA
bool "Network: TCP receive copy offload"
depends on DMA_ENGINE && NET
default (INTEL_IOATDMA || FSL_DMA)
depends on BROKEN
help
This enables the use of DMA engines in the network stack to
offload receive copy-to-user operations, freeing CPU cycles.
@@ -377,4 +381,7 @@ config DMATEST
Simple DMA test client. Say N unless you're debugging a
DMA Device driver.

config DMA_ENGINE_RAID
bool

endif

@@ -347,10 +347,6 @@ static struct device *chan2dev(struct dma_chan *chan)
{
return &chan->dev->device;
}
static struct device *chan2parent(struct dma_chan *chan)
{
return chan->dev->device.parent;
}

#if defined(VERBOSE_DEBUG)
static void vdbg_dump_regs(struct at_dma_chan *atchan)

@@ -912,7 +912,7 @@ struct dmaengine_unmap_pool {
#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
static struct dmaengine_unmap_pool unmap_pool[] = {
__UNMAP_POOL(2),
#if IS_ENABLED(CONFIG_ASYNC_TX_DMA)
#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
__UNMAP_POOL(16),
__UNMAP_POOL(128),
__UNMAP_POOL(256),
@@ -1054,7 +1054,7 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
dma_cookie_t cookie;
unsigned long flags;

unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOIO);
unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
if (!unmap)
return -ENOMEM;

@@ -539,9 +539,9 @@ static int dmatest_func(void *data)

um->len = params->buf_size;
for (i = 0; i < src_cnt; i++) {
unsigned long buf = (unsigned long) thread->srcs[i];
void *buf = thread->srcs[i];
struct page *pg = virt_to_page(buf);
unsigned pg_off = buf & ~PAGE_MASK;
unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;

um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
um->len, DMA_TO_DEVICE);
@@ -559,9 +559,9 @@ static int dmatest_func(void *data)
/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
dsts = &um->addr[src_cnt];
for (i = 0; i < dst_cnt; i++) {
unsigned long buf = (unsigned long) thread->dsts[i];
void *buf = thread->dsts[i];
struct page *pg = virt_to_page(buf);
unsigned pg_off = buf & ~PAGE_MASK;
unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;

dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len,
DMA_BIDIRECTIONAL);
@@ -86,11 +86,6 @@ static void set_desc_cnt(struct fsldma_chan *chan,
hw->count = CPU_TO_DMA(chan, count, 32);
}

static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
return DMA_TO_CPU(chan, desc->hw.count, 32);
}

static void set_desc_src(struct fsldma_chan *chan,
struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
@@ -101,16 +96,6 @@ static void set_desc_src(struct fsldma_chan *chan,
hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
}

static dma_addr_t get_desc_src(struct fsldma_chan *chan,
struct fsl_desc_sw *desc)
{
u64 snoop_bits;

snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits;
}

static void set_desc_dst(struct fsldma_chan *chan,
struct fsl_dma_ld_hw *hw, dma_addr_t dst)
{
@@ -121,16 +106,6 @@ static void set_desc_dst(struct fsldma_chan *chan,
hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
}

static dma_addr_t get_desc_dst(struct fsldma_chan *chan,
struct fsl_desc_sw *desc)
{
u64 snoop_bits;

snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits;
}

static void set_desc_next(struct fsldma_chan *chan,
struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
@@ -408,7 +383,7 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
struct fsl_desc_sw *child;
unsigned long flags;
dma_cookie_t cookie;
dma_cookie_t cookie = -EINVAL;

spin_lock_irqsave(&chan->desc_lock, flags);

@@ -854,10 +829,6 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
struct fsl_desc_sw *desc)
{
struct dma_async_tx_descriptor *txd = &desc->async_tx;
struct device *dev = chan->common.device->dev;
dma_addr_t src = get_desc_src(chan, desc);
dma_addr_t dst = get_desc_dst(chan, desc);
u32 len = get_desc_cnt(chan, desc);

/* Run the link descriptor callback function */
if (txd->callback) {

@@ -54,12 +54,6 @@ static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
hw_desc->desc_command = (1 << 31);
}

static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
{
struct mv_xor_desc *hw_desc = desc->hw_desc;
return hw_desc->phy_dest_addr;
}

static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
u32 byte_count)
{
@@ -787,7 +781,6 @@ static void mv_xor_issue_pending(struct dma_chan *chan)
/*
* Perform a transaction to verify the HW works.
*/
#define MV_XOR_TEST_SIZE 2000

static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
@@ -797,20 +790,21 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
struct dma_chan *dma_chan;
dma_cookie_t cookie;
struct dma_async_tx_descriptor *tx;
struct dmaengine_unmap_data *unmap;
int err = 0;

src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
if (!src)
return -ENOMEM;

dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
if (!dest) {
kfree(src);
return -ENOMEM;
}

/* Fill in src buffer */
for (i = 0; i < MV_XOR_TEST_SIZE; i++)
for (i = 0; i < PAGE_SIZE; i++)
((u8 *) src)[i] = (u8)i;

dma_chan = &mv_chan->dmachan;
@@ -819,14 +813,26 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
goto out;
}

dest_dma = dma_map_single(dma_chan->device->dev, dest,
MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
if (!unmap) {
err = -ENOMEM;
goto free_resources;
}

src_dma = dma_map_single(dma_chan->device->dev, src,
MV_XOR_TEST_SIZE, DMA_TO_DEVICE);
src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
PAGE_SIZE, DMA_TO_DEVICE);
unmap->to_cnt = 1;
unmap->addr[0] = src_dma;

dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
PAGE_SIZE, DMA_FROM_DEVICE);
unmap->from_cnt = 1;
unmap->addr[1] = dest_dma;

unmap->len = PAGE_SIZE;

tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
MV_XOR_TEST_SIZE, 0);
PAGE_SIZE, 0);
cookie = mv_xor_tx_submit(tx);
mv_xor_issue_pending(dma_chan);
async_tx_ack(tx);
@@ -841,8 +847,8 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
}

dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
PAGE_SIZE, DMA_FROM_DEVICE);
if (memcmp(src, dest, PAGE_SIZE)) {
dev_err(dma_chan->device->dev,
"Self-test copy failed compare, disabling\n");
err = -ENODEV;
@@ -850,6 +856,7 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
}

free_resources:
dmaengine_unmap_put(unmap);
mv_xor_free_chan_resources(dma_chan);
out:
kfree(src);
@@ -867,13 +874,15 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
dma_addr_t dest_dma;
struct dma_async_tx_descriptor *tx;
struct dmaengine_unmap_data *unmap;
struct dma_chan *dma_chan;
dma_cookie_t cookie;
u8 cmp_byte = 0;
u32 cmp_word;
int err = 0;
int src_count = MV_XOR_NUM_SRC_TEST;

for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
for (src_idx = 0; src_idx < src_count; src_idx++) {
xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
if (!xor_srcs[src_idx]) {
while (src_idx--)
@@ -890,13 +899,13 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
}

/* Fill in src buffers */
for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
for (src_idx = 0; src_idx < src_count; src_idx++) {
u8 *ptr = page_address(xor_srcs[src_idx]);
for (i = 0; i < PAGE_SIZE; i++)
ptr[i] = (1 << src_idx);
}

for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
for (src_idx = 0; src_idx < src_count; src_idx++)
cmp_byte ^= (u8) (1 << src_idx);

cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
@@ -910,16 +919,29 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
goto out;
}

/* test xor */
dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
DMA_FROM_DEVICE);
unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
GFP_KERNEL);
if (!unmap) {
err = -ENOMEM;
goto free_resources;
}

for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
0, PAGE_SIZE, DMA_TO_DEVICE);
/* test xor */
for (i = 0; i < src_count; i++) {
unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
0, PAGE_SIZE, DMA_TO_DEVICE);
dma_srcs[i] = unmap->addr[i];
unmap->to_cnt++;
}

unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
DMA_FROM_DEVICE);
dest_dma = unmap->addr[src_count];
unmap->from_cnt = 1;
unmap->len = PAGE_SIZE;

tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);
src_count, PAGE_SIZE, 0);

cookie = mv_xor_tx_submit(tx);
mv_xor_issue_pending(dma_chan);
@@ -948,9 +970,10 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
}

free_resources:
dmaengine_unmap_put(unmap);
mv_xor_free_chan_resources(dma_chan);
out:
src_idx = MV_XOR_NUM_SRC_TEST;
src_idx = src_count;
while (src_idx--)
__free_page(xor_srcs[src_idx]);
__free_page(dest);
@@ -1176,6 +1199,7 @@ static int mv_xor_probe(struct platform_device *pdev)
int i = 0;

for_each_child_of_node(pdev->dev.of_node, np) {
struct mv_xor_chan *chan;
dma_cap_mask_t cap_mask;
int irq;

@@ -1193,21 +1217,21 @@ static int mv_xor_probe(struct platform_device *pdev)
goto err_channel_add;
}

xordev->channels[i] =
mv_xor_channel_add(xordev, pdev, i,
cap_mask, irq);
if (IS_ERR(xordev->channels[i])) {
ret = PTR_ERR(xordev->channels[i]);
xordev->channels[i] = NULL;
chan = mv_xor_channel_add(xordev, pdev, i,
cap_mask, irq);
if (IS_ERR(chan)) {
ret = PTR_ERR(chan);
irq_dispose_mapping(irq);
goto err_channel_add;
}

xordev->channels[i] = chan;
i++;
}
} else if (pdata && pdata->channels) {
for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
struct mv_xor_channel_data *cd;
struct mv_xor_chan *chan;
int irq;

cd = &pdata->channels[i];
@@ -1222,13 +1246,14 @@ static int mv_xor_probe(struct platform_device *pdev)
goto err_channel_add;
}

xordev->channels[i] =
mv_xor_channel_add(xordev, pdev, i,
cd->cap_mask, irq);
if (IS_ERR(xordev->channels[i])) {
ret = PTR_ERR(xordev->channels[i]);
chan = mv_xor_channel_add(xordev, pdev, i,
cd->cap_mask, irq);
if (IS_ERR(chan)) {
ret = PTR_ERR(chan);
goto err_channel_add;
}

xordev->channels[i] = chan;
}
}

@@ -2492,12 +2492,9 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)

static inline void _init_desc(struct dma_pl330_desc *desc)
{
desc->pchan = NULL;
desc->req.x = &desc->px;
desc->req.token = desc;
desc->rqcfg.swap = SWAP_NO;
desc->rqcfg.privileged = 0;
desc->rqcfg.insnaccess = 0;
desc->rqcfg.scctl = SCCTRL0;
desc->rqcfg.dcctl = DCCTRL0;
desc->req.cfg = &desc->rqcfg;
@@ -2517,7 +2514,7 @@ static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
if (!pdmac)
return 0;

desc = kmalloc(count * sizeof(*desc), flg);
desc = kcalloc(count, sizeof(*desc), flg);
if (!desc)
return 0;
@@ -532,29 +532,6 @@ static void ppc440spe_desc_init_memcpy(struct ppc440spe_adma_desc_slot *desc,
hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
}

/**
* ppc440spe_desc_init_memset - initialize the descriptor for MEMSET operation
*/
static void ppc440spe_desc_init_memset(struct ppc440spe_adma_desc_slot *desc,
int value, unsigned long flags)
{
struct dma_cdb *hw_desc = desc->hw_desc;

memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
desc->hw_next = NULL;
desc->src_cnt = 1;
desc->dst_cnt = 1;

if (flags & DMA_PREP_INTERRUPT)
set_bit(PPC440SPE_DESC_INT, &desc->flags);
else
clear_bit(PPC440SPE_DESC_INT, &desc->flags);

hw_desc->sg1u = hw_desc->sg1l = cpu_to_le32((u32)value);
hw_desc->sg3u = hw_desc->sg3l = cpu_to_le32((u32)value);
hw_desc->opc = DMA_CDB_OPC_DFILL128;
}

/**
* ppc440spe_desc_set_src_addr - set source address into the descriptor
*/
@@ -1504,8 +1481,6 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions(
struct ppc440spe_adma_chan *chan,
dma_cookie_t cookie)
{
int i;

BUG_ON(desc->async_tx.cookie < 0);
if (desc->async_tx.cookie > 0) {
cookie = desc->async_tx.cookie;
@@ -3898,7 +3873,7 @@ static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev)
ppc440spe_adma_prep_dma_interrupt;
}
pr_info("%s: AMCC(R) PPC440SP(E) ADMA Engine: "
"( %s%s%s%s%s%s%s)\n",
"( %s%s%s%s%s%s)\n",
dev_name(adev->dev),
dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq " : "",
dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask) ? "pq_val " : "",

@@ -406,7 +406,6 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
dma_async_tx_callback callback;
void *param;
struct dma_async_tx_descriptor *txd = &desc->txd;
struct txx9dmac_slave *ds = dc->chan.private;

dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
txd->cookie, desc);

@@ -1623,7 +1623,6 @@ static struct scsi_host_template scsi_driver_template = {
.cmd_per_lun = 1,
.can_queue = 1,
.sdev_attrs = sbp2_scsi_sysfs_attrs,
.no_write_same = 1,
};

MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");

@@ -356,6 +356,7 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
static struct pstore_info efi_pstore_info = {
.owner = THIS_MODULE,
.name = "efi",
.flags = PSTORE_FLAGS_FRAGILE,
.open = efi_pstore_open,
.close = efi_pstore_close,
.read = efi_pstore_read,
@@ -252,7 +252,7 @@ static void msm_gpio_irq_mask(struct irq_data *d)

spin_lock_irqsave(&tlmm_lock, irq_flags);
writel(TARGET_PROC_NONE, GPIO_INTR_CFG_SU(gpio));
clear_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio));
clear_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));
__clear_bit(gpio, msm_gpio.enabled_irqs);
spin_unlock_irqrestore(&tlmm_lock, irq_flags);
}
@@ -264,7 +264,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d)

spin_lock_irqsave(&tlmm_lock, irq_flags);
__set_bit(gpio, msm_gpio.enabled_irqs);
set_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio));
set_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));
writel(TARGET_PROC_SCORPION, GPIO_INTR_CFG_SU(gpio));
spin_unlock_irqrestore(&tlmm_lock, irq_flags);
}

@@ -169,7 +169,8 @@ static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
u32 pending;
unsigned int offset, irqs_handled = 0;

while ((pending = gpio_rcar_read(p, INTDT))) {
while ((pending = gpio_rcar_read(p, INTDT) &
gpio_rcar_read(p, INTMSK))) {
offset = __ffs(pending);
gpio_rcar_write(p, INTCLR, BIT(offset));
generic_handle_irq(irq_find_mapping(p->irq_domain, offset));

@@ -300,7 +300,7 @@ static int twl_direction_in(struct gpio_chip *chip, unsigned offset)
if (offset < TWL4030_GPIO_MAX)
ret = twl4030_set_gpio_direction(offset, 1);
else
ret = -EINVAL;
ret = -EINVAL; /* LED outputs can't be set as input */

if (!ret)
priv->direction &= ~BIT(offset);
@@ -354,11 +354,20 @@ static void twl_set(struct gpio_chip *chip, unsigned offset, int value)
static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value)
{
struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip);
int ret = -EINVAL;
int ret = 0;

mutex_lock(&priv->mutex);
if (offset < TWL4030_GPIO_MAX)
if (offset < TWL4030_GPIO_MAX) {
ret = twl4030_set_gpio_direction(offset, 0);
if (ret) {
mutex_unlock(&priv->mutex);
return ret;
}
}

/*
* LED gpios i.e. offset >= TWL4030_GPIO_MAX are always output
*/

priv->direction |= BIT(offset);
mutex_unlock(&priv->mutex);
@@ -103,6 +103,7 @@ void armada_drm_queue_unref_work(struct drm_device *,
extern const struct drm_mode_config_funcs armada_drm_mode_config_funcs;

int armada_fbdev_init(struct drm_device *);
void armada_fbdev_lastclose(struct drm_device *);
void armada_fbdev_fini(struct drm_device *);

int armada_overlay_plane_create(struct drm_device *, unsigned long);

@@ -321,6 +321,11 @@ static struct drm_ioctl_desc armada_ioctls[] = {
DRM_UNLOCKED),
};

static void armada_drm_lastclose(struct drm_device *dev)
{
armada_fbdev_lastclose(dev);
}

static const struct file_operations armada_drm_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
@@ -337,7 +342,7 @@ static struct drm_driver armada_drm_driver = {
.open = NULL,
.preclose = NULL,
.postclose = NULL,
.lastclose = NULL,
.lastclose = armada_drm_lastclose,
.unload = armada_drm_unload,
.get_vblank_counter = drm_vblank_count,
.enable_vblank = armada_drm_enable_vblank,

@@ -105,9 +105,9 @@ static int armada_fb_create(struct drm_fb_helper *fbh,
drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth);
drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height);

DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08x\n",
dfb->fb.width, dfb->fb.height,
dfb->fb.bits_per_pixel, obj->phys_addr);
DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08llx\n",
dfb->fb.width, dfb->fb.height, dfb->fb.bits_per_pixel,
(unsigned long long)obj->phys_addr);

return 0;

@@ -177,6 +177,16 @@ int armada_fbdev_init(struct drm_device *dev)
return ret;
}

void armada_fbdev_lastclose(struct drm_device *dev)
{
struct armada_private *priv = dev->dev_private;

drm_modeset_lock_all(dev);
if (priv->fbdev)
drm_fb_helper_restore_fbdev_mode(priv->fbdev);
drm_modeset_unlock_all(dev);
}

void armada_fbdev_fini(struct drm_device *dev)
{
struct armada_private *priv = dev->dev_private;
@@ -192,11 +202,11 @@ void armada_fbdev_fini(struct drm_device *dev)
framebuffer_release(info);
}

drm_fb_helper_fini(fbh);

if (fbh->fb)
fbh->fb->funcs->destroy(fbh->fb);

drm_fb_helper_fini(fbh);

priv->fbdev = NULL;
}
}

@@ -172,8 +172,9 @@ armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
obj->dev_addr = obj->linear->start;
}

DRM_DEBUG_DRIVER("obj %p phys %#x dev %#x\n",
obj, obj->phys_addr, obj->dev_addr);
DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
(unsigned long long)obj->phys_addr,
(unsigned long long)obj->dev_addr);

return 0;
}
@@ -557,7 +558,6 @@ armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
* refcount on the gem object itself.
*/
drm_gem_object_reference(obj);
dma_buf_put(buf);
return obj;
}
}
@@ -573,6 +573,7 @@ armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
}

dobj->obj.import_attach = attach;
get_dma_buf(buf);

/*
* Don't call dma_buf_map_attachment() here - it maps the

@@ -68,6 +68,8 @@
#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
/* Force reduced-blanking timings for detailed modes */
#define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7)
/* Force 8bpc */
#define EDID_QUIRK_FORCE_8BPC (1 << 8)

struct detailed_mode_closure {
struct drm_connector *connector;
@@ -128,6 +130,9 @@ static struct edid_quirk {

/* Medion MD 30217 PG */
{ "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },

/* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
{ "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
};

/*
@@ -3435,6 +3440,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)

drm_add_display_info(edid, &connector->display_info);

if (quirks & EDID_QUIRK_FORCE_8BPC)
connector->display_info.bpc = 8;

return num_modes;
}
EXPORT_SYMBOL(drm_add_edid_modes);

@@ -566,11 +566,11 @@ err_unload:
if (dev->driver->unload)
dev->driver->unload(dev);
err_primary_node:
drm_put_minor(dev->primary);
drm_unplug_minor(dev->primary);
err_render_node:
drm_put_minor(dev->render);
drm_unplug_minor(dev->render);
err_control_node:
drm_put_minor(dev->control);
drm_unplug_minor(dev->control);
err_agp:
if (dev->driver->bus->agp_destroy)
dev->driver->bus->agp_destroy(dev);
@@ -83,6 +83,14 @@ void i915_update_dri1_breadcrumb(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;

/*
* The dri breadcrumb update races against the drm master disappearing.
* Instead of trying to fix this (this is by far not the only ums issue)
* just don't do the update in kms mode.
*/
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;

if (dev->primary->master) {
master_priv = dev->primary->master->driver_priv;
if (master_priv->sarea_priv)
@@ -1490,16 +1498,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->uncore.lock);
spin_lock_init(&dev_priv->mm.object_stat_lock);
mutex_init(&dev_priv->dpio_lock);
mutex_init(&dev_priv->rps.hw_lock);
mutex_init(&dev_priv->modeset_restore_lock);

mutex_init(&dev_priv->pc8.lock);
dev_priv->pc8.requirements_met = false;
dev_priv->pc8.gpu_idle = false;
dev_priv->pc8.irqs_disabled = false;
dev_priv->pc8.enabled = false;
dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
intel_pm_setup(dev);

intel_display_crc_init(dev);

@@ -1603,7 +1604,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
}

intel_irq_init(dev);
intel_pm_init(dev);
intel_uncore_sanitize(dev);

/* Try to make sure MCHBAR is enabled before poking at it */
@@ -1848,8 +1848,10 @@ void i915_driver_lastclose(struct drm_device * dev)

void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
mutex_lock(&dev->struct_mutex);
i915_gem_context_close(dev, file_priv);
i915_gem_release(dev, file_priv);
mutex_unlock(&dev->struct_mutex);
}

void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)

@@ -651,6 +651,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
intel_modeset_init_hw(dev);

drm_modeset_lock_all(dev);
drm_mode_config_reset(dev);
intel_modeset_setup_hw_state(dev, true);
drm_modeset_unlock_all(dev);

@@ -1755,8 +1755,13 @@ struct drm_i915_file_private {
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
((dev)->pdev->device & 0xFF00) == 0x0C00)
#define IS_ULT(dev) (IS_HASWELL(dev) && \
#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \
(((dev)->pdev->device & 0xf) == 0x2 || \
((dev)->pdev->device & 0xf) == 0x6 || \
((dev)->pdev->device & 0xf) == 0xe))
#define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \
((dev)->pdev->device & 0xFF00) == 0x0A00)
#define IS_ULT(dev) (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
((dev)->pdev->device & 0x00F0) == 0x0020)
#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
@@ -1901,9 +1906,7 @@ void i915_queue_hangcheck(struct drm_device *dev);
void i915_handle_error(struct drm_device *dev, bool wedged);

extern void intel_irq_init(struct drm_device *dev);
extern void intel_pm_init(struct drm_device *dev);
extern void intel_hpd_init(struct drm_device *dev);
extern void intel_pm_init(struct drm_device *dev);

extern void intel_uncore_sanitize(struct drm_device *dev);
extern void intel_uncore_early_sanitize(struct drm_device *dev);

@@ -347,10 +347,8 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;

mutex_lock(&dev->struct_mutex);
idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
idr_destroy(&file_priv->context_idr);
mutex_unlock(&dev->struct_mutex);
}

static struct i915_hw_context *
@@ -423,11 +421,21 @@ static int do_switch(struct i915_hw_context *to)
if (ret)
return ret;

/* Clear this page out of any CPU caches for coherent swap-in/out. Note
/*
* Pin can switch back to the default context if we end up calling into
* evict_everything - as a last ditch gtt defrag effort that also
* switches to the default context. Hence we need to reload from here.
*/
from = ring->last_context;

/*
* Clear this page out of any CPU caches for coherent swap-in/out. Note
* that thanks to write = false in this call and us not setting any gpu
* write domains when putting a context object onto the active list
* (when switching away from it), this won't block.
* XXX: We need a real interface to do this instead of trickery. */
*
* XXX: We need a real interface to do this instead of trickery.
*/
ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
if (ret) {
i915_gem_object_unpin(to->obj);

@@ -88,6 +88,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
} else
drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);

search_again:
/* First see if there is a large enough contiguous idle region... */
list_for_each_entry(vma, &vm->inactive_list, mm_list) {
if (mark_free(vma, &unwind_list))
@@ -115,10 +116,17 @@ none:
list_del_init(&vma->exec_list);
}

/* We expect the caller to unpin, evict all and try again, or give up.
* So calling i915_gem_evict_vm() is unnecessary.
/* Can we unpin some objects such as idle hw contents,
* or pending flips?
*/
return -ENOSPC;
ret = nonblocking ? -ENOSPC : i915_gpu_idle(dev);
if (ret)
return ret;

/* Only idle the GPU and repeat the search once */
i915_gem_retire_requests(dev);
nonblocking = true;
goto search_again;

found:
/* drm_mm doesn't allow any other other operations while

@@ -337,8 +337,8 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
kfree(ppgtt->gen8_pt_dma_addr[i]);
}

__free_pages(ppgtt->gen8_pt_pages, ppgtt->num_pt_pages << PAGE_SHIFT);
__free_pages(ppgtt->pd_pages, ppgtt->num_pd_pages << PAGE_SHIFT);
__free_pages(ppgtt->gen8_pt_pages, get_order(ppgtt->num_pt_pages << PAGE_SHIFT));
__free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
}

/**
@@ -1241,6 +1241,11 @@ static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
if (bdw_gmch_ctl)
bdw_gmch_ctl = 1 << bdw_gmch_ctl;
if (bdw_gmch_ctl > 4) {
WARN_ON(!i915_preliminary_hw_support);
return 4<<20;
}

return bdw_gmch_ctl << 20;
}

@@ -9135,7 +9135,7 @@ intel_pipe_config_compare(struct drm_device *dev,
if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);

if (!IS_HASWELL(dev)) {
if (!HAS_DDI(dev)) {
PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
}
@@ -11036,8 +11036,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
}

intel_modeset_check_state(dev);

drm_mode_config_reset(dev);
}

void intel_modeset_gem_init(struct drm_device *dev)
@@ -11046,7 +11044,10 @@ void intel_modeset_gem_init(struct drm_device *dev)

intel_setup_overlay(dev);

drm_modeset_lock_all(dev);
drm_mode_config_reset(dev);
intel_modeset_setup_hw_state(dev, false);
drm_modeset_unlock_all(dev);
}

void intel_modeset_cleanup(struct drm_device *dev)
@@ -821,6 +821,7 @@ void intel_update_sprite_watermarks(struct drm_plane *plane,
uint32_t sprite_width, int pixel_size,
bool enabled, bool scaled);
void intel_init_pm(struct drm_device *dev);
void intel_pm_setup(struct drm_device *dev);
bool intel_fbc_enabled(struct drm_device *dev);
void intel_update_fbc(struct drm_device *dev);
void intel_gpu_ips_init(struct drm_i915_private *dev_priv);

@@ -451,7 +451,9 @@ static u32 intel_panel_get_backlight(struct drm_device *dev,

spin_lock_irqsave(&dev_priv->backlight.lock, flags);

if (HAS_PCH_SPLIT(dev)) {
if (IS_BROADWELL(dev)) {
val = I915_READ(BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
} else if (HAS_PCH_SPLIT(dev)) {
val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
} else {
if (IS_VALLEYVIEW(dev))
@@ -479,6 +481,13 @@ static u32 intel_panel_get_backlight(struct drm_device *dev,
return val;
}

static void intel_bdw_panel_set_backlight(struct drm_device *dev, u32 level)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val = I915_READ(BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
I915_WRITE(BLC_PWM_PCH_CTL2, val | level);
}

static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -496,7 +505,9 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev,
DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
level = intel_panel_compute_brightness(dev, pipe, level);

if (HAS_PCH_SPLIT(dev))
if (IS_BROADWELL(dev))
return intel_bdw_panel_set_backlight(dev, level);
else if (HAS_PCH_SPLIT(dev))
return intel_pch_panel_set_backlight(dev, level);

if (is_backlight_combination_mode(dev)) {
@@ -666,7 +677,16 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
POSTING_READ(reg);
I915_WRITE(reg, tmp | BLM_PWM_ENABLE);

if (HAS_PCH_SPLIT(dev) &&
if (IS_BROADWELL(dev)) {
/*
* Broadwell requires PCH override to drive the PCH
* backlight pin. The above will configure the CPU
* backlight pin, which we don't plan to use.
*/
tmp = I915_READ(BLC_PWM_PCH_CTL1);
tmp |= BLM_PCH_OVERRIDE_ENABLE | BLM_PCH_PWM_ENABLE;
I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
} else if (HAS_PCH_SPLIT(dev) &&
!(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) {
tmp = I915_READ(BLC_PWM_PCH_CTL1);
tmp |= BLM_PCH_PWM_ENABLE;

@@ -5685,6 +5685,7 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
bool is_enabled, enable_requested;
unsigned long irqflags;
uint32_t tmp;

tmp = I915_READ(HSW_PWR_WELL_DRIVER);
@@ -5702,9 +5703,24 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
HSW_PWR_WELL_STATE_ENABLED), 20))
DRM_ERROR("Timeout enabling power well\n");
}

if (IS_BROADWELL(dev)) {
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
dev_priv->de_irq_mask[PIPE_B]);
I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
~dev_priv->de_irq_mask[PIPE_B] |
GEN8_PIPE_VBLANK);
I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
dev_priv->de_irq_mask[PIPE_C]);
I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
~dev_priv->de_irq_mask[PIPE_C] |
GEN8_PIPE_VBLANK);
POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
} else {
if (enable_requested) {
unsigned long irqflags;
enum pipe p;

I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
@@ -6130,10 +6146,19 @@ int vlv_freq_opcode(int ddr_freq, int val)
return val;
}

void intel_pm_init(struct drm_device *dev)
void intel_pm_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;

mutex_init(&dev_priv->rps.hw_lock);

mutex_init(&dev_priv->pc8.lock);
dev_priv->pc8.requirements_met = false;
dev_priv->pc8.gpu_idle = false;
dev_priv->pc8.irqs_disabled = false;
dev_priv->pc8.enabled = false;
dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
intel_gen6_powersave_work);
}

@@ -965,6 +965,7 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
} else if (IS_GEN6(ring->dev)) {
mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
} else {
/* XXX: gen8 returns to sanity */
mmio = RING_HWS_PGA(ring->mmio_base);
}

@@ -784,6 +784,7 @@ static int gen6_do_reset(struct drm_device *dev)
int intel_gpu_reset(struct drm_device *dev)
{
switch (INTEL_INFO(dev)->gen) {
case 8:
case 7:
case 6: return gen6_do_reset(dev);
case 5: return ironlake_do_reset(dev);

@@ -858,6 +858,12 @@ static int nouveau_pmops_runtime_suspend(struct device *dev)
if (nouveau_runtime_pm == 0)
return -EINVAL;

/* are we optimus enabled? */
if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
return -EINVAL;
}

nv_debug_level(SILENT);
drm_kms_helper_poll_disable(drm_dev);
vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
@ -1196,7 +1196,9 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
|
||||
} else if ((rdev->family == CHIP_TAHITI) ||
|
||||
(rdev->family == CHIP_PITCAIRN))
|
||||
fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);
|
||||
else if (rdev->family == CHIP_VERDE)
|
||||
else if ((rdev->family == CHIP_VERDE) ||
|
||||
(rdev->family == CHIP_OLAND) ||
|
||||
(rdev->family == CHIP_HAINAN)) /* for completeness. HAINAN has no display hw */
|
||||
fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16);
|
||||
|
||||
switch (radeon_crtc->crtc_id) {
|
||||
|
@ -458,7 +458,7 @@ int cik_copy_dma(struct radeon_device *rdev,
|
||||
radeon_ring_write(ring, 0); /* src/dst endian swap */
|
||||
radeon_ring_write(ring, src_offset & 0xffffffff);
|
||||
radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff);
|
||||
radeon_ring_write(ring, dst_offset & 0xfffffffc);
|
||||
radeon_ring_write(ring, dst_offset & 0xffffffff);
|
||||
radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff);
|
||||
src_offset += cur_size_in_bytes;
|
||||
dst_offset += cur_size_in_bytes;
|
||||
|
@ -2021,7 +2021,7 @@ static struct radeon_asic ci_asic = {
|
||||
.hdmi_setmode = &evergreen_hdmi_setmode,
|
||||
},
|
||||
.copy = {
|
||||
.blit = NULL,
|
||||
.blit = &cik_copy_cpdma,
|
||||
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
|
||||
.dma = &cik_copy_dma,
|
||||
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
|
||||
@ -2122,7 +2122,7 @@ static struct radeon_asic kv_asic = {
|
||||
.hdmi_setmode = &evergreen_hdmi_setmode,
|
||||
},
|
||||
.copy = {
|
||||
.blit = NULL,
|
||||
.blit = &cik_copy_cpdma,
|
||||
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
|
||||
.dma = &cik_copy_dma,
|
||||
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
|
||||
|
@ -508,15 +508,6 @@ static const struct file_operations radeon_driver_kms_fops = {
|
||||
#endif
|
||||
};
|
||||
|
||||
|
||||
static void
|
||||
radeon_pci_shutdown(struct pci_dev *pdev)
|
||||
{
|
||||
struct drm_device *dev = pci_get_drvdata(pdev);
|
||||
|
||||
radeon_driver_unload_kms(dev);
|
||||
}
|
||||
|
||||
static struct drm_driver kms_driver = {
|
||||
.driver_features =
|
||||
DRIVER_USE_AGP |
|
||||
@ -586,7 +577,6 @@ static struct pci_driver radeon_kms_pci_driver = {
|
||||
.probe = radeon_pci_probe,
|
||||
.remove = radeon_pci_remove,
|
||||
.driver.pm = &radeon_pm_ops,
|
||||
.shutdown = radeon_pci_shutdown,
|
||||
};
|
||||
|
||||
static int __init radeon_init(void)
|
||||
|
@ -162,6 +162,16 @@ static void rs690_mc_init(struct radeon_device *rdev)
|
||||
base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
|
||||
base = G_000100_MC_FB_START(base) << 16;
|
||||
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
|
||||
/* Some boards seem to be configured for 128MB of sideport memory,
|
||||
* but really only have 64MB. Just skip the sideport and use
|
||||
* UMA memory.
|
||||
*/
|
||||
if (rdev->mc.igp_sideport_enabled &&
|
||||
(rdev->mc.real_vram_size == (384 * 1024 * 1024))) {
|
||||
base += 128 * 1024 * 1024;
|
||||
rdev->mc.real_vram_size -= 128 * 1024 * 1024;
|
||||
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
|
||||
}
|
||||
|
||||
/* Use K8 direct mapping for fast fb access. */
|
||||
rdev->fastfb_working = false;
|
||||
|
@@ -169,9 +169,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 
 	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
-	    drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
-	page_last = vma_pages(vma) +
-	    drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
+	    vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
+	page_last = vma_pages(vma) + vma->vm_pgoff -
+	    drm_vma_node_start(&bo->vma_node);
 
 	if (unlikely(page_offset >= bo->num_pages)) {
 		retval = VM_FAULT_SIGBUS;
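
The sign of the vm_pgoff term is the whole fix. With invented page numbers: the faulting offset into the buffer object is the distance from the object's base page (drm_vma_node_start()) to the page the vma actually maps (vma->vm_pgoff), and the old code subtracted in the wrong direction:

#include <stdio.h>

int main(void)
{
	long node_start = 0x1000; /* drm_vma_node_start(): BO base page */
	long vm_pgoff   = 0x1002; /* vma maps the BO from its third page */
	long fault_page = 0;      /* (address - vm_start) >> PAGE_SHIFT */

	long old_off = fault_page + node_start - vm_pgoff; /* -2: bogus */
	long new_off = fault_page + vm_pgoff - node_start; /* +2: correct */

	printf("old=%ld new=%ld\n", old_off, new_off);
	return 0;
}
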
@@ -68,6 +68,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 			  SVGA_FIFO_3D_HWVERSION));
 		break;
 	}
+	case DRM_VMW_PARAM_MAX_SURF_MEMORY:
+		param->value = dev_priv->memory_size;
+		break;
 	default:
 		DRM_ERROR("Illegal vmwgfx get param request: %d\n",
 			  param->param);
@@ -200,7 +200,13 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
 			.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
 			.address = 1,
 			.scan_index = 1,
-			.scan_type = IIO_ST('u', 12, 16, 0),
+			.scan_type = {
+				.sign = 'u',
+				.realbits = 12,
+				.storagebits = 16,
+				.shift = 0,
+				.endianness = IIO_BE,
+			},
 		},
 		.channel[1] = {
 			.type = IIO_VOLTAGE,
@@ -210,7 +216,13 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
 			.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
 			.address = 0,
 			.scan_index = 0,
-			.scan_type = IIO_ST('u', 12, 16, 0),
+			.scan_type = {
+				.sign = 'u',
+				.realbits = 12,
+				.storagebits = 16,
+				.shift = 0,
+				.endianness = IIO_BE,
+			},
 		},
 		.channel[2] = IIO_CHAN_SOFT_TIMESTAMP(2),
 		.int_vref_mv = 2500,
@@ -651,7 +651,12 @@ static const struct iio_chan_spec adis16448_channels[] = {
 		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
 		.address = ADIS16448_BARO_OUT,
 		.scan_index = ADIS16400_SCAN_BARO,
-		.scan_type = IIO_ST('s', 16, 16, 0),
+		.scan_type = {
+			.sign = 's',
+			.realbits = 16,
+			.storagebits = 16,
+			.endianness = IIO_BE,
+		},
 	},
 	ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12),
 	IIO_CHAN_SOFT_TIMESTAMP(11)
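
All three IIO hunks follow the same pattern: the IIO_ST() helper macro has no field for byte order, so channels declared with it default to CPU endianness, while these ADCs actually deliver big-endian samples. Writing the struct members out lets the driver say so. Schematically (field names as in the kernel's scan_type; this is a sketch, not a hunk from the patch):

/* before: endianness cannot be expressed, defaults to IIO_CPU */
.scan_type = IIO_ST('u', 12, 16, 0),

/* after: same layout, plus an explicit byte order */
.scan_type = {
	.sign = 'u',            /* unsigned samples */
	.realbits = 12,         /* significant bits per sample */
	.storagebits = 16,      /* bits used in the scan buffer */
	.shift = 0,             /* no shift within the storage word */
	.endianness = IIO_BE,   /* device sends big-endian data */
},
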
@@ -387,7 +387,7 @@ static int cm36651_read_int_time(struct cm36651_data *cm36651,
 		return -EINVAL;
 	}
 
-	return IIO_VAL_INT_PLUS_MICRO;
+	return IIO_VAL_INT;
 }
 
 static int cm36651_write_int_time(struct cm36651_data *cm36651,
@@ -207,7 +207,9 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn)
 	isert_conn->conn_rx_descs = NULL;
 }
 
+static void isert_cq_tx_work(struct work_struct *);
 static void isert_cq_tx_callback(struct ib_cq *, void *);
+static void isert_cq_rx_work(struct work_struct *);
 static void isert_cq_rx_callback(struct ib_cq *, void *);
 
 static int
@@ -259,26 +261,36 @@ isert_create_device_ib_res(struct isert_device *device)
 		cq_desc[i].device = device;
 		cq_desc[i].cq_index = i;
 
+		INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work);
 		device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
 						isert_cq_rx_callback,
 						isert_cq_event_callback,
 						(void *)&cq_desc[i],
 						ISER_MAX_RX_CQ_LEN, i);
-		if (IS_ERR(device->dev_rx_cq[i]))
+		if (IS_ERR(device->dev_rx_cq[i])) {
+			ret = PTR_ERR(device->dev_rx_cq[i]);
+			device->dev_rx_cq[i] = NULL;
 			goto out_cq;
+		}
 
+		INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work);
 		device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
 						isert_cq_tx_callback,
 						isert_cq_event_callback,
 						(void *)&cq_desc[i],
 						ISER_MAX_TX_CQ_LEN, i);
-		if (IS_ERR(device->dev_tx_cq[i]))
+		if (IS_ERR(device->dev_tx_cq[i])) {
+			ret = PTR_ERR(device->dev_tx_cq[i]);
+			device->dev_tx_cq[i] = NULL;
 			goto out_cq;
+		}
 
-		if (ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP))
+		ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
+		if (ret)
 			goto out_cq;
 
-		if (ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP))
+		ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
+		if (ret)
 			goto out_cq;
 	}
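
The ib_create_cq() error paths above use the kernel's ERR_PTR convention: a pointer-returning API encodes a negative errno in the pointer value itself, so the caller must test with IS_ERR(), recover the code with PTR_ERR(), and clear its cached copy so a later unwind does not treat the encoded errno as a real object. A self-contained userspace re-implementation of the pattern (the helper definitions mirror the kernel's; create_cq is invented):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *create_cq(int fail)     /* stand-in for ib_create_cq() */
{
	static int dummy;
	return fail ? ERR_PTR(-ENOMEM) : (void *)&dummy;
}

int main(void)
{
	void *cq = create_cq(1);

	if (IS_ERR(cq)) {
		long ret = PTR_ERR(cq);  /* recover -ENOMEM */
		cq = NULL;               /* never destroy an ERR_PTR later */
		fprintf(stderr, "create_cq failed: %ld\n", ret);
		return 1;
	}
	return 0;
}
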
@@ -1724,7 +1736,6 @@ isert_cq_tx_callback(struct ib_cq *cq, void *context)
 {
 	struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
 
-	INIT_WORK(&cq_desc->cq_tx_work, isert_cq_tx_work);
 	queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
 }
 
@@ -1768,7 +1779,6 @@ isert_cq_rx_callback(struct ib_cq *cq, void *context)
 {
 	struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
 
-	INIT_WORK(&cq_desc->cq_rx_work, isert_cq_rx_work);
 	queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
 }
 
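
The two callback hunks remove a re-initialization, which is the point of the "Move INIT_WORK setup into isert_create_device_ib_res" patch from the shortlog: INIT_WORK() must run once, at setup time, because re-initializing a work_struct from the completion callback races with an instance of that same work that may already be queued or running. A kernel-style sketch of the resulting shape (the my_* names are invented, and this is not a buildable unit):

struct my_ctx {
	struct work_struct work;
};

static void my_work_fn(struct work_struct *work);

static void my_setup(struct my_ctx *ctx)
{
	INIT_WORK(&ctx->work, my_work_fn);  /* once, before IRQs can fire */
}

static void my_completion_callback(struct my_ctx *ctx)
{
	queue_work(system_wq, &ctx->work);  /* only queue from the callback */
}
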
@@ -149,8 +149,9 @@ static void intc_irqpin_read_modify_write(struct intc_irqpin_priv *p,
 static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p,
 					 int irq, int do_mask)
 {
-	int bitfield_width = 4; /* PRIO assumed to have fixed bitfield width */
-	int shift = (7 - irq) * bitfield_width; /* PRIO assumed to be 32-bit */
+	/* The PRIO register is assumed to be 32-bit with fixed 4-bit fields. */
+	int bitfield_width = 4;
+	int shift = 32 - (irq + 1) * bitfield_width;
 
 	intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_PRIO,
 				      shift, bitfield_width,
@@ -159,8 +160,9 @@ static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p,
 
 static int intc_irqpin_set_sense(struct intc_irqpin_priv *p, int irq, int value)
 {
+	/* The SENSE register is assumed to be 32-bit. */
 	int bitfield_width = p->config.sense_bitfield_width;
-	int shift = (7 - irq) * bitfield_width; /* SENSE assumed to be 32-bit */
+	int shift = 32 - (irq + 1) * bitfield_width;
 
 	dev_dbg(&p->pdev->dev, "sense irq = %d, mode = %d\n", irq, value);
 
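
The two shift formulas only agree when the bitfield is 4 bits wide: (7 - irq) * w equals 32 - (irq + 1) * w exactly when 8 * w == 32. That is why PRIO, with its fixed 4-bit fields, kept working while SENSE, whose sense_bitfield_width varies per SoC, computed wrong shifts. A quick check:

#include <stdio.h>

int main(void)
{
	int widths[] = { 2, 4, 8 };
	int irq = 1;

	for (int i = 0; i < 3; i++) {
		int w = widths[i];
		int old_shift = (7 - irq) * w;       /* assumes 8 fields */
		int new_shift = 32 - (irq + 1) * w;  /* true for 32-bit regs */
		printf("width=%d: old=%2d new=%2d%s\n", w, old_shift, new_shift,
		       old_shift == new_shift ? "" : "  <- old formula wrong");
	}
	return 0;
}
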
@@ -625,6 +625,7 @@ static int ems_usb_start(struct ems_usb *dev)
 			usb_unanchor_urb(urb);
 			usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf,
 					  urb->transfer_dma);
+			usb_free_urb(urb);
 			break;
 		}
 
@@ -798,8 +799,8 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
 	 * allowed (MAX_TX_URBS).
 	 */
 	if (!context) {
 		usb_unanchor_urb(urb);
 		usb_free_coherent(dev->udev, size, buf, urb->transfer_dma);
+		usb_free_urb(urb);
 
 		netdev_warn(netdev, "couldn't find free context\n");
-
@@ -927,6 +927,9 @@ static int pcan_usb_pro_init(struct peak_usb_device *dev)
 	/* set LED in default state (end of init phase) */
 	pcan_usb_pro_set_led(dev, 0, 1);
 
+	kfree(bi);
+	kfree(fi);
+
 	return 0;
 
 err_out:
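
The three CAN USB hunks plug the same kind of leak: every resource taken on a transmit or init path needs a matching release on the failure path, and an URB's last reference is only dropped by usb_free_urb(). A kernel-style sketch of the unwind order (the example_tx name is invented, and the fragment is not buildable stand-alone):

static int example_tx(struct usb_device *udev, size_t size)
{
	struct urb *urb;
	void *buf;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		return -ENOMEM;

	buf = usb_alloc_coherent(udev, size, GFP_ATOMIC, &urb->transfer_dma);
	if (!buf) {
		usb_free_urb(urb);  /* release in reverse order of acquisition */
		return -ENOMEM;
	}

	/* ... on any later failure: */
	usb_unanchor_urb(urb);
	usb_free_coherent(udev, size, buf, urb->transfer_dma);
	usb_free_urb(urb);          /* the release the fixes add */
	return -EIO;
}
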
@@ -447,8 +447,9 @@ irqreturn_t qlcnic_83xx_intr(int irq, void *data)
 
 	qlcnic_83xx_poll_process_aen(adapter);
 
-	if (ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
-		ahw->diag_cnt++;
+	if (ahw->diag_test) {
+		if (ahw->diag_test == QLCNIC_INTERRUPT_TEST)
+			ahw->diag_cnt++;
 		qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
 		return IRQ_HANDLED;
 	}
@@ -1345,11 +1346,6 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
 	}
 
 	if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
-		/* disable and free mailbox interrupt */
-		if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
-			qlcnic_83xx_enable_mbx_poll(adapter);
-			qlcnic_83xx_free_mbx_intr(adapter);
-		}
 		adapter->ahw->loopback_state = 0;
 		adapter->ahw->hw_ops->setup_link_event(adapter, 1);
 	}
@@ -1363,33 +1359,20 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
 {
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 	struct qlcnic_host_sds_ring *sds_ring;
-	int ring, err;
+	int ring;
 
 	clear_bit(__QLCNIC_DEV_UP, &adapter->state);
 	if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
 		for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
 			sds_ring = &adapter->recv_ctx->sds_rings[ring];
-			qlcnic_83xx_disable_intr(adapter, sds_ring);
-			if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
-				qlcnic_83xx_enable_mbx_poll(adapter);
+			if (adapter->flags & QLCNIC_MSIX_ENABLED)
+				qlcnic_83xx_disable_intr(adapter, sds_ring);
 		}
 	}
 
 	qlcnic_fw_destroy_ctx(adapter);
 	qlcnic_detach(adapter);
 
-	if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
-		if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
-			err = qlcnic_83xx_setup_mbx_intr(adapter);
-			qlcnic_83xx_disable_mbx_poll(adapter);
-			if (err) {
-				dev_err(&adapter->pdev->dev,
-					"%s: failed to setup mbx interrupt\n",
-					__func__);
-				goto out;
-			}
-		}
-	}
 	adapter->ahw->diag_test = 0;
 	adapter->drv_sds_rings = drv_sds_rings;
@@ -1399,9 +1382,6 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
 	if (netif_running(netdev))
 		__qlcnic_up(adapter, netdev);
 
-	if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST &&
-	    !(adapter->flags & QLCNIC_MSIX_ENABLED))
-		qlcnic_83xx_disable_mbx_poll(adapter);
 out:
 	netif_device_attach(netdev);
 }
@@ -3754,6 +3734,19 @@ static void qlcnic_83xx_decode_mbx_rsp(struct qlcnic_adapter *adapter,
 	return;
 }
 
+static inline void qlcnic_dump_mailbox_registers(struct qlcnic_adapter *adapter)
+{
+	struct qlcnic_hardware_context *ahw = adapter->ahw;
+	u32 offset;
+
+	offset = QLCRDX(ahw, QLCNIC_DEF_INT_MASK);
+	dev_info(&adapter->pdev->dev, "Mbx interrupt mask=0x%x, Mbx interrupt enable=0x%x, Host mbx control=0x%x, Fw mbx control=0x%x",
+		 readl(ahw->pci_base0 + offset),
+		 QLCRDX(ahw, QLCNIC_MBX_INTR_ENBL),
+		 QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL),
+		 QLCRDX(ahw, QLCNIC_FW_MBX_CTRL));
+}
+
 static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
 {
 	struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox,
@@ -3798,6 +3791,8 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
 				__func__, cmd->cmd_op, cmd->type, ahw->pci_func,
 				ahw->op_mode);
 			clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+			qlcnic_dump_mailbox_registers(adapter);
+			qlcnic_83xx_get_mbx_data(adapter, cmd);
 			qlcnic_dump_mbx(adapter, cmd);
 			qlcnic_83xx_idc_request_reset(adapter,
 						      QLCNIC_FORCE_FW_DUMP_KEY);
@@ -662,4 +662,5 @@ pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *,
 					       pci_channel_state_t);
 pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *);
 void qlcnic_83xx_io_resume(struct pci_dev *);
+void qlcnic_83xx_stop_hw(struct qlcnic_adapter *);
 #endif
@@ -740,6 +740,7 @@ static int qlcnic_83xx_idc_unknown_state(struct qlcnic_adapter *adapter)
 	adapter->ahw->idc.err_code = -EIO;
 	dev_err(&adapter->pdev->dev,
 		"%s: Device in unknown state\n", __func__);
+	clear_bit(__QLCNIC_RESETTING, &adapter->state);
 	return 0;
 }
 
@@ -818,7 +819,6 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
 	struct qlcnic_mailbox *mbx = ahw->mailbox;
 	int ret = 0;
-	u32 owner;
 	u32 val;
 
 	/* Perform NIC configuration based ready state entry actions */
@@ -848,9 +848,9 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
 			set_bit(__QLCNIC_RESETTING, &adapter->state);
 			qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
 		} else {
-			owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
-			if (ahw->pci_func == owner)
-				qlcnic_dump_fw(adapter);
+			netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n",
+				    __func__);
+			qlcnic_83xx_idc_enter_failed_state(adapter, 1);
 		}
 		return -EIO;
 	}
@@ -948,13 +948,26 @@ static int qlcnic_83xx_idc_need_quiesce_state(struct qlcnic_adapter *adapter)
 	return 0;
 }
 
-static int qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter)
+static void qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter)
 {
-	dev_err(&adapter->pdev->dev, "%s: please restart!!\n", __func__);
-	clear_bit(__QLCNIC_RESETTING, &adapter->state);
-	adapter->ahw->idc.err_code = -EIO;
+	struct qlcnic_hardware_context *ahw = adapter->ahw;
+	u32 val, owner;
 
-	return 0;
+	val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+	if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) {
+		owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
+		if (ahw->pci_func == owner) {
+			qlcnic_83xx_stop_hw(adapter);
+			qlcnic_dump_fw(adapter);
+		}
+	}
+
+	netdev_warn(adapter->netdev, "%s: Reboot will be required to recover the adapter!!\n",
+		    __func__);
+	clear_bit(__QLCNIC_RESETTING, &adapter->state);
+	ahw->idc.err_code = -EIO;
+
+	return;
 }
 
 static int qlcnic_83xx_idc_quiesce_state(struct qlcnic_adapter *adapter)
@@ -1063,12 +1076,6 @@ void qlcnic_83xx_idc_poll_dev_state(struct work_struct *work)
 	adapter->ahw->idc.prev_state = adapter->ahw->idc.curr_state;
 	qlcnic_83xx_periodic_tasks(adapter);
 
-	/* Do not reschedule if firmaware is in hanged state and auto
-	 * recovery is disabled
-	 */
-	if ((adapter->flags & QLCNIC_FW_HANG) && !qlcnic_auto_fw_reset)
-		return;
-
 	/* Re-schedule the function */
 	if (test_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status))
 		qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state,
@@ -1219,10 +1226,10 @@ void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *adapter, u32 key)
 	}
 
 	val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
-	if ((val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) ||
-	    !qlcnic_auto_fw_reset) {
-		dev_err(&adapter->pdev->dev,
-			"%s:failed, device in non reset mode\n", __func__);
+	if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) {
+		netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n",
+			    __func__);
+		qlcnic_83xx_idc_enter_failed_state(adapter, 0);
 		qlcnic_83xx_unlock_driver(adapter);
 		return;
 	}
@@ -1254,24 +1261,24 @@ static int qlcnic_83xx_copy_bootloader(struct qlcnic_adapter *adapter)
 	if (size & 0xF)
 		size = (size + 16) & ~0xF;
 
-	p_cache = kzalloc(size, GFP_KERNEL);
+	p_cache = vzalloc(size);
 	if (p_cache == NULL)
 		return -ENOMEM;
 
 	ret = qlcnic_83xx_lockless_flash_read32(adapter, src, p_cache,
 						size / sizeof(u32));
 	if (ret) {
-		kfree(p_cache);
+		vfree(p_cache);
 		return ret;
 	}
 	/* 16 byte write to MS memory */
 	ret = qlcnic_83xx_ms_mem_write128(adapter, dest, (u32 *)p_cache,
 					  size / 16);
 	if (ret) {
-		kfree(p_cache);
+		vfree(p_cache);
 		return ret;
 	}
-	kfree(p_cache);
+	vfree(p_cache);
 
 	return ret;
 }
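
The kzalloc()-to-vzalloc() conversion above is the standard cure when a large allocation starts failing: kzalloc() must find physically contiguous pages and becomes unreliable for something the size of a bootloader image on a fragmented system, while vzalloc() only needs virtually contiguous pages, which is enough for a buffer that the CPU alone touches. A sketch of the pairing rule (fragment, not buildable stand-alone):

	buf = vzalloc(size);    /* large, CPU-only staging buffer */
	if (!buf)
		return -ENOMEM;
	/* ... fill and consume buf ... */
	vfree(buf);             /* vzalloc() memory must go back via vfree() */
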
@@ -1939,7 +1946,7 @@ static void qlcnic_83xx_exec_template_cmd(struct qlcnic_adapter *p_dev,
 	p_dev->ahw->reset.seq_index = index;
 }
 
-static void qlcnic_83xx_stop_hw(struct qlcnic_adapter *p_dev)
+void qlcnic_83xx_stop_hw(struct qlcnic_adapter *p_dev)
 {
 	p_dev->ahw->reset.seq_index = 0;
 
@@ -1994,6 +2001,14 @@ static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter)
 	val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
 	if (!(val & QLC_83XX_IDC_GRACEFULL_RESET))
 		qlcnic_dump_fw(adapter);
+
+	if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) {
+		netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n",
+			    __func__);
+		qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+		return err;
+	}
+
 	qlcnic_83xx_init_hw(adapter);
 
 	if (qlcnic_83xx_copy_bootloader(adapter))
@@ -2073,8 +2088,8 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
 		ahw->nic_mode = QLCNIC_DEFAULT_MODE;
 		adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
 		ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
-		adapter->max_sds_rings = ahw->max_rx_ques;
-		adapter->max_tx_rings = ahw->max_tx_ques;
+		adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS;
+		adapter->max_tx_rings = QLCNIC_MAX_TX_RINGS;
 	} else {
 		return -EIO;
 	}
Some files were not shown because too many files have changed in this diff.