Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/ethernet/cavium/Kconfig

The cavium conflict was overlapping dependency changes.

Signed-off-by: David S. Miller <davem@davemloft.net>

commit 182ad468e7
@@ -35,7 +35,7 @@ Example:
 		device_type = "dma";
 		reg = <0x0 0x1f270000 0x0 0x10000>,
 		      <0x0 0x1f200000 0x0 0x10000>,
-		      <0x0 0x1b008000 0x0 0x2000>,
+		      <0x0 0x1b000000 0x0 0x400000>,
 		      <0x0 0x1054a000 0x0 0x100>;
 		interrupts = <0x0 0x82 0x4>,
 			     <0x0 0xb8 0x4>,
@@ -82,6 +82,9 @@ Optional properties:
 - id: If there are multiple instance of the same type, in order to
   differentiate between each instance "id" can be used (e.g., multi-lane PCIe
   PHY). If "id" is not provided, it is set to default value of '1'.
+- syscon-pllreset: Handle to system control region that contains the
+  CTRL_CORE_SMA_SW_0 register and register offset to the CTRL_CORE_SMA_SW_0
+  register that contains the SATA_PLL_SOFT_RESET bit. Only valid for sata_phy.
 
 This is usually a subnode of ocp2scp to which it is connected.
 
@@ -100,3 +103,16 @@ usb3phy@4a084400 {
 		      "sysclk",
 		      "refclk";
 };
+
+sata_phy: phy@4A096000 {
+	compatible = "ti,phy-pipe3-sata";
+	reg = <0x4A096000 0x80>, /* phy_rx */
+	      <0x4A096400 0x64>, /* phy_tx */
+	      <0x4A096800 0x40>; /* pll_ctrl */
+	reg-names = "phy_rx", "phy_tx", "pll_ctrl";
+	ctrl-module = <&omap_control_sata>;
+	clocks = <&sys_clkin1>, <&sata_ref_clk>;
+	clock-names = "sysclk", "refclk";
+	syscon-pllreset = <&scm_conf 0x3fc>;
+	#phy-cells = <0>;
+};
@@ -119,8 +119,10 @@ ALPS Absolute Mode - Protocol Version 2
  byte 5:    0   z6   z5   z4   z3   z2   z1   z0
 
 Protocol Version 2 DualPoint devices send standard PS/2 mouse packets for
-the DualPoint Stick. For non interleaved dualpoint devices the pointingstick
-buttons get reported separately in the PSM, PSR and PSL bits.
+the DualPoint Stick. The M, R and L bits signal the combined status of both
+the pointingstick and touchpad buttons, except for Dell dualpoint devices
+where the pointingstick buttons get reported separately in the PSM, PSR
+and PSL bits.
 
 Dualpoint device -- interleaved packet format
 ---------------------------------------------
@@ -5600,6 +5600,7 @@ F:	kernel/irq/
 IRQCHIP DRIVERS
 M:	Thomas Gleixner <tglx@linutronix.de>
 M:	Jason Cooper <jason@lakedaemon.net>
+M:	Marc Zyngier <marc.zyngier@arm.com>
 L:	linux-kernel@vger.kernel.org
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
@@ -5608,11 +5609,14 @@ F:	Documentation/devicetree/bindings/interrupt-controller/
 F:	drivers/irqchip/
 
 IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY)
-M:	Benjamin Herrenschmidt <benh@kernel.crashing.org>
+M:	Jiang Liu <jiang.liu@linux.intel.com>
+M:	Marc Zyngier <marc.zyngier@arm.com>
 S:	Maintained
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
 F:	Documentation/IRQ-domain.txt
 F:	include/linux/irqdomain.h
 F:	kernel/irq/irqdomain.c
+F:	kernel/irq/msi.c
 
 ISAPNP
 M:	Jaroslav Kysela <perex@perex.cz>
Makefile: 11 changes
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 2
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
 NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*
@@ -597,6 +597,11 @@ endif # $(dot-config)
 # Defaults to vmlinux, but the arch makefile usually adds further targets
 all: vmlinux
 
+# The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default
+# values of the respective KBUILD_* variables
+ARCH_CPPFLAGS :=
+ARCH_AFLAGS :=
+ARCH_CFLAGS :=
 include arch/$(SRCARCH)/Makefile
 
 KBUILD_CFLAGS	+= $(call cc-option,-fno-delete-null-pointer-checks,)
@@ -848,10 +853,10 @@ export mod_strip_cmd
 mod_compress_cmd = true
 ifdef CONFIG_MODULE_COMPRESS
   ifdef CONFIG_MODULE_COMPRESS_GZIP
-    mod_compress_cmd = gzip -n
+    mod_compress_cmd = gzip -n -f
   endif # CONFIG_MODULE_COMPRESS_GZIP
   ifdef CONFIG_MODULE_COMPRESS_XZ
-    mod_compress_cmd = xz
+    mod_compress_cmd = xz -f
   endif # CONFIG_MODULE_COMPRESS_XZ
 endif # CONFIG_MODULE_COMPRESS
 export mod_compress_cmd
@@ -313,11 +313,11 @@ config ARC_PAGE_SIZE_8K
 
 config ARC_PAGE_SIZE_16K
 	bool "16KB"
-	depends on ARC_MMU_V3
+	depends on ARC_MMU_V3 || ARC_MMU_V4
 
 config ARC_PAGE_SIZE_4K
 	bool "4KB"
-	depends on ARC_MMU_V3
+	depends on ARC_MMU_V3 || ARC_MMU_V4
 
 endchoice
 
@@ -365,6 +365,11 @@ config ARC_HAS_LLSC
 	default y
 	depends on !ARC_CANT_LLSC
 
+config ARC_STAR_9000923308
+	bool "Workaround for llock/scond livelock"
+	default y
+	depends on ISA_ARCV2 && SMP && ARC_HAS_LLSC
+
 config ARC_HAS_SWAPE
 	bool "Insn: SWAPE (endian-swap)"
 	default y
@@ -379,6 +384,10 @@ config ARC_HAS_LL64
 	  dest operands with 2 possible source operands.
 	default y
 
+config ARC_HAS_DIV_REM
+	bool "Insn: div, divu, rem, remu"
+	default y
+
 config ARC_HAS_RTC
 	bool "Local 64-bit r/o cycle counter"
 	default n
@@ -36,8 +36,16 @@ cflags-$(atleast_gcc44) += -fsection-anchors
 cflags-$(CONFIG_ARC_HAS_LLSC)	+= -mlock
 cflags-$(CONFIG_ARC_HAS_SWAPE)	+= -mswape
 
+ifdef CONFIG_ISA_ARCV2
+
 ifndef CONFIG_ARC_HAS_LL64
-cflags-$(CONFIG_ISA_ARCV2)	+= -mno-ll64
+cflags-y			+= -mno-ll64
+endif
+
+ifndef CONFIG_ARC_HAS_DIV_REM
+cflags-y			+= -mno-div-rem
 endif
+
+endif
 
 cflags-$(CONFIG_ARC_DW2_UNWIND)	+= -fasynchronous-unwind-tables
@@ -89,11 +89,10 @@
 #define ECR_C_BIT_DTLB_LD_MISS		8
 #define ECR_C_BIT_DTLB_ST_MISS		9
 
-
 /* Auxiliary registers */
 #define AUX_IDENTITY		4
 #define AUX_INTR_VEC_BASE	0x25
+#define AUX_NON_VOL		0x5e
 
 /*
  * Floating Pt Registers
@@ -240,9 +239,9 @@ struct bcr_extn_xymem {
 
 struct bcr_perip {
 #ifdef CONFIG_CPU_BIG_ENDIAN
-	unsigned int start:8, pad2:8, sz:8, pad:8;
+	unsigned int start:8, pad2:8, sz:8, ver:8;
 #else
-	unsigned int pad:8, sz:8, pad2:8, start:8;
+	unsigned int ver:8, sz:8, pad2:8, start:8;
 #endif
 };
 
@@ -23,33 +23,60 @@
 
 #define atomic_set(v, i) (((v)->counter) = (i))
 
-#ifdef CONFIG_ISA_ARCV2
-#define PREFETCHW	"	prefetchw   [%1]	\n"
-#else
-#define PREFETCHW
+#ifdef CONFIG_ARC_STAR_9000923308
+
+#define SCOND_FAIL_RETRY_VAR_DEF						\
+	unsigned int delay = 1, tmp;						\
+
+#define SCOND_FAIL_RETRY_ASM							\
+	"	bz	4f			\n"				\
+	"   ; --- scond fail delay ---		\n"				\
+	"	mov	%[tmp], %[delay]	\n"	/* tmp = delay */	\
+	"2: 	brne.d	%[tmp], 0, 2b		\n"	/* while (tmp != 0) */	\
+	"	sub	%[tmp], %[tmp], 1	\n"	/* tmp-- */		\
+	"	rol	%[delay], %[delay]	\n"	/* delay *= 2 */	\
+	"	b	1b			\n"	/* start over */	\
+	"4: ; --- success ---			\n"				\
+
+#define SCOND_FAIL_RETRY_VARS							\
+	  ,[delay] "+&r" (delay),[tmp]	"=&r"	(tmp)				\
+
+#else	/* !CONFIG_ARC_STAR_9000923308 */
+
+#define SCOND_FAIL_RETRY_VAR_DEF
+
+#define SCOND_FAIL_RETRY_ASM							\
+	"	bnz     1b			\n"				\
+
+#define SCOND_FAIL_RETRY_VARS
+
 #endif
 
 #define ATOMIC_OP(op, c_op, asm_op)					\
 static inline void atomic_##op(int i, atomic_t *v)			\
 {									\
-	unsigned int temp;						\
+	unsigned int val;						\
+	SCOND_FAIL_RETRY_VAR_DEF					\
 									\
 	__asm__ __volatile__(						\
-	"1:				\n"				\
-	PREFETCHW							\
-	"	llock   %0, [%1]	\n"				\
-	"	" #asm_op " %0, %0, %2	\n"				\
-	"	scond   %0, [%1]	\n"				\
-	"	bnz     1b		\n"				\
-	: "=&r"(temp)	/* Early clobber, to prevent reg reuse */	\
-	: "r"(&v->counter), "ir"(i)					\
+	"1:	llock   %[val], [%[ctr]]		\n"		\
+	"	" #asm_op " %[val], %[val], %[i]	\n"		\
+	"	scond   %[val], [%[ctr]]		\n"		\
+	"						\n"		\
+	SCOND_FAIL_RETRY_ASM						\
+									\
+	: [val]	"=&r"	(val) /* Early clobber to prevent reg reuse */	\
+	  SCOND_FAIL_RETRY_VARS						\
+	: [ctr]	"r"	(&v->counter), /* Not "m": llock only supports reg direct addr mode */	\
+	  [i]	"ir"	(i)						\
 	: "cc");							\
 }									\
 
 #define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
 static inline int atomic_##op##_return(int i, atomic_t *v)		\
 {									\
-	unsigned int temp;						\
+	unsigned int val;						\
+	SCOND_FAIL_RETRY_VAR_DEF					\
 									\
 	/*								\
 	 * Explicit full memory barrier needed before/after as		\
@@ -58,19 +85,21 @@ static inline int atomic_##op##_return(int i, atomic_t *v)	\
 	smp_mb();							\
 									\
 	__asm__ __volatile__(						\
-	"1:				\n"				\
-	PREFETCHW							\
-	"	llock   %0, [%1]	\n"				\
-	"	" #asm_op " %0, %0, %2	\n"				\
-	"	scond   %0, [%1]	\n"				\
-	"	bnz     1b		\n"				\
-	: "=&r"(temp)							\
-	: "r"(&v->counter), "ir"(i)					\
+	"1:	llock   %[val], [%[ctr]]		\n"		\
+	"	" #asm_op " %[val], %[val], %[i]	\n"		\
+	"	scond   %[val], [%[ctr]]		\n"		\
+	"						\n"		\
+	SCOND_FAIL_RETRY_ASM						\
+									\
+	: [val]	"=&r"	(val)						\
+	  SCOND_FAIL_RETRY_VARS						\
+	: [ctr]	"r"	(&v->counter),					\
+	  [i]	"ir"	(i)						\
 	: "cc");							\
 									\
 	smp_mb();							\
 									\
-	return temp;							\
+	return val;							\
 }
 
 #else	/* !CONFIG_ARC_HAS_LLSC */
@@ -150,6 +179,9 @@ ATOMIC_OP(and, &=, and)
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
+#undef SCOND_FAIL_RETRY_VAR_DEF
+#undef SCOND_FAIL_RETRY_ASM
+#undef SCOND_FAIL_RETRY_VARS
 
 /**
  * __atomic_add_unless - add unless the number is a given value
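
The ATOMIC_OP change above rewrites each atomic operation as a llock/scond
(load-locked/store-conditional) retry loop. A rough C equivalent, assuming
hypothetical load_locked()/store_conditional() stand-ins for the two
instructions:

	static inline void atomic_add_sketch(int i, atomic_t *v)
	{
		unsigned int val;

		do {
			val = load_locked(&v->counter);		/* llock       */
			val += i;				/* the #asm_op */
		} while (!store_conditional(&v->counter, val));	/* scond + bnz */
	}

store_conditional() fails, and the loop retries, whenever another core wrote
the line between the paired instructions; SCOND_FAIL_RETRY_ASM is the hook
that turns that bare retry into the delayed retry used by the livelock
workaround.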
@@ -20,20 +20,20 @@
 struct pt_regs {
 
 	/* Real registers */
-	long bta;	/* bta_l1, bta_l2, erbta */
+	unsigned long bta;	/* bta_l1, bta_l2, erbta */
 
-	long lp_start, lp_end, lp_count;
+	unsigned long lp_start, lp_end, lp_count;
 
-	long status32;	/* status32_l1, status32_l2, erstatus */
-	long ret;	/* ilink1, ilink2 or eret */
-	long blink;
-	long fp;
-	long r26;	/* gp */
+	unsigned long status32;	/* status32_l1, status32_l2, erstatus */
+	unsigned long ret;	/* ilink1, ilink2 or eret */
+	unsigned long blink;
+	unsigned long fp;
+	unsigned long r26;	/* gp */
 
-	long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
+	unsigned long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
 
-	long sp;	/* user/kernel sp depending on where we came from */
-	long orig_r0;
+	unsigned long sp;	/* User/Kernel depending on where we came from */
+	unsigned long orig_r0;
 
 	/*
 	 * To distinguish bet excp, syscall, irq
@@ -55,13 +55,13 @@ struct pt_regs {
 		unsigned long event;
 	};
 
-	long user_r25;
+	unsigned long user_r25;
 };
 #else
 
 struct pt_regs {
 
-	long orig_r0;
+	unsigned long orig_r0;
 
 	union {
 		struct {
@@ -76,26 +76,26 @@ struct pt_regs {
 		unsigned long event;
 	};
 
-	long bta;	/* bta_l1, bta_l2, erbta */
+	unsigned long bta;	/* bta_l1, bta_l2, erbta */
 
-	long user_r25;
+	unsigned long user_r25;
 
-	long r26;	/* gp */
-	long fp;
-	long sp;	/* user/kernel sp depending on where we came from */
+	unsigned long r26;	/* gp */
+	unsigned long fp;
+	unsigned long sp;	/* user/kernel sp depending on where we came from */
 
-	long r12;
+	unsigned long r12;
 
 	/*------- Below list auto saved by h/w -----------*/
-	long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
+	unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
 
-	long blink;
-	long lp_end, lp_start, lp_count;
+	unsigned long blink;
+	unsigned long lp_end, lp_start, lp_count;
 
-	long ei, ldi, jli;
+	unsigned long ei, ldi, jli;
 
-	long ret;
-	long status32;
+	unsigned long ret;
+	unsigned long status32;
 };
 
 #endif
@@ -103,10 +103,10 @@ struct pt_regs {
 /* Callee saved registers - need to be saved only when you are scheduled out */
 
 struct callee_regs {
-	long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
+	unsigned long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
 };
 
-#define instruction_pointer(regs)	(unsigned long)((regs)->ret)
+#define instruction_pointer(regs)	((regs)->ret)
 #define profile_pc(regs)		instruction_pointer(regs)
 
 /* return 1 if user mode or 0 if kernel mode */
@@ -142,7 +142,7 @@ struct callee_regs {
 
 static inline long regs_return_value(struct pt_regs *regs)
 {
-	return regs->r0;
+	return (long)regs->r0;
 }
 
 #endif /* !__ASSEMBLY__ */
@@ -18,9 +18,518 @@
 #define arch_spin_unlock_wait(x) \
 	do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
 
+#ifdef CONFIG_ARC_HAS_LLSC
+
+/*
+ * A normal LLOCK/SCOND based system, w/o need for livelock workaround
+ */
+#ifndef CONFIG_ARC_STAR_9000923308
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+	unsigned int val;
+
+	smp_mb();
+
+	__asm__ __volatile__(
+	"1:	llock	%[val], [%[slock]]	\n"
+	"	breq	%[val], %[LOCKED], 1b	\n"	/* spin while LOCKED */
+	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
+	"	bnz	1b			\n"
+	"					\n"
+	: [val]		"=&r"	(val)
+	: [slock]	"r"	(&(lock->slock)),
+	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
+	: "memory", "cc");
+
+	smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+	unsigned int val, got_it = 0;
+
+	smp_mb();
+
+	__asm__ __volatile__(
+	"1:	llock	%[val], [%[slock]]	\n"
+	"	breq	%[val], %[LOCKED], 4f	\n"	/* already LOCKED, just bail */
+	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
+	"	bnz	1b			\n"
+	"	mov	%[got_it], 1		\n"
+	"4:					\n"
+	"					\n"
+	: [val]		"=&r"	(val),
+	  [got_it]	"+&r"	(got_it)
+	: [slock]	"r"	(&(lock->slock)),
+	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
+	: "memory", "cc");
+
+	smp_mb();
+
+	return got_it;
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+	smp_mb();
+
+	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
+
+	smp_mb();
+}
+
+/*
+ * Read-write spinlocks, allowing multiple readers but only one writer.
+ * Unfair locking as Writers could be starved indefinitely by Reader(s)
+ */
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+	unsigned int val;
+
+	smp_mb();
+
+	/*
+	 * zero means writer holds the lock exclusively, deny Reader.
+	 * Otherwise grant lock to first/subseq reader
+	 *
+	 *	if (rw->counter > 0) {
+	 *		rw->counter--;
+	 *		ret = 1;
+	 *	}
+	 */
+
+	__asm__ __volatile__(
+	"1:	llock	%[val], [%[rwlock]]	\n"
+	"	brls	%[val], %[WR_LOCKED], 1b\n"	/* <= 0: spin while write locked */
+	"	sub	%[val], %[val], 1	\n"	/* reader lock */
+	"	scond	%[val], [%[rwlock]]	\n"
+	"	bnz	1b			\n"
+	"					\n"
+	: [val]		"=&r"	(val)
+	: [rwlock]	"r"	(&(rw->counter)),
+	  [WR_LOCKED]	"ir"	(0)
+	: "memory", "cc");
+
+	smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+	unsigned int val, got_it = 0;
+
+	smp_mb();
+
+	__asm__ __volatile__(
+	"1:	llock	%[val], [%[rwlock]]	\n"
+	"	brls	%[val], %[WR_LOCKED], 4f\n"	/* <= 0: already write locked, bail */
+	"	sub	%[val], %[val], 1	\n"	/* counter-- */
+	"	scond	%[val], [%[rwlock]]	\n"
+	"	bnz	1b			\n"	/* retry if collided with someone */
+	"	mov	%[got_it], 1		\n"
+	"					\n"
+	"4: ; --- done ---			\n"
+
+	: [val]		"=&r"	(val),
+	  [got_it]	"+&r"	(got_it)
+	: [rwlock]	"r"	(&(rw->counter)),
+	  [WR_LOCKED]	"ir"	(0)
+	: "memory", "cc");
+
+	smp_mb();
+
+	return got_it;
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+	unsigned int val;
+
+	smp_mb();
+
+	/*
+	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
+	 * deny writer. Otherwise if unlocked grant to writer
+	 * Hence the claim that Linux rwlocks are unfair to writers.
+	 * (can be starved for an indefinite time by readers).
+	 *
+	 *	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
+	 *		rw->counter = 0;
+	 *		ret = 1;
+	 *	}
+	 */
+
+	__asm__ __volatile__(
+	"1:	llock	%[val], [%[rwlock]]	\n"
+	"	brne	%[val], %[UNLOCKED], 1b	\n"	/* while !UNLOCKED spin */
+	"	mov	%[val], %[WR_LOCKED]	\n"
+	"	scond	%[val], [%[rwlock]]	\n"
+	"	bnz	1b			\n"
+	"					\n"
+	: [val]		"=&r"	(val)
+	: [rwlock]	"r"	(&(rw->counter)),
+	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
+	  [WR_LOCKED]	"ir"	(0)
+	: "memory", "cc");
+
+	smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+	unsigned int val, got_it = 0;
+
+	smp_mb();
+
+	__asm__ __volatile__(
+	"1:	llock	%[val], [%[rwlock]]	\n"
+	"	brne	%[val], %[UNLOCKED], 4f	\n"	/* !UNLOCKED, bail */
+	"	mov	%[val], %[WR_LOCKED]	\n"
+	"	scond	%[val], [%[rwlock]]	\n"
+	"	bnz	1b			\n"	/* retry if collided with someone */
+	"	mov	%[got_it], 1		\n"
+	"					\n"
+	"4: ; --- done ---			\n"
+
+	: [val]		"=&r"	(val),
+	  [got_it]	"+&r"	(got_it)
+	: [rwlock]	"r"	(&(rw->counter)),
+	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
+	  [WR_LOCKED]	"ir"	(0)
+	: "memory", "cc");
+
+	smp_mb();
+
+	return got_it;
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+	unsigned int val;
+
+	smp_mb();
+
+	/*
+	 * rw->counter++;
+	 */
+	__asm__ __volatile__(
+	"1:	llock	%[val], [%[rwlock]]	\n"
+	"	add	%[val], %[val], 1	\n"
+	"	scond	%[val], [%[rwlock]]	\n"
+	"	bnz	1b			\n"
+	"					\n"
+	: [val]		"=&r"	(val)
+	: [rwlock]	"r"	(&(rw->counter))
+	: "memory", "cc");
+
+	smp_mb();
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+	smp_mb();
+
+	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
+
+	smp_mb();
+}
+
+#else	/* CONFIG_ARC_STAR_9000923308 */
+
+/*
+ * HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping
+ * coherency transactions in the SCU. The exclusive line state keeps rotating
+ * among contenting cores leading to a never ending cycle. So break the cycle
+ * by deferring the retry of failed exclusive access (SCOND). The actual delay
+ * needed is function of number of contending cores as well as the unrelated
+ * coherency traffic from other cores. To keep the code simple, start off with
+ * small delay of 1 which would suffice most cases and in case of contention
+ * double the delay. Eventually the delay is sufficient such that the coherency
+ * pipeline is drained, thus a subsequent exclusive access would succeed.
+ */
+
+#define SCOND_FAIL_RETRY_VAR_DEF						\
+	unsigned int delay, tmp;						\
+
+#define SCOND_FAIL_RETRY_ASM							\
+	"   ; --- scond fail delay ---		\n"				\
+	"	mov	%[tmp], %[delay]	\n"	/* tmp = delay */	\
+	"2: 	brne.d	%[tmp], 0, 2b		\n"	/* while (tmp != 0) */	\
+	"	sub	%[tmp], %[tmp], 1	\n"	/* tmp-- */		\
+	"	rol	%[delay], %[delay]	\n"	/* delay *= 2 */	\
+	"	b	1b			\n"	/* start over */	\
+	"					\n"				\
+	"4: ; --- done ---			\n"				\
+
+#define SCOND_FAIL_RETRY_VARS							\
+	  ,[delay] "=&r" (delay), [tmp] "=&r"	(tmp)				\
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+	unsigned int val;
+	SCOND_FAIL_RETRY_VAR_DEF;
+
+	smp_mb();
+
+	__asm__ __volatile__(
+	"0:	mov	%[delay], 1		\n"
+	"1:	llock	%[val], [%[slock]]	\n"
+	"	breq	%[val], %[LOCKED], 0b	\n"	/* spin while LOCKED */
+	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
+	"	bz	4f			\n"	/* done */
+	"					\n"
+	SCOND_FAIL_RETRY_ASM
+
+	: [val]		"=&r"	(val)
+	  SCOND_FAIL_RETRY_VARS
+	: [slock]	"r"	(&(lock->slock)),
+	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
+	: "memory", "cc");
+
+	smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+	unsigned int val, got_it = 0;
+	SCOND_FAIL_RETRY_VAR_DEF;
+
+	smp_mb();
+
+	__asm__ __volatile__(
+	"0:	mov	%[delay], 1		\n"
+	"1:	llock	%[val], [%[slock]]	\n"
+	"	breq	%[val], %[LOCKED], 4f	\n"	/* already LOCKED, just bail */
+	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
+	"	bz.d	4f			\n"
+	"	mov.z	%[got_it], 1		\n"	/* got it */
+	"					\n"
+	SCOND_FAIL_RETRY_ASM
+
+	: [val]		"=&r"	(val),
+	  [got_it]	"+&r"	(got_it)
+	  SCOND_FAIL_RETRY_VARS
+	: [slock]	"r"	(&(lock->slock)),
+	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
+	: "memory", "cc");
+
+	smp_mb();
+
+	return got_it;
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+	smp_mb();
+
+	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
+
+	smp_mb();
+}
+
+/*
+ * Read-write spinlocks, allowing multiple readers but only one writer.
+ * Unfair locking as Writers could be starved indefinitely by Reader(s)
+ */
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+	unsigned int val;
+	SCOND_FAIL_RETRY_VAR_DEF;
+
+	smp_mb();
+
+	/*
+	 * zero means writer holds the lock exclusively, deny Reader.
+	 * Otherwise grant lock to first/subseq reader
+	 *
+	 *	if (rw->counter > 0) {
+	 *		rw->counter--;
+	 *		ret = 1;
+	 *	}
+	 */
+
+	__asm__ __volatile__(
+	"0:	mov	%[delay], 1		\n"
+	"1:	llock	%[val], [%[rwlock]]	\n"
+	"	brls	%[val], %[WR_LOCKED], 0b\n"	/* <= 0: spin while write locked */
+	"	sub	%[val], %[val], 1	\n"	/* reader lock */
+	"	scond	%[val], [%[rwlock]]	\n"
+	"	bz	4f			\n"	/* done */
+	"					\n"
+	SCOND_FAIL_RETRY_ASM
+
+	: [val]		"=&r"	(val)
+	  SCOND_FAIL_RETRY_VARS
+	: [rwlock]	"r"	(&(rw->counter)),
+	  [WR_LOCKED]	"ir"	(0)
+	: "memory", "cc");
+
+	smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+	unsigned int val, got_it = 0;
+	SCOND_FAIL_RETRY_VAR_DEF;
+
+	smp_mb();
+
+	__asm__ __volatile__(
+	"0:	mov	%[delay], 1		\n"
+	"1:	llock	%[val], [%[rwlock]]	\n"
+	"	brls	%[val], %[WR_LOCKED], 4f\n"	/* <= 0: already write locked, bail */
+	"	sub	%[val], %[val], 1	\n"	/* counter-- */
+	"	scond	%[val], [%[rwlock]]	\n"
+	"	bz.d	4f			\n"
+	"	mov.z	%[got_it], 1		\n"	/* got it */
+	"					\n"
+	SCOND_FAIL_RETRY_ASM
+
+	: [val]		"=&r"	(val),
+	  [got_it]	"+&r"	(got_it)
+	  SCOND_FAIL_RETRY_VARS
+	: [rwlock]	"r"	(&(rw->counter)),
+	  [WR_LOCKED]	"ir"	(0)
+	: "memory", "cc");
+
+	smp_mb();
+
+	return got_it;
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+	unsigned int val;
+	SCOND_FAIL_RETRY_VAR_DEF;
+
+	smp_mb();
+
+	/*
+	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
+	 * deny writer. Otherwise if unlocked grant to writer
+	 * Hence the claim that Linux rwlocks are unfair to writers.
+	 * (can be starved for an indefinite time by readers).
+	 *
+	 *	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
+	 *		rw->counter = 0;
+	 *		ret = 1;
+	 *	}
+	 */
+
+	__asm__ __volatile__(
+	"0:	mov	%[delay], 1		\n"
+	"1:	llock	%[val], [%[rwlock]]	\n"
+	"	brne	%[val], %[UNLOCKED], 0b	\n"	/* while !UNLOCKED spin */
+	"	mov	%[val], %[WR_LOCKED]	\n"
+	"	scond	%[val], [%[rwlock]]	\n"
+	"	bz	4f			\n"
+	"					\n"
+	SCOND_FAIL_RETRY_ASM
+
+	: [val]		"=&r"	(val)
+	  SCOND_FAIL_RETRY_VARS
+	: [rwlock]	"r"	(&(rw->counter)),
+	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
+	  [WR_LOCKED]	"ir"	(0)
+	: "memory", "cc");
+
+	smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+	unsigned int val, got_it = 0;
+	SCOND_FAIL_RETRY_VAR_DEF;
+
+	smp_mb();
+
+	__asm__ __volatile__(
+	"0:	mov	%[delay], 1		\n"
+	"1:	llock	%[val], [%[rwlock]]	\n"
+	"	brne	%[val], %[UNLOCKED], 4f	\n"	/* !UNLOCKED, bail */
+	"	mov	%[val], %[WR_LOCKED]	\n"
+	"	scond	%[val], [%[rwlock]]	\n"
+	"	bz.d	4f			\n"
+	"	mov.z	%[got_it], 1		\n"	/* got it */
+	"					\n"
+	SCOND_FAIL_RETRY_ASM
+
+	: [val]		"=&r"	(val),
+	  [got_it]	"+&r"	(got_it)
+	  SCOND_FAIL_RETRY_VARS
+	: [rwlock]	"r"	(&(rw->counter)),
+	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
+	  [WR_LOCKED]	"ir"	(0)
+	: "memory", "cc");
+
+	smp_mb();
+
+	return got_it;
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+	unsigned int val;
+
+	smp_mb();
+
+	/*
+	 * rw->counter++;
+	 */
+	__asm__ __volatile__(
+	"1:	llock	%[val], [%[rwlock]]	\n"
+	"	add	%[val], %[val], 1	\n"
+	"	scond	%[val], [%[rwlock]]	\n"
+	"	bnz	1b			\n"
+	"					\n"
+	: [val]		"=&r"	(val)
+	: [rwlock]	"r"	(&(rw->counter))
+	: "memory", "cc");
+
+	smp_mb();
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+	unsigned int val;
+
+	smp_mb();
+
+	/*
+	 * rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
+	 */
+	__asm__ __volatile__(
+	"1:	llock	%[val], [%[rwlock]]	\n"
+	"	scond	%[UNLOCKED], [%[rwlock]]\n"
+	"	bnz	1b			\n"
+	"					\n"
+	: [val]		"=&r"	(val)
+	: [rwlock]	"r"	(&(rw->counter)),
+	  [UNLOCKED]	"r"	(__ARCH_RW_LOCK_UNLOCKED__)
+	: "memory", "cc");
+
+	smp_mb();
+}
+
+#undef SCOND_FAIL_RETRY_VAR_DEF
+#undef SCOND_FAIL_RETRY_ASM
+#undef SCOND_FAIL_RETRY_VARS
+
+#endif	/* CONFIG_ARC_STAR_9000923308 */
+
+#else	/* !CONFIG_ARC_HAS_LLSC */
+
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
-	unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;
+	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
 
 	/*
 	 * This smp_mb() is technically superfluous, we only need the one
@@ -33,7 +542,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 	__asm__ __volatile__(
 	"1:	ex  %0, [%1]		\n"
 	"	breq  %0, %2, 1b	\n"
-	: "+&r" (tmp)
+	: "+&r" (val)
 	: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
 	: "memory");
 
@@ -48,26 +557,27 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 	smp_mb();
 }
 
+/* 1 - lock taken successfully */
 static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
-	unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;
+	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
 
 	smp_mb();
 
 	__asm__ __volatile__(
 	"1:	ex  %0, [%1]		\n"
-	: "+r" (tmp)
+	: "+r" (val)
 	: "r"(&(lock->slock))
 	: "memory");
 
 	smp_mb();
 
-	return (tmp == __ARCH_SPIN_LOCK_UNLOCKED__);
+	return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
 }
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
-	unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__;
+	unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;
 
 	/*
 	 * RELEASE barrier: given the instructions avail on ARCv2, full barrier
@@ -77,7 +587,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 
 	__asm__ __volatile__(
 	"	ex  %0, [%1]		\n"
-	: "+r" (tmp)
+	: "+r" (val)
 	: "r"(&(lock->slock))
 	: "memory");
 
@@ -90,19 +600,12 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 
 /*
  * Read-write spinlocks, allowing multiple readers but only one writer.
+ * Unfair locking as Writers could be starved indefinitely by Reader(s)
  *
  * The spinlock itself is contained in @counter and access to it is
  * serialized with @lock_mutex.
- *
- * Unfair locking as Writers could be starved indefinitely by Reader(s)
  */
 
-/* Would read_trylock() succeed? */
-#define arch_read_can_lock(x)	((x)->counter > 0)
-
-/* Would write_trylock() succeed? */
-#define arch_write_can_lock(x)	((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
-
 /* 1 - lock taken successfully */
 static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
@@ -173,6 +676,11 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 	arch_spin_unlock(&(rw->lock_mutex));
 }
 
+#endif
+
+#define arch_read_can_lock(x)	((x)->counter > 0)
+#define arch_write_can_lock(x)	((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
+
 #define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
 #define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)
 
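
The SCOND_FAIL_RETRY_* machinery above implements exponential backoff: each
failed scond spins for 'delay' iterations, then doubles the delay before
retrying the llock. A C-level sketch of the policy encoded in
SCOND_FAIL_RETRY_ASM (illustrative only; the real implementation is the
inline asm above):

	static void scond_fail_backoff(unsigned int *delay)
	{
		unsigned int tmp = *delay;

		while (tmp--)			/* 2: brne.d / sub: busy-wait */
			cpu_relax();
		*delay *= 2;			/* rol: double the delay */
	}

Starting from delay = 1, the per-core delays diverge quickly, so the
coherency pipeline drains and one contender's exclusive store finally
succeeds.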
@@ -26,7 +26,9 @@ typedef struct {
  */
 typedef struct {
 	volatile unsigned int	counter;
+#ifndef CONFIG_ARC_HAS_LLSC
 	arch_spinlock_t		lock_mutex;
+#endif
 } arch_rwlock_t;
 
 #define __ARCH_RW_LOCK_UNLOCKED__	0x01000000
@@ -32,20 +32,20 @@
  */
 struct user_regs_struct {
 
-	long pad;
+	unsigned long pad;
 	struct {
-		long bta, lp_start, lp_end, lp_count;
-		long status32, ret, blink, fp, gp;
-		long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
-		long sp;
+		unsigned long bta, lp_start, lp_end, lp_count;
+		unsigned long status32, ret, blink, fp, gp;
+		unsigned long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
+		unsigned long sp;
 	} scratch;
-	long pad2;
+	unsigned long pad2;
 	struct {
-		long r25, r24, r23, r22, r21, r20;
-		long r19, r18, r17, r16, r15, r14, r13;
+		unsigned long r25, r24, r23, r22, r21, r20;
+		unsigned long r19, r18, r17, r16, r15, r14, r13;
 	} callee;
-	long efa;	/* break pt addr, for break points in delay slots */
-	long stop_pc;	/* give dbg stop_pc after ensuring brkpt trap */
+	unsigned long efa;	/* break pt addr, for break points in delay slots */
+	unsigned long stop_pc;	/* give dbg stop_pc after ensuring brkpt trap */
 };
 #endif /* !__ASSEMBLY__ */
@@ -47,6 +47,7 @@ static void read_arc_build_cfg_regs(void)
 	struct bcr_perip uncached_space;
 	struct bcr_generic bcr;
 	struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
+	unsigned long perip_space;
 	FIX_PTR(cpu);
 
 	READ_BCR(AUX_IDENTITY, cpu->core);
@@ -56,7 +57,12 @@ static void read_arc_build_cfg_regs(void)
 	cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
 
 	READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space);
-	BUG_ON((uncached_space.start << 24) != ARC_UNCACHED_ADDR_SPACE);
+	if (uncached_space.ver < 3)
+		perip_space = uncached_space.start << 24;
+	else
+		perip_space = read_aux_reg(AUX_NON_VOL) & 0xF0000000;
+
+	BUG_ON(perip_space != ARC_UNCACHED_ADDR_SPACE);
 
 	READ_BCR(ARC_REG_MUL_BCR, cpu->extn_mpy);
 
@@ -330,6 +336,10 @@ static void arc_chk_core_config(void)
 		pr_warn("CONFIG_ARC_FPU_SAVE_RESTORE needed for working apps\n");
 	else if (!cpu->extn.fpu_dp && fpu_enabled)
 		panic("FPU non-existent, disable CONFIG_ARC_FPU_SAVE_RESTORE\n");
+
+	if (is_isa_arcv2() && IS_ENABLED(CONFIG_SMP) && cpu->isa.atomic &&
+	    !IS_ENABLED(CONFIG_ARC_STAR_9000923308))
+		panic("llock/scond livelock workaround missing\n");
 }
 
 /*
@@ -203,34 +203,24 @@ static int arc_clkevent_set_next_event(unsigned long delta,
 	return 0;
 }
 
-static void arc_clkevent_set_mode(enum clock_event_mode mode,
-				  struct clock_event_device *dev)
+static int arc_clkevent_set_periodic(struct clock_event_device *dev)
 {
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
 	/*
 	 * At X Hz, 1 sec = 1000ms -> X cycles;
 	 * 10ms -> X / 100 cycles
 	 */
 	arc_timer_event_setup(arc_get_core_freq() / HZ);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		break;
-	default:
-		break;
-	}
-
-	return;
+	return 0;
 }
 
 static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
 	.name			= "ARC Timer0",
-	.features		= CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
-	.mode			= CLOCK_EVT_MODE_UNUSED,
+	.features		= CLOCK_EVT_FEAT_ONESHOT |
+				  CLOCK_EVT_FEAT_PERIODIC,
 	.rating			= 300,
 	.irq			= TIMER0_IRQ,	/* hardwired, no need for resources */
 	.set_next_event		= arc_clkevent_set_next_event,
-	.set_mode		= arc_clkevent_set_mode,
+	.set_state_periodic	= arc_clkevent_set_periodic,
 };
 
 static irqreturn_t timer_irq_handler(int irq, void *dev_id)
@@ -240,7 +230,7 @@ static irqreturn_t timer_irq_handler(int irq, void *dev_id)
 	 * irq_set_chip_and_handler() asked for handle_percpu_devid_irq()
 	 */
 	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
-	int irq_reenable = evt->mode == CLOCK_EVT_MODE_PERIODIC;
+	int irq_reenable = clockevent_state_periodic(evt);
 
 	/*
 	 * Any write to CTRL reg ACks the interrupt, we rewrite the
@@ -206,7 +206,7 @@ unalignedOffby3:
 	ld.ab	r6, [r1, 4]
 	prefetch [r1, 28]	;Prefetch the next read location
 	ld.ab	r8, [r1,4]
-	prefetch [r3, 32]	;Prefetch the next write location
+	prefetchw [r3, 32]	;Prefetch the next write location
 
 	SHIFT_1	(r7, r6, 8)
 	or	r7, r7, r5
@@ -10,12 +10,6 @@
 
 #undef PREALLOC_NOT_AVAIL
 
-#ifdef PREALLOC_NOT_AVAIL
-#define PREWRITE(A,B)	prefetchw [(A),(B)]
-#else
-#define PREWRITE(A,B)	prealloc [(A),(B)]
-#endif
-
 ENTRY(memset)
 	prefetchw [r0]		; Prefetch the write location
 	mov.f	0, r2
@@ -51,9 +45,15 @@ ENTRY(memset)
 
 	;;; Convert len to Dwords, unfold x8
 	lsr.f	lp_count, lp_count, 6
+
 	lpnz	@.Lset64bytes
 	;; LOOP START
-	PREWRITE(r3, 64)	;Prefetch the next write location
+#ifdef PREALLOC_NOT_AVAIL
+	prefetchw [r3, 64]	;Prefetch the next write location
+#else
+	prealloc  [r3, 64]
+#endif
+#ifdef CONFIG_ARC_HAS_LL64
 	std.ab	r4, [r3, 8]
 	std.ab	r4, [r3, 8]
 	std.ab	r4, [r3, 8]
@@ -62,16 +62,45 @@ ENTRY(memset)
 	std.ab	r4, [r3, 8]
 	std.ab	r4, [r3, 8]
 	std.ab	r4, [r3, 8]
+#else
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+#endif
 .Lset64bytes:
 
 	lsr.f	lp_count, r2, 5 ;Last remaining max 124 bytes
 	lpnz	.Lset32bytes
 	;; LOOP START
 	prefetchw [r3, 32]	;Prefetch the next write location
+#ifdef CONFIG_ARC_HAS_LL64
 	std.ab	r4, [r3, 8]
 	std.ab	r4, [r3, 8]
 	std.ab	r4, [r3, 8]
 	std.ab	r4, [r3, 8]
+#else
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+	st.ab	r4, [r3, 4]
+#endif
 .Lset32bytes:
 
 	and.f	lp_count, r2, 0x1F ;Last remaining 31 bytes
@@ -389,6 +389,21 @@ axs103_set_freq(unsigned int id, unsigned int fd, unsigned int od)
 
 static void __init axs103_early_init(void)
 {
+	/*
+	 * AXS103 configurations for SMP/QUAD configurations share device tree
+	 * which defaults to 90 MHz. However recent failures of Quad config
+	 * revealed P&R timing violations so clamp it down to safe 50 MHz
+	 * Instead of duplicating defconfig/DT for SMP/QUAD, add a small hack
+	 *
+	 * This hack is really hacky as of now. Fix it properly by getting the
+	 * number of cores as return value of platform's early SMP callback
+	 */
+#ifdef CONFIG_ARC_MCIP
+	unsigned int num_cores = (read_aux_reg(ARC_REG_MCIP_BCR) >> 16) & 0x3F;
+	if (num_cores > 2)
+		arc_set_core_freq(50 * 1000000);
+#endif
+
 	switch (arc_get_core_freq()/1000000) {
 	case 33:
 		axs103_set_freq(1, 1, 1);
@@ -1140,6 +1140,7 @@
 			ctrl-module = <&omap_control_sata>;
 			clocks = <&sys_clkin1>, <&sata_ref_clk>;
 			clock-names = "sysclk", "refclk";
+			syscon-pllreset = <&scm_conf 0x3fc>;
 			#phy-cells = <0>;
 		};
 
@@ -138,8 +138,8 @@
 
 	mipi_phy: video-phy@10020710 {
 		compatible = "samsung,s5pv210-mipi-video-phy";
-		reg = <0x10020710 8>;
 		#phy-cells = <1>;
+		syscon = <&pmu_system_controller>;
 	};
 
 	pd_cam: cam-power-domain@10023C00 {
@@ -127,6 +127,10 @@
 	};
 };
 
+&cpu0 {
+	cpu0-supply = <&buck1_reg>;
+};
+
 &fimd {
 	pinctrl-0 = <&lcd_en &lcd_clk &lcd_data24 &pwm0_out>;
 	pinctrl-names = "default";
@@ -188,6 +188,10 @@
 	};
 };
 
+&cpu0 {
+	cpu0-supply = <&varm_breg>;
+};
+
 &dsi_0 {
 	vddcore-supply = <&vusb_reg>;
 	vddio-supply = <&vmipi_reg>;
@@ -548,6 +548,10 @@
 	};
 };
 
+&cpu0 {
+	cpu0-supply = <&vdd_arm_reg>;
+};
+
 &pinctrl_1 {
 	hdmi_hpd: hdmi-hpd {
 		samsung,pins = "gpx3-7";
@@ -40,6 +40,18 @@
 			device_type = "cpu";
 			compatible = "arm,cortex-a9";
 			reg = <0x900>;
+			clocks = <&clock CLK_ARM_CLK>;
+			clock-names = "cpu";
+			clock-latency = <160000>;
+
+			operating-points = <
+				1200000 1250000
+				1000000 1150000
+				800000  1075000
+				500000  975000
+				400000  975000
+				200000  950000
+			>;
 			cooling-min-level = <4>;
 			cooling-max-level = <2>;
 			#cooling-cells = <2>; /* min followed by max */
@@ -286,8 +286,8 @@
 			can1: can@53fe4000 {
 				compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
 				reg = <0x53fe4000 0x1000>;
-				clocks = <&clks 33>;
-				clock-names = "ipg";
+				clocks = <&clks 33>, <&clks 33>;
+				clock-names = "ipg", "per";
 				interrupts = <43>;
 				status = "disabled";
 			};
@@ -295,8 +295,8 @@
 			can2: can@53fe8000 {
 				compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
 				reg = <0x53fe8000 0x1000>;
-				clocks = <&clks 34>;
-				clock-names = "ipg";
+				clocks = <&clks 34>, <&clks 34>;
+				clock-names = "ipg", "per";
 				interrupts = <44>;
 				status = "disabled";
 			};
@@ -13,9 +13,8 @@ clocks {
 		#clock-cells = <0>;
 		compatible = "ti,keystone,main-pll-clock";
 		clocks = <&refclksys>;
-		reg = <0x02620350 4>, <0x02310110 4>;
-		reg-names = "control", "multiplier";
-		fixed-postdiv = <2>;
+		reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
+		reg-names = "control", "multiplier", "post-divider";
 	};
 
 	papllclk: papllclk@2620358 {
@@ -22,9 +22,8 @@ clocks {
 		#clock-cells = <0>;
 		compatible = "ti,keystone,main-pll-clock";
 		clocks = <&refclksys>;
-		reg = <0x02620350 4>, <0x02310110 4>;
-		reg-names = "control", "multiplier";
-		fixed-postdiv = <2>;
+		reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
+		reg-names = "control", "multiplier", "post-divider";
 	};
 
 	papllclk: papllclk@2620358 {
@@ -22,9 +22,8 @@ clocks {
 		#clock-cells = <0>;
 		compatible = "ti,keystone,main-pll-clock";
 		clocks = <&refclksys>;
-		reg = <0x02620350 4>, <0x02310110 4>;
-		reg-names = "control", "multiplier";
-		fixed-postdiv = <2>;
+		reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
+		reg-names = "control", "multiplier", "post-divider";
 	};
 
 	papllclk: papllclk@2620358 {
@@ -17,6 +17,7 @@
 	};
 
 	aliases {
+		serial1 = &uart1;
 		stmpe-i2c0 = &stmpe0;
 		stmpe-i2c1 = &stmpe1;
 	};
@@ -15,6 +15,10 @@
 		bootargs = "root=/dev/ram0 console=ttyAMA1,115200n8 earlyprintk";
 	};
 
+	aliases {
+		serial1 = &uart1;
+	};
+
 	src@101e0000 {
 		/* These chrystal drivers are not used on this board */
 		disable-sxtalo;
@@ -757,6 +757,7 @@
 			clock-names = "uartclk", "apb_pclk";
 			pinctrl-names = "default";
 			pinctrl-0 = <&uart0_default_mux>;
+			status = "disabled";
 		};
 
 		uart1: uart@101fb000 {
@ -2373,6 +2373,9 @@ static int of_dev_hwmod_lookup(struct device_node *np,
|
|||||||
* registers. This address is needed early so the OCP registers that
|
* registers. This address is needed early so the OCP registers that
|
||||||
* are part of the device's address space can be ioremapped properly.
|
* are part of the device's address space can be ioremapped properly.
|
||||||
*
|
*
|
||||||
|
* If SYSC access is not needed, the registers will not be remapped
|
||||||
|
* and non-availability of MPU access is not treated as an error.
|
||||||
|
*
|
||||||
* Returns 0 on success, -EINVAL if an invalid hwmod is passed, and
|
* Returns 0 on success, -EINVAL if an invalid hwmod is passed, and
|
||||||
* -ENXIO on absent or invalid register target address space.
|
* -ENXIO on absent or invalid register target address space.
|
||||||
*/
|
*/
|
||||||
@@ -2387,6 +2390,11 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
 
 	_save_mpu_port_index(oh);
 
+	/* if we don't need sysc access we don't need to ioremap */
+	if (!oh->class->sysc)
+		return 0;
+
+	/* we can't continue without MPU PORT if we need sysc access */
 	if (oh->_int_flags & _HWMOD_NO_MPU_PORT)
 		return -ENXIO;
 
@@ -2396,8 +2404,10 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
 			oh->name);
 
 		/* Extract the IO space from device tree blob */
-		if (!np)
+		if (!np) {
+			pr_err("omap_hwmod: %s: no dt node\n", oh->name);
 			return -ENXIO;
+		}
 
 		va_start = of_iomap(np, index + oh->mpu_rt_idx);
 	} else {
@@ -2456,14 +2466,12 @@ static int __init _init(struct omap_hwmod *oh, void *data)
 			oh->name, np->name);
 	}
 
-	if (oh->class->sysc) {
 	r = _init_mpu_rt_base(oh, NULL, index, np);
 	if (r < 0) {
 		WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
 		     oh->name);
 		return 0;
 	}
-	}
 
 	r = _init_clocks(oh, NULL);
 	if (r < 0) {
@@ -827,8 +827,7 @@ static struct omap_hwmod_class_sysconfig dra7xx_gpmc_sysc = {
 	.syss_offs	= 0x0014,
 	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_SIDLEMODE |
 			   SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
-			   SIDLE_SMART_WKUP),
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
 	.sysc_fields	= &omap_hwmod_sysc_type1,
 };
 
@@ -844,7 +843,7 @@ static struct omap_hwmod dra7xx_gpmc_hwmod = {
 	.class		= &dra7xx_gpmc_hwmod_class,
 	.clkdm_name	= "l3main1_clkdm",
 	/* Skip reset for CONFIG_OMAP_GPMC_DEBUG for bootloader timings */
-	.flags		= HWMOD_SWSUP_SIDLE | DEBUG_OMAP_GPMC_HWMOD_FLAGS,
+	.flags		= DEBUG_OMAP_GPMC_HWMOD_FLAGS,
 	.main_clk	= "l3_iclk_div",
 	.prcm = {
 		.omap4 = {
@@ -823,7 +823,7 @@
 			device_type = "dma";
 			reg = <0x0 0x1f270000 0x0 0x10000>,
 			      <0x0 0x1f200000 0x0 0x10000>,
-			      <0x0 0x1b008000 0x0 0x2000>,
+			      <0x0 0x1b000000 0x0 0x400000>,
 			      <0x0 0x1054a000 0x0 0x100>;
 			interrupts = <0x0 0x82 0x4>,
 				     <0x0 0xb8 0x4>,
@@ -168,7 +168,8 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
 		 * Other callers might not initialize the si_lsb field,
 		 * so check explicitely for the right codes here.
 		 */
-		if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
+		if (from->si_signo == SIGBUS &&
+		    (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
 			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
 #endif
 		break;
@@ -201,8 +202,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
 
 int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
 {
-	memset(to, 0, sizeof *to);
-
 	if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) ||
 	    copy_from_user(to->_sifields._pad,
 			   from->_sifields._pad, SI_PAD_SIZE))
@@ -199,16 +199,15 @@ up_fail:
  */
 void update_vsyscall(struct timekeeper *tk)
 {
-	struct timespec xtime_coarse;
 	u32 use_syscall = strcmp(tk->tkr_mono.clock->name, "arch_sys_counter");
 
 	++vdso_data->tb_seq_count;
 	smp_wmb();
 
-	xtime_coarse = __current_kernel_time();
 	vdso_data->use_syscall = use_syscall;
-	vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec;
-	vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec;
+	vdso_data->xtime_coarse_sec = tk->xtime_sec;
+	vdso_data->xtime_coarse_nsec = tk->tkr_mono.xtime_nsec >>
+					tk->tkr_mono.shift;
 	vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
 	vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
 
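The coarse-time hunk above replaces a struct timespec snapshot with the timekeeper's shifted fixed-point nanosecond field. A minimal userspace sketch of that representation (illustrative struct and values only, not the kernel's struct timekeeper):

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative stand-in for the fields the hunk reads. */
	struct tk_sketch {
		uint64_t xtime_sec;   /* whole seconds */
		uint64_t xtime_nsec;  /* nanoseconds, stored shifted left by 'shift' */
		uint32_t shift;       /* fixed-point shift preserving sub-ns bits */
	};

	int main(void)
	{
		struct tk_sketch tk = { 12345, (uint64_t)678901234 << 8, 8 };

		/* What the patched update_vsyscall() publishes as coarse time: */
		uint64_t sec  = tk.xtime_sec;
		uint64_t nsec = tk.xtime_nsec >> tk.shift;

		printf("coarse time: %llu.%09llu\n",
		       (unsigned long long)sec, (unsigned long long)nsec);
		return 0;
	}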
@@ -151,7 +151,6 @@ config BMIPS_GENERIC
 	select BCM7120_L2_IRQ
 	select BRCMSTB_L2_IRQ
 	select IRQ_MIPS_CPU
-	select RAW_IRQ_ACCESSORS
 	select DMA_NONCOHERENT
 	select SYS_SUPPORTS_32BIT_KERNEL
 	select SYS_SUPPORTS_LITTLE_ENDIAN
@@ -190,6 +190,7 @@ int get_c0_perfcount_int(void)
 {
 	return ATH79_MISC_IRQ(5);
 }
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
 
 unsigned int get_c0_compare_int(void)
 {
@@ -42,7 +42,7 @@ static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
 	cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action);
 
 	if (action & SMP_CALL_FUNCTION)
-		smp_call_function_interrupt();
+		generic_smp_call_function_interrupt();
 	if (action & SMP_RESCHEDULE_YOURSELF)
 		scheduler_ipi();
 
@@ -1,10 +0,0 @@
-#ifndef __ASM_MACH_BCM63XX_DMA_COHERENCE_H
-#define __ASM_MACH_BCM63XX_DMA_COHERENCE_H
-
-#include <asm/bmips.h>
-
-#define plat_post_dma_flush	bmips_post_dma_flush
-
-#include <asm/mach-generic/dma-coherence.h>
-
-#endif /* __ASM_MACH_BCM63XX_DMA_COHERENCE_H */
@@ -182,8 +182,39 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
 		 * Make sure the buddy is global too (if it's !none,
 		 * it better already be global)
 		 */
+#ifdef CONFIG_SMP
+		/*
+		 * For SMP, multiple CPUs can race, so we need to do
+		 * this atomically.
+		 */
+#ifdef CONFIG_64BIT
+#define LL_INSN "lld"
+#define SC_INSN "scd"
+#else /* CONFIG_32BIT */
+#define LL_INSN "ll"
+#define SC_INSN "sc"
+#endif
+		unsigned long page_global = _PAGE_GLOBAL;
+		unsigned long tmp;
+
+		__asm__ __volatile__ (
+			"	.set	push\n"
+			"	.set	noreorder\n"
+			"1:	" LL_INSN "	%[tmp], %[buddy]\n"
+			"	bnez	%[tmp], 2f\n"
+			"	 or	%[tmp], %[tmp], %[global]\n"
+			"	" SC_INSN "	%[tmp], %[buddy]\n"
+			"	beqz	%[tmp], 1b\n"
+			"	 nop\n"
+			"2:\n"
+			"	.set	pop"
+			: [buddy] "+m" (buddy->pte),
+			  [tmp] "=&r" (tmp)
+			: [global] "r" (page_global));
+#else /* !CONFIG_SMP */
 		if (pte_none(*buddy))
 			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
+#endif /* CONFIG_SMP */
 	}
 #endif
 }
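The asm added above upgrades the buddy PTE to global only while it is still none (zero), retrying if the store-conditional fails. A userspace C11 sketch of the same atomic rule (illustrative, not kernel code): a strong compare-and-swap from zero captures the ll/bnez/or/sc sequence.

	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	static void buddy_set_global(_Atomic uint64_t *buddy, uint64_t page_global)
	{
		uint64_t expected = 0;

		/* Succeeds only if *buddy is still 0; a concurrent writer wins. */
		atomic_compare_exchange_strong(buddy, &expected, page_global);
	}

	int main(void)
	{
		_Atomic uint64_t pte = 0;

		buddy_set_global(&pte, 1u << 5);
		printf("pte = %#llx\n", (unsigned long long)atomic_load(&pte));
		return 0;
	}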
@@ -83,8 +83,6 @@ static inline void __cpu_die(unsigned int cpu)
 extern void play_dead(void);
 #endif
 
-extern asmlinkage void smp_call_function_interrupt(void);
-
 static inline void arch_send_call_function_single_ipi(int cpu)
 {
 	extern struct plat_smp_ops *mp_ops;	/* private */
@@ -152,6 +152,31 @@
 		.set	noreorder
 		bltz	k0, 8f
 		 move	k1, sp
+#ifdef CONFIG_EVA
+		/*
+		 * Flush interAptiv's Return Prediction Stack (RPS) by writing
+		 * EntryHi. Toggling Config7.RPS is slower and less portable.
+		 *
+		 * The RPS isn't automatically flushed when exceptions are
+		 * taken, which can result in kernel mode speculative accesses
+		 * to user addresses if the RPS mispredicts. That's harmless
+		 * when user and kernel share the same address space, but with
+		 * EVA the same user segments may be unmapped to kernel mode,
+		 * even containing sensitive MMIO regions or invalid memory.
+		 *
+		 * This can happen when the kernel sets the return address to
+		 * ret_from_* and jr's to the exception handler, which looks
+		 * more like a tail call than a function call. If nested calls
+		 * don't evict the last user address in the RPS, it will
+		 * mispredict the return and fetch from a user controlled
+		 * address into the icache.
+		 *
+		 * More recent EVA-capable cores with MAAR to restrict
+		 * speculative accesses aren't affected.
+		 */
+		MFC0	k0, CP0_ENTRYHI
+		MTC0	k0, CP0_ENTRYHI
+#endif
 		.set	reorder
 		/* Called from user mode, new stack. */
 		get_saved_sp
@@ -154,7 +154,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
 				      unsigned long __user *user_mask_ptr)
 {
 	unsigned int real_len;
-	cpumask_t mask;
+	cpumask_t allowed, mask;
 	int retval;
 	struct task_struct *p;
 
@@ -173,7 +173,8 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
 	if (retval)
 		goto out_unlock;
 
-	cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask);
+	cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);
+	cpumask_and(&mask, &allowed, cpu_active_mask);
 
 out_unlock:
 	read_unlock(&tasklist_lock);
@@ -38,7 +38,7 @@ char *mips_get_machine_name(void)
 	return mips_machine_name;
 }
 
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
 void __init early_init_dt_add_memory_arch(u64 base, u64 size)
 {
 	return add_memory_region(base, size, BOOT_MEM_RAM);
@@ -24,7 +24,7 @@ LEAF(relocate_new_kernel)
 
 process_entry:
 	PTR_L		s2, (s0)
-	PTR_ADD		s0, s0, SZREG
+	PTR_ADDIU	s0, s0, SZREG
 
 	/*
 	 * In case of a kdump/crash kernel, the indirection page is not
@@ -61,9 +61,9 @@ copy_word:
 	/* copy page word by word */
 	REG_L		s5, (s2)
 	REG_S		s5, (s4)
-	PTR_ADD		s4, s4, SZREG
-	PTR_ADD		s2, s2, SZREG
-	LONG_SUB	s6, s6, 1
+	PTR_ADDIU	s4, s4, SZREG
+	PTR_ADDIU	s2, s2, SZREG
+	LONG_ADDIU	s6, s6, -1
 	beq		s6, zero, process_entry
 	b		copy_word
 	b		process_entry
@@ -409,8 +409,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
 
 int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
 {
-	memset(to, 0, sizeof *to);
-
 	if (copy_from_user(to, from, 3*sizeof(int)) ||
 	    copy_from_user(to->_sifields._pad,
 			   from->_sifields._pad, SI_PAD_SIZE32))
@@ -284,7 +284,7 @@ static irqreturn_t bmips5000_ipi_interrupt(int irq, void *dev_id)
 	if (action == 0)
 		scheduler_ipi();
 	else
-		smp_call_function_interrupt();
+		generic_smp_call_function_interrupt();
 
 	return IRQ_HANDLED;
 }
@@ -336,7 +336,7 @@ static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id)
 	if (action & SMP_RESCHEDULE_YOURSELF)
 		scheduler_ipi();
 	if (action & SMP_CALL_FUNCTION)
-		smp_call_function_interrupt();
+		generic_smp_call_function_interrupt();
 
 	return IRQ_HANDLED;
 }
@@ -192,16 +192,6 @@ asmlinkage void start_secondary(void)
 	cpu_startup_entry(CPUHP_ONLINE);
 }
 
-/*
- * Call into both interrupt handlers, as we share the IPI for them
- */
-void __irq_entry smp_call_function_interrupt(void)
-{
-	irq_enter();
-	generic_smp_call_function_interrupt();
-	irq_exit();
-}
-
 static void stop_this_cpu(void *dummy)
 {
 	/*
@@ -192,6 +192,7 @@ static void show_stacktrace(struct task_struct *task,
 void show_stack(struct task_struct *task, unsigned long *sp)
 {
 	struct pt_regs regs;
+	mm_segment_t old_fs = get_fs();
 	if (sp) {
 		regs.regs[29] = (unsigned long)sp;
 		regs.regs[31] = 0;
@@ -210,7 +211,13 @@ void show_stack(struct task_struct *task, unsigned long *sp)
 			prepare_frametrace(&regs);
 		}
 	}
+	/*
+	 * show_stack() deals exclusively with kernel mode, so be sure to access
+	 * the stack in the kernel (not user) address space.
+	 */
+	set_fs(KERNEL_DS);
 	show_stacktrace(task, &regs);
+	set_fs(old_fs);
 }
 
 static void show_code(unsigned int __user *pc)
@@ -1519,6 +1526,7 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
 	const int field = 2 * sizeof(unsigned long);
 	int multi_match = regs->cp0_status & ST0_TS;
 	enum ctx_state prev_state;
+	mm_segment_t old_fs = get_fs();
 
 	prev_state = exception_enter();
 	show_regs(regs);
@@ -1540,8 +1548,13 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
 		dump_tlb_all();
 	}
 
+	if (!user_mode(regs))
+		set_fs(KERNEL_DS);
+
 	show_code((unsigned int __user *) regs->cp0_epc);
 
+	set_fs(old_fs);
+
 	/*
 	 * Some chips may have other causes of machine check (e.g. SB1
 	 * graduation timer)
@@ -438,7 +438,7 @@ do {						\
 		: "memory");			\
 } while(0)
 
-#define StoreDW(addr, value, res) \
+#define _StoreDW(addr, value, res) \
 do {						\
 		__asm__ __volatile__ (		\
 			".set\tpush\n\t"	\
@@ -293,7 +293,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
 
 static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
 {
-	smp_call_function_interrupt();
+	generic_smp_call_function_interrupt();
 	return IRQ_HANDLED;
 }
 
@@ -466,6 +466,7 @@ int get_c0_perfcount_int(void)
 {
 	return ltq_perfcount_irq;
 }
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
 
 unsigned int get_c0_compare_int(void)
 {
@@ -266,8 +266,11 @@ void loongson3_ipi_interrupt(struct pt_regs *regs)
 	if (action & SMP_RESCHEDULE_YOURSELF)
 		scheduler_ipi();
 
-	if (action & SMP_CALL_FUNCTION)
-		smp_call_function_interrupt();
+	if (action & SMP_CALL_FUNCTION) {
+		irq_enter();
+		generic_smp_call_function_interrupt();
+		irq_exit();
+	}
 
 	if (action & SMP_ASK_C0COUNT) {
 		BUG_ON(cpu != 0);
@@ -160,18 +160,18 @@ static inline void setup_protection_map(void)
 		protection_map[1] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
 		protection_map[2] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
 		protection_map[3] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
-		protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+		protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
 		protection_map[5] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
-		protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+		protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
 		protection_map[7] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
 
 		protection_map[8] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
 		protection_map[9] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
 		protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
 		protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
-		protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+		protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
 		protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
-		protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE | _PAGE_NO_READ);
+		protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
 		protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
 
 	} else {
@@ -133,7 +133,8 @@ good_area:
 #endif
 				goto bad_area;
 			}
-			if (!(vma->vm_flags & VM_READ)) {
+			if (!(vma->vm_flags & VM_READ) &&
+			    exception_epc(regs) != address) {
 #if 0
 				pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n",
 					  raw_smp_processor_id(),
@@ -222,7 +222,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
 
 static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
 {
-	smp_call_function_interrupt();
+	generic_smp_call_function_interrupt();
 
 	return IRQ_HANDLED;
 }
@@ -154,6 +154,7 @@ int get_c0_perfcount_int(void)
 
 	return mips_cpu_perf_irq;
 }
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
 
 unsigned int get_c0_compare_int(void)
 {
@@ -171,14 +172,17 @@ unsigned int get_c0_compare_int(void)
 
 static void __init init_rtc(void)
 {
-	/* stop the clock whilst setting it up */
-	CMOS_WRITE(RTC_SET | RTC_24H, RTC_CONTROL);
+	unsigned char freq, ctrl;
 
-	/* 32KHz time base */
+	/* Set 32KHz time base if not already set */
+	freq = CMOS_READ(RTC_FREQ_SELECT);
+	if ((freq & RTC_DIV_CTL) != RTC_REF_CLCK_32KHZ)
 		CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);
 
-	/* start the clock */
-	CMOS_WRITE(RTC_24H, RTC_CONTROL);
+	/* Ensure SET bit is clear so RTC can run */
+	ctrl = CMOS_READ(RTC_CONTROL);
+	if (ctrl & RTC_SET)
+		CMOS_WRITE(ctrl & ~RTC_SET, RTC_CONTROL);
 }
 
 void __init plat_time_init(void)
@@ -77,6 +77,7 @@ int get_c0_perfcount_int(void)
 		return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
 	return -1;
 }
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
 
 unsigned int get_c0_compare_int(void)
 {
@@ -86,7 +86,7 @@ void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc)
 {
 	clear_c0_eimr(irq);
 	ack_c0_eirr(irq);
-	smp_call_function_interrupt();
+	generic_smp_call_function_interrupt();
 	set_c0_eimr(irq);
 }
 
@@ -114,7 +114,7 @@ static irqreturn_t paravirt_reched_interrupt(int irq, void *dev_id)
 
 static irqreturn_t paravirt_function_interrupt(int irq, void *dev_id)
 {
-	smp_call_function_interrupt();
+	generic_smp_call_function_interrupt();
 	return IRQ_HANDLED;
 }
 
@@ -26,6 +26,7 @@ int get_c0_perfcount_int(void)
 {
 	return gic_get_c0_perfcount_int();
 }
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
 
 int get_c0_fdc_int(void)
 {
@@ -44,7 +44,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
 
 static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
 {
-	smp_call_function_interrupt();
+	generic_smp_call_function_interrupt();
 
 	return IRQ_HANDLED;
 }
@@ -89,6 +89,7 @@ int get_c0_perfcount_int(void)
 {
 	return rt_perfcount_irq;
 }
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
 
 unsigned int get_c0_compare_int(void)
 {
@@ -107,10 +107,14 @@ static void ip27_do_irq_mask0(void)
 		scheduler_ipi();
 	} else if (pend0 & (1UL << CPU_CALL_A_IRQ)) {
 		LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
-		smp_call_function_interrupt();
+		irq_enter();
+		generic_smp_call_function_interrupt();
+		irq_exit();
 	} else if (pend0 & (1UL << CPU_CALL_B_IRQ)) {
 		LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ);
-		smp_call_function_interrupt();
+		irq_enter();
+		generic_smp_call_function_interrupt();
+		irq_exit();
 	} else
 #endif
 	{
@@ -29,8 +29,6 @@
 #include <asm/sibyte/bcm1480_regs.h>
 #include <asm/sibyte/bcm1480_int.h>
 
-extern void smp_call_function_interrupt(void);
-
 /*
  * These are routines for dealing with the bcm1480 smp capabilities
  * independent of board/firmware
@@ -184,6 +182,9 @@ void bcm1480_mailbox_interrupt(void)
 	if (action & SMP_RESCHEDULE_YOURSELF)
 		scheduler_ipi();
 
-	if (action & SMP_CALL_FUNCTION)
-		smp_call_function_interrupt();
+	if (action & SMP_CALL_FUNCTION) {
+		irq_enter();
+		generic_smp_call_function_interrupt();
+		irq_exit();
+	}
 }
@@ -172,6 +172,9 @@ void sb1250_mailbox_interrupt(void)
 	if (action & SMP_RESCHEDULE_YOURSELF)
 		scheduler_ipi();
 
-	if (action & SMP_CALL_FUNCTION)
-		smp_call_function_interrupt();
+	if (action & SMP_CALL_FUNCTION) {
+		irq_enter();
+		generic_smp_call_function_interrupt();
+		irq_exit();
+	}
 }
@@ -966,8 +966,6 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *d, const siginfo_t *s)
 
 int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
 {
-	memset(to, 0, sizeof *to);
-
 	if (copy_from_user(to, from, 3*sizeof(int)) ||
 	    copy_from_user(to->_sifields._pad,
 			   from->_sifields._pad, SI_PAD_SIZE32))
@@ -1478,7 +1478,7 @@ static int pnv_eeh_next_error(struct eeh_pe **pe)
 	}
 
 	/* Unmask the event */
-	if (eeh_enabled())
+	if (ret == EEH_NEXT_ERR_NONE && eeh_enabled())
 		enable_irq(eeh_event_irq);
 
 	return ret;
@@ -2220,7 +2220,7 @@ static void pnv_pci_ioda_setup_opal_tce_kill(struct pnv_phb *phb)
 
 static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
 		unsigned levels, unsigned long limit,
-		unsigned long *current_offset)
+		unsigned long *current_offset, unsigned long *total_allocated)
 {
 	struct page *tce_mem = NULL;
 	__be64 *addr, *tmp;
@@ -2236,6 +2236,7 @@ static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
 	}
 	addr = page_address(tce_mem);
 	memset(addr, 0, allocated);
+	*total_allocated += allocated;
 
 	--levels;
 	if (!levels) {
@@ -2245,7 +2246,7 @@ static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
 
 	for (i = 0; i < entries; ++i) {
 		tmp = pnv_pci_ioda2_table_do_alloc_pages(nid, shift,
-				levels, limit, current_offset);
+				levels, limit, current_offset, total_allocated);
 		if (!tmp)
 			break;
 
@@ -2267,7 +2268,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
 		struct iommu_table *tbl)
 {
 	void *addr;
-	unsigned long offset = 0, level_shift;
+	unsigned long offset = 0, level_shift, total_allocated = 0;
 	const unsigned window_shift = ilog2(window_size);
 	unsigned entries_shift = window_shift - page_shift;
 	unsigned table_shift = max_t(unsigned, entries_shift + 3, PAGE_SHIFT);
@@ -2286,7 +2287,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
 
 	/* Allocate TCE table */
 	addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
-			levels, tce_table_size, &offset);
+			levels, tce_table_size, &offset, &total_allocated);
 
 	/* addr==NULL means that the first level allocation failed */
 	if (!addr)
@@ -2308,7 +2309,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
 			page_shift);
 	tbl->it_level_size = 1ULL << (level_shift - 3);
 	tbl->it_indirect_levels = levels - 1;
-	tbl->it_allocated_size = offset;
+	tbl->it_allocated_size = total_allocated;
 
 	pr_devel("Created TCE table: ws=%08llx ts=%lx @%08llx\n",
 			window_size, tce_table_size, bus_offset);
@@ -1742,10 +1742,10 @@ static bool ibs_enabled(struct kvm_vcpu *vcpu)
 
 static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
 {
-	if (!vcpu->requests)
-		return 0;
 retry:
 	kvm_s390_vcpu_request_handled(vcpu);
+	if (!vcpu->requests)
+		return 0;
 	/*
 	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
 	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
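The s390 hunk above moves the vcpu->requests test after kvm_s390_vcpu_request_handled(), so a request posted in the window between the two is not silently dropped. A hedged userspace sketch of that check-after-announcing order (illustrative names, not the KVM API):

	#include <stdatomic.h>
	#include <stdio.h>

	static _Atomic int announced;
	static _Atomic unsigned requests;

	static int handle_requests(void)
	{
		/* Announce "requests up to here are being handled" first... */
		atomic_store(&announced, 1);
		/* ...and only then test for pending work; a request posted in
		 * between is seen either here or by the next invocation. */
		if (!atomic_load(&requests))
			return 0;

		atomic_store(&requests, 0);	/* process and clear */
		return 1;
	}

	int main(void)
	{
		atomic_store(&requests, 1u);
		printf("handled=%d\n", handle_requests());
		return 0;
	}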
@@ -28,16 +28,10 @@
  * Must preserve %o5 between VISEntryHalf and VISExitHalf */
 
 #define VISEntryHalf \
-	rd		%fprs, %o5; \
-	andcc		%o5, FPRS_FEF, %g0; \
-	be,pt		%icc, 297f; \
-	 sethi		%hi(298f), %g7; \
-	sethi		%hi(VISenterhalf), %g1; \
-	jmpl		%g1 + %lo(VISenterhalf), %g0; \
-	 or		%g7, %lo(298f), %g7; \
-	clr		%o5; \
-297:	wr		%o5, FPRS_FEF, %fprs; \
-298:
+	VISEntry
+
+#define VISExitHalf \
+	VISExit
 
 #define VISEntryHalfFast(fail_label) \
 	rd		%fprs, %o5; \
@@ -47,7 +41,7 @@
 	ba,a,pt		%xcc, fail_label; \
 297:	wr		%o5, FPRS_FEF, %fprs;
 
-#define VISExitHalf \
+#define VISExitHalfFast \
 	wr		%o5, 0, %fprs;
 
 #ifndef __ASSEMBLY__
@@ -240,8 +240,11 @@ FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
 	add		%o0, 0x40, %o0
 	bne,pt		%icc, 1b
 	 LOAD(prefetch, %g1 + 0x200, #n_reads_strong)
+#ifdef NON_USER_COPY
+	VISExitHalfFast
+#else
 	VISExitHalf
+#endif
 	brz,pn		%o2, .Lexit
 	 cmp		%o2, 19
 	ble,pn		%icc, .Lsmall_unaligned
@@ -44,8 +44,7 @@ vis1:	ldub		[%g6 + TI_FPSAVED], %g3
 
 	stx		%g3, [%g6 + TI_GSR]
 2:	add		%g6, %g1, %g3
-	cmp		%o5, FPRS_DU
-	be,pn		%icc, 6f
+	mov		FPRS_DU | FPRS_DL | FPRS_FEF, %o5
 	sll		%g1, 3, %g1
 	stb		%o5, [%g3 + TI_FPSAVED]
 	rd		%gsr, %g2
@@ -80,65 +79,3 @@ vis1:	ldub		[%g6 + TI_FPSAVED], %g3
 	.align		32
 80:	jmpl		%g7 + %g0, %g0
 	 nop
-
-6:	ldub		[%g3 + TI_FPSAVED], %o5
-	or		%o5, FPRS_DU, %o5
-	add		%g6, TI_FPREGS+0x80, %g2
-	stb		%o5, [%g3 + TI_FPSAVED]
-
-	sll		%g1, 5, %g1
-	add		%g6, TI_FPREGS+0xc0, %g3
-	wr		%g0, FPRS_FEF, %fprs
-	membar		#Sync
-	stda		%f32, [%g2 + %g1] ASI_BLK_P
-	stda		%f48, [%g3 + %g1] ASI_BLK_P
-	membar		#Sync
-	ba,pt		%xcc, 80f
-	 nop
-
-	.align		32
-80:	jmpl		%g7 + %g0, %g0
-	 nop
-
-	.align		32
-VISenterhalf:
-	ldub		[%g6 + TI_FPDEPTH], %g1
-	brnz,a,pn	%g1, 1f
-	 cmp		%g1, 1
-	stb		%g0, [%g6 + TI_FPSAVED]
-	stx		%fsr, [%g6 + TI_XFSR]
-	clr		%o5
-	jmpl		%g7 + %g0, %g0
-	 wr		%g0, FPRS_FEF, %fprs
-
-1:	bne,pn		%icc, 2f
-	 srl		%g1, 1, %g1
-	ba,pt		%xcc, vis1
-	 sub		%g7, 8, %g7
-2:	addcc		%g6, %g1, %g3
-	sll		%g1, 3, %g1
-	andn		%o5, FPRS_DU, %g2
-	stb		%g2, [%g3 + TI_FPSAVED]
-
-	rd		%gsr, %g2
-	add		%g6, %g1, %g3
-	stx		%g2, [%g3 + TI_GSR]
-	add		%g6, %g1, %g2
-	stx		%fsr, [%g2 + TI_XFSR]
-	sll		%g1, 5, %g1
-3:	andcc		%o5, FPRS_DL, %g0
-	be,pn		%icc, 4f
-	 add		%g6, TI_FPREGS, %g2
-
-	add		%g6, TI_FPREGS+0x40, %g3
-	membar		#Sync
-	stda		%f0, [%g2 + %g1] ASI_BLK_P
-	stda		%f16, [%g3 + %g1] ASI_BLK_P
-	membar		#Sync
-	ba,pt		%xcc, 4f
-	 nop
-
-	.align		32
-4:	and		%o5, FPRS_DU, %o5
-	jmpl		%g7 + %g0, %g0
-	 wr		%o5, FPRS_FEF, %fprs
@@ -135,10 +135,6 @@ EXPORT_SYMBOL(copy_user_page);
 void VISenter(void);
 EXPORT_SYMBOL(VISenter);
 
-/* CRYPTO code needs this */
-void VISenterhalf(void);
-EXPORT_SYMBOL(VISenterhalf);
-
 extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
 extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
 	unsigned long *);
@@ -113,8 +113,6 @@ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
 	if (!access_ok(VERIFY_READ, from, sizeof(struct compat_siginfo)))
 		return -EFAULT;
 
-	memset(to, 0, sizeof(*to));
-
 	err = __get_user(to->si_signo, &from->si_signo);
 	err |= __get_user(to->si_errno, &from->si_errno);
 	err |= __get_user(to->si_code, &from->si_code);
@@ -1193,6 +1193,10 @@ static efi_status_t setup_e820(struct boot_params *params,
 		unsigned int e820_type = 0;
 		unsigned long m = efi->efi_memmap;
 
+#ifdef CONFIG_X86_64
+		m |= (u64)efi->efi_memmap_hi << 32;
+#endif
+
 		d = (efi_memory_desc_t *)(m + (i * efi->efi_memdesc_size));
 		switch (d->type) {
 		case EFI_RESERVED_TYPE:
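The setup_e820() fix above stops truncating the EFI memory map pointer, which firmware hands over as two 32-bit halves. A small illustrative sketch of the reassembly (hypothetical helper, not an EFI API):

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t efi_memmap_addr(uint32_t lo, uint32_t hi)
	{
		/* Without the high half (the pre-patch behaviour on x86-64),
		 * any memory map placed above 4 GiB is silently truncated. */
		return (uint64_t)lo | ((uint64_t)hi << 32);
	}

	int main(void)
	{
		printf("%#llx\n", (unsigned long long)efi_memmap_addr(0x2000, 0x1));
		return 0;
	}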
@@ -280,21 +280,6 @@ static inline void clear_LDT(void)
 	set_ldt(NULL, 0);
 }
 
-/*
- * load one particular LDT into the current CPU
- */
-static inline void load_LDT_nolock(mm_context_t *pc)
-{
-	set_ldt(pc->ldt, pc->size);
-}
-
-static inline void load_LDT(mm_context_t *pc)
-{
-	preempt_disable();
-	load_LDT_nolock(pc);
-	preempt_enable();
-}
-
 static inline unsigned long get_desc_base(const struct desc_struct *desc)
 {
 	return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
@@ -9,8 +9,7 @@
  * we put the segment information here.
  */
 typedef struct {
-	void *ldt;
-	int size;
+	struct ldt_struct *ldt;
 
 #ifdef CONFIG_X86_64
 	/* True if mm supports a task running in 32 bit compatibility mode. */
@@ -33,6 +33,50 @@ static inline void load_mm_cr4(struct mm_struct *mm)
 static inline void load_mm_cr4(struct mm_struct *mm) {}
 #endif
 
+/*
+ * ldt_structs can be allocated, used, and freed, but they are never
+ * modified while live.
+ */
+struct ldt_struct {
+	/*
+	 * Xen requires page-aligned LDTs with special permissions. This is
+	 * needed to prevent us from installing evil descriptors such as
+	 * call gates. On native, we could merge the ldt_struct and LDT
+	 * allocations, but it's not worth trying to optimize.
+	 */
+	struct desc_struct *entries;
+	int size;
+};
+
+static inline void load_mm_ldt(struct mm_struct *mm)
+{
+	struct ldt_struct *ldt;
+
+	/* lockless_dereference synchronizes with smp_store_release */
+	ldt = lockless_dereference(mm->context.ldt);
+
+	/*
+	 * Any change to mm->context.ldt is followed by an IPI to all
+	 * CPUs with the mm active. The LDT will not be freed until
+	 * after the IPI is handled by all such CPUs. This means that,
+	 * if the ldt_struct changes before we return, the values we see
+	 * will be safe, and the new values will be loaded before we run
+	 * any user code.
+	 *
+	 * NB: don't try to convert this to use RCU without extreme care.
+	 * We would still need IRQs off, because we don't want to change
+	 * the local LDT after an IPI loaded a newer value than the one
+	 * that we can see.
+	 */
+
+	if (unlikely(ldt))
+		set_ldt(ldt->entries, ldt->size);
+	else
+		clear_LDT();
+
+	DEBUG_LOCKS_WARN_ON(preemptible());
+}
+
 /*
  * Used for LDT copy/destruction.
  */
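load_mm_ldt() added above pairs a lockless_dereference() read with the smp_store_release() publish done by install_ldt() in ldt.c further down. A self-contained C11 sketch of that publish pattern (generic names; not the kernel's implementation, and acquire stands in for the dependency-ordered read):

	#include <stdatomic.h>

	struct table {
		int *entries;
		int size;
	};

	static _Atomic(struct table *) current_table;

	static void install_table(struct table *t)
	{
		/* Release: t->entries and t->size are visible to any reader
		 * that observes the new pointer. */
		atomic_store_explicit(&current_table, t, memory_order_release);
	}

	static int lookup(int idx)
	{
		struct table *t = atomic_load_explicit(&current_table,
						       memory_order_acquire);

		if (!t || idx >= t->size)
			return -1;
		return t->entries[idx];
	}

	int main(void)
	{
		static int ents[4] = { 10, 20, 30, 40 };
		static struct table t = { ents, 4 };

		install_table(&t);
		return lookup(2) == 30 ? 0 : 1;
	}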
@@ -78,12 +122,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		 * was called and then modify_ldt changed
 		 * prev->context.ldt but suppressed an IPI to this CPU.
 		 * In this case, prev->context.ldt != NULL, because we
-		 * never free an LDT while the mm still exists. That
-		 * means that next->context.ldt != prev->context.ldt,
-		 * because mms never share an LDT.
+		 * never set context.ldt to NULL while the mm still
+		 * exists. That means that next->context.ldt !=
+		 * prev->context.ldt, because mms never share an LDT.
		 */
 		if (unlikely(prev->context.ldt != next->context.ldt))
-			load_LDT_nolock(&next->context);
+			load_mm_ldt(next);
 	}
 #ifdef CONFIG_SMP
 	else {
@@ -106,7 +150,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			load_cr3(next->pgd);
 			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
 			load_mm_cr4(next);
-			load_LDT_nolock(&next->context);
+			load_mm_ldt(next);
 		}
 	}
 #endif
@@ -57,9 +57,9 @@ struct sigcontext {
 	unsigned long ip;
 	unsigned long flags;
 	unsigned short cs;
-	unsigned short __pad2;	/* Was called gs, but was always zero. */
-	unsigned short __pad1;	/* Was called fs, but was always zero. */
-	unsigned short ss;
+	unsigned short gs;
+	unsigned short fs;
+	unsigned short __pad0;
 	unsigned long err;
 	unsigned long trapno;
 	unsigned long oldmask;
@@ -177,24 +177,9 @@ struct sigcontext {
 	__u64 rip;
 	__u64 eflags;		/* RFLAGS */
 	__u16 cs;
-
-	/*
-	 * Prior to 2.5.64 ("[PATCH] x86-64 updates for 2.5.64-bk3"),
-	 * Linux saved and restored fs and gs in these slots. This
-	 * was counterproductive, as fsbase and gsbase were never
-	 * saved, so arch_prctl was presumably unreliable.
-	 *
-	 * If these slots are ever needed for any other purpose, there
-	 * is some risk that very old 64-bit binaries could get
-	 * confused. I doubt that many such binaries still work,
-	 * though, since the same patch in 2.5.64 also removed the
-	 * 64-bit set_thread_area syscall, so it appears that there is
-	 * no TLS API that works in both pre- and post-2.5.64 kernels.
-	 */
-	__u16 __pad2;		/* Was gs. */
-	__u16 __pad1;		/* Was fs. */
-
-	__u16 ss;
+	__u16 gs;
+	__u16 fs;
+	__u16 __pad0;
 	__u64 err;
 	__u64 trapno;
 	__u64 oldmask;
@@ -943,7 +943,7 @@ static bool mp_check_pin_attr(int irq, struct irq_alloc_info *info)
 	 */
 	if (irq < nr_legacy_irqs() && data->count == 1) {
 		if (info->ioapic_trigger != data->trigger)
-			mp_register_handler(irq, data->trigger);
+			mp_register_handler(irq, info->ioapic_trigger);
 		data->entry.trigger = data->trigger = info->ioapic_trigger;
 		data->entry.polarity = data->polarity = info->ioapic_polarity;
 	}
@@ -1410,7 +1410,7 @@ void cpu_init(void)
 	load_sp0(t, &current->thread);
 	set_tss_desc(cpu, t);
 	load_TR_desc();
-	load_LDT(&init_mm.context);
+	load_mm_ldt(&init_mm);
 
 	clear_all_debug_regs();
 	dbg_restore_debug_regs();
@@ -1459,7 +1459,7 @@ void cpu_init(void)
 	load_sp0(t, thread);
 	set_tss_desc(cpu, t);
 	load_TR_desc();
-	load_LDT(&init_mm.context);
+	load_mm_ldt(&init_mm);
 
 	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
 
@@ -2179,21 +2179,25 @@ static unsigned long get_segment_base(unsigned int segment)
 	int idx = segment >> 3;
 
 	if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
+		struct ldt_struct *ldt;
+
 		if (idx > LDT_ENTRIES)
 			return 0;
 
-		if (idx > current->active_mm->context.size)
+		/* IRQs are off, so this synchronizes with smp_store_release */
+		ldt = lockless_dereference(current->active_mm->context.ldt);
+		if (!ldt || idx > ldt->size)
 			return 0;
 
-		desc = current->active_mm->context.ldt;
+		desc = &ldt->entries[idx];
 	} else {
 		if (idx > GDT_ENTRIES)
 			return 0;
 
-		desc = raw_cpu_ptr(gdt_page.gdt);
+		desc = raw_cpu_ptr(gdt_page.gdt) + idx;
 	}
 
-	return get_desc_base(desc + idx);
+	return get_desc_base(desc);
 }
 
 #ifdef CONFIG_COMPAT
@@ -12,6 +12,7 @@
 #include <linux/string.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
+#include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/uaccess.h>
 
@@ -20,82 +21,82 @@
 #include <asm/mmu_context.h>
 #include <asm/syscalls.h>
 
-#ifdef CONFIG_SMP
-static void flush_ldt(void *current_mm)
-{
-	if (current->active_mm == current_mm)
-		load_LDT(&current->active_mm->context);
-}
-#endif
-
-static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
-{
-	void *oldldt, *newldt;
-	int oldsize;
-
-	if (mincount <= pc->size)
-		return 0;
-	oldsize = pc->size;
-	mincount = (mincount + (PAGE_SIZE / LDT_ENTRY_SIZE - 1)) &
-			(~(PAGE_SIZE / LDT_ENTRY_SIZE - 1));
-	if (mincount * LDT_ENTRY_SIZE > PAGE_SIZE)
-		newldt = vmalloc(mincount * LDT_ENTRY_SIZE);
-	else
-		newldt = (void *)__get_free_page(GFP_KERNEL);
-
-	if (!newldt)
-		return -ENOMEM;
-
-	if (oldsize)
-		memcpy(newldt, pc->ldt, oldsize * LDT_ENTRY_SIZE);
-	oldldt = pc->ldt;
-	memset(newldt + oldsize * LDT_ENTRY_SIZE, 0,
-	       (mincount - oldsize) * LDT_ENTRY_SIZE);
-
-	paravirt_alloc_ldt(newldt, mincount);
-
-#ifdef CONFIG_X86_64
-	/* CHECKME: Do we really need this ? */
-	wmb();
-#endif
-	pc->ldt = newldt;
-	wmb();
-	pc->size = mincount;
-	wmb();
-
-	if (reload) {
-#ifdef CONFIG_SMP
-		preempt_disable();
-		load_LDT(pc);
-		if (!cpumask_equal(mm_cpumask(current->mm),
-				   cpumask_of(smp_processor_id())))
-			smp_call_function(flush_ldt, current->mm, 1);
-		preempt_enable();
-#else
-		load_LDT(pc);
-#endif
-	}
-	if (oldsize) {
-		paravirt_free_ldt(oldldt, oldsize);
-		if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE)
-			vfree(oldldt);
-		else
-			put_page(virt_to_page(oldldt));
-	}
-	return 0;
-}
-
-static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
-{
-	int err = alloc_ldt(new, old->size, 0);
-	int i;
-
-	if (err < 0)
-		return err;
-
-	for (i = 0; i < old->size; i++)
-		write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
-	return 0;
-}
+/* context.lock is held for us, so we don't need any locking. */
+static void flush_ldt(void *current_mm)
+{
+	mm_context_t *pc;
+
+	if (current->active_mm != current_mm)
+		return;
+
+	pc = &current->active_mm->context;
+	set_ldt(pc->ldt->entries, pc->ldt->size);
+}
+
+/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
+static struct ldt_struct *alloc_ldt_struct(int size)
+{
+	struct ldt_struct *new_ldt;
+	int alloc_size;
+
+	if (size > LDT_ENTRIES)
+		return NULL;
+
+	new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
+	if (!new_ldt)
+		return NULL;
+
+	BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
+	alloc_size = size * LDT_ENTRY_SIZE;
+
+	/*
+	 * Xen is very picky: it requires a page-aligned LDT that has no
+	 * trailing nonzero bytes in any page that contains LDT descriptors.
+	 * Keep it simple: zero the whole allocation and never allocate less
+	 * than PAGE_SIZE.
+	 */
+	if (alloc_size > PAGE_SIZE)
+		new_ldt->entries = vzalloc(alloc_size);
+	else
+		new_ldt->entries = kzalloc(PAGE_SIZE, GFP_KERNEL);
+
+	if (!new_ldt->entries) {
+		kfree(new_ldt);
+		return NULL;
+	}
+
+	new_ldt->size = size;
+	return new_ldt;
+}
+
+/* After calling this, the LDT is immutable. */
+static void finalize_ldt_struct(struct ldt_struct *ldt)
+{
+	paravirt_alloc_ldt(ldt->entries, ldt->size);
+}
+
+/* context.lock is held */
+static void install_ldt(struct mm_struct *current_mm,
+			struct ldt_struct *ldt)
+{
+	/* Synchronizes with lockless_dereference in load_mm_ldt. */
+	smp_store_release(&current_mm->context.ldt, ldt);
+
+	/* Activate the LDT for all CPUs using current_mm. */
+	on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true);
+}
+
+static void free_ldt_struct(struct ldt_struct *ldt)
+{
+	if (likely(!ldt))
+		return;
+
+	paravirt_free_ldt(ldt->entries, ldt->size);
+	if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE)
+		vfree(ldt->entries);
+	else
+		kfree(ldt->entries);
+	kfree(ldt);
+}
 
 /*
@@ -104,17 +105,37 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
  */
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
+	struct ldt_struct *new_ldt;
 	struct mm_struct *old_mm;
 	int retval = 0;
 
 	mutex_init(&mm->context.lock);
-	mm->context.size = 0;
 	old_mm = current->mm;
-	if (old_mm && old_mm->context.size > 0) {
-		mutex_lock(&old_mm->context.lock);
-		retval = copy_ldt(&mm->context, &old_mm->context);
-		mutex_unlock(&old_mm->context.lock);
+	if (!old_mm) {
+		mm->context.ldt = NULL;
+		return 0;
 	}
+
+	mutex_lock(&old_mm->context.lock);
+	if (!old_mm->context.ldt) {
+		mm->context.ldt = NULL;
+		goto out_unlock;
+	}
+
+	new_ldt = alloc_ldt_struct(old_mm->context.ldt->size);
+	if (!new_ldt) {
+		retval = -ENOMEM;
+		goto out_unlock;
+	}
+
+	memcpy(new_ldt->entries, old_mm->context.ldt->entries,
+	       new_ldt->size * LDT_ENTRY_SIZE);
+	finalize_ldt_struct(new_ldt);
+
+	mm->context.ldt = new_ldt;
+
+out_unlock:
+	mutex_unlock(&old_mm->context.lock);
 	return retval;
 }
 
@ -125,53 +146,47 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
  */
 void destroy_context(struct mm_struct *mm)
 {
-	if (mm->context.size) {
-#ifdef CONFIG_X86_32
-		/* CHECKME: Can this ever happen ? */
-		if (mm == current->active_mm)
-			clear_LDT();
-#endif
-		paravirt_free_ldt(mm->context.ldt, mm->context.size);
-		if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE)
-			vfree(mm->context.ldt);
-		else
-			put_page(virt_to_page(mm->context.ldt));
-		mm->context.size = 0;
-	}
+	free_ldt_struct(mm->context.ldt);
+	mm->context.ldt = NULL;
 }
 
 static int read_ldt(void __user *ptr, unsigned long bytecount)
 {
-	int err;
+	int retval;
 	unsigned long size;
 	struct mm_struct *mm = current->mm;
 
-	if (!mm->context.size)
-		return 0;
+	mutex_lock(&mm->context.lock);
+
+	if (!mm->context.ldt) {
+		retval = 0;
+		goto out_unlock;
+	}
+
 	if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
 		bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;
 
-	mutex_lock(&mm->context.lock);
-	size = mm->context.size * LDT_ENTRY_SIZE;
+	size = mm->context.ldt->size * LDT_ENTRY_SIZE;
 	if (size > bytecount)
 		size = bytecount;
 
-	err = 0;
-	if (copy_to_user(ptr, mm->context.ldt, size))
-		err = -EFAULT;
-	mutex_unlock(&mm->context.lock);
-	if (err < 0)
-		goto error_return;
+	if (copy_to_user(ptr, mm->context.ldt->entries, size)) {
+		retval = -EFAULT;
+		goto out_unlock;
+	}
+
 	if (size != bytecount) {
-		/* zero-fill the rest */
-		if (clear_user(ptr + size, bytecount - size) != 0) {
-			err = -EFAULT;
-			goto error_return;
+		/* Zero-fill the rest and pretend we read bytecount bytes. */
+		if (clear_user(ptr + size, bytecount - size)) {
+			retval = -EFAULT;
+			goto out_unlock;
 		}
 	}
-	return bytecount;
-error_return:
-	return err;
+	retval = bytecount;
+
+out_unlock:
+	mutex_unlock(&mm->context.lock);
+	return retval;
 }
 
 static int read_default_ldt(void __user *ptr, unsigned long bytecount)
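read_ldt() above serves func 0 of modify_ldt(2). A minimal userspace sketch of the read side (x86 Linux only, no glibc wrapper; the constants mirror LDT_ENTRIES and LDT_ENTRY_SIZE from the code above):

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#define LDT_ENTRY_SIZE 8
#define LDT_ENTRIES    8192

int main(void)
{
	static unsigned char buf[LDT_ENTRIES * LDT_ENTRY_SIZE];
	/* func 0 == read; a short table is zero-filled up to bytecount,
	 * as the clear_user() branch above shows. */
	long n = syscall(SYS_modify_ldt, 0, buf, sizeof(buf));

	if (n < 0) {
		perror("modify_ldt");
		return 1;
	}
	printf("read %ld bytes of LDT\n", n);
	return 0;
}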
@ -195,6 +210,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
 	struct desc_struct ldt;
 	int error;
 	struct user_desc ldt_info;
+	int oldsize, newsize;
+	struct ldt_struct *new_ldt, *old_ldt;
 
 	error = -EINVAL;
 	if (bytecount != sizeof(ldt_info))
@ -213,34 +230,39 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
 		goto out;
 	}
 
-	mutex_lock(&mm->context.lock);
-	if (ldt_info.entry_number >= mm->context.size) {
-		error = alloc_ldt(&current->mm->context,
-				  ldt_info.entry_number + 1, 1);
-		if (error < 0)
-			goto out_unlock;
-	}
-
-	/* Allow LDTs to be cleared by the user. */
-	if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
-		if (oldmode || LDT_empty(&ldt_info)) {
-			memset(&ldt, 0, sizeof(ldt));
-			goto install;
+	if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
+	    LDT_empty(&ldt_info)) {
+		/* The user wants to clear the entry. */
+		memset(&ldt, 0, sizeof(ldt));
+	} else {
+		if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
+			error = -EINVAL;
+			goto out;
 		}
+
+		fill_ldt(&ldt, &ldt_info);
+		if (oldmode)
+			ldt.avl = 0;
 	}
 
-	if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
-		error = -EINVAL;
-		goto out_unlock;
-	}
+	mutex_lock(&mm->context.lock);
 
-	fill_ldt(&ldt, &ldt_info);
-	if (oldmode)
-		ldt.avl = 0;
+	old_ldt = mm->context.ldt;
+	oldsize = old_ldt ? old_ldt->size : 0;
+	newsize = max((int)(ldt_info.entry_number + 1), oldsize);
+
+	error = -ENOMEM;
+	new_ldt = alloc_ldt_struct(newsize);
+	if (!new_ldt)
+		goto out_unlock;
 
-	/* Install the new entry ...  */
-install:
-	write_ldt_entry(mm->context.ldt, ldt_info.entry_number, &ldt);
+	if (old_ldt)
+		memcpy(new_ldt->entries, old_ldt->entries, oldsize * LDT_ENTRY_SIZE);
+	new_ldt->entries[ldt_info.entry_number] = ldt;
+	finalize_ldt_struct(new_ldt);
+
+	install_ldt(mm, new_ldt);
+	free_ldt_struct(old_ldt);
 	error = 0;
 
 out_unlock:
@ -121,11 +121,11 @@ void __show_regs(struct pt_regs *regs, int all)
 void release_thread(struct task_struct *dead_task)
 {
 	if (dead_task->mm) {
-		if (dead_task->mm->context.size) {
+		if (dead_task->mm->context.ldt) {
 			pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
 				dead_task->comm,
 				dead_task->mm->context.ldt,
-				dead_task->mm->context.size);
+				dead_task->mm->context.ldt->size);
 			BUG();
 		}
 	}
 }
@ -93,8 +93,15 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
 		COPY(r15);
 #endif /* CONFIG_X86_64 */
 
+#ifdef CONFIG_X86_32
 		COPY_SEG_CPL3(cs);
 		COPY_SEG_CPL3(ss);
+#else /* !CONFIG_X86_32 */
+		/* Kernel saves and restores only the CS segment register on signals,
+		 * which is the bare minimum needed to allow mixed 32/64-bit code.
+		 * App's signal handler can save/restore other segments if needed. */
+		COPY_SEG_CPL3(cs);
+#endif /* CONFIG_X86_32 */
 
 		get_user_ex(tmpflags, &sc->flags);
 		regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
@ -154,9 +161,8 @@ int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
 #else /* !CONFIG_X86_32 */
 		put_user_ex(regs->flags, &sc->flags);
 		put_user_ex(regs->cs, &sc->cs);
-		put_user_ex(0, &sc->__pad2);
-		put_user_ex(0, &sc->__pad1);
-		put_user_ex(regs->ss, &sc->ss);
+		put_user_ex(0, &sc->gs);
+		put_user_ex(0, &sc->fs);
 #endif /* CONFIG_X86_32 */
 
 		put_user_ex(fpstate, &sc->fpstate);
@ -451,19 +457,9 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
 
 	regs->sp = (unsigned long)frame;
 
-	/*
-	 * Set up the CS and SS registers to run signal handlers in
-	 * 64-bit mode, even if the handler happens to be interrupting
-	 * 32-bit or 16-bit code.
-	 *
-	 * SS is subtle.  In 64-bit mode, we don't need any particular
-	 * SS descriptor, but we do need SS to be valid.  It's possible
-	 * that the old SS is entirely bogus -- this can happen if the
-	 * signal we're trying to deliver is #GP or #SS caused by a bad
-	 * SS value.
-	 */
+	/* Set up the CS register to run signal handlers in 64-bit mode,
+	   even if the handler happens to be interrupting 32-bit code. */
 	regs->cs = __USER_CS;
-	regs->ss = __USER_DS;
 
 	return 0;
 }
@ -5,6 +5,7 @@
 #include <linux/mm.h>
 #include <linux/ptrace.h>
 #include <asm/desc.h>
+#include <asm/mmu_context.h>
 
 unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
 {
@ -30,10 +31,11 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
 		seg &= ~7UL;
 
 		mutex_lock(&child->mm->context.lock);
-		if (unlikely((seg >> 3) >= child->mm->context.size))
+		if (unlikely(!child->mm->context.ldt ||
+			     (seg >> 3) >= child->mm->context.ldt->size))
 			addr = -1L; /* bogus selector, access would fault */
 		else {
-			desc = child->mm->context.ldt + seg;
+			desc = &child->mm->context.ldt->entries[seg];
 			base = get_desc_base(desc);
 
 			/* 16-bit code segment? */
@ -672,16 +672,16 @@ u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
 	if (iter.mtrr_disabled)
 		return mtrr_disabled_type();
 
+	/* not contained in any MTRRs. */
+	if (type == -1)
+		return mtrr_default_type(mtrr_state);
+
 	/*
 	 * We just check one page, partially covered by MTRRs is
 	 * impossible.
 	 */
 	WARN_ON(iter.partial_map);
 
-	/* not contained in any MTRRs. */
-	if (type == -1)
-		return mtrr_default_type(mtrr_state);
-
 	return type;
 }
 EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);
@ -972,6 +972,11 @@ u64 efi_mem_attributes(unsigned long phys_addr)
 
 static int __init arch_parse_efi_cmdline(char *str)
 {
+	if (!str) {
+		pr_warn("need at least one option\n");
+		return -EINVAL;
+	}
+
 	if (parse_option_str(str, "old_map"))
 		set_bit(EFI_OLD_MEMMAP, &efi.flags);
 	if (parse_option_str(str, "debug"))
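The added NULL check matters because a kernel parameter handler can be invoked with no argument at all (plain "efi" on the command line), and parse_option_str() would otherwise walk a NULL pointer. A self-contained sketch of the failure mode; parse_option_str() here is a simplified stand-in for the kernel helper of the same name:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool parse_option_str(const char *str, const char *option)
{
	size_t optlen = strlen(option);

	while (*str) {
		if (!strncmp(str, option, optlen) &&
		    (str[optlen] == ',' || str[optlen] == '\0'))
			return true;
		str = strchr(str, ',');
		if (!str)
			break;
		str++;
	}
	return false;
}

static int arch_parse_efi_cmdline(const char *str)
{
	if (!str) {		/* "efi" with no "=..." reaches here as NULL */
		fprintf(stderr, "need at least one option\n");
		return -1;
	}
	if (parse_option_str(str, "old_map"))
		puts("old_map set");
	if (parse_option_str(str, "debug"))
		puts("debug set");
	return 0;
}

int main(void)
{
	arch_parse_efi_cmdline("old_map,debug");
	arch_parse_efi_cmdline(NULL);	/* would crash without the guard */
	return 0;
}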
@ -22,6 +22,7 @@
 #include <asm/fpu/internal.h>
 #include <asm/debugreg.h>
 #include <asm/cpu.h>
+#include <asm/mmu_context.h>
 
 #ifdef CONFIG_X86_32
 __visible unsigned long saved_context_ebx;
@ -153,7 +154,7 @@ static void fix_processor_context(void)
 	syscall_init();				/* This sets MSR_*STAR and related */
 #endif
 	load_TR_desc();				/* This does ltr */
-	load_LDT(&current->active_mm->context);	/* This does lldt */
+	load_mm_ldt(current->active_mm);	/* This does lldt */
 
 	fpu__resume_cpu();
 }
@ -13,13 +13,13 @@ CFLAGS_mmu.o := $(nostackp)
 obj-y		:= enlighten.o setup.o multicalls.o mmu.o irq.o \
 			time.o xen-asm.o xen-asm_$(BITS).o \
 			grant-table.o suspend.o platform-pci-unplug.o \
-			p2m.o
+			p2m.o apic.o
 
 obj-$(CONFIG_EVENT_TRACING) += trace.o
 
 obj-$(CONFIG_SMP)		+= smp.o
 obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
 obj-$(CONFIG_XEN_DEBUG_FS)	+= debugfs.o
-obj-$(CONFIG_XEN_DOM0)		+= apic.o vga.o
+obj-$(CONFIG_XEN_DOM0)		+= vga.o
 obj-$(CONFIG_SWIOTLB_XEN)	+= pci-swiotlb-xen.o
 obj-$(CONFIG_XEN_EFI)		+= efi.o
@ -483,6 +483,7 @@ static void set_aliased_prot(void *v, pgprot_t prot)
 	pte_t pte;
 	unsigned long pfn;
 	struct page *page;
+	unsigned char dummy;
 
 	ptep = lookup_address((unsigned long)v, &level);
 	BUG_ON(ptep == NULL);
@ -492,6 +493,32 @@ static void set_aliased_prot(void *v, pgprot_t prot)
 
 	pte = pfn_pte(pfn, prot);
 
+	/*
+	 * Careful: update_va_mapping() will fail if the virtual address
+	 * we're poking isn't populated in the page tables.  We don't
+	 * need to worry about the direct map (that's always in the page
+	 * tables), but we need to be careful about vmap space.  In
+	 * particular, the top level page table can lazily propagate
+	 * entries between processes, so if we've switched mms since we
+	 * vmapped the target in the first place, we might not have the
+	 * top-level page table entry populated.
+	 *
+	 * We disable preemption because we want the same mm active when
+	 * we probe the target and when we issue the hypercall.  We'll
+	 * have the same nominal mm, but if we're a kernel thread, lazy
+	 * mm dropping could change our pgd.
+	 *
+	 * Out of an abundance of caution, this uses __get_user() to fault
+	 * in the target address just in case there's some obscure case
+	 * in which the target address isn't readable.
+	 */
+
+	preempt_disable();
+
+	pagefault_disable();	/* Avoid warnings due to being atomic. */
+	__get_user(dummy, (unsigned char __user __force *)v);
+	pagefault_enable();
+
 	if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
 		BUG();
 
@ -503,6 +530,8 @@ static void set_aliased_prot(void *v, pgprot_t prot)
 			BUG();
 	} else
 		kmap_flush_unused();
+
+	preempt_enable();
 }
 
 static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
@ -510,6 +539,17 @@ static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
 	const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
 	int i;
 
+	/*
+	 * We need to mark all the aliases of the LDT pages RO.  We
+	 * don't need to call vm_flush_aliases(), though, since that's
+	 * only responsible for flushing aliases out the TLBs, not the
+	 * page tables, and Xen will flush the TLB for us if needed.
+	 *
+	 * To avoid confusing future readers: none of this is necessary
+	 * to load the LDT.  The hypervisor only checks this when the
+	 * LDT is faulted in due to subsequent descriptor access.
+	 */
+
 	for(i = 0; i < entries; i += entries_per_page)
 		set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
 }
@ -101,17 +101,15 @@ struct dom0_vga_console_info;
 
 #ifdef CONFIG_XEN_DOM0
 void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size);
-void __init xen_init_apic(void);
 #else
 static inline void __init xen_init_vga(const struct dom0_vga_console_info *info,
 				       size_t size)
 {
 }
-static inline void __init xen_init_apic(void)
-{
-}
 #endif
 
+void __init xen_init_apic(void);
+
 #ifdef CONFIG_XEN_EFI
 extern void xen_efi_init(void);
 #else
@ -296,13 +296,22 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
 	if (!blk)
 		return -ENOMEM;
 
-	present = krealloc(rbnode->cache_present,
-		    BITS_TO_LONGS(blklen) * sizeof(*present), GFP_KERNEL);
-	if (!present) {
-		kfree(blk);
-		return -ENOMEM;
+	if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
+		present = krealloc(rbnode->cache_present,
+				   BITS_TO_LONGS(blklen) * sizeof(*present),
+				   GFP_KERNEL);
+		if (!present) {
+			kfree(blk);
+			return -ENOMEM;
+		}
+
+		memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
+		       (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
+		       * sizeof(*present));
+	} else {
+		present = rbnode->cache_present;
 	}
 
 	/* insert the register value in the correct place in the rbnode block */
 	if (pos == 0) {
 		memmove(blk + offset * map->cache_word_size,
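The memset() added above is the heart of the fix: krealloc(), like userspace realloc(), leaves the newly grown tail of an allocation uninitialized, so an enlarged present bitmap would otherwise report stale bits as set. A hedged userspace analogue of the same pattern:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BITS_PER_LONG	 (CHAR_BIT * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	size_t oldbits = 64, newbits = 256;
	unsigned long *present = calloc(BITS_TO_LONGS(oldbits), sizeof(*present));

	present = realloc(present, BITS_TO_LONGS(newbits) * sizeof(*present));
	if (!present)
		return 1;

	/* Without this memset the grown words hold indeterminate garbage. */
	memset(present + BITS_TO_LONGS(oldbits), 0,
	       (BITS_TO_LONGS(newbits) - BITS_TO_LONGS(oldbits)) *
	       sizeof(*present));

	printf("last word: %lx\n", present[BITS_TO_LONGS(newbits) - 1]);
	free(present);
	return 0;
}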
@ -523,6 +523,7 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
 #  define rbd_assert(expr)	((void) 0)
 #endif /* !RBD_DEBUG */
 
+static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
 static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
@ -1818,6 +1819,16 @@ static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
 	obj_request_done_set(obj_request);
 }
 
+static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
+{
+	dout("%s: obj %p\n", __func__, obj_request);
+
+	if (obj_request_img_data_test(obj_request))
+		rbd_osd_copyup_callback(obj_request);
+	else
+		obj_request_done_set(obj_request);
+}
+
 static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
 				struct ceph_msg *msg)
 {
@ -1866,6 +1877,8 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
 		rbd_osd_discard_callback(obj_request);
 		break;
 	case CEPH_OSD_OP_CALL:
+		rbd_osd_call_callback(obj_request);
+		break;
 	case CEPH_OSD_OP_NOTIFY_ACK:
 	case CEPH_OSD_OP_WATCH:
 		rbd_osd_trivial_callback(obj_request);
@ -2530,13 +2543,15 @@ out_unwind:
 }
 
 static void
-rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
+rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
 {
 	struct rbd_img_request *img_request;
 	struct rbd_device *rbd_dev;
 	struct page **pages;
 	u32 page_count;
 
+	dout("%s: obj %p\n", __func__, obj_request);
+
 	rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
 		obj_request->type == OBJ_REQUEST_NODATA);
 	rbd_assert(obj_request_img_data_test(obj_request));
@ -2563,9 +2578,7 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
 	if (!obj_request->result)
 		obj_request->xferred = obj_request->length;
 
-	/* Finish up with the normal image object callback */
-
-	rbd_img_obj_callback(obj_request);
+	obj_request_done_set(obj_request);
 }
 
 static void
@ -2650,7 +2663,6 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
 
 	/* All set, send it off. */
 
-	orig_request->callback = rbd_img_obj_copyup_callback;
 	osdc = &rbd_dev->rbd_client->client->osdc;
 	img_result = rbd_obj_request_submit(osdc, orig_request);
 	if (!img_result)
@ -369,8 +369,8 @@ static void purge_persistent_gnt(struct xen_blkif *blkif)
 		return;
 	}
 
-	if (work_pending(&blkif->persistent_purge_work)) {
-		pr_alert_ratelimited("Scheduled work from previous purge is still pending, cannot purge list\n");
+	if (work_busy(&blkif->persistent_purge_work)) {
+		pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
 		return;
 	}
 
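The distinction the hunk relies on: work_pending() reports only a work item that is queued and not yet picked up, while work_busy() also reports one that a worker is currently executing, which is exactly the window the purge path must not re-enter. A toy model of the two predicates (not the kernel API):

#include <stdbool.h>
#include <stdio.h>

struct work {
	bool queued;	/* on the queue, not yet picked up */
	bool running;	/* currently being executed by a worker */
};

static bool pending_like(const struct work *w) { return w->queued; }
static bool busy_like(const struct work *w)    { return w->queued || w->running; }

int main(void)
{
	/* Dequeued and mid-execution: the racy window. */
	struct work purge = { .queued = false, .running = true };

	printf("pending-only says safe to restart: %s\n",
	       pending_like(&purge) ? "no" : "yes (wrong)");
	printf("busy check says safe to restart:   %s\n",
	       busy_like(&purge) ? "no" : "yes");
	return 0;
}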
@ -179,6 +179,7 @@ static DEFINE_SPINLOCK(minor_lock);
 	((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
 
 static int blkfront_setup_indirect(struct blkfront_info *info);
+static int blkfront_gather_backend_features(struct blkfront_info *info);
 
 static int get_id_from_freelist(struct blkfront_info *info)
 {
@ -1128,8 +1129,10 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
 			 * Add the used indirect page back to the list of
 			 * available pages for indirect grefs.
 			 */
-			indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
-			list_add(&indirect_page->lru, &info->indirect_pages);
+			if (!info->feature_persistent) {
+				indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
+				list_add(&indirect_page->lru, &info->indirect_pages);
+			}
 			s->indirect_grants[i]->gref = GRANT_INVALID_REF;
 			list_add_tail(&s->indirect_grants[i]->node, &info->grants);
 		}
@ -1519,7 +1522,7 @@ static int blkif_recover(struct blkfront_info *info)
 	info->shadow_free = info->ring.req_prod_pvt;
 	info->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
 
-	rc = blkfront_setup_indirect(info);
+	rc = blkfront_gather_backend_features(info);
 	if (rc) {
 		kfree(copy);
 		return rc;
@ -1720,20 +1723,13 @@ static void blkfront_setup_discard(struct blkfront_info *info)
 
 static int blkfront_setup_indirect(struct blkfront_info *info)
 {
-	unsigned int indirect_segments, segs;
+	unsigned int segs;
 	int err, i;
 
-	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
-			    "feature-max-indirect-segments", "%u", &indirect_segments,
-			    NULL);
-	if (err) {
-		info->max_indirect_segments = 0;
+	if (info->max_indirect_segments == 0)
 		segs = BLKIF_MAX_SEGMENTS_PER_REQUEST;
-	} else {
-		info->max_indirect_segments = min(indirect_segments,
-						  xen_blkif_max_segments);
+	else
 		segs = info->max_indirect_segments;
-	}
 
 	err = fill_grant_buffer(info, (segs + INDIRECT_GREFS(segs)) * BLK_RING_SIZE(info));
 	if (err)
@ -1796,6 +1792,68 @@ out_of_memory:
 	return -ENOMEM;
 }
 
+/*
+ * Gather all backend feature-*
+ */
+static int blkfront_gather_backend_features(struct blkfront_info *info)
+{
+	int err;
+	int barrier, flush, discard, persistent;
+	unsigned int indirect_segments;
+
+	info->feature_flush = 0;
+
+	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+			"feature-barrier", "%d", &barrier,
+			NULL);
+
+	/*
+	 * If there's no "feature-barrier" defined, then it means
+	 * we're dealing with a very old backend which writes
+	 * synchronously; nothing to do.
+	 *
+	 * If there are barriers, then we use flush.
+	 */
+	if (!err && barrier)
+		info->feature_flush = REQ_FLUSH | REQ_FUA;
+	/*
+	 * And if there is "feature-flush-cache" use that above
+	 * barriers.
+	 */
+	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+			"feature-flush-cache", "%d", &flush,
+			NULL);
+
+	if (!err && flush)
+		info->feature_flush = REQ_FLUSH;
+
+	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+			"feature-discard", "%d", &discard,
+			NULL);
+
+	if (!err && discard)
+		blkfront_setup_discard(info);
+
+	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+			"feature-persistent", "%u", &persistent,
+			NULL);
+	if (err)
+		info->feature_persistent = 0;
+	else
+		info->feature_persistent = persistent;
+
+	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+			    "feature-max-indirect-segments", "%u", &indirect_segments,
+			    NULL);
+	if (err)
+		info->max_indirect_segments = 0;
+	else
+		info->max_indirect_segments = min(indirect_segments,
+						  xen_blkif_max_segments);
+
+	return blkfront_setup_indirect(info);
+}
+
 /*
  * Invoked when the backend is finally 'ready' (and has told us the
  * details about the physical device - #sectors, size, etc).
@ -1807,7 +1865,6 @@ static void blkfront_connect(struct blkfront_info *info)
 	unsigned int physical_sector_size;
 	unsigned int binfo;
 	int err;
-	int barrier, flush, discard, persistent;
 
 	switch (info->connected) {
 	case BLKIF_STATE_CONNECTED:
@ -1864,48 +1921,7 @@ static void blkfront_connect(struct blkfront_info *info)
 	if (err != 1)
 		physical_sector_size = sector_size;
 
-	info->feature_flush = 0;
-
-	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
-			"feature-barrier", "%d", &barrier,
-			NULL);
-
-	/*
-	 * If there's no "feature-barrier" defined, then it means
-	 * we're dealing with a very old backend which writes
-	 * synchronously; nothing to do.
-	 *
-	 * If there are barriers, then we use flush.
-	 */
-	if (!err && barrier)
-		info->feature_flush = REQ_FLUSH | REQ_FUA;
-	/*
-	 * And if there is "feature-flush-cache" use that above
-	 * barriers.
-	 */
-	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
-			"feature-flush-cache", "%d", &flush,
-			NULL);
-
-	if (!err && flush)
-		info->feature_flush = REQ_FLUSH;
-
-	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
-			"feature-discard", "%d", &discard,
-			NULL);
-
-	if (!err && discard)
-		blkfront_setup_discard(info);
-
-	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
-			"feature-persistent", "%u", &persistent,
-			NULL);
-	if (err)
-		info->feature_persistent = 0;
-	else
-		info->feature_persistent = persistent;
-
-	err = blkfront_setup_indirect(info);
+	err = blkfront_gather_backend_features(info);
 	if (err) {
 		xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
 				 info->xbdev->otherend);
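The net effect of this refactor: blkif_recover(), which runs after migration, now calls the same feature-gathering function as the connect path, so the capabilities of the new backend are re-read instead of reusing state negotiated with the old one. A toy model of that design decision (struct and field names here are illustrative, not the driver's):

#include <stdio.h>

struct backend { unsigned int max_indirect; };	/* what xenstore reports */
struct frontend { unsigned int max_indirect_segments; };

static void gather_backend_features(struct frontend *fe,
				    const struct backend *be)
{
	/* Re-read every negotiated feature from the current backend. */
	fe->max_indirect_segments = be->max_indirect;
}

int main(void)
{
	struct frontend fe = { 0 };
	struct backend first = { .max_indirect = 256 };
	struct backend migrated = { .max_indirect = 0 };	/* older backend */

	gather_backend_features(&fe, &first);		/* connect path */
	printf("after connect: %u\n", fe.max_indirect_segments);

	gather_backend_features(&fe, &migrated);	/* recover path */
	printf("after migration: %u\n", fe.max_indirect_segments);
	return 0;
}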
@ -429,7 +429,7 @@ static int hwrng_fillfn(void *unused)
 static void start_khwrngd(void)
 {
 	hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
-	if (hwrng_fill == ERR_PTR(-ENOMEM)) {
+	if (IS_ERR(hwrng_fill)) {
 		pr_err("hwrng_fill thread creation failed");
 		hwrng_fill = NULL;
 	}
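kthread_run() reports failure through the ERR_PTR convention and can fail with more than one errno (for instance -EINTR if the spawning process is signalled), so testing against a single ERR_PTR value misses real failures; IS_ERR() covers the whole error range. A self-contained sketch using simplified versions of the kernel macros:

#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

int main(void)
{
	void *enomem = ERR_PTR(-12);	/* -ENOMEM */
	void *eintr  = ERR_PTR(-4);	/* -EINTR  */

	/* Old check: catches exactly one failure mode. */
	printf("eintr == ERR_PTR(-ENOMEM)? %d\n", eintr == enomem);

	/* New check: catches any pointer in the error-encoding range. */
	printf("IS_ERR(eintr)? %d (err=%ld)\n", IS_ERR(eintr), PTR_ERR(eintr));
	return 0;
}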
@ -20,7 +20,7 @@
 #include <asm/clock.h>
 #include <asm/idle.h>
 
-#include <asm/mach-loongson/loongson.h>
+#include <asm/mach-loongson64/loongson.h>
 
 static uint nowait;
 
Some files were not shown because too many files have changed in this diff.