Merge branch 'linus' into sched/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit ec84f00567
@@ -6,7 +6,7 @@ perform in-band IPMI communication with their host.
 
 Required properties:
 
-- compatible : should be "aspeed,ast2400-bt-bmc"
+- compatible : should be "aspeed,ast2400-ibt-bmc"
 - reg: physical address and size of the registers
 
 Optional properties:
@@ -17,7 +17,7 @@ Optional properties:
 Example:
 
 	ibt@1e789140 {
-		compatible = "aspeed,ast2400-bt-bmc";
+		compatible = "aspeed,ast2400-ibt-bmc";
 		reg = <0x1e789140 0x18>;
 		interrupts = <8>;
 	};
@@ -12,7 +12,7 @@ Required properties:
 
 Optional properties:
 - ti,dmic: phandle for the OMAP dmic node if the machine have it connected
-- ti,jack_detection: Need to be present if the board capable to detect jack
+- ti,jack-detection: Need to be present if the board capable to detect jack
   insertion, removal.
 
 Available audio endpoints for the audio-routing table:
@@ -326,7 +326,7 @@ Two parent-locked sibling muxes
 
 This is a good topology.
 
 [ASCII-art diagram: a parent-locked mux feeding dev D1 and further devices; the old and new lines differ only in leading whitespace, which the extraction lost, so this hunk appears to realign the diagram only]
@@ -350,7 +350,7 @@ Mux-locked and parent-locked sibling muxes
 
 This is a good topology.
 
 [ASCII-art diagram: a mux-locked mux feeding dev D1 and further devices; as above, the hunk appears to realign the diagram's whitespace only]
@@ -777,6 +777,17 @@ Gets the current timestamp of kvmclock as seen by the current guest. In
 conjunction with KVM_SET_CLOCK, it is used to ensure monotonicity on scenarios
 such as migration.
 
+When KVM_CAP_ADJUST_CLOCK is passed to KVM_CHECK_EXTENSION, it returns the
+set of bits that KVM can return in struct kvm_clock_data's flag member.
+
+The only flag defined now is KVM_CLOCK_TSC_STABLE. If set, the returned
+value is the exact kvmclock value seen by all VCPUs at the instant
+when KVM_GET_CLOCK was called. If clear, the returned value is simply
+CLOCK_MONOTONIC plus a constant offset; the offset can be modified
+with KVM_SET_CLOCK. KVM will try to make all VCPUs follow this clock,
+but the exact value read by each VCPU could differ, because the host
+TSC is not stable.
+
 struct kvm_clock_data {
 	__u64 clock;  /* kvmclock current value */
 	__u32 flags;
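As an editorial aside to the API text above, here is a minimal user-space sketch of consuming the new flag. It assumes an already-created VM file descriptor (vmfd) and does no setup beyond the ioctl itself; KVM_GET_CLOCK, struct kvm_clock_data, and KVM_CLOCK_TSC_STABLE come from linux/kvm.h.

	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Query kvmclock on an existing VM fd and report whether the value
	 * was taken atomically across all VCPUs (KVM_CLOCK_TSC_STABLE). */
	int report_kvmclock(int vmfd)
	{
		struct kvm_clock_data data = { 0 };

		if (ioctl(vmfd, KVM_GET_CLOCK, &data) < 0) {
			perror("KVM_GET_CLOCK");
			return -1;
		}
		printf("kvmclock: %llu ns (%s)\n",
		       (unsigned long long)data.clock,
		       (data.flags & KVM_CLOCK_TSC_STABLE) ?
				"exact across VCPUs" : "CLOCK_MONOTONIC + offset");
		return 0;
	}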
@@ -7084,6 +7084,7 @@ F:	drivers/scsi/53c700*
 LED SUBSYSTEM
 M:	Richard Purdie <rpurdie@rpsys.net>
 M:	Jacek Anaszewski <j.anaszewski@samsung.com>
+M:	Pavel Machek <pavel@ucw.cz>
 L:	linux-leds@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/j.anaszewski/linux-leds.git
 S:	Maintained
Makefile (7 changed lines)
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 9
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Psychotic Stoned Sheep
 
 # *DOCUMENTATION*
@@ -399,11 +399,12 @@ KBUILD_CFLAGS   := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
 		   -fno-strict-aliasing -fno-common \
 		   -Werror-implicit-function-declaration \
 		   -Wno-format-security \
-		   -std=gnu89
+		   -std=gnu89 $(call cc-option,-fno-PIE)
+
 
 KBUILD_AFLAGS_KERNEL :=
 KBUILD_CFLAGS_KERNEL :=
-KBUILD_AFLAGS   := -D__ASSEMBLY__
+KBUILD_AFLAGS   := -D__ASSEMBLY__ $(call cc-option,-fno-PIE)
 KBUILD_AFLAGS_MODULE  := -DMODULE
 KBUILD_CFLAGS_MODULE  := -DMODULE
 KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
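An editorial note on $(call cc-option,...) used in both hunks: the helper (defined in scripts/Kbuild.include) probes the compiler by building an empty translation unit with the candidate flag, roughly `$(CC) -fno-PIE -c -x c /dev/null -o /dev/null`, and expands to the flag only if the probe succeeds. That makes the change safe on older compilers that do not understand -fno-PIE (the call expands to nothing), while disabling the position-independent-executable default of newer distribution compilers, which the kernel cannot use for its own code.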
@@ -64,8 +64,8 @@
 			};
 
 			ldo3_reg: ldo3 {
-				regulator-min-microvolt = <600000>;
-				regulator-max-microvolt = <1800000>;
+				regulator-min-microvolt = <1725000>;
+				regulator-max-microvolt = <3300000>;
 				regulator-always-on;
 			};
 
@@ -76,8 +76,8 @@
 			};
 
 			ldo5_reg: ldo5 {
-				regulator-min-microvolt = <1725000>;
-				regulator-max-microvolt = <3300000>;
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <3600000>;
 				regulator-always-on;
 			};
 
@@ -100,14 +100,14 @@
 			};
 
 			ldo9_reg: ldo9 {
-				regulator-min-microvolt = <1200000>;
+				regulator-min-microvolt = <1250000>;
 				regulator-max-microvolt = <3600000>;
 				regulator-always-on;
 			};
 
 			ldo10_reg: ldo10 {
-				regulator-min-microvolt = <1250000>;
-				regulator-max-microvolt = <3650000>;
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <3600000>;
 				regulator-always-on;
 			};
 		};
@@ -13,6 +13,11 @@
 		};
 	};
 
+	memory@80000000 {
+		device_type = "memory";
+		reg = <0x80000000 0>;
+	};
+
 	wl12xx_vmmc: wl12xx_vmmc {
 		compatible = "regulator-fixed";
 		regulator-name = "vwl1271";
@@ -13,9 +13,9 @@
 		};
 	};
 
-	memory@0 {
+	memory@80000000 {
 		device_type = "memory";
-		reg = <0 0>;
+		reg = <0x80000000 0>;
 	};
 
 	leds {
@@ -124,6 +124,7 @@
 		compatible = "ti,abe-twl6040";
 		ti,model = "omap5-uevm";
 
+		ti,jack-detection;
 		ti,mclk-freq = <19200000>;
 
 		ti,mcpdm = <&mcpdm>;
@@ -415,7 +416,7 @@
 			ti,backup-battery-charge-high-current;
 		};
 
-		gpadc {
+		gpadc: gpadc {
 			compatible = "ti,palmas-gpadc";
 			interrupts = <18 0
 				      16 0
@@ -475,8 +476,8 @@
 		smps6_reg: smps6 {
 			/* VDD_DDR3 - over VDD_SMPS6 */
 			regulator-name = "smps6";
-			regulator-min-microvolt = <1200000>;
-			regulator-max-microvolt = <1200000>;
+			regulator-min-microvolt = <1350000>;
+			regulator-max-microvolt = <1350000>;
 			regulator-always-on;
 			regulator-boot-on;
 		};
@@ -74,7 +74,7 @@
 	/* Low speed expansion connector */
 	spi0: spi@9844000 {
 		label = "LS-SPI0";
-		cs-gpio = <&pio30 3 0>;
+		cs-gpios = <&pio30 3 0>;
 		status = "okay";
 	};
 
@@ -282,11 +282,15 @@
 		uart1_pins_a: uart1@0 {
 			allwinner,pins = "PG6", "PG7";
 			allwinner,function = "uart1";
 			allwinner,drive = <SUN4I_PINCTRL_10_MA>;
 			allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
 		};
 
+		uart1_pins_cts_rts_a: uart1-cts-rts@0 {
+			allwinner,pins = "PG8", "PG9";
+			allwinner,function = "uart1";
+			allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+			allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+		};
+
 		mmc0_pins_a: mmc0@0 {
@@ -74,6 +74,26 @@ void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long
 		dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
 }
 
+void dump_backtrace_stm(u32 *stack, u32 instruction)
+{
+	char str[80], *p;
+	unsigned int x;
+	int reg;
+
+	for (reg = 10, x = 0, p = str; reg >= 0; reg--) {
+		if (instruction & BIT(reg)) {
+			p += sprintf(p, " r%d:%08x", reg, *stack--);
+			if (++x == 6) {
+				x = 0;
+				p = str;
+				printk("%s\n", str);
+			}
+		}
+	}
+	if (p != str)
+		printk("%s\n", str);
+}
+
 #ifndef CONFIG_ARM_UNWIND
 /*
  * Stack pointers should always be within the kernels view of
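An editorial bounds check on the new dump_backtrace_stm() above: each sprintf() appends at most 13 characters (" r10:" plus eight hex digits), and the line is flushed once six entries accumulate, so at most 6 × 13 = 78 characters plus the terminating NUL are ever stored, safely within char str[80].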
@@ -3,6 +3,9 @@
  * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
  */
 
+/* No __ro_after_init data in the .rodata section - which will always be ro */
+#define RO_AFTER_INIT_DATA
+
 #include <asm-generic/vmlinux.lds.h>
 #include <asm/cache.h>
 #include <asm/thread_info.h>
@@ -223,6 +226,8 @@ SECTIONS
 		. = ALIGN(PAGE_SIZE);
 		__init_end = .;
 
+		*(.data..ro_after_init)
+
 		NOSAVE_DATA
 		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
 		READ_MOSTLY_DATA(L1_CACHE_BYTES)
@@ -10,6 +10,7 @@
  * 27/03/03 Ian Molton Clean up CONFIG_CPU
  *
  */
+#include <linux/kern_levels.h>
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 		.text
@@ -83,13 +84,13 @@ for_each_frame:	tst	frame, mask		@ Check for address exceptions
 		teq	r3, r1, lsr #11
 		ldreq	r0, [frame, #-8]	@ get sp
 		subeq	r0, r0, #4		@ point at the last arg
-		bleq	.Ldumpstm		@ dump saved registers
+		bleq	dump_backtrace_stm	@ dump saved registers
 
 1004:		ldr	r1, [sv_pc, #0]		@ if stmfd sp!, {..., fp, ip, lr, pc}
 		ldr	r3, .Ldsi		@ instruction exists,
 		teq	r3, r1, lsr #11
 		subeq	r0, frame, #16
-		bleq	.Ldumpstm		@ dump saved registers
+		bleq	dump_backtrace_stm	@ dump saved registers
 
 		teq	sv_fp, #0		@ zero saved fp means
 		beq	no_frame		@ no further frames
@@ -112,38 +113,6 @@ ENDPROC(c_backtrace)
 		.long	1004b, 1006b
 		.popsection
 
-#define instr r4
-#define reg   r5
-#define stack r6
-
-.Ldumpstm:	stmfd	sp!, {instr, reg, stack, r7, lr}
-		mov	stack, r0
-		mov	instr, r1
-		mov	reg, #10
-		mov	r7, #0
-1:		mov	r3, #1
- ARM(		tst	instr, r3, lsl reg	)
- THUMB(		lsl	r3, reg			)
- THUMB(		tst	instr, r3		)
-		beq	2f
-		add	r7, r7, #1
-		teq	r7, #6
-		moveq	r7, #0
-		adr	r3, .Lcr
-		addne	r3, r3, #1		@ skip newline
-		ldr	r2, [stack], #-4
-		mov	r1, reg
-		adr	r0, .Lfp
-		bl	printk
-2:		subs	reg, reg, #1
-		bpl	1b
-		teq	r7, #0
-		adrne	r0, .Lcr
-		blne	printk
-		ldmfd	sp!, {instr, reg, stack, r7, pc}
-
-.Lfp:		.asciz	" r%d:%08x%s"
-.Lcr:		.asciz	"\n"
 .Lbad:		.asciz	"Backtrace aborted due to bad frame pointer <%p>\n"
 		.align
 .Ldsi:		.word	0xe92dd800 >> 11	@ stmfd sp!, {... fp, ip, lr, pc}
@@ -71,6 +71,7 @@ config SOC_AM43XX
 	select HAVE_ARM_TWD
 	select ARM_ERRATA_754322
 	select ARM_ERRATA_775420
+	select OMAP_INTERCONNECT
 
 config SOC_DRA7XX
 	bool "TI DRA7XX"
@@ -205,11 +205,15 @@ void __init omap2xxx_check_revision(void)
 
 #define OMAP3_SHOW_FEATURE(feat)		\
 	if (omap3_has_ ##feat())		\
-		printk(#feat" ");
+		n += scnprintf(buf + n, sizeof(buf) - n, #feat " ");
 
 static void __init omap3_cpuinfo(void)
 {
 	const char *cpu_name;
+	char buf[64];
+	int n = 0;
+
+	memset(buf, 0, sizeof(buf));
 
 	/*
 	 * OMAP3430 and OMAP3530 are assumed to be same.
@@ -241,10 +245,10 @@ static void __init omap3_cpuinfo(void)
 		cpu_name = "OMAP3503";
 	}
 
-	sprintf(soc_name, "%s", cpu_name);
+	scnprintf(soc_name, sizeof(soc_name), "%s", cpu_name);
 
 	/* Print verbose information */
-	pr_info("%s %s (", soc_name, soc_rev);
+	n += scnprintf(buf, sizeof(buf) - n, "%s %s (", soc_name, soc_rev);
 
 	OMAP3_SHOW_FEATURE(l2cache);
 	OMAP3_SHOW_FEATURE(iva);
@@ -252,8 +256,10 @@ static void __init omap3_cpuinfo(void)
 	OMAP3_SHOW_FEATURE(neon);
 	OMAP3_SHOW_FEATURE(isp);
 	OMAP3_SHOW_FEATURE(192mhz_clk);
-
-	printk(")\n");
+	if (*(buf + n - 1) == ' ')
+		n--;
+	n += scnprintf(buf + n, sizeof(buf) - n, ")\n");
+	pr_info("%s", buf);
 }
 
 #define OMAP3_CHECK_FEATURE(status,feat)		\
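An editorial aside on the scnprintf() accumulation idiom the hunks above switch to. The sketch below is a user-space approximation (the feature names and revision string are made up, and my_scnprintf stands in for the kernel's scnprintf, whose return value is clamped to what was actually stored, so the running offset n can never overrun the buffer):

	#include <stdarg.h>
	#include <stdio.h>

	/* Userspace stand-in for the kernel's scnprintf(): unlike snprintf,
	 * it returns the number of characters actually stored. */
	static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
	{
		va_list args;
		int i;

		va_start(args, fmt);
		i = vsnprintf(buf, size, fmt, args);
		va_end(args);
		if (i >= (int)size)
			i = size ? (int)size - 1 : 0;
		return i;
	}

	int main(void)
	{
		char buf[64];
		int n = 0;

		n += my_scnprintf(buf + n, sizeof(buf) - n, "%s %s (", "OMAP3630", "ES1.0");
		n += my_scnprintf(buf + n, sizeof(buf) - n, "%s ", "l2cache");
		n += my_scnprintf(buf + n, sizeof(buf) - n, "%s ", "neon");
		if (n && buf[n - 1] == ' ')	/* trim trailing space, as the patch does */
			n--;
		my_scnprintf(buf + n, sizeof(buf) - n, ")");
		printf("%s\n", buf);		/* -> "OMAP3630 ES1.0 (l2cache neon)" */
		return 0;
	}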
@@ -319,6 +319,9 @@ void __init omap3_prm_init_pm(bool has_uart4, bool has_iva)
 	if (has_uart4) {
 		en_uart4_mask = OMAP3630_EN_UART4_MASK;
 		grpsel_uart4_mask = OMAP3630_GRPSEL_UART4_MASK;
+	} else {
+		en_uart4_mask = 0;
+		grpsel_uart4_mask = 0;
 	}
 
 	/* Enable wakeups in PER */
@@ -87,6 +87,12 @@ int voltdm_scale(struct voltagedomain *voltdm,
 		return -ENODATA;
 	}
 
+	if (!voltdm->volt_data) {
+		pr_err("%s: No voltage data defined for vdd_%s\n",
+			__func__, voltdm->name);
+		return -ENODATA;
+	}
+
 	/* Adjust voltage to the exact voltage from the OPP table */
 	for (i = 0; voltdm->volt_data[i].volt_nominal != 0; i++) {
 		if (voltdm->volt_data[i].volt_nominal >= target_volt) {
@@ -1167,7 +1167,7 @@ static int __init dma_debug_do_init(void)
 	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
 	return 0;
 }
-fs_initcall(dma_debug_do_init);
+core_initcall(dma_debug_do_init);
 
 #ifdef CONFIG_ARM_DMA_USE_IOMMU
 
@@ -96,7 +96,7 @@ ENTRY(cpu_cm7_proc_fin)
 	ret	lr
 ENDPROC(cpu_cm7_proc_fin)
 
-	.section ".text.init", #alloc, #execinstr
+	.section ".init.text", #alloc, #execinstr
 
 __v7m_cm7_setup:
 	mov	r8, #(V7M_SCB_CCR_DC | V7M_SCB_CCR_IC| V7M_SCB_CCR_BP)
@@ -105,7 +105,7 @@
 			status = "disabled";
 		};
 
-		nb_perih_clk: nb-periph-clk@13000{
+		nb_periph_clk: nb-periph-clk@13000 {
 			compatible = "marvell,armada-3700-periph-clock-nb";
 			reg = <0x13000 0x100>;
 			clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>,
@@ -113,7 +113,7 @@
 			#clock-cells = <1>;
 		};
 
-		sb_perih_clk: sb-periph-clk@18000{
+		sb_periph_clk: sb-periph-clk@18000 {
 			compatible = "marvell,armada-3700-periph-clock-sb";
 			reg = <0x18000 0x100>;
 			clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>,
@@ -130,8 +130,8 @@
 			reg = <0x700600 0x50>;
 			#address-cells = <0x1>;
 			#size-cells = <0x0>;
-			cell-index = <1>;
-			clocks = <&cps_syscon0 0 3>;
+			cell-index = <3>;
+			clocks = <&cps_syscon0 1 21>;
 			status = "disabled";
 		};
 
@@ -140,7 +140,7 @@
 			reg = <0x700680 0x50>;
 			#address-cells = <1>;
 			#size-cells = <0>;
-			cell-index = <2>;
+			cell-index = <4>;
 			clocks = <&cps_syscon0 1 21>;
 			status = "disabled";
 		};
@@ -46,7 +46,15 @@
 #define	ARMV8_PMU_EVTYPE_MASK	0xc800ffff	/* Mask for writable bits */
 #define	ARMV8_PMU_EVTYPE_EVENT	0xffff		/* Mask for EVENT bits */
 
-#define ARMV8_PMU_EVTYPE_EVENT_SW_INCR 0	/* Software increment event */
+/*
+ * PMUv3 event types: required events
+ */
+#define ARMV8_PMUV3_PERFCTR_SW_INCR		0x00
+#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL	0x03
+#define ARMV8_PMUV3_PERFCTR_L1D_CACHE		0x04
+#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED		0x10
+#define ARMV8_PMUV3_PERFCTR_CPU_CYCLES		0x11
+#define ARMV8_PMUV3_PERFCTR_BR_PRED		0x12
 
 /*
  * Event filters for PMUv3
@@ -31,17 +31,9 @@
 
 /*
  * ARMv8 PMUv3 Performance Events handling code.
- * Common event types.
+ * Common event types (some are defined in asm/perf_event.h).
  */
 
-/* Required events. */
-#define ARMV8_PMUV3_PERFCTR_SW_INCR		0x00
-#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL	0x03
-#define ARMV8_PMUV3_PERFCTR_L1D_CACHE		0x04
-#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED		0x10
-#define ARMV8_PMUV3_PERFCTR_CPU_CYCLES		0x11
-#define ARMV8_PMUV3_PERFCTR_BR_PRED		0x12
-
 /* At least one of the following is required. */
 #define ARMV8_PMUV3_PERFCTR_INST_RETIRED	0x08
 #define ARMV8_PMUV3_PERFCTR_INST_SPEC		0x1B
@@ -597,8 +597,14 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
 
 			idx = ARMV8_PMU_CYCLE_IDX;
 		} else {
-			BUG();
+			return false;
 		}
+	} else if (r->CRn == 0 && r->CRm == 9) {
+		/* PMCCNTR */
+		if (pmu_access_event_counter_el0_disabled(vcpu))
+			return false;
+
+		idx = ARMV8_PMU_CYCLE_IDX;
 	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
 		/* PMEVCNTRn_EL0 */
 		if (pmu_access_event_counter_el0_disabled(vcpu))
@@ -606,7 +612,7 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
 
 		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
 	} else {
-		BUG();
+		return false;
 	}
 
 	if (!pmu_counter_idx_valid(vcpu, idx))
@@ -91,7 +91,7 @@
  */
 #define LOAD_HANDLER(reg, label)					\
 	ld	reg,PACAKBASE(r13);	/* get high part of &label */	\
-	ori	reg,reg,(FIXED_SYMBOL_ABS_ADDR(label))@l;
+	ori	reg,reg,FIXED_SYMBOL_ABS_ADDR(label);
 
 #define __LOAD_HANDLER(reg, label)					\
 	ld	reg,PACAKBASE(r13);					\
@@ -158,14 +158,17 @@ BEGIN_FTR_SECTION_NESTED(943)						\
 	std	ra,offset(r13);						\
 END_FTR_SECTION_NESTED(ftr,ftr,943)
 
-#define EXCEPTION_PROLOG_0(area)					\
-	GET_PACA(r13);							\
+#define EXCEPTION_PROLOG_0_PACA(area)					\
 	std	r9,area+EX_R9(r13);	/* save r9 */			\
 	OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR);			\
 	HMT_MEDIUM;							\
 	std	r10,area+EX_R10(r13);	/* save r10 - r12 */		\
 	OPT_GET_SPR(r10, SPRN_CFAR, CPU_FTR_CFAR)
 
+#define EXCEPTION_PROLOG_0(area)					\
+	GET_PACA(r13);							\
+	EXCEPTION_PROLOG_0_PACA(area)
+
 #define __EXCEPTION_PROLOG_1(area, extra, vec)				\
 	OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR);		\
 	OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR);		\
@@ -196,6 +199,12 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
 	EXCEPTION_PROLOG_1(area, extra, vec);				\
 	EXCEPTION_PROLOG_PSERIES_1(label, h);
 
+/* Have the PACA in r13 already */
+#define EXCEPTION_PROLOG_PSERIES_PACA(area, label, h, extra, vec)	\
+	EXCEPTION_PROLOG_0_PACA(area);					\
+	EXCEPTION_PROLOG_1(area, extra, vec);				\
+	EXCEPTION_PROLOG_PSERIES_1(label, h);
+
 #define __KVMTEST(h, n)							\
 	lbz	r10,HSTATE_IN_GUEST(r13);				\
 	cmpwi	r10,0;							\
@@ -460,5 +460,6 @@
 
 #define PPC_SLBIA(IH)	stringify_in_c(.long PPC_INST_SLBIA | \
 				       ((IH & 0x7) << 21))
+#define PPC_INVALIDATE_ERAT	PPC_SLBIA(7)
 
 #endif /* _ASM_POWERPC_PPC_OPCODE_H */
@@ -116,7 +116,9 @@ EXC_VIRT_NONE(0x4000, 0x4100)
 
 EXC_REAL_BEGIN(system_reset, 0x100, 0x200)
 	SET_SCRATCH0(r13)
-	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
+	GET_PACA(r13)
+	clrrdi	r13,r13,1 /* Last bit of HSPRG0 is set if waking from winkle */
+	EXCEPTION_PROLOG_PSERIES_PACA(PACA_EXGEN, system_reset_common, EXC_STD,
 				 IDLETEST, 0x100)
 
 EXC_REAL_END(system_reset, 0x100, 0x200)
@@ -124,6 +126,9 @@ EXC_VIRT_NONE(0x4100, 0x4200)
 
 #ifdef CONFIG_PPC_P7_NAP
 EXC_COMMON_BEGIN(system_reset_idle_common)
+BEGIN_FTR_SECTION
+	GET_PACA(r13) /* Restore HSPRG0 to get the winkle bit in r13 */
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
 	bl	pnv_restore_hyp_resource
 
 	li	r0,PNV_THREAD_RUNNING
@@ -169,7 +174,7 @@ EXC_REAL_BEGIN(machine_check, 0x200, 0x300)
 	SET_SCRATCH0(r13)		/* save r13 */
 	/*
 	 * Running native on arch 2.06 or later, we may wakeup from winkle
-	 * inside machine check. If yes, then last bit of HSPGR0 would be set
+	 * inside machine check. If yes, then last bit of HSPRG0 would be set
 	 * to 1. Hence clear it unconditionally.
 	 */
 	GET_PACA(r13)
@@ -388,7 +393,7 @@ EXC_COMMON_BEGIN(machine_check_handle_early)
 	/*
 	 * Go back to winkle. Please note that this thread was woken up in
 	 * machine check from winkle and have not restored the per-subcore
-	 * state. Hence before going back to winkle, set last bit of HSPGR0
+	 * state. Hence before going back to winkle, set last bit of HSPRG0
 	 * to 1. This will make sure that if this thread gets woken up
 	 * again at reset vector 0x100 then it will get chance to restore
 	 * the subcore state.
@@ -1215,7 +1215,7 @@ static void show_instructions(struct pt_regs *regs)
 		int instr;
 
 		if (!(i % 8))
-			printk("\n");
+			pr_cont("\n");
 
 #if !defined(CONFIG_BOOKE)
 		/* If executing with the IMMU off, adjust pc rather
@@ -1227,18 +1227,18 @@ static void show_instructions(struct pt_regs *regs)
 
 		if (!__kernel_text_address(pc) ||
 		     probe_kernel_address((unsigned int __user *)pc, instr)) {
-			printk(KERN_CONT "XXXXXXXX ");
+			pr_cont("XXXXXXXX ");
 		} else {
 			if (regs->nip == pc)
-				printk(KERN_CONT "<%08x> ", instr);
+				pr_cont("<%08x> ", instr);
 			else
-				printk(KERN_CONT "%08x ", instr);
+				pr_cont("%08x ", instr);
 		}
 
 		pc += sizeof(int);
 	}
 
-	printk("\n");
+	pr_cont("\n");
 }
 
 struct regbit {
@@ -1282,7 +1282,7 @@ static void print_bits(unsigned long val, struct regbit *bits, const char *sep)
 
 	for (; bits->bit; ++bits)
 		if (val & bits->bit) {
-			printk("%s%s", s, bits->name);
+			pr_cont("%s%s", s, bits->name);
 			s = sep;
 		}
 }
@@ -1305,9 +1305,9 @@ static void print_tm_bits(unsigned long val)
  *   T: Transactional	(bit 34)
  */
 	if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {
-		printk(",TM[");
+		pr_cont(",TM[");
 		print_bits(val, msr_tm_bits, "");
-		printk("]");
+		pr_cont("]");
 	}
 }
 #else
@@ -1316,10 +1316,10 @@ static void print_tm_bits(unsigned long val) {}
 
 static void print_msr_bits(unsigned long val)
 {
-	printk("<");
+	pr_cont("<");
 	print_bits(val, msr_bits, ",");
 	print_tm_bits(val);
-	printk(">");
+	pr_cont(">");
 }
 
 #ifdef CONFIG_PPC64
@@ -1347,29 +1347,29 @@ void show_regs(struct pt_regs * regs)
 	printk("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
 	trap = TRAP(regs);
 	if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
-		printk("CFAR: "REG" ", regs->orig_gpr3);
+		pr_cont("CFAR: "REG" ", regs->orig_gpr3);
 	if (trap == 0x200 || trap == 0x300 || trap == 0x600)
 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
-		printk("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
+		pr_cont("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
 #else
-		printk("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
+		pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
 #endif
 #ifdef CONFIG_PPC64
-	printk("SOFTE: %ld ", regs->softe);
+	pr_cont("SOFTE: %ld ", regs->softe);
 #endif
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 	if (MSR_TM_ACTIVE(regs->msr))
-		printk("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
+		pr_cont("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
 #endif
 
 	for (i = 0;  i < 32;  i++) {
 		if ((i % REGS_PER_LINE) == 0)
-			printk("\nGPR%02d: ", i);
-		printk(REG " ", regs->gpr[i]);
+			pr_cont("\nGPR%02d: ", i);
+		pr_cont(REG " ", regs->gpr[i]);
 		if (i == LAST_VOLATILE && !FULL_REGS(regs))
 			break;
 	}
-	printk("\n");
+	pr_cont("\n");
 #ifdef CONFIG_KALLSYMS
 	/*
 	 * Lookup NIP late so we have the best change of getting the
@@ -1900,14 +1900,14 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
 			printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 			if ((ip == rth) && curr_frame >= 0) {
-				printk(" (%pS)",
+				pr_cont(" (%pS)",
 				       (void *)current->ret_stack[curr_frame].ret);
 				curr_frame--;
 			}
 #endif
 			if (firstframe)
-				printk(" (unreliable)");
-			printk("\n");
+				pr_cont(" (unreliable)");
+			pr_cont("\n");
 		}
 		firstframe = 0;
 
@@ -226,17 +226,25 @@ static void __init configure_exceptions(void)
 		if (firmware_has_feature(FW_FEATURE_OPAL))
 			opal_configure_cores();
 
-		/* Enable AIL if supported, and we are in hypervisor mode */
-		if (early_cpu_has_feature(CPU_FTR_HVMODE) &&
-		    early_cpu_has_feature(CPU_FTR_ARCH_207S)) {
-			unsigned long lpcr = mfspr(SPRN_LPCR);
-			mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
-		}
+		/* AIL on native is done in cpu_ready_for_interrupts() */
 	}
 }
 
 static void cpu_ready_for_interrupts(void)
 {
+	/*
+	 * Enable AIL if supported, and we are in hypervisor mode. This
+	 * is called once for every processor.
+	 *
+	 * If we are not in hypervisor mode the job is done once for
+	 * the whole partition in configure_exceptions().
+	 */
+	if (early_cpu_has_feature(CPU_FTR_HVMODE) &&
+	    early_cpu_has_feature(CPU_FTR_ARCH_207S)) {
+		unsigned long lpcr = mfspr(SPRN_LPCR);
+		mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
+	}
+
 	/* Set IR and DR in PACA MSR */
 	get_paca()->kernel_msr = MSR_KERNEL;
 }
@@ -1029,6 +1029,10 @@ void hash__early_init_mmu_secondary(void)
 {
 	/* Initialize hash table for that CPU */
 	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
+
+		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+			update_hid_for_hash();
+
 		if (!cpu_has_feature(CPU_FTR_ARCH_300))
 			mtspr(SPRN_SDR1, _SDR1);
 		else
@@ -388,6 +388,10 @@ void radix__early_init_mmu_secondary(void)
 	 * update partition table control register and UPRT
 	 */
 	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
+
+		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+			update_hid_for_radix();
+
 		lpcr = mfspr(SPRN_LPCR);
 		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
 
@@ -50,6 +50,8 @@ static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
 	for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) {
 		__tlbiel_pid(pid, set, ric);
 	}
+	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+		asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
 	return;
 }
 
@@ -83,6 +85,8 @@ static inline void _tlbiel_va(unsigned long va, unsigned long pid,
 	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
 		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
 	asm volatile("ptesync": : :"memory");
+	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+		asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
 }
 
 static inline void _tlbie_va(unsigned long va, unsigned long pid,
@@ -43,6 +43,7 @@ config SPARC
 	select ARCH_HAS_SG_CHAIN
 	select CPU_NO_EFFICIENT_FFS
 	select HAVE_ARCH_HARDENED_USERCOPY
+	select PROVE_LOCKING_SMALL if PROVE_LOCKING
 
 config SPARC32
 	def_bool !64BIT
@@ -89,6 +90,14 @@ config ARCH_DEFCONFIG
 config ARCH_PROC_KCORE_TEXT
 	def_bool y
 
+config ARCH_ATU
+	bool
+	default y if SPARC64
+
+config ARCH_DMA_ADDR_T_64BIT
+	bool
+	default y if ARCH_ATU
+
 config IOMMU_HELPER
 	bool
 	default y if SPARC64
@@ -304,6 +313,20 @@ config ARCH_SPARSEMEM_ENABLE
 config ARCH_SPARSEMEM_DEFAULT
 	def_bool y if SPARC64
 
+config FORCE_MAX_ZONEORDER
+	int "Maximum zone order"
+	default "13"
+	help
+	  The kernel memory allocator divides physically contiguous memory
+	  blocks into "zones", where each zone is a power of two number of
+	  pages.  This option selects the largest power of two that the kernel
+	  keeps in the memory allocator.  If you need to allocate very large
+	  blocks of physically contiguous memory, then you may need to
+	  increase this value.
+
+	  This config option is actually maximum order plus one. For example,
+	  a value of 13 means that the largest free memory block is 2^12 pages.
+
 source "mm/Kconfig"
 
 if SPARC64
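An editorial note making the new help text concrete: sparc64 uses 8 KiB base pages, so the default of 13 (maximum order plus one) gives a largest free block of 2^12 pages = 4096 × 8 KiB = 32 MiB.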
@@ -2335,6 +2335,348 @@ unsigned long sun4v_vintr_set_target(unsigned long dev_handle,
  */
 #define HV_FAST_PCI_MSG_SETVALID	0xd3
 
+/* PCI IOMMU v2 definitions and services
+ *
+ * While the PCI IO definitions above are valid, IOMMU v2 adds new PCI IO
+ * definitions and services.
+ *
+ *	CTE		Clump Table Entry. First level table entry in the ATU.
+ *
+ *	pci_device_list
+ *			A 32-bit aligned list of pci_devices.
+ *
+ *	pci_device_listp
+ *			real address of a pci_device_list. 32-bit aligned.
+ *
+ *	iotte		IOMMU translation table entry.
+ *
+ *	iotte_attributes
+ *			IO Attributes for IOMMU v2 mappings. In addition to
+ *			read, write IOMMU v2 supports relax ordering
+ *
+ *	io_page_list	A 64-bit aligned list of real addresses. Each real
+ *			address in an io_page_list must be properly aligned
+ *			to the pagesize of the given IOTSB.
+ *
+ *	io_page_list_p	Real address of an io_page_list, 64-bit aligned.
+ *
+ *	IOTSB		IO Translation Storage Buffer. An aligned table of
+ *			IOTTEs. Each IOTSB has a pagesize, table size, and
+ *			virtual address associated with it that must match
+ *			a pagesize and table size supported by the underlying
+ *			hardware implementation. The alignment requirements
+ *			for an IOTSB depend on the pagesize used for that IOTSB.
+ *			Each IOTTE in an IOTSB maps one pagesize-sized page.
+ *			The size of the IOTSB dictates how large of a virtual
+ *			address space the IOTSB is capable of mapping.
+ *
+ *	iotsb_handle	An opaque identifier for an IOTSB. A devhandle plus
+ *			iotsb_handle represents a binding of an IOTSB to a
+ *			PCI root complex.
+ *
+ *	iotsb_index	Zero-based IOTTE number within an IOTSB.
+ */
+
+/* The index_count argument consists of two fields:
+ * bits 63:48 #iottes and bits 47:0 iotsb_index
+ */
+#define HV_PCI_IOTSB_INDEX_COUNT(__iottes, __iotsb_index) \
+	(((u64)(__iottes) << 48UL) | ((u64)(__iotsb_index)))
+
+/* pci_iotsb_conf()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_IOTSB_CONF
+ * ARG0:	devhandle
+ * ARG1:	r_addr
+ * ARG2:	size
+ * ARG3:	pagesize
+ * ARG4:	iova
+ * RET0:	status
+ * RET1:	iotsb_handle
+ * ERRORS:	EINVAL		Invalid devhandle, size, iova, or pagesize
+ *		EBADALIGN	r_addr is not properly aligned
+ *		ENORADDR	r_addr is not a valid real address
+ *		ETOOMANY	No further IOTSBs may be configured
+ *		EBUSY		Duplicate devhandle, r_addr, iova combination
+ *
+ * Create an IOTSB suitable for the PCI root complex identified by devhandle,
+ * for the DMA virtual address defined by the argument iova.
+ *
+ * r_addr is the properly aligned base address of the IOTSB and size is the
+ * IOTSB (table) size in bytes. The IOTSB is required to be zeroed prior to
+ * being configured. If it contains any values other than zeros then the
+ * behavior is undefined.
+ *
+ * pagesize is the size of each page in the IOTSB. Note that the combination of
+ * size (table size) and pagesize must be valid.
+ *
+ * virt is the DMA virtual address this IOTSB will map.
+ *
+ * If successful, the opaque 64-bit handle iotsb_handle is returned in ret1.
+ * Once configured, privileged access to the IOTSB memory is prohibited and
+ * creates undefined behavior. The only permitted access is indirect via these
+ * services.
+ */
+#define HV_FAST_PCI_IOTSB_CONF		0x190
+
+/* pci_iotsb_info()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_IOTSB_INFO
+ * ARG0:	devhandle
+ * ARG1:	iotsb_handle
+ * RET0:	status
+ * RET1:	r_addr
+ * RET2:	size
+ * RET3:	pagesize
+ * RET4:	iova
+ * RET5:	#bound
+ * ERRORS:	EINVAL	Invalid devhandle or iotsb_handle
+ *
+ * This service returns configuration information about an IOTSB previously
+ * created with pci_iotsb_conf.
+ *
+ * iotsb_handle value 0 may be used with this service to inquire about the
+ * legacy IOTSB that may or may not exist. If the service succeeds, the return
+ * values describe the legacy IOTSB and I/O virtual addresses mapped by that
+ * table. However, the table base address r_addr may contain the value -1 which
+ * indicates a memory range that cannot be accessed or be reclaimed.
+ *
+ * The return value #bound contains the number of PCI devices that iotsb_handle
+ * is currently bound to.
+ */
+#define HV_FAST_PCI_IOTSB_INFO		0x191
+
+/* pci_iotsb_unconf()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_IOTSB_UNCONF
+ * ARG0:	devhandle
+ * ARG1:	iotsb_handle
+ * RET0:	status
+ * ERRORS:	EINVAL	Invalid devhandle or iotsb_handle
+ *		EBUSY	The IOTSB is bound and may not be unconfigured
+ *
+ * This service unconfigures the IOTSB identified by the devhandle and
+ * iotsb_handle arguments, previously created with pci_iotsb_conf.
+ * The IOTSB must not be currently bound to any device or the service will fail
+ *
+ * If the call succeeds, iotsb_handle is no longer valid.
+ */
+#define HV_FAST_PCI_IOTSB_UNCONF	0x192
+
+/* pci_iotsb_bind()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_IOTSB_BIND
+ * ARG0:	devhandle
+ * ARG1:	iotsb_handle
+ * ARG2:	pci_device
+ * RET0:	status
+ * ERRORS:	EINVAL	Invalid devhandle, iotsb_handle, or pci_device
+ *		EBUSY	A PCI function is already bound to an IOTSB at the same
+ *			address range as specified by devhandle, iotsb_handle.
+ *
+ * This service binds the PCI function specified by the argument pci_device to
+ * the IOTSB specified by the arguments devhandle and iotsb_handle.
+ *
+ * The PCI device function is bound to the specified IOTSB with the IOVA range
+ * specified when the IOTSB was configured via pci_iotsb_conf. If the function
+ * is already bound then it is unbound first.
+ */
+#define HV_FAST_PCI_IOTSB_BIND		0x193
+
+/* pci_iotsb_unbind()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_IOTSB_UNBIND
+ * ARG0:	devhandle
+ * ARG1:	iotsb_handle
+ * ARG2:	pci_device
+ * RET0:	status
+ * ERRORS:	EINVAL	Invalid devhandle, iotsb_handle, or pci_device
+ *		ENOMAP	The PCI function was not bound to the specified IOTSB
+ *
+ * This service unbinds the PCI device specified by the argument pci_device
+ * from the IOTSB identified by the arguments devhandle and iotsb_handle.
+ *
+ * If the PCI device is not bound to the specified IOTSB then this service will
+ * fail with status ENOMAP
+ */
+#define HV_FAST_PCI_IOTSB_UNBIND	0x194
+
+/* pci_iotsb_get_binding()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_IOTSB_GET_BINDING
+ * ARG0:	devhandle
+ * ARG1:	iotsb_handle
+ * ARG2:	iova
+ * RET0:	status
+ * RET1:	iotsb_handle
+ * ERRORS:	EINVAL	Invalid devhandle, pci_device, or iova
+ *		ENOMAP	The PCI function is not bound to an IOTSB at iova
+ *
+ * This service returns the IOTSB binding, iotsb_handle, for a given pci_device
+ * and DMA virtual address, iova.
+ *
+ * iova must be the base address of a DMA virtual address range as defined by
+ * the iommu-address-ranges property in the root complex device node defined
+ * by the argument devhandle.
+ */
+#define HV_FAST_PCI_IOTSB_GET_BINDING	0x195
+
+/* pci_iotsb_map()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_IOTSB_MAP
+ * ARG0:	devhandle
+ * ARG1:	iotsb_handle
+ * ARG2:	index_count
+ * ARG3:	iotte_attributes
+ * ARG4:	io_page_list_p
+ * RET0:	status
+ * RET1:	#mapped
+ * ERRORS:	EINVAL		Invalid devhandle, iotsb_handle, #iottes,
+ *			iotsb_index or iotte_attributes
+ *		EBADALIGN	Improperly aligned io_page_list_p or I/O page
+ *				address in the I/O page list.
+ *		ENORADDR	Invalid io_page_list_p or I/O page address in
+ *				the I/O page list.
+ *
+ * This service creates and flushes mappings in the IOTSB defined by the
+ * arguments devhandle, iotsb.
+ *
+ * The index_count argument consists of two fields. Bits 63:48 contain #iotte
+ * and bits 47:0 contain iotsb_index
+ *
+ * The first mapping is created in the IOTSB index specified by iotsb_index.
+ * Subsequent mappings are created at iotsb_index+1 and so on.
+ *
+ * The attributes of each mapping are defined by the argument iotte_attributes.
+ *
+ * The io_page_list_p specifies the real address of the 64-bit-aligned list of
+ * #iottes I/O page addresses. Each page address must be a properly aligned
+ * real address of a page to be mapped in the IOTSB. The first entry in the I/O
+ * page list contains the real address of the first page, the 2nd entry for the
+ * 2nd page, and so on.
+ *
+ * #iottes must be greater than zero.
+ *
+ * The return value #mapped is the actual number of mappings created, which may
+ * be less than or equal to the argument #iottes. If the function returns
+ * successfully with a #mapped value less than the requested #iottes then the
+ * caller should continue to invoke the service with updated iotsb_index,
+ * #iottes, and io_page_list_p arguments until all pages are mapped.
+ *
+ * This service must not be used to demap a mapping. In other words, all
+ * mappings must be valid and have one or both of the RW attribute bits set.
+ *
+ * Note:
+ * It is implementation-defined whether I/O page real address validity checking
+ * is done at time mappings are established or deferred until they are
+ * accessed.
+ */
+#define HV_FAST_PCI_IOTSB_MAP		0x196
+
+/* pci_iotsb_map_one()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_IOTSB_MAP_ONE
+ * ARG0:	devhandle
+ * ARG1:	iotsb_handle
+ * ARG2:	iotsb_index
+ * ARG3:	iotte_attributes
+ * ARG4:	r_addr
+ * RET0:	status
+ * ERRORS:	EINVAL		Invalid devhandle, iotsb_handle, iotsb_index
+ *			or iotte_attributes
+ *		EBADALIGN	Improperly aligned r_addr
+ *		ENORADDR	Invalid r_addr
+ *
+ * This service creates and flushes a single mapping in the IOTSB defined by the
+ * arguments devhandle, iotsb.
+ *
+ * The mapping for the page at r_addr is created at the IOTSB index specified by
+ * iotsb_index with the attributes iotte_attributes.
+ *
+ * This service must not be used to demap a mapping. In other words, the mapping
+ * must be valid and have one or both of the RW attribute bits set.
+ *
+ * Note:
+ * It is implementation-defined whether I/O page real address validity checking
+ * is done at time mappings are established or deferred until they are
+ * accessed.
+ */
+#define HV_FAST_PCI_IOTSB_MAP_ONE	0x197
+
+/* pci_iotsb_demap()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_IOTSB_DEMAP
+ * ARG0:	devhandle
+ * ARG1:	iotsb_handle
+ * ARG2:	iotsb_index
+ * ARG3:	#iottes
+ * RET0:	status
+ * RET1:	#unmapped
+ * ERRORS:	EINVAL	Invalid devhandle, iotsb_handle, iotsb_index or #iottes
+ *
+ * This service unmaps and flushes up to #iottes mappings starting at index
+ * iotsb_index from the IOTSB defined by the arguments devhandle, iotsb.
+ *
+ * #iottes must be greater than zero.
+ *
+ * The actual number of IOTTEs unmapped is returned in #unmapped and may be less
+ * than or equal to the requested number of IOTTEs, #iottes.
+ *
+ * If #unmapped is less than #iottes, the caller should continue to invoke this
+ * service with updated iotsb_index and #iottes arguments until all pages are
+ * demapped.
+ */
+#define HV_FAST_PCI_IOTSB_DEMAP		0x198
+
+/* pci_iotsb_getmap()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_IOTSB_GETMAP
+ * ARG0:	devhandle
+ * ARG1:	iotsb_handle
+ * ARG2:	iotsb_index
+ * RET0:	status
+ * RET1:	r_addr
+ * RET2:	iotte_attributes
+ * ERRORS:	EINVAL	Invalid devhandle, iotsb_handle, or iotsb_index
+ *		ENOMAP	No mapping was found
+ *
+ * This service returns the mapping specified by index iotsb_index from the
+ * IOTSB defined by the arguments devhandle, iotsb.
+ *
+ * Upon success, the real address of the mapping shall be returned in
+ * r_addr and the IOTTE mapping attributes shall be returned in
+ * iotte_attributes.
+ *
+ * The return value iotte_attributes may not include optional features used in
+ * the call to create the mapping.
+ */
+#define HV_FAST_PCI_IOTSB_GETMAP	0x199
+
+/* pci_iotsb_sync_mappings()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_IOTSB_SYNC_MAPPINGS
+ * ARG0:	devhandle
+ * ARG1:	iotsb_handle
+ * ARG2:	iotsb_index
+ * ARG3:	#iottes
+ * RET0:	status
+ * RET1:	#synced
+ * ERRORS:	EINVAL	Invalid devhandle, iotsb_handle, iotsb_index, or #iottes
+ *
+ * This service synchronizes #iottes mappings starting at index iotsb_index in
+ * the IOTSB defined by the arguments devhandle, iotsb.
+ *
+ * #iottes must be greater than zero.
+ *
+ * The actual number of IOTTEs synchronized is returned in #synced, which may
+ * be less than or equal to the requested number, #iottes.
+ *
+ * If, upon a successful return, #synced is less than #iottes, the caller should
+ * continue to invoke this service with updated iotsb_index and #iottes
+ * arguments until all pages are synchronized.
+ */
+#define HV_FAST_PCI_IOTSB_SYNC_MAPPINGS	0x19a
+
 /* Logical Domain Channel services.  */
 
 #define LDC_CHANNEL_DOWN		  0
@@ -2993,6 +3335,7 @@ unsigned long sun4v_m7_set_perfreg(unsigned long reg_num,
 #define HV_GRP_SDIO			0x0108
 #define HV_GRP_SDIO_ERR			0x0109
 #define HV_GRP_REBOOT_DATA		0x0110
+#define HV_GRP_ATU			0x0111
 #define HV_GRP_M7_PERF			0x0114
 #define HV_GRP_NIAG_PERF		0x0200
 #define HV_GRP_FIRE_PERF		0x0201
|
||||
unsigned int limit;
|
||||
};
|
||||
|
||||
#define ATU_64_SPACE_SIZE 0x800000000 /* 32G */
|
||||
|
||||
/* Data structures for SPARC ATU architecture */
|
||||
struct atu_iotsb {
|
||||
void *table; /* IOTSB table base virtual addr*/
|
||||
u64 ra; /* IOTSB table real addr */
|
||||
u64 dvma_size; /* ranges[3].size or OS slected 32G size */
|
||||
u64 dvma_base; /* ranges[3].base */
|
||||
u64 table_size; /* IOTSB table size */
|
||||
u64 page_size; /* IO PAGE size for IOTSB */
|
||||
u32 iotsb_num; /* tsbnum is same as iotsb_handle */
|
||||
};
|
||||
|
||||
struct atu_ranges {
|
||||
u64 base;
|
||||
u64 size;
|
||||
};
|
||||
|
||||
struct atu {
|
||||
struct atu_ranges *ranges;
|
||||
struct atu_iotsb *iotsb;
|
||||
struct iommu_map_table tbl;
|
||||
u64 base;
|
||||
u64 size;
|
||||
u64 dma_addr_mask;
|
||||
};
|
||||
|
||||
struct iommu {
|
||||
struct iommu_map_table tbl;
|
||||
struct atu *atu;
|
||||
spinlock_t lock;
|
||||
u32 dma_addr_mask;
|
||||
iopte_t *page_table;
|
||||
|
@@ -39,6 +39,7 @@ static struct api_info api_table[] = {
 	{ .group = HV_GRP_SDIO,					},
 	{ .group = HV_GRP_SDIO_ERR,				},
 	{ .group = HV_GRP_REBOOT_DATA,				},
+	{ .group = HV_GRP_ATU,		.flags = FLAG_PRE_API	},
 	{ .group = HV_GRP_NIAG_PERF,	.flags = FLAG_PRE_API	},
 	{ .group = HV_GRP_FIRE_PERF,				},
 	{ .group = HV_GRP_N2_CPU,				},
@@ -760,8 +760,12 @@ int dma_supported(struct device *dev, u64 device_mask)
 	struct iommu *iommu = dev->archdata.iommu;
 	u64 dma_addr_mask = iommu->dma_addr_mask;
 
-	if (device_mask >= (1UL << 32UL))
-		return 0;
+	if (device_mask > DMA_BIT_MASK(32)) {
+		if (iommu->atu)
+			dma_addr_mask = iommu->atu->dma_addr_mask;
+		else
+			return 0;
+	}
 
 	if ((device_mask & dma_addr_mask) == dma_addr_mask)
 		return 1;
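An editorial note on the test above: DMA_BIT_MASK(32) is 0xffffffff, so device_mask > DMA_BIT_MASK(32) is true exactly for devices that can address more than 32 bits. Only those are steered to the ATU mask (when an ATU exists); 32-bit-only devices keep the legacy IOMMU mask and the old behavior.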
@@ -13,7 +13,6 @@
 #include <linux/scatterlist.h>
 #include <linux/device.h>
 #include <linux/iommu-helper.h>
-#include <linux/scatterlist.h>
 
 #include <asm/iommu.h>
 
@ -44,6 +44,9 @@ static struct vpci_version vpci_versions[] = {
|
||||
{ .major = 1, .minor = 1 },
|
||||
};
|
||||
|
||||
static unsigned long vatu_major = 1;
|
||||
static unsigned long vatu_minor = 1;
|
||||
|
||||
#define PGLIST_NENTS (PAGE_SIZE / sizeof(u64))
|
||||
|
||||
struct iommu_batch {
|
||||
@ -69,34 +72,57 @@ static inline void iommu_batch_start(struct device *dev, unsigned long prot, uns
|
||||
}
|
||||
|
||||
/* Interrupts must be disabled. */
|
||||
static long iommu_batch_flush(struct iommu_batch *p)
|
||||
static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
|
||||
{
|
||||
struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
|
||||
u64 *pglist = p->pglist;
|
||||
u64 index_count;
|
||||
unsigned long devhandle = pbm->devhandle;
|
||||
unsigned long prot = p->prot;
|
||||
unsigned long entry = p->entry;
|
||||
u64 *pglist = p->pglist;
|
||||
unsigned long npages = p->npages;
|
||||
unsigned long iotsb_num;
|
||||
unsigned long ret;
|
||||
long num;
|
||||
|
||||
/* VPCI maj=1, min=[0,1] only supports read and write */
|
||||
if (vpci_major < 2)
|
||||
prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE);
|
||||
|
||||
while (npages != 0) {
|
||||
long num;
|
||||
|
||||
num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
|
||||
npages, prot, __pa(pglist));
|
||||
if (unlikely(num < 0)) {
|
||||
if (printk_ratelimit())
|
||||
printk("iommu_batch_flush: IOMMU map of "
|
||||
"[%08lx:%08llx:%lx:%lx:%lx] failed with "
|
||||
"status %ld\n",
|
||||
devhandle, HV_PCI_TSBID(0, entry),
|
||||
npages, prot, __pa(pglist), num);
|
||||
return -1;
|
||||
if (mask <= DMA_BIT_MASK(32)) {
|
||||
num = pci_sun4v_iommu_map(devhandle,
|
||||
HV_PCI_TSBID(0, entry),
|
||||
npages,
|
||||
prot,
|
||||
__pa(pglist));
|
||||
if (unlikely(num < 0)) {
|
||||
pr_err_ratelimited("%s: IOMMU map of [%08lx:%08llx:%lx:%lx:%lx] failed with status %ld\n",
|
||||
__func__,
|
||||
devhandle,
|
||||
HV_PCI_TSBID(0, entry),
|
||||
npages, prot, __pa(pglist),
|
||||
num);
|
||||
return -1;
|
||||
}
|
||||
} else {
|
||||
index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry),
|
||||
iotsb_num = pbm->iommu->atu->iotsb->iotsb_num;
|
||||
ret = pci_sun4v_iotsb_map(devhandle,
|
||||
iotsb_num,
|
||||
index_count,
|
||||
prot,
|
||||
__pa(pglist),
|
||||
&num);
|
||||
if (unlikely(ret != HV_EOK)) {
|
||||
pr_err_ratelimited("%s: ATU map of [%08lx:%lx:%llx:%lx:%lx] failed with status %ld\n",
|
||||
__func__,
|
||||
devhandle, iotsb_num,
|
||||
index_count, prot,
|
||||
__pa(pglist), ret);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
entry += num;
|
||||
npages -= num;
|
||||
pglist += num;
|
||||
@ -108,19 +134,19 @@ static long iommu_batch_flush(struct iommu_batch *p)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void iommu_batch_new_entry(unsigned long entry)
|
||||
static inline void iommu_batch_new_entry(unsigned long entry, u64 mask)
|
||||
{
|
||||
struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
|
||||
|
||||
if (p->entry + p->npages == entry)
|
||||
return;
|
||||
if (p->entry != ~0UL)
|
||||
iommu_batch_flush(p);
|
||||
iommu_batch_flush(p, mask);
|
||||
p->entry = entry;
|
||||
}
|
||||
|
||||
/* Interrupts must be disabled. */
|
||||
static inline long iommu_batch_add(u64 phys_page)
|
||||
static inline long iommu_batch_add(u64 phys_page, u64 mask)
|
||||
{
|
||||
struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
|
||||
|
||||
@ -128,28 +154,31 @@ static inline long iommu_batch_add(u64 phys_page)
|
||||
|
||||
p->pglist[p->npages++] = phys_page;
|
||||
if (p->npages == PGLIST_NENTS)
|
||||
return iommu_batch_flush(p);
|
||||
return iommu_batch_flush(p, mask);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Interrupts must be disabled. */
|
||||
static inline long iommu_batch_end(void)
|
||||
static inline long iommu_batch_end(u64 mask)
|
||||
{
|
||||
struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
|
||||
|
||||
BUG_ON(p->npages >= PGLIST_NENTS);
|
||||
|
||||
return iommu_batch_flush(p);
|
||||
return iommu_batch_flush(p, mask);
|
||||
}
|
||||
|
||||
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
|
||||
dma_addr_t *dma_addrp, gfp_t gfp,
|
||||
unsigned long attrs)
|
||||
{
|
||||
u64 mask;
|
||||
unsigned long flags, order, first_page, npages, n;
|
||||
unsigned long prot = 0;
|
||||
struct iommu *iommu;
|
||||
struct atu *atu;
|
||||
struct iommu_map_table *tbl;
|
||||
struct page *page;
|
||||
void *ret;
|
||||
long entry;
|
||||
@ -174,14 +203,21 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
|
||||
memset((char *)first_page, 0, PAGE_SIZE << order);
|
||||
|
||||
iommu = dev->archdata.iommu;
|
||||
atu = iommu->atu;
|
||||
|
||||
entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
|
||||
mask = dev->coherent_dma_mask;
|
||||
if (mask <= DMA_BIT_MASK(32))
|
||||
tbl = &iommu->tbl;
|
||||
else
|
||||
tbl = &atu->tbl;
|
||||
|
||||
entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
|
||||
(unsigned long)(-1), 0);
|
||||
|
||||
if (unlikely(entry == IOMMU_ERROR_CODE))
|
||||
goto range_alloc_fail;
|
||||
|
||||
*dma_addrp = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
|
||||
*dma_addrp = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
|
||||
ret = (void *) first_page;
|
||||
first_page = __pa(first_page);
|
||||
|
||||
@ -193,12 +229,12 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
|
||||
entry);
|
||||
|
||||
for (n = 0; n < npages; n++) {
|
||||
long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
|
||||
long err = iommu_batch_add(first_page + (n * PAGE_SIZE), mask);
|
||||
if (unlikely(err < 0L))
|
||||
goto iommu_map_fail;
|
||||
}
|
||||
|
||||
if (unlikely(iommu_batch_end() < 0L))
|
||||
if (unlikely(iommu_batch_end(mask) < 0L))
|
||||
goto iommu_map_fail;
|
||||
|
||||
local_irq_restore(flags);
|
||||
@ -206,25 +242,71 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
|
||||
return ret;
|
||||
|
||||
iommu_map_fail:
|
||||
iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, IOMMU_ERROR_CODE);
|
||||
iommu_tbl_range_free(tbl, *dma_addrp, npages, IOMMU_ERROR_CODE);
|
||||
|
||||
range_alloc_fail:
|
||||
free_pages(first_page, order);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void dma_4v_iommu_demap(void *demap_arg, unsigned long entry,
|
||||
unsigned long npages)
|
||||
unsigned long dma_4v_iotsb_bind(unsigned long devhandle,
|
||||
unsigned long iotsb_num,
|
||||
struct pci_bus *bus_dev)
|
||||
{
|
||||
struct pci_dev *pdev;
|
||||
unsigned long err;
|
||||
unsigned int bus;
|
||||
unsigned int device;
|
||||
unsigned int fun;
|
||||
|
||||
list_for_each_entry(pdev, &bus_dev->devices, bus_list) {
|
||||
if (pdev->subordinate) {
|
||||
/* No need to bind pci bridge */
|
||||
dma_4v_iotsb_bind(devhandle, iotsb_num,
|
||||
pdev->subordinate);
|
||||
} else {
|
||||
bus = bus_dev->number;
|
||||
device = PCI_SLOT(pdev->devfn);
|
||||
fun = PCI_FUNC(pdev->devfn);
|
||||
err = pci_sun4v_iotsb_bind(devhandle, iotsb_num,
|
||||
HV_PCI_DEVICE_BUILD(bus,
|
||||
device,
|
||||
fun));
|
||||
|
||||
/* If bind fails for one device it is going to fail
|
||||
* for rest of the devices because we are sharing
|
||||
* IOTSB. So in case of failure simply return with
|
||||
* error.
|
||||
*/
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void dma_4v_iommu_demap(struct device *dev, unsigned long devhandle,
|
||||
dma_addr_t dvma, unsigned long iotsb_num,
|
||||
unsigned long entry, unsigned long npages)
|
||||
{
|
||||
u32 devhandle = *(u32 *)demap_arg;
|
||||
unsigned long num, flags;
|
||||
unsigned long ret;
|
||||
|
||||
local_irq_save(flags);
|
||||
do {
|
||||
num = pci_sun4v_iommu_demap(devhandle,
|
||||
HV_PCI_TSBID(0, entry),
|
||||
npages);
|
||||
|
||||
if (dvma <= DMA_BIT_MASK(32)) {
|
||||
num = pci_sun4v_iommu_demap(devhandle,
|
||||
HV_PCI_TSBID(0, entry),
|
||||
npages);
|
||||
} else {
|
||||
ret = pci_sun4v_iotsb_demap(devhandle, iotsb_num,
|
||||
entry, npages, &num);
|
||||
if (unlikely(ret != HV_EOK)) {
|
||||
pr_err_ratelimited("pci_iotsb_demap() failed with error: %ld\n",
|
||||
ret);
|
||||
}
|
||||
}
|
||||
entry += num;
|
||||
npages -= num;
|
||||
} while (npages != 0);
|
||||
@ -236,16 +318,28 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
|
||||
{
|
||||
struct pci_pbm_info *pbm;
|
||||
struct iommu *iommu;
|
||||
struct atu *atu;
|
||||
struct iommu_map_table *tbl;
|
||||
unsigned long order, npages, entry;
|
||||
unsigned long iotsb_num;
|
||||
u32 devhandle;
|
||||
|
||||
npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
|
||||
iommu = dev->archdata.iommu;
|
||||
pbm = dev->archdata.host_controller;
|
||||
atu = iommu->atu;
|
||||
devhandle = pbm->devhandle;
|
||||
entry = ((dvma - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
|
||||
dma_4v_iommu_demap(&devhandle, entry, npages);
|
||||
iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);
|
||||
|
||||
if (dvma <= DMA_BIT_MASK(32)) {
|
||||
tbl = &iommu->tbl;
|
||||
iotsb_num = 0; /* we don't care for legacy iommu */
|
||||
} else {
|
||||
tbl = &atu->tbl;
|
||||
iotsb_num = atu->iotsb->iotsb_num;
|
||||
}
|
||||
entry = ((dvma - tbl->table_map_base) >> IO_PAGE_SHIFT);
|
||||
dma_4v_iommu_demap(dev, devhandle, dvma, iotsb_num, entry, npages);
|
||||
iommu_tbl_range_free(tbl, dvma, npages, IOMMU_ERROR_CODE);
|
||||
order = get_order(size);
|
||||
if (order < 10)
|
||||
free_pages((unsigned long)cpu, order);
|
||||
@ -257,13 +351,17 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
|
||||
unsigned long attrs)
|
||||
{
|
||||
struct iommu *iommu;
|
||||
struct atu *atu;
|
||||
struct iommu_map_table *tbl;
|
||||
u64 mask;
|
||||
unsigned long flags, npages, oaddr;
|
||||
unsigned long i, base_paddr;
|
||||
u32 bus_addr, ret;
|
||||
unsigned long prot;
|
||||
dma_addr_t bus_addr, ret;
|
||||
long entry;
|
||||
|
||||
iommu = dev->archdata.iommu;
|
||||
atu = iommu->atu;
|
||||
|
||||
if (unlikely(direction == DMA_NONE))
|
||||
goto bad;
|
||||
@ -272,13 +370,19 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
|
||||
npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
|
||||
npages >>= IO_PAGE_SHIFT;
|
||||
|
||||
entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
|
||||
mask = *dev->dma_mask;
|
||||
if (mask <= DMA_BIT_MASK(32))
|
||||
tbl = &iommu->tbl;
|
||||
else
|
||||
tbl = &atu->tbl;
|
||||
|
||||
entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
|
||||
(unsigned long)(-1), 0);
|
||||
|
||||
if (unlikely(entry == IOMMU_ERROR_CODE))
|
||||
goto bad;
|
||||
|
||||
bus_addr = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
|
||||
bus_addr = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
|
||||
ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
|
||||
base_paddr = __pa(oaddr & IO_PAGE_MASK);
|
||||
prot = HV_PCI_MAP_ATTR_READ;
|
||||
@ -293,11 +397,11 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
|
||||
iommu_batch_start(dev, prot, entry);
|
||||
|
||||
for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
|
||||
long err = iommu_batch_add(base_paddr);
|
||||
long err = iommu_batch_add(base_paddr, mask);
|
||||
if (unlikely(err < 0L))
|
||||
goto iommu_map_fail;
|
||||
}
|
||||
if (unlikely(iommu_batch_end() < 0L))
|
||||
if (unlikely(iommu_batch_end(mask) < 0L))
|
||||
goto iommu_map_fail;
|
||||
|
||||
local_irq_restore(flags);
|
||||
@ -310,7 +414,7 @@ bad:
|
||||
return DMA_ERROR_CODE;
|
||||
|
||||
iommu_map_fail:
|
||||
iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
|
||||
iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
|
||||
return DMA_ERROR_CODE;
|
||||
}
|
||||
|
||||
@ -320,7 +424,10 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
|
||||
{
|
||||
struct pci_pbm_info *pbm;
|
||||
struct iommu *iommu;
|
||||
struct atu *atu;
|
||||
struct iommu_map_table *tbl;
|
||||
unsigned long npages;
|
||||
unsigned long iotsb_num;
|
||||
long entry;
|
||||
u32 devhandle;
|
||||
|
||||
@ -332,14 +439,23 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,

iommu = dev->archdata.iommu;
pbm = dev->archdata.host_controller;
atu = iommu->atu;
devhandle = pbm->devhandle;

npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
bus_addr &= IO_PAGE_MASK;
entry = (bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT;
dma_4v_iommu_demap(&devhandle, entry, npages);
iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);

if (bus_addr <= DMA_BIT_MASK(32)) {
iotsb_num = 0; /* we don't care for legacy iommu */
tbl = &iommu->tbl;
} else {
iotsb_num = atu->iotsb->iotsb_num;
tbl = &atu->tbl;
}
entry = (bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT;
dma_4v_iommu_demap(dev, devhandle, bus_addr, iotsb_num, entry, npages);
iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
}

static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
@ -353,12 +469,17 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
unsigned long seg_boundary_size;
int outcount, incount, i;
struct iommu *iommu;
struct atu *atu;
struct iommu_map_table *tbl;
u64 mask;
unsigned long base_shift;
long err;

BUG_ON(direction == DMA_NONE);

iommu = dev->archdata.iommu;
atu = iommu->atu;

if (nelems == 0 || !iommu)
return 0;

@ -384,7 +505,15 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
max_seg_size = dma_get_max_seg_size(dev);
seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;

mask = *dev->dma_mask;
if (mask <= DMA_BIT_MASK(32))
tbl = &iommu->tbl;
else
tbl = &atu->tbl;

base_shift = tbl->table_map_base >> IO_PAGE_SHIFT;

for_each_sg(sglist, s, nelems, i) {
unsigned long paddr, npages, entry, out_entry = 0, slen;

@ -397,27 +526,26 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
/* Allocate iommu entries for that segment */
paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
entry = iommu_tbl_range_alloc(dev, tbl, npages,
&handle, (unsigned long)(-1), 0);

/* Handle failure */
if (unlikely(entry == IOMMU_ERROR_CODE)) {
if (printk_ratelimit())
printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
" npages %lx\n", iommu, paddr, npages);
pr_err_ratelimited("iommu_alloc failed, iommu %p paddr %lx npages %lx\n",
tbl, paddr, npages);
goto iommu_map_failed;
}

iommu_batch_new_entry(entry);
iommu_batch_new_entry(entry, mask);

/* Convert entry to a dma_addr_t */
dma_addr = iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT);
dma_addr = tbl->table_map_base + (entry << IO_PAGE_SHIFT);
dma_addr |= (s->offset & ~IO_PAGE_MASK);

/* Insert into HW table */
paddr &= IO_PAGE_MASK;
while (npages--) {
err = iommu_batch_add(paddr);
err = iommu_batch_add(paddr, mask);
if (unlikely(err < 0L))
goto iommu_map_failed;
paddr += IO_PAGE_SIZE;
@ -452,7 +580,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
dma_next = dma_addr + slen;
}

err = iommu_batch_end();
err = iommu_batch_end(mask);

if (unlikely(err < 0L))
goto iommu_map_failed;
@ -475,7 +603,7 @@ iommu_map_failed:
vaddr = s->dma_address & IO_PAGE_MASK;
npages = iommu_num_pages(s->dma_address, s->dma_length,
IO_PAGE_SIZE);
iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
iommu_tbl_range_free(tbl, vaddr, npages,
IOMMU_ERROR_CODE);
/* XXX demap? XXX */
s->dma_address = DMA_ERROR_CODE;
@ -496,13 +624,16 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
struct pci_pbm_info *pbm;
struct scatterlist *sg;
struct iommu *iommu;
struct atu *atu;
unsigned long flags, entry;
unsigned long iotsb_num;
u32 devhandle;

BUG_ON(direction == DMA_NONE);

iommu = dev->archdata.iommu;
pbm = dev->archdata.host_controller;
atu = iommu->atu;
devhandle = pbm->devhandle;

local_irq_save(flags);
@ -512,15 +643,24 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
dma_addr_t dma_handle = sg->dma_address;
unsigned int len = sg->dma_length;
unsigned long npages;
struct iommu_map_table *tbl = &iommu->tbl;
struct iommu_map_table *tbl;
unsigned long shift = IO_PAGE_SHIFT;

if (!len)
break;
npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);

if (dma_handle <= DMA_BIT_MASK(32)) {
iotsb_num = 0; /* we don't care for legacy iommu */
tbl = &iommu->tbl;
} else {
iotsb_num = atu->iotsb->iotsb_num;
tbl = &atu->tbl;
}
entry = ((dma_handle - tbl->table_map_base) >> shift);
dma_4v_iommu_demap(&devhandle, entry, npages);
iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
dma_4v_iommu_demap(dev, devhandle, dma_handle, iotsb_num,
entry, npages);
iommu_tbl_range_free(tbl, dma_handle, npages,
IOMMU_ERROR_CODE);
sg = sg_next(sg);
}
@ -581,6 +721,132 @@ static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
return cnt;
}

static int pci_sun4v_atu_alloc_iotsb(struct pci_pbm_info *pbm)
{
struct atu *atu = pbm->iommu->atu;
struct atu_iotsb *iotsb;
void *table;
u64 table_size;
u64 iotsb_num;
unsigned long order;
unsigned long err;

iotsb = kzalloc(sizeof(*iotsb), GFP_KERNEL);
if (!iotsb) {
err = -ENOMEM;
goto out_err;
}
atu->iotsb = iotsb;

/* calculate size of IOTSB */
table_size = (atu->size / IO_PAGE_SIZE) * 8;
order = get_order(table_size);
table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!table) {
err = -ENOMEM;
goto table_failed;
}
iotsb->table = table;
iotsb->ra = __pa(table);
iotsb->dvma_size = atu->size;
iotsb->dvma_base = atu->base;
iotsb->table_size = table_size;
iotsb->page_size = IO_PAGE_SIZE;

/* configure and register IOTSB with HV */
err = pci_sun4v_iotsb_conf(pbm->devhandle,
iotsb->ra,
iotsb->table_size,
iotsb->page_size,
iotsb->dvma_base,
&iotsb_num);
if (err) {
pr_err(PFX "pci_iotsb_conf failed error: %ld\n", err);
goto iotsb_conf_failed;
}
iotsb->iotsb_num = iotsb_num;

err = dma_4v_iotsb_bind(pbm->devhandle, iotsb_num, pbm->pci_bus);
if (err) {
pr_err(PFX "pci_iotsb_bind failed error: %ld\n", err);
goto iotsb_conf_failed;
}

return 0;

iotsb_conf_failed:
free_pages((unsigned long)table, order);
table_failed:
kfree(iotsb);
out_err:
return err;
}

static int pci_sun4v_atu_init(struct pci_pbm_info *pbm)
{
struct atu *atu = pbm->iommu->atu;
unsigned long err;
const u64 *ranges;
u64 map_size, num_iotte;
u64 dma_mask;
const u32 *page_size;
int len;

ranges = of_get_property(pbm->op->dev.of_node, "iommu-address-ranges",
&len);
if (!ranges) {
pr_err(PFX "No iommu-address-ranges\n");
return -EINVAL;
}

page_size = of_get_property(pbm->op->dev.of_node, "iommu-pagesizes",
NULL);
if (!page_size) {
pr_err(PFX "No iommu-pagesizes\n");
return -EINVAL;
}

/* There are 4 iommu-address-ranges supported. Each range is a pair of
 * {base, size}. ranges[0] and ranges[1] are 32bit address space, while
 * ranges[2] and ranges[3] are 64bit space. We want to use the 64bit
 * address ranges to support 64bit addressing. Because the 'size' of
 * ranges[2] and ranges[3] is the same, we can select either of them for
 * mapping. However, since that size is too large for the OS to allocate
 * an IOTSB, we use a fixed size of 32G (ATU_64_SPACE_SIZE), which is
 * more than enough for all PCIe devices to share.
 */
atu->ranges = (struct atu_ranges *)ranges;
atu->base = atu->ranges[3].base;
atu->size = ATU_64_SPACE_SIZE;

/* Create IOTSB */
err = pci_sun4v_atu_alloc_iotsb(pbm);
if (err) {
pr_err(PFX "Error creating ATU IOTSB\n");
return err;
}

/* Create ATU iommu map.
 * One bit represents one iotte in IOTSB table.
 */
dma_mask = (roundup_pow_of_two(atu->size) - 1UL);
num_iotte = atu->size / IO_PAGE_SIZE;
map_size = num_iotte / 8;
atu->tbl.table_map_base = atu->base;
atu->dma_addr_mask = dma_mask;
atu->tbl.map = kzalloc(map_size, GFP_KERNEL);
if (!atu->tbl.map)
return -ENOMEM;

iommu_tbl_pool_init(&atu->tbl, num_iotte, IO_PAGE_SHIFT,
NULL, false /* no large_pool */,
0 /* default npools */,
false /* want span boundary checking */);

return 0;
}

static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
@ -918,6 +1184,18 @@ static int pci_sun4v_pbm_init(struct pci_pbm_info *pbm,

pci_sun4v_scan_bus(pbm, &op->dev);

/* If atu_init fails it's not a complete failure;
 * we can still continue using the legacy iommu.
 */
if (pbm->iommu->atu) {
err = pci_sun4v_atu_init(pbm);
if (err) {
kfree(pbm->iommu->atu);
pbm->iommu->atu = NULL;
pr_err(PFX "ATU init failed, err=%d\n", err);
}
}

pbm->next = pci_pbm_root;
pci_pbm_root = pbm;

@ -931,8 +1209,10 @@ static int pci_sun4v_probe(struct platform_device *op)
struct pci_pbm_info *pbm;
struct device_node *dp;
struct iommu *iommu;
struct atu *atu;
u32 devhandle;
int i, err = -ENODEV;
static bool hv_atu = true;

dp = op->dev.of_node;

@ -954,6 +1234,19 @@ static int pci_sun4v_probe(struct platform_device *op)
pr_info(PFX "Registered hvapi major[%lu] minor[%lu]\n",
vpci_major, vpci_minor);

err = sun4v_hvapi_register(HV_GRP_ATU, vatu_major, &vatu_minor);
if (err) {
/* don't return an error if we fail to register the
 * ATU group, but ATU hcalls won't be available.
 */
hv_atu = false;
pr_err(PFX "Could not register hvapi ATU err=%d\n",
err);
} else {
pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n",
vatu_major, vatu_minor);
}

dma_ops = &sun4v_dma_ops;
}

@ -991,6 +1284,14 @@ static int pci_sun4v_probe(struct platform_device *op)
}

pbm->iommu = iommu;
iommu->atu = NULL;
if (hv_atu) {
atu = kzalloc(sizeof(*atu), GFP_KERNEL);
if (!atu)
pr_err(PFX "Could not allocate atu\n");
else
iommu->atu = atu;
}

err = pci_sun4v_pbm_init(pbm, op, devhandle);
if (err)
@ -1001,6 +1302,7 @@ static int pci_sun4v_probe(struct platform_device *op)
return 0;

out_free_iommu:
kfree(iommu->atu);
kfree(pbm->iommu);

out_free_controller:
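Every map and unmap path in this file now branches on the same 4GB boundary: addresses and DMA masks at or below DMA_BIT_MASK(32) are served by the legacy IOMMU table, everything above it by the ATU table. A minimal sketch of that selection rule, using a hypothetical helper name that is not part of the patch:

    /* hypothetical helper; the patch open-codes this test at each call site */
    static struct iommu_map_table *dma_4v_pick_tbl(struct iommu *iommu,
                                                   u64 addr_or_mask)
    {
            /* legacy IOMMU serves the 32-bit window, ATU all space above it */
            if (addr_or_mask <= DMA_BIT_MASK(32))
                    return &iommu->tbl;
            return &iommu->atu->tbl;
    }

Centralizing the test this way is only an illustration; the committed code keeps the open-coded form so each path can also pick the matching iotsb_num for the demap hypercall.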
@ -89,4 +89,25 @@ unsigned long pci_sun4v_msg_setvalid(unsigned long devhandle,
unsigned long msinum,
unsigned long valid);

/* Sun4v HV IOMMU v2 APIs */
unsigned long pci_sun4v_iotsb_conf(unsigned long devhandle,
unsigned long ra,
unsigned long table_size,
unsigned long page_size,
unsigned long dvma_base,
u64 *iotsb_num);
unsigned long pci_sun4v_iotsb_bind(unsigned long devhandle,
unsigned long iotsb_num,
unsigned int pci_device);
unsigned long pci_sun4v_iotsb_map(unsigned long devhandle,
unsigned long iotsb_num,
unsigned long iotsb_index_iottes,
unsigned long io_attributes,
unsigned long io_page_list_pa,
long *mapped);
unsigned long pci_sun4v_iotsb_demap(unsigned long devhandle,
unsigned long iotsb_num,
unsigned long iotsb_index,
unsigned long iottes,
unsigned long *demapped);
#endif /* !(_PCI_SUN4V_H) */
@ -360,3 +360,71 @@ ENTRY(pci_sun4v_msg_setvalid)
mov %o0, %o0
ENDPROC(pci_sun4v_msg_setvalid)

/*
 * %o0: devhandle
 * %o1: r_addr
 * %o2: size
 * %o3: pagesize
 * %o4: virt
 * %o5: &iotsb_num/&iotsb_handle
 *
 * returns %o0: status
 * %o1: iotsb_num/iotsb_handle
 */
ENTRY(pci_sun4v_iotsb_conf)
mov %o5, %g1
mov HV_FAST_PCI_IOTSB_CONF, %o5
ta HV_FAST_TRAP
retl
stx %o1, [%g1]
ENDPROC(pci_sun4v_iotsb_conf)

/*
 * %o0: devhandle
 * %o1: iotsb_num/iotsb_handle
 * %o2: pci_device
 *
 * returns %o0: status
 */
ENTRY(pci_sun4v_iotsb_bind)
mov HV_FAST_PCI_IOTSB_BIND, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(pci_sun4v_iotsb_bind)

/*
 * %o0: devhandle
 * %o1: iotsb_num/iotsb_handle
 * %o2: index_count
 * %o3: iotte_attributes
 * %o4: io_page_list_p
 * %o5: &mapped
 *
 * returns %o0: status
 * %o1: #mapped
 */
ENTRY(pci_sun4v_iotsb_map)
mov %o5, %g1
mov HV_FAST_PCI_IOTSB_MAP, %o5
ta HV_FAST_TRAP
retl
stx %o1, [%g1]
ENDPROC(pci_sun4v_iotsb_map)

/*
 * %o0: devhandle
 * %o1: iotsb_num/iotsb_handle
 * %o2: iotsb_index
 * %o3: #iottes
 * %o4: &demapped
 *
 * returns %o0: status
 * %o1: #demapped
 */
ENTRY(pci_sun4v_iotsb_demap)
mov HV_FAST_PCI_IOTSB_DEMAP, %o5
ta HV_FAST_TRAP
retl
stx %o1, [%o4]
ENDPROC(pci_sun4v_iotsb_demap)
@ -89,7 +89,7 @@ asmlinkage void do_sigreturn(struct pt_regs *regs)
sf = (struct signal_frame __user *) regs->u_regs[UREG_FP];

/* 1. Make sure we are not getting garbage from the user */
if (!invalid_frame_pointer(sf, sizeof(*sf)))
if (invalid_frame_pointer(sf, sizeof(*sf)))
goto segv_and_exit;

if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
@ -150,7 +150,7 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)

synchronize_user_stack();
sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP];
if (!invalid_frame_pointer(sf, sizeof(*sf)))
if (invalid_frame_pointer(sf, sizeof(*sf)))
goto segv;

if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
@ -802,8 +802,10 @@ struct mdesc_mblock {
};
static struct mdesc_mblock *mblocks;
static int num_mblocks;
static int find_numa_node_for_addr(unsigned long pa,
struct node_mem_mask *pnode_mask);

static unsigned long ra_to_pa(unsigned long addr)
static unsigned long __init ra_to_pa(unsigned long addr)
{
int i;

@ -819,8 +821,11 @@ static unsigned long ra_to_pa(unsigned long addr)
return addr;
}

static int find_node(unsigned long addr)
static int __init find_node(unsigned long addr)
{
static bool search_mdesc = true;
static struct node_mem_mask last_mem_mask = { ~0UL, ~0UL };
static int last_index;
int i;

addr = ra_to_pa(addr);
@ -830,13 +835,30 @@ static int find_node(unsigned long addr)
if ((addr & p->mask) == p->val)
return i;
}
/* The following condition has been observed on LDOM guests.*/
WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node"
" rule. Some physical memory will be owned by node 0.");
return 0;
/* The following condition has been observed on LDOM guests because
 * node_masks only contains the best latency mask and value.
 * An LDOM guest's mdesc can contain a single latency group that
 * covers multiple address ranges. Print the warning message only if
 * the address cannot be found in node_masks nor in the mdesc.
 */
if ((search_mdesc) &&
((addr & last_mem_mask.mask) != last_mem_mask.val)) {
/* find the available node in the mdesc */
last_index = find_numa_node_for_addr(addr, &last_mem_mask);
numadbg("find_node: latency group for address 0x%lx is %d\n",
addr, last_index);
if ((last_index < 0) || (last_index >= num_node_masks)) {
/* WARN_ONCE() and use default group 0 */
WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node rule. Some physical memory will be owned by node 0.");
search_mdesc = false;
last_index = 0;
}
}

return last_index;
}

static u64 memblock_nid_range(u64 start, u64 end, int *nid)
static u64 __init memblock_nid_range(u64 start, u64 end, int *nid)
{
*nid = find_node(start);
start += PAGE_SIZE;
@ -1160,6 +1182,41 @@ int __node_distance(int from, int to)
return numa_latency[from][to];
}

static int find_numa_node_for_addr(unsigned long pa,
struct node_mem_mask *pnode_mask)
{
struct mdesc_handle *md = mdesc_grab();
u64 node, arc;
int i = 0;

node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
if (node == MDESC_NODE_NULL)
goto out;

mdesc_for_each_node_by_name(md, node, "group") {
mdesc_for_each_arc(arc, md, node, MDESC_ARC_TYPE_FWD) {
u64 target = mdesc_arc_target(md, arc);
struct mdesc_mlgroup *m = find_mlgroup(target);

if (!m)
continue;
if ((pa & m->mask) == m->match) {
if (pnode_mask) {
pnode_mask->mask = m->mask;
pnode_mask->val = m->match;
}
mdesc_release(md);
return i;
}
}
i++;
}

out:
mdesc_release(md);
return -1;
}

static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
{
int i;

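The reworked find_node() is a small memoization: it caches the {mask, value} pair of the last successful mdesc lookup and only re-walks the machine description when the cached mask no longer matches. A self-contained sketch of the caching rule, with hypothetical names:

    /* hypothetical restatement of the caching done in find_node() */
    struct mem_mask { unsigned long mask, val; };

    static int cached_node(unsigned long addr, struct mem_mask *cache,
                           int *last_index,
                           int (*slow_lookup)(unsigned long, struct mem_mask *))
    {
            /* fast path: the last mask/value pair still covers this address */
            if ((addr & cache->mask) == cache->val)
                    return *last_index;

            /* slow path: consult the authoritative table, refresh the cache */
            *last_index = slow_lookup(addr, cache);
            return *last_index;
    }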
@ -40,8 +40,8 @@ GCOV_PROFILE := n
UBSAN_SANITIZE :=n

LDFLAGS := -m elf_$(UTS_MACHINE)
ifeq ($(CONFIG_RELOCATABLE),y)
# If kernel is relocatable, build compressed kernel as PIE.
# Compressed kernel should be built as PIE since it may be loaded at any
# address by the bootloader.
ifeq ($(CONFIG_X86_32),y)
LDFLAGS += $(call ld-option, -pie) $(call ld-option, --no-dynamic-linker)
else
@ -51,7 +51,6 @@ else
LDFLAGS += $(shell $(LD) --help 2>&1 | grep -q "\-z noreloc-overflow" \
&& echo "-z noreloc-overflow -pie --no-dynamic-linker")
endif
endif
LDFLAGS_vmlinux := -T

hostprogs-y := mkpiggy
@ -87,6 +87,12 @@ int validate_cpu(void)
return -1;
}

if (CONFIG_X86_MINIMUM_CPU_FAMILY <= 4 && !IS_ENABLED(CONFIG_M486) &&
!has_eflag(X86_EFLAGS_ID)) {
printf("This kernel requires a CPU with the CPUID instruction. Build with CONFIG_M486=y to run on this CPU.\n");
return -1;
}

if (err_flags) {
puts("This kernel requires the following features "
"not present on the CPU:\n");
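has_eflag(X86_EFLAGS_ID) relies on the classic CPUID-detection trick: bit 21 of EFLAGS can only be toggled on CPUs that implement CPUID. A hedged, self-contained sketch of that technique (an illustration of the idea, not the boot code's exact implementation):

    #include <stdbool.h>

    #define X86_EFLAGS_ID 0x00200000UL      /* EFLAGS bit 21 */

    static bool cpu_has_cpuid(void)
    {
            unsigned long f0, f1;

            asm volatile("pushf\n\t"        /* save original flags */
                         "pushf\n\t"
                         "pop %0\n\t"       /* f0 = EFLAGS */
                         "mov %0, %1\n\t"
                         "xor %2, %1\n\t"   /* flip the ID bit */
                         "push %1\n\t"
                         "popf\n\t"         /* try to write it back */
                         "pushf\n\t"
                         "pop %1\n\t"       /* f1 = EFLAGS after the attempt */
                         "popf"             /* restore original flags */
                         : "=&r" (f0), "=&r" (f1)
                         : "ri" (X86_EFLAGS_ID));

            /* if the bit actually changed, CPUID exists */
            return !!((f0 ^ f1) & X86_EFLAGS_ID);
    }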
@ -112,7 +112,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
for (; stack < stack_info.end; stack++) {
unsigned long real_addr;
int reliable = 0;
unsigned long addr = *stack;
unsigned long addr = READ_ONCE_NOCHECK(*stack);
unsigned long *ret_addr_p =
unwind_get_return_address_ptr(&state);

@ -521,14 +521,14 @@ void fpu__clear(struct fpu *fpu)
{
WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */

if (!use_eager_fpu() || !static_cpu_has(X86_FEATURE_FPU)) {
/* FPU state will be reallocated lazily at the first use. */
fpu__drop(fpu);
} else {
if (!fpu->fpstate_active) {
fpu__activate_curr(fpu);
user_fpu_begin();
}
fpu__drop(fpu);

/*
 * Make sure fpstate is cleared and initialized.
 */
if (static_cpu_has(X86_FEATURE_FPU)) {
fpu__activate_curr(fpu);
user_fpu_begin();
copy_init_fpstate_to_fpregs();
}
}
@ -665,14 +665,17 @@ __PAGE_ALIGNED_BSS
initial_pg_pmd:
.fill 1024*KPMDS,4,0
#else
ENTRY(initial_page_table)
.globl initial_page_table
initial_page_table:
.fill 1024,4,0
#endif
initial_pg_fixmap:
.fill 1024,4,0
ENTRY(empty_zero_page)
.globl empty_zero_page
empty_zero_page:
.fill 4096,1,0
ENTRY(swapper_pg_dir)
.globl swapper_pg_dir
swapper_pg_dir:
.fill 1024,4,0
EXPORT_SYMBOL(empty_zero_page)

@ -66,13 +66,36 @@ __init int create_simplefb(const struct screen_info *si,
{
struct platform_device *pd;
struct resource res;
unsigned long len;
u64 base, size;
u32 length;

/* don't use lfb_size as it may contain the whole VMEM instead of only
 * the part that is occupied by the framebuffer */
len = mode->height * mode->stride;
len = PAGE_ALIGN(len);
if (len > (u64)si->lfb_size << 16) {
/*
 * If the 64BIT_BASE capability is set, ext_lfb_base will contain the
 * upper half of the base address. Assemble the address, then make sure
 * it is valid and we can actually access it.
 */
base = si->lfb_base;
if (si->capabilities & VIDEO_CAPABILITY_64BIT_BASE)
base |= (u64)si->ext_lfb_base << 32;
if (!base || (u64)(resource_size_t)base != base) {
printk(KERN_DEBUG "sysfb: inaccessible VRAM base\n");
return -EINVAL;
}

/*
 * Don't use lfb_size as IORESOURCE size, since it may contain the
 * entire VMEM, and thus require huge mappings. Use just the part we
 * need, that is, the part where the framebuffer is located. But verify
 * that it does not exceed the advertised VMEM.
 * Note that in case of VBE, the lfb_size is shifted by 16 bits for
 * historical reasons.
 */
size = si->lfb_size;
if (si->orig_video_isVGA == VIDEO_TYPE_VLFB)
size <<= 16;
length = mode->height * mode->stride;
length = PAGE_ALIGN(length);
if (length > size) {
printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n");
return -EINVAL;
}
@ -81,8 +104,8 @@ __init int create_simplefb(const struct screen_info *si,
memset(&res, 0, sizeof(res));
res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
res.name = simplefb_resname;
res.start = si->lfb_base;
res.end = si->lfb_base + len - 1;
res.start = base;
res.end = res.start + length - 1;
if (res.end <= res.start)
return -EINVAL;

@ -7,11 +7,13 @@

unsigned long unwind_get_return_address(struct unwind_state *state)
{
unsigned long addr = READ_ONCE_NOCHECK(*state->sp);

if (unwind_done(state))
return 0;

return ftrace_graph_ret_addr(state->task, &state->graph_idx,
*state->sp, state->sp);
addr, state->sp);
}
EXPORT_SYMBOL_GPL(unwind_get_return_address);

@ -23,8 +25,10 @@ bool unwind_next_frame(struct unwind_state *state)
return false;

do {
unsigned long addr = READ_ONCE_NOCHECK(*state->sp);

for (state->sp++; state->sp < info->end; state->sp++)
if (__kernel_text_address(*state->sp))
if (__kernel_text_address(addr))
return true;

state->sp = info->next_sp;
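Both hunks replace a raw *state->sp dereference with READ_ONCE_NOCHECK(). Assuming the usual <linux/compiler.h> semantics, the _NOCHECK variant performs the same single access but skips KASAN instrumentation, which matters when the unwinder peeks at stack slots the sanitizer may consider poisoned:

    /* illustration only, assuming <linux/compiler.h> */
    static unsigned long peek_stack_slot(unsigned long *sp)
    {
            /* a plain READ_ONCE() here could trip KASAN on out-of-liveness
             * stack words; the _NOCHECK form reads without the check */
            return READ_ONCE_NOCHECK(*sp);
    }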
@ -156,6 +156,16 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
}


static int kvm_hv_set_sint(struct kvm_kernel_irq_routing_entry *e,
struct kvm *kvm, int irq_source_id, int level,
bool line_status)
{
if (!level)
return -1;

return kvm_hv_synic_set_irq(kvm, e->hv_sint.vcpu, e->hv_sint.sint);
}

int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
struct kvm *kvm, int irq_source_id, int level,
bool line_status)
@ -163,18 +173,26 @@ int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
struct kvm_lapic_irq irq;
int r;

if (unlikely(e->type != KVM_IRQ_ROUTING_MSI))
return -EWOULDBLOCK;
switch (e->type) {
case KVM_IRQ_ROUTING_HV_SINT:
return kvm_hv_set_sint(e, kvm, irq_source_id, level,
line_status);

if (kvm_msi_route_invalid(kvm, e))
return -EINVAL;
case KVM_IRQ_ROUTING_MSI:
if (kvm_msi_route_invalid(kvm, e))
return -EINVAL;

kvm_set_msi_irq(kvm, e, &irq);
kvm_set_msi_irq(kvm, e, &irq);

if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL))
return r;
else
return -EWOULDBLOCK;
if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL))
return r;
break;

default:
break;
}

return -EWOULDBLOCK;
}

int kvm_request_irq_source_id(struct kvm *kvm)
@ -254,16 +272,6 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
srcu_read_unlock(&kvm->irq_srcu, idx);
}

static int kvm_hv_set_sint(struct kvm_kernel_irq_routing_entry *e,
struct kvm *kvm, int irq_source_id, int level,
bool line_status)
{
if (!level)
return -1;

return kvm_hv_synic_set_irq(kvm, e->hv_sint.vcpu, e->hv_sint.sint);
}

int kvm_set_routing_entry(struct kvm *kvm,
struct kvm_kernel_irq_routing_entry *e,
const struct kvm_irq_routing_entry *ue)
@ -423,18 +431,6 @@ void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
srcu_read_unlock(&kvm->irq_srcu, idx);
}

int kvm_arch_set_irq(struct kvm_kernel_irq_routing_entry *irq, struct kvm *kvm,
int irq_source_id, int level, bool line_status)
{
switch (irq->type) {
case KVM_IRQ_ROUTING_HV_SINT:
return kvm_hv_set_sint(irq, kvm, irq_source_id, level,
line_status);
default:
return -EWOULDBLOCK;
}
}

void kvm_arch_irq_routing_update(struct kvm *kvm)
{
kvm_hv_irq_routing_update(kvm);
@ -210,7 +210,18 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
struct kvm_shared_msrs *locals
= container_of(urn, struct kvm_shared_msrs, urn);
struct kvm_shared_msr_values *values;
unsigned long flags;

/*
 * Disabling irqs at this point since the following code could be
 * interrupted and executed through kvm_arch_hardware_disable()
 */
local_irq_save(flags);
if (locals->registered) {
locals->registered = false;
user_return_notifier_unregister(urn);
}
local_irq_restore(flags);
for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
values = &locals->values[slot];
if (values->host != values->curr) {
@ -218,8 +229,6 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
values->curr = values->host;
}
}
locals->registered = false;
user_return_notifier_unregister(urn);
}

static void shared_msr_update(unsigned slot, u32 msr)
@ -1724,18 +1733,23 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)

static u64 __get_kvmclock_ns(struct kvm *kvm)
{
struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, 0);
struct kvm_arch *ka = &kvm->arch;
s64 ns;
struct pvclock_vcpu_time_info hv_clock;

if (vcpu->arch.hv_clock.flags & PVCLOCK_TSC_STABLE_BIT) {
u64 tsc = kvm_read_l1_tsc(vcpu, rdtsc());
ns = __pvclock_read_cycles(&vcpu->arch.hv_clock, tsc);
} else {
ns = ktime_get_boot_ns() + ka->kvmclock_offset;
spin_lock(&ka->pvclock_gtod_sync_lock);
if (!ka->use_master_clock) {
spin_unlock(&ka->pvclock_gtod_sync_lock);
return ktime_get_boot_ns() + ka->kvmclock_offset;
}

return ns;
hv_clock.tsc_timestamp = ka->master_cycle_now;
hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
spin_unlock(&ka->pvclock_gtod_sync_lock);

kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
&hv_clock.tsc_shift,
&hv_clock.tsc_to_system_mul);
return __pvclock_read_cycles(&hv_clock, rdtsc());
}

u64 get_kvmclock_ns(struct kvm *kvm)
@ -2596,7 +2610,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_PIT_STATE2:
case KVM_CAP_SET_IDENTITY_MAP_ADDR:
case KVM_CAP_XEN_HVM:
case KVM_CAP_ADJUST_CLOCK:
case KVM_CAP_VCPU_EVENTS:
case KVM_CAP_HYPERV:
case KVM_CAP_HYPERV_VAPIC:
@ -2623,6 +2636,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
#endif
r = 1;
break;
case KVM_CAP_ADJUST_CLOCK:
r = KVM_CLOCK_TSC_STABLE;
break;
case KVM_CAP_X86_SMM:
/* SMBASE is usually relocated above 1M on modern chipsets,
 * and SMM handlers might indeed rely on 4G segment limits,
@ -3415,6 +3431,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
};
case KVM_SET_VAPIC_ADDR: {
struct kvm_vapic_addr va;
int idx;

r = -EINVAL;
if (!lapic_in_kernel(vcpu))
@ -3422,7 +3439,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = -EFAULT;
if (copy_from_user(&va, argp, sizeof va))
goto out;
idx = srcu_read_lock(&vcpu->kvm->srcu);
r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
break;
}
case KVM_X86_SETUP_MCE: {
@ -4103,9 +4122,11 @@ long kvm_arch_vm_ioctl(struct file *filp,
struct kvm_clock_data user_ns;
u64 now_ns;

now_ns = get_kvmclock_ns(kvm);
local_irq_disable();
now_ns = __get_kvmclock_ns(kvm);
user_ns.clock = now_ns;
user_ns.flags = 0;
user_ns.flags = kvm->arch.use_master_clock ? KVM_CLOCK_TSC_STABLE : 0;
local_irq_enable();
memset(&user_ns.pad, 0, sizeof(user_ns.pad));

r = -EFAULT;
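From userspace, the new behavior is observable through the VM ioctls: KVM_CHECK_EXTENSION(KVM_CAP_ADJUST_CLOCK) now returns the flag bits that KVM_GET_CLOCK may set. A minimal sketch, assuming a kernel with this series applied and an already-created VM file descriptor (error handling omitted):

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static void show_vm_clock(int vm_fd)
    {
            struct kvm_clock_data data = { 0 };
            int caps = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ADJUST_CLOCK);

            ioctl(vm_fd, KVM_GET_CLOCK, &data);
            printf("clock=%llu supported-flags=%#x tsc-stable=%d\n",
                   (unsigned long long)data.clock, caps,
                   !!(data.flags & KVM_CLOCK_TSC_STABLE));
    }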
@ -135,7 +135,12 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
if (early_recursion_flag > 2)
goto halt_loop;

if (regs->cs != __KERNEL_CS)
/*
 * Old CPUs leave the high bits of CS on the stack
 * undefined. I'm not sure which CPUs do this, but at least
 * the 486 DX works this way.
 */
if ((regs->cs & 0xFFFF) != __KERNEL_CS)
goto fail;

/*
@ -28,4 +28,4 @@ obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_pcal9555a.o
obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o
# MISC Devices
obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o
obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_wdt.o
obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_mrfld_wdt.o
@ -1,5 +1,5 @@
/*
 * platform_wdt.c: Watchdog platform library file
 * Intel Merrifield watchdog platform device library file
 *
 * (C) Copyright 2014 Intel Corporation
 * Author: David Cohen <david.a.cohen@linux.intel.com>
@ -14,7 +14,9 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/platform_data/intel-mid_wdt.h>

#include <asm/intel-mid.h>
#include <asm/intel_scu_ipc.h>
#include <asm/io_apic.h>

#define TANGIER_EXT_TIMER0_MSI 15
@ -50,14 +52,34 @@ static struct intel_mid_wdt_pdata tangier_pdata = {
.probe = tangier_probe,
};

static int __init register_mid_wdt(void)
static int wdt_scu_status_change(struct notifier_block *nb,
unsigned long code, void *data)
{
if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) {
wdt_dev.dev.platform_data = &tangier_pdata;
return platform_device_register(&wdt_dev);
if (code == SCU_DOWN) {
platform_device_unregister(&wdt_dev);
return 0;
}

return -ENODEV;
return platform_device_register(&wdt_dev);
}

static struct notifier_block wdt_scu_notifier = {
.notifier_call = wdt_scu_status_change,
};

static int __init register_mid_wdt(void)
{
if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER)
return -ENODEV;

wdt_dev.dev.platform_data = &tangier_pdata;

/*
 * We need to be sure that the SCU IPC is ready before watchdog device
 * can be registered:
 */
intel_scu_notifier_add(&wdt_scu_notifier);

return 0;
}
rootfs_initcall(register_mid_wdt);
@ -16,6 +16,7 @@ KCOV_INSTRUMENT := n

KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes -fno-zero-initialized-in-bss -fno-builtin -ffreestanding -c -MD -Os -mcmodel=large
KBUILD_CFLAGS += -m$(BITS)
KBUILD_CFLAGS += $(call cc-option,-fno-PIE)

$(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
$(call if_changed,ld)
@ -767,7 +767,14 @@ __SYSCALL(346, sys_preadv2, 6)
#define __NR_pwritev2 347
__SYSCALL(347, sys_pwritev2, 6)

#define __NR_syscall_count 348
#define __NR_pkey_mprotect 348
__SYSCALL(348, sys_pkey_mprotect, 4)
#define __NR_pkey_alloc 349
__SYSCALL(349, sys_pkey_alloc, 2)
#define __NR_pkey_free 350
__SYSCALL(350, sys_pkey_free, 1)

#define __NR_syscall_count 351

/*
 * sysxtensa syscall handler
@ -172,10 +172,11 @@ void __init time_init(void)
{
of_clk_init(NULL);
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
printk("Calibrating CPU frequency ");
pr_info("Calibrating CPU frequency ");
calibrate_ccount();
printk("%d.%02d MHz\n", (int)ccount_freq/1000000,
(int)(ccount_freq/10000)%100);
pr_cont("%d.%02d MHz\n",
(int)ccount_freq / 1000000,
(int)(ccount_freq / 10000) % 100);
#else
ccount_freq = CONFIG_XTENSA_CPU_CLOCK*1000000UL;
#endif
@ -210,9 +211,8 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
void calibrate_delay(void)
{
loops_per_jiffy = ccount_freq / HZ;
printk("Calibrating delay loop (skipped)... "
"%lu.%02lu BogoMIPS preset\n",
loops_per_jiffy/(1000000/HZ),
(loops_per_jiffy/(10000/HZ)) % 100);
pr_info("Calibrating delay loop (skipped)... %lu.%02lu BogoMIPS preset\n",
loops_per_jiffy / (1000000 / HZ),
(loops_per_jiffy / (10000 / HZ)) % 100);
}
#endif
@ -465,26 +465,25 @@ void show_regs(struct pt_regs * regs)

for (i = 0; i < 16; i++) {
if ((i % 8) == 0)
printk(KERN_INFO "a%02d:", i);
printk(KERN_CONT " %08lx", regs->areg[i]);
pr_info("a%02d:", i);
pr_cont(" %08lx", regs->areg[i]);
}
printk(KERN_CONT "\n");

printk("pc: %08lx, ps: %08lx, depc: %08lx, excvaddr: %08lx\n",
regs->pc, regs->ps, regs->depc, regs->excvaddr);
printk("lbeg: %08lx, lend: %08lx lcount: %08lx, sar: %08lx\n",
regs->lbeg, regs->lend, regs->lcount, regs->sar);
pr_cont("\n");
pr_info("pc: %08lx, ps: %08lx, depc: %08lx, excvaddr: %08lx\n",
regs->pc, regs->ps, regs->depc, regs->excvaddr);
pr_info("lbeg: %08lx, lend: %08lx lcount: %08lx, sar: %08lx\n",
regs->lbeg, regs->lend, regs->lcount, regs->sar);
if (user_mode(regs))
printk("wb: %08lx, ws: %08lx, wmask: %08lx, syscall: %ld\n",
regs->windowbase, regs->windowstart, regs->wmask,
regs->syscall);
pr_cont("wb: %08lx, ws: %08lx, wmask: %08lx, syscall: %ld\n",
regs->windowbase, regs->windowstart, regs->wmask,
regs->syscall);
}

static int show_trace_cb(struct stackframe *frame, void *data)
{
if (kernel_text_address(frame->pc)) {
printk(" [<%08lx>] ", frame->pc);
print_symbol("%s\n", frame->pc);
pr_cont(" [<%08lx>]", frame->pc);
print_symbol(" %s\n", frame->pc);
}
return 0;
}
@ -494,19 +493,13 @@ void show_trace(struct task_struct *task, unsigned long *sp)
if (!sp)
sp = stack_pointer(task);

printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
printk("\n");
#endif
pr_info("Call Trace:\n");
walk_stackframe(sp, show_trace_cb, NULL);
printk("\n");
#ifndef CONFIG_KALLSYMS
pr_cont("\n");
#endif
}

/*
 * This routine abuses get_user()/put_user() to reference pointers
 * with at least a bit of error checking ...
 */

static int kstack_depth_to_print = 24;

void show_stack(struct task_struct *task, unsigned long *sp)
@ -518,52 +511,29 @@ void show_stack(struct task_struct *task, unsigned long *sp)
sp = stack_pointer(task);
stack = sp;

printk("\nStack: ");
pr_info("Stack:\n");

for (i = 0; i < kstack_depth_to_print; i++) {
if (kstack_end(sp))
break;
if (i && ((i % 8) == 0))
printk("\n ");
printk("%08lx ", *sp++);
pr_cont(" %08lx", *sp++);
if (i % 8 == 7)
pr_cont("\n");
}
printk("\n");
show_trace(task, stack);
}

void show_code(unsigned int *pc)
{
long i;

printk("\nCode:");

for(i = -3 ; i < 6 ; i++) {
unsigned long insn;
if (__get_user(insn, pc + i)) {
printk(" (Bad address in pc)\n");
break;
}
printk("%c%08lx%c",(i?' ':'<'),insn,(i?' ':'>'));
}
}

DEFINE_SPINLOCK(die_lock);

void die(const char * str, struct pt_regs * regs, long err)
{
static int die_counter;
int nl = 0;

console_verbose();
spin_lock_irq(&die_lock);

printk("%s: sig: %ld [#%d]\n", str, err, ++die_counter);
#ifdef CONFIG_PREEMPT
printk("PREEMPT ");
nl = 1;
#endif
if (nl)
printk("\n");
pr_info("%s: sig: %ld [#%d]%s\n", str, err, ++die_counter,
IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "");
show_regs(regs);
if (!user_mode(regs))
show_stack(NULL, (unsigned long*)regs->areg[1]);
@ -214,23 +214,26 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,

ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);

if (ctx->more) {
if (!result) {
err = af_alg_wait_for_completion(
crypto_ahash_init(&ctx->req),
&ctx->completion);
if (err)
goto unlock;
}

if (!result || ctx->more) {
ctx->more = 0;
err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
&ctx->completion);
if (err)
goto unlock;
} else if (!result) {
err = af_alg_wait_for_completion(
crypto_ahash_digest(&ctx->req),
&ctx->completion);
}

err = memcpy_to_msg(msg, ctx->result, len);

hash_free_result(sk, ctx);

unlock:
hash_free_result(sk, ctx);
release_sock(sk);

return err ?: len;
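The case this fix targets is a recvmsg() with no preceding sendmsg(): the socket must still return the digest of the empty message, which is why the code now performs init plus final (or a one-shot digest) rather than final alone. A hedged userspace sketch that exercises exactly that path through AF_ALG (error handling omitted for brevity):

    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>
    #include <linux/if_alg.h>

    int main(void)
    {
            struct sockaddr_alg sa = {
                    .salg_family = AF_ALG,
                    .salg_type   = "hash",
                    .salg_name   = "sha1",
            };
            unsigned char digest[20];       /* SHA-1 digest size */
            int tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
            int op;

            bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
            op = accept(tfm, NULL, 0);

            /* no send() at all: the read must yield sha1("") */
            read(op, digest, sizeof(digest));

            close(op);
            close(tfm);
            return 0;
    }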
@ -480,19 +480,17 @@ static void acpi_tb_convert_fadt(void)
u32 i;

/*
 * For ACPI 1.0 FADTs (revision 1), ensure that reserved fields which
 * For ACPI 1.0 FADTs (revision 1 or 2), ensure that reserved fields which
 * should be zero are indeed zero. This will workaround BIOSs that
 * inadvertently place values in these fields.
 *
 * The ACPI 1.0 reserved fields that will be zeroed are the bytes located
 * at offset 45, 55, 95, and the word located at offset 109, 110.
 *
 * Note: The FADT revision value is unreliable because of BIOS errors.
 * The table length is instead used as the final word on the version.
 *
 * Note: FADT revision 3 is the ACPI 2.0 version of the FADT.
 * Note: The FADT revision value is unreliable. Only the length can be
 * trusted.
 */
if (acpi_gbl_FADT.header.length <= ACPI_FADT_V3_SIZE) {
if (acpi_gbl_FADT.header.length <= ACPI_FADT_V2_SIZE) {
acpi_gbl_FADT.preferred_profile = 0;
acpi_gbl_FADT.pstate_control = 0;
acpi_gbl_FADT.cst_control = 0;
@ -484,7 +484,7 @@ static int bt_bmc_remove(struct platform_device *pdev)
}

static const struct of_device_id bt_bmc_match[] = {
{ .compatible = "aspeed,ast2400-bt-bmc" },
{ .compatible = "aspeed,ast2400-ibt-bmc" },
{ },
};

@ -502,4 +502,4 @@ module_platform_driver(bt_bmc_driver);
MODULE_DEVICE_TABLE(of, bt_bmc_match);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alistair Popple <alistair@popple.id.au>");
MODULE_DESCRIPTION("Linux device interface to the BT interface");
MODULE_DESCRIPTION("Linux device interface to the IPMI BT interface");
@ -685,7 +685,7 @@ static void __init berlin2_clock_setup(struct device_node *np)
}

/* register clk-provider */
of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &clk_data);
of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);

return;

@ -382,7 +382,7 @@ static void __init berlin2q_clock_setup(struct device_node *np)
}

/* register clk-provider */
of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &clk_data);
of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);

return;

@ -82,6 +82,6 @@ static void __init efm32gg_cmu_init(struct device_node *np)
hws[clk_HFPERCLKDAC0] = clk_hw_register_gate(NULL, "HFPERCLK.DAC0",
"HFXO", 0, base + CMU_HFPERCLKEN0, 17, 0, NULL);

of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &clk_data);
of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
}
CLK_OF_DECLARE(efm32ggcmu, "efm32gg,cmu", efm32gg_cmu_init);
@ -191,6 +191,8 @@ static struct clk_div_table axi_div_table[] = {
static SUNXI_CCU_DIV_TABLE(axi_clk, "axi", "cpu",
0x050, 0, 3, axi_div_table, 0);

#define SUN6I_A31_AHB1_REG 0x054

static const char * const ahb1_parents[] = { "osc32k", "osc24M",
"axi", "pll-periph" };

@ -1230,6 +1232,16 @@ static void __init sun6i_a31_ccu_setup(struct device_node *node)
val &= BIT(16);
writel(val, reg + SUN6I_A31_PLL_MIPI_REG);

/* Force AHB1 to PLL6 / 3 */
val = readl(reg + SUN6I_A31_AHB1_REG);
/* set PLL6 pre-div = 3 */
val &= ~GENMASK(7, 6);
val |= 0x2 << 6;
/* select PLL6 / pre-div */
val &= ~GENMASK(13, 12);
val |= 0x3 << 12;
writel(val, reg + SUN6I_A31_AHB1_REG);

sunxi_ccu_probe(node, reg, &sun6i_a31_ccu_desc);

ccu_mux_notifier_register(pll_cpu_clk.common.hw.clk,
@ -373,7 +373,7 @@ static void sun4i_get_apb1_factors(struct factors_request *req)
else
calcp = 3;

calcm = (req->parent_rate >> calcp) - 1;
calcm = (div >> calcp) - 1;

req->rate = (req->parent_rate >> calcp) / (calcm + 1);
req->m = calcm;
@ -137,7 +137,7 @@ static void dbg_dump_sg(const char *level, const char *prefix_str,
}

buf = it_page + it->offset;
len = min(tlen, it->length);
len = min_t(size_t, tlen, it->length);
print_hex_dump(level, prefix_str, prefix_type, rowsize,
groupsize, buf, len, ascii);
tlen -= len;
@ -4583,6 +4583,15 @@ static int __init caam_algapi_init(void)
if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
continue;

/*
 * Check support for AES modes not available
 * on LP devices.
 */
if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
if ((alg->class1_alg_type & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_XTS)
continue;

t_alg = caam_alg_alloc(alg);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
@ -306,6 +306,7 @@ config MMP_TDMA
depends on ARCH_MMP || COMPILE_TEST
select DMA_ENGINE
select MMP_SRAM if ARCH_MMP
select GENERIC_ALLOCATOR
help
Support the MMP Two-Channel DMA engine.
This engine is used for MMP Audio DMA and pxa910 SQU.
@ -317,6 +317,12 @@ static irqreturn_t cppi41_irq(int irq, void *data)

while (val) {
u32 desc, len;
int error;

error = pm_runtime_get(cdd->ddev.dev);
if (error < 0)
dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n",
__func__, error);

q_num = __fls(val);
val &= ~(1 << q_num);
@ -338,7 +344,6 @@ static irqreturn_t cppi41_irq(int irq, void *data)
dma_cookie_complete(&c->txd);
dmaengine_desc_get_callback_invoke(&c->txd, NULL);

/* Paired with cppi41_dma_issue_pending */
pm_runtime_mark_last_busy(cdd->ddev.dev);
pm_runtime_put_autosuspend(cdd->ddev.dev);
}
@ -362,8 +367,13 @@ static int cppi41_dma_alloc_chan_resources(struct dma_chan *chan)
int error;

error = pm_runtime_get_sync(cdd->ddev.dev);
if (error < 0)
if (error < 0) {
dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n",
__func__, error);
pm_runtime_put_noidle(cdd->ddev.dev);

return error;
}

dma_cookie_init(chan);
dma_async_tx_descriptor_init(&c->txd, chan);
@ -385,8 +395,11 @@ static void cppi41_dma_free_chan_resources(struct dma_chan *chan)
int error;

error = pm_runtime_get_sync(cdd->ddev.dev);
if (error < 0)
if (error < 0) {
pm_runtime_put_noidle(cdd->ddev.dev);

return;
}

WARN_ON(!list_empty(&cdd->pending));

@ -460,9 +473,9 @@ static void cppi41_dma_issue_pending(struct dma_chan *chan)
struct cppi41_dd *cdd = c->cdd;
int error;

/* PM runtime paired with dmaengine_desc_get_callback_invoke */
error = pm_runtime_get(cdd->ddev.dev);
if ((error != -EINPROGRESS) && error < 0) {
pm_runtime_put_noidle(cdd->ddev.dev);
dev_err(cdd->ddev.dev, "Failed to pm_runtime_get: %i\n",
error);

@ -473,6 +486,9 @@ static void cppi41_dma_issue_pending(struct dma_chan *chan)
push_desc_queue(c);
else
pending_desc(c);

pm_runtime_mark_last_busy(cdd->ddev.dev);
pm_runtime_put_autosuspend(cdd->ddev.dev);
}

static u32 get_host_pd0(u32 length)
@ -1059,8 +1075,8 @@ err_chans:
deinit_cppi41(dev, cdd);
err_init_cppi:
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_put_sync(dev);
err_get_sync:
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
iounmap(cdd->usbss_mem);
iounmap(cdd->ctrl_mem);
@ -1072,7 +1088,12 @@ err_get_sync:
static int cppi41_dma_remove(struct platform_device *pdev)
{
struct cppi41_dd *cdd = platform_get_drvdata(pdev);
int error;

error = pm_runtime_get_sync(&pdev->dev);
if (error < 0)
dev_err(&pdev->dev, "%s could not pm_runtime_get: %i\n",
__func__, error);
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&cdd->ddev);

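All of the cppi41 hunks enforce the same runtime-PM pairing rule: a pm_runtime_get*() that fails still raises the usage count, so it must be undone with pm_runtime_put_noidle(), and every successful get must eventually be matched by a put. A minimal sketch of the pattern under those assumptions (hypothetical function, not driver code):

    #include <linux/device.h>
    #include <linux/pm_runtime.h>

    static int do_one_transfer(struct device *dev)
    {
            int err = pm_runtime_get_sync(dev);

            if (err < 0) {
                    pm_runtime_put_noidle(dev);     /* undo the refcount bump */
                    return err;
            }

            /* ... program the hardware ... */

            pm_runtime_mark_last_busy(dev);
            pm_runtime_put_autosuspend(dev);        /* matched put */
            return 0;
    }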
@ -1628,6 +1628,7 @@ static int edma_alloc_chan_resources(struct dma_chan *chan)
if (echan->slot[0] < 0) {
dev_err(dev, "Entry slot allocation failed for channel %u\n",
EDMA_CHAN_SLOT(echan->ch_num));
ret = echan->slot[0];
goto err_slot;
}

@ -578,7 +578,7 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy(

burst = convert_burst(8);
width = convert_buswidth(DMA_SLAVE_BUSWIDTH_4_BYTES);
v_lli->cfg |= DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) |
v_lli->cfg = DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) |
DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) |
DMA_CHAN_CFG_DST_LINEAR_MODE |
DMA_CHAN_CFG_SRC_LINEAR_MODE |
@ -22,10 +22,6 @@ menuconfig GPIOLIB

if GPIOLIB

config GPIO_DEVRES
def_bool y
depends on HAS_IOMEM

config OF_GPIO
def_bool y
depends on OF

@ -2,7 +2,7 @@

ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG

obj-$(CONFIG_GPIO_DEVRES) += devres.o
obj-$(CONFIG_GPIOLIB) += devres.o
obj-$(CONFIG_GPIOLIB) += gpiolib.o
obj-$(CONFIG_GPIOLIB) += gpiolib-legacy.o
obj-$(CONFIG_OF_GPIO) += gpiolib-of.o
@ -372,14 +372,15 @@ static void pca953x_gpio_set_multiple(struct gpio_chip *gc,

bank_shift = fls((chip->gpio_chip.ngpio - 1) / BANK_SZ);

memcpy(reg_val, chip->reg_output, NBANK(chip));
mutex_lock(&chip->i2c_lock);
memcpy(reg_val, chip->reg_output, NBANK(chip));
for (bank = 0; bank < NBANK(chip); bank++) {
bank_mask = mask[bank / sizeof(*mask)] >>
((bank % sizeof(*mask)) * 8);
if (bank_mask) {
bank_val = bits[bank / sizeof(*bits)] >>
((bank % sizeof(*bits)) * 8);
bank_val &= bank_mask;
reg_val[bank] = (reg_val[bank] & ~bank_mask) | bank_val;
}
}
@ -607,7 +608,6 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,

if (client->irq && irq_base != -1
&& (chip->driver_data & PCA_INT)) {

ret = pca953x_read_regs(chip,
chip->regs->input, chip->irq_stat);
if (ret)
@ -97,7 +97,7 @@ static int tc3589x_gpio_get_direction(struct gpio_chip *chip,
if (ret < 0)
return ret;

return !!(ret & BIT(pos));
return !(ret & BIT(pos));
}

static int tc3589x_gpio_set_single_ended(struct gpio_chip *chip,
@ -2737,8 +2737,11 @@ int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
if (IS_ERR(desc))
return PTR_ERR(desc);

/* Flush direction if something changed behind our back */
if (chip->get_direction) {
/*
 * If it's fast: flush the direction setting if something changed
 * behind our back
 */
if (!chip->can_sleep && chip->get_direction) {
int dir = chip->get_direction(chip, offset);

if (dir)
@ -459,6 +459,7 @@ struct amdgpu_bo {
u64 metadata_flags;
void *metadata;
u32 metadata_size;
unsigned prime_shared_count;
/* list of all virtual address to which this bo
 * is associated to
 */
@ -132,7 +132,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
entry->priority = min(info[i].bo_priority,
AMDGPU_BO_LIST_MAX_PRIORITY);
entry->tv.bo = &entry->robj->tbo;
entry->tv.shared = true;
entry->tv.shared = !entry->robj->prime_shared_count;

if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GDS)
gds_obj = entry->robj;
@ -658,12 +658,10 @@ static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
return false;

if (amdgpu_passthrough(adev)) {
/* for FIJI: In whole GPU pass-through virtualization case
 * old smc fw won't clear some registers (e.g. MEM_SIZE, BIOS_SCRATCH)
 * so amdgpu_card_posted return false and driver will incorrectly skip vPost.
 * but if we force vPost do in pass-through case, the driver reload will hang.
 * whether doing vPost depends on amdgpu_card_posted if smc version is above
 * 00160e00 for FIJI.
|
||||
* some old smc fw still need driver do vPost otherwise gpu hang, while
|
||||
* those smc fw version above 22.15 doesn't have this flaw, so we force
|
||||
* vpost executed for smc version below 22.15
|
||||
*/
|
||||
if (adev->asic_type == CHIP_FIJI) {
|
||||
int err;
|
||||
@ -674,22 +672,11 @@ static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
|
||||
return true;
|
||||
|
||||
fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
|
||||
if (fw_ver >= 0x00160e00)
|
||||
return !amdgpu_card_posted(adev);
|
||||
if (fw_ver < 0x00160e00)
|
||||
return true;
|
||||
}
|
||||
} else {
|
||||
/* in bare-metal case, amdgpu_card_posted return false
|
||||
* after system reboot/boot, and return true if driver
|
||||
* reloaded.
|
||||
* we shouldn't do vPost after driver reload otherwise GPU
|
||||
* could hang.
|
||||
*/
|
||||
if (amdgpu_card_posted(adev))
|
||||
return false;
|
||||
}
|
||||
|
||||
/* we assume vPost is needed for all other cases */
return true;
return !amdgpu_card_posted(adev);
}

/**
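After the simplification, the whole decision collapses to one special case plus the posted check. A hedged restatement of the resulting policy as a pure function (illustrative only, not the driver's code):

    /* illustrative restatement of the simplified amdgpu_vpost_needed() logic */
    static bool vpost_needed(bool passthrough, bool is_fiji,
                             unsigned int smc_fw_ver, bool card_posted)
    {
            /* FIJI pass-through with old SMC firmware must always post */
            if (passthrough && is_fiji && smc_fw_ver < 0x00160e00)
                    return true;

            /* every other case: post only when the card is not posted yet */
            return !card_posted;
    }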
@ -74,20 +74,36 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
if (ret)
return ERR_PTR(ret);

bo->prime_shared_count = 1;
return &bo->gem_base;
}

int amdgpu_gem_prime_pin(struct drm_gem_object *obj)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
int ret = 0;
long ret = 0;

ret = amdgpu_bo_reserve(bo, false);
if (unlikely(ret != 0))
return ret;

/*
 * Wait for all shared fences to complete before we switch to future
 * use of exclusive fence on this prime shared bo.
 */
ret = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
MAX_SCHEDULE_TIMEOUT);
if (unlikely(ret < 0)) {
DRM_DEBUG_PRIME("Fence wait failed: %li\n", ret);
amdgpu_bo_unreserve(bo);
return ret;
}

/* pin buffer into GTT */
ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
if (likely(ret == 0))
bo->prime_shared_count++;

amdgpu_bo_unreserve(bo);
return ret;
}
@ -102,6 +118,8 @@ void amdgpu_gem_prime_unpin(struct drm_gem_object *obj)
return;

amdgpu_bo_unpin(bo);
if (bo->prime_shared_count)
bo->prime_shared_count--;
amdgpu_bo_unreserve(bo);
}

@ -1469,8 +1469,6 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
table_info->vddgfx_lookup_table, vv_id, &sclk)) {
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ClockStretcher)) {
if (table_info == NULL)
return -EINVAL;
sclk_table = table_info->vdd_dep_on_sclk;

for (j = 1; j < sclk_table->count; j++) {
@ -14,170 +14,45 @@
 *
 */

#include <drm/drm_crtc_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_encoder_slave.h>
#include <drm/drm_atomic_helper.h>

#include "arcpgu.h"

struct arcpgu_drm_connector {
struct drm_connector connector;
struct drm_encoder_slave *encoder_slave;
};

static int arcpgu_drm_connector_get_modes(struct drm_connector *connector)
{
const struct drm_encoder_slave_funcs *sfuncs;
struct drm_encoder_slave *slave;
struct arcpgu_drm_connector *con =
container_of(connector, struct arcpgu_drm_connector, connector);

slave = con->encoder_slave;
if (slave == NULL) {
dev_err(connector->dev->dev,
"connector_get_modes: cannot find slave encoder for connector\n");
return 0;
}

sfuncs = slave->slave_funcs;
if (sfuncs->get_modes == NULL)
return 0;

return sfuncs->get_modes(&slave->base, connector);
}

static enum drm_connector_status
arcpgu_drm_connector_detect(struct drm_connector *connector, bool force)
{
enum drm_connector_status status = connector_status_unknown;
const struct drm_encoder_slave_funcs *sfuncs;
struct drm_encoder_slave *slave;

struct arcpgu_drm_connector *con =
container_of(connector, struct arcpgu_drm_connector, connector);

slave = con->encoder_slave;
if (slave == NULL) {
dev_err(connector->dev->dev,
"connector_detect: cannot find slave encoder for connector\n");
return status;
}

sfuncs = slave->slave_funcs;
if (sfuncs && sfuncs->detect)
return sfuncs->detect(&slave->base, connector);

dev_err(connector->dev->dev, "connector_detect: could not detect slave funcs\n");
return status;
}

static void arcpgu_drm_connector_destroy(struct drm_connector *connector)
{
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
}

static const struct drm_connector_helper_funcs
arcpgu_drm_connector_helper_funcs = {
.get_modes = arcpgu_drm_connector_get_modes,
};

static const struct drm_connector_funcs arcpgu_drm_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.reset = drm_atomic_helper_connector_reset,
.detect = arcpgu_drm_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = arcpgu_drm_connector_destroy,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

static struct drm_encoder_helper_funcs arcpgu_drm_encoder_helper_funcs = {
.dpms = drm_i2c_encoder_dpms,
.mode_fixup = drm_i2c_encoder_mode_fixup,
.mode_set = drm_i2c_encoder_mode_set,
.prepare = drm_i2c_encoder_prepare,
.commit = drm_i2c_encoder_commit,
.detect = drm_i2c_encoder_detect,
};

static struct drm_encoder_funcs arcpgu_drm_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};

int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np)
{
struct arcpgu_drm_connector *arcpgu_connector;
struct drm_i2c_encoder_driver *driver;
struct drm_encoder_slave *encoder;
struct drm_connector *connector;
struct i2c_client *i2c_slave;
int ret;
struct drm_encoder *encoder;
struct drm_bridge *bridge;

int ret = 0;

encoder = devm_kzalloc(drm->dev, sizeof(*encoder), GFP_KERNEL);
|
||||
if (encoder == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
i2c_slave = of_find_i2c_device_by_node(np);
|
||||
if (!i2c_slave || !i2c_get_clientdata(i2c_slave)) {
|
||||
dev_err(drm->dev, "failed to find i2c slave encoder\n");
|
||||
/* Locate drm bridge from the hdmi encoder DT node */
|
||||
bridge = of_drm_find_bridge(np);
|
||||
if (!bridge)
|
||||
return -EPROBE_DEFER;
|
||||
}
|
||||
|
||||
if (i2c_slave->dev.driver == NULL) {
|
||||
dev_err(drm->dev, "failed to find i2c slave driver\n");
|
||||
return -EPROBE_DEFER;
|
||||
}
|
||||
|
||||
driver =
|
||||
to_drm_i2c_encoder_driver(to_i2c_driver(i2c_slave->dev.driver));
|
||||
ret = driver->encoder_init(i2c_slave, drm, encoder);
|
||||
if (ret) {
|
||||
dev_err(drm->dev, "failed to initialize i2c encoder slave\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
encoder->base.possible_crtcs = 1;
|
||||
encoder->base.possible_clones = 0;
|
||||
ret = drm_encoder_init(drm, &encoder->base, &arcpgu_drm_encoder_funcs,
|
||||
encoder->possible_crtcs = 1;
|
||||
encoder->possible_clones = 0;
|
||||
ret = drm_encoder_init(drm, encoder, &arcpgu_drm_encoder_funcs,
|
||||
DRM_MODE_ENCODER_TMDS, NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
drm_encoder_helper_add(&encoder->base,
|
||||
&arcpgu_drm_encoder_helper_funcs);
|
||||
/* Link drm_bridge to encoder */
|
||||
bridge->encoder = encoder;
|
||||
encoder->bridge = bridge;
|
||||
|
||||
arcpgu_connector = devm_kzalloc(drm->dev, sizeof(*arcpgu_connector),
|
||||
GFP_KERNEL);
|
||||
if (!arcpgu_connector) {
|
||||
ret = -ENOMEM;
|
||||
goto error_encoder_cleanup;
|
||||
}
|
||||
ret = drm_bridge_attach(drm, bridge);
|
||||
if (ret)
|
||||
drm_encoder_cleanup(encoder);
|
||||
|
||||
connector = &arcpgu_connector->connector;
|
||||
drm_connector_helper_add(connector, &arcpgu_drm_connector_helper_funcs);
|
||||
ret = drm_connector_init(drm, connector, &arcpgu_drm_connector_funcs,
|
||||
DRM_MODE_CONNECTOR_HDMIA);
|
||||
if (ret < 0) {
|
||||
dev_err(drm->dev, "failed to initialize drm connector\n");
|
||||
goto error_encoder_cleanup;
|
||||
}
|
||||
|
||||
ret = drm_mode_connector_attach_encoder(connector, &encoder->base);
|
||||
if (ret < 0) {
|
||||
dev_err(drm->dev, "could not attach connector to encoder\n");
|
||||
drm_connector_unregister(connector);
|
||||
goto error_connector_cleanup;
|
||||
}
|
||||
|
||||
arcpgu_connector->encoder_slave = encoder;
|
||||
|
||||
return 0;
|
||||
|
||||
error_connector_cleanup:
|
||||
drm_connector_cleanup(connector);
|
||||
|
||||
error_encoder_cleanup:
|
||||
drm_encoder_cleanup(&encoder->base);
|
||||
return ret;
|
||||
}
|
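
The rewrite replaces the whole i2c slave-encoder lookup with a single of_drm_find_bridge() call, and leans on the -EPROBE_DEFER contract: if the bridge driver has not bound yet, fail softly and let the core retry. A toy model of that contract, with hypothetical names standing in for the DRM API:

	#include <stdio.h>

	#define EPROBE_DEFER 517  /* same value as the kernel's EPROBE_DEFER */

	struct toy_bridge { int bound; };

	/* Stand-in for of_drm_find_bridge(): NULL until the bridge driver
	 * has registered itself for the DT node. */
	static struct toy_bridge *toy_find_bridge(struct toy_bridge *registry)
	{
		return (registry && registry->bound) ? registry : NULL;
	}

	static int toy_hdmi_init(struct toy_bridge *registry)
	{
		struct toy_bridge *bridge = toy_find_bridge(registry);
		if (!bridge)
			return -EPROBE_DEFER;  /* retried once the bridge probes */
		/* ... drm_encoder_init() and drm_bridge_attach() would follow ... */
		return 0;
	}

	int main(void)
	{
		struct toy_bridge adv7511 = { .bound = 0 };
		printf("before bridge probe: %d\n", toy_hdmi_init(&adv7511));
		adv7511.bound = 1;
		printf("after bridge probe:  %d\n", toy_hdmi_init(&adv7511));
		return 0;
	}
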

@ -25,8 +25,13 @@
static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc,
					  struct drm_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
	struct drm_pending_vblank_event *event = crtc->state->event;

	regmap_write(fsl_dev->regmap,
		     DCU_UPDATE_MODE, DCU_UPDATE_MODE_READREG);

	if (event) {
		crtc->state->event = NULL;

@ -39,11 +44,15 @@ static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc,
	}
}

static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc)
static void fsl_dcu_drm_crtc_atomic_disable(struct drm_crtc *crtc,
					    struct drm_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;

	/* always disable planes on the CRTC */
	drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, true);

	drm_crtc_vblank_off(crtc);

	regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
@ -122,8 +131,8 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
}

static const struct drm_crtc_helper_funcs fsl_dcu_drm_crtc_helper_funcs = {
	.atomic_disable = fsl_dcu_drm_crtc_atomic_disable,
	.atomic_flush = fsl_dcu_drm_crtc_atomic_flush,
	.disable = fsl_dcu_drm_disable_crtc,
	.enable = fsl_dcu_drm_crtc_enable,
	.mode_set_nofb = fsl_dcu_drm_crtc_mode_set_nofb,
};

@ -59,8 +59,6 @@ static int fsl_dcu_drm_irq_init(struct drm_device *dev)

	regmap_write(fsl_dev->regmap, DCU_INT_STATUS, 0);
	regmap_write(fsl_dev->regmap, DCU_INT_MASK, ~0);
	regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
		     DCU_UPDATE_MODE_READREG);

	return ret;
}
@ -139,8 +137,6 @@ static irqreturn_t fsl_dcu_drm_irq(int irq, void *arg)
		drm_handle_vblank(dev, 0);

	regmap_write(fsl_dev->regmap, DCU_INT_STATUS, int_status);
	regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
		     DCU_UPDATE_MODE_READREG);

	return IRQ_HANDLED;
}

@ -160,11 +160,6 @@ static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane,
				DCU_LAYER_POST_SKIP(0) |
				DCU_LAYER_PRE_SKIP(0));
	}
	regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
			   DCU_MODE_DCU_MODE_MASK,
			   DCU_MODE_DCU_MODE(DCU_MODE_NORMAL));
	regmap_write(fsl_dev->regmap,
		     DCU_UPDATE_MODE, DCU_UPDATE_MODE_READREG);

	return;
}
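
All three fsl-dcu hunks revolve around the DCU_UPDATE_MODE_READREG write being consolidated into atomic_flush. A rough mental model, assuming READREG acts as a commit that latches staged register writes into the active set (which is what moving the write to one flush point implies, not something stated in the hunks themselves):

	#include <stdio.h>
	#include <string.h>

	/* Toy double-buffered register file: writes stage into a shadow copy
	 * and become visible only on an explicit commit, the role
	 * DCU_UPDATE_MODE_READREG plays above. */
	struct toy_dcu {
		unsigned int shadow[4];
		unsigned int live[4];
	};

	static void toy_dcu_write(struct toy_dcu *d, int reg, unsigned int val)
	{
		d->shadow[reg] = val;  /* staged, not yet visible to scanout */
	}

	static void toy_dcu_commit(struct toy_dcu *d)
	{
		memcpy(d->live, d->shadow, sizeof(d->live));
	}

	int main(void)
	{
		struct toy_dcu dcu = { { 0 }, { 0 } };
		toy_dcu_write(&dcu, 0, 0xabcd);
		printf("before commit: %#x\n", dcu.live[0]);  /* 0 */
		toy_dcu_commit(&dcu);
		printf("after commit:  %#x\n", dcu.live[0]);  /* 0xabcd */
		return 0;
	}
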
@ -1281,6 +1281,12 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
	return ctx;
}

static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	return !(obj->cache_level == I915_CACHE_NONE ||
		 obj->cache_level == I915_CACHE_WT);
}

void i915_vma_move_to_active(struct i915_vma *vma,
			     struct drm_i915_gem_request *req,
			     unsigned int flags)
@ -1311,6 +1317,8 @@ void i915_vma_move_to_active(struct i915_vma *vma,

		/* update for the implicit flush after a batch */
		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
		if (!obj->cache_dirty && gpu_write_needs_clflush(obj))
			obj->cache_dirty = true;
	}

	if (flags & EXEC_OBJECT_NEEDS_FENCE)
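
The predicate added above is small enough to restate standalone: per the hunk, uncached (NONE) and write-through (WT) objects never need a clflush after a GPU write, so only write-back cached objects get marked cache-dirty. A self-contained mirror of that logic, with toy names:

	#include <stdbool.h>
	#include <stdio.h>

	enum toy_cache_level { TOY_CACHE_NONE, TOY_CACHE_WT, TOY_CACHE_WB };

	/* Same shape as gpu_write_needs_clflush() in the hunk. */
	static bool toy_gpu_write_needs_clflush(enum toy_cache_level lvl)
	{
		return !(lvl == TOY_CACHE_NONE || lvl == TOY_CACHE_WT);
	}

	int main(void)
	{
		printf("NONE: %d, WT: %d, WB: %d\n",
		       toy_gpu_write_needs_clflush(TOY_CACHE_NONE),
		       toy_gpu_write_needs_clflush(TOY_CACHE_WT),
		       toy_gpu_write_needs_clflush(TOY_CACHE_WB));
		return 0;
	}
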
@ -1143,7 +1143,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
	if (!child)
		return;

	aux_channel = child->raw[25];
	aux_channel = child->common.aux_channel;
	ddc_pin = child->common.ddc_pin;

	is_dvi = child->common.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
@ -1673,7 +1673,8 @@ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
	return false;
}

bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port)
static bool child_dev_is_dp_dual_mode(const union child_device_config *p_child,
				      enum port port)
{
	static const struct {
		u16 dp, hdmi;
@ -1687,22 +1688,35 @@ bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum por
		[PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
		[PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
	};
	int i;

	if (port == PORT_A || port >= ARRAY_SIZE(port_mapping))
		return false;

	if (!dev_priv->vbt.child_dev_num)
	if ((p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) !=
	    (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS))
		return false;

	if (p_child->common.dvo_port == port_mapping[port].dp)
		return true;

	/* Only accept a HDMI dvo_port as DP++ if it has an AUX channel */
	if (p_child->common.dvo_port == port_mapping[port].hdmi &&
	    p_child->common.aux_channel != 0)
		return true;

	return false;
}

bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv,
				     enum port port)
{
	int i;

	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
		const union child_device_config *p_child =
			&dev_priv->vbt.child_dev[i];

		if ((p_child->common.dvo_port == port_mapping[port].dp ||
		     p_child->common.dvo_port == port_mapping[port].hdmi) &&
		    (p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) ==
		    (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS))
		if (child_dev_is_dp_dual_mode(p_child, port))
			return true;
	}

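
The refactor tightens the DP++ test: an HDMI dvo_port alias now counts only when the child device also reports an AUX channel. A self-contained sketch of that predicate; the constants here are placeholders, the real DEVICE_TYPE_* masks live in intel_vbt_defs.h and are not reproduced:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define TOY_DUAL_MODE_BITS 0x0060  /* placeholder mask */
	#define TOY_DUAL_MODE      0x0060  /* placeholder value */

	struct toy_child {
		uint16_t device_type;
		uint8_t dvo_port;
		uint8_t aux_channel;
	};

	/* Same shape as child_dev_is_dp_dual_mode() above: the device type
	 * must carry the dual-mode bits, and the dvo_port must be the
	 * port's DP alias, or its HDMI alias backed by an AUX channel. */
	static bool toy_is_dp_dual_mode(const struct toy_child *c,
					uint8_t dp_alias, uint8_t hdmi_alias)
	{
		if ((c->device_type & TOY_DUAL_MODE_BITS) !=
		    (TOY_DUAL_MODE & TOY_DUAL_MODE_BITS))
			return false;

		if (c->dvo_port == dp_alias)
			return true;

		/* an HDMI alias only counts as DP++ with an AUX channel */
		return c->dvo_port == hdmi_alias && c->aux_channel != 0;
	}

	int main(void)
	{
		struct toy_child hdmi_no_aux = { TOY_DUAL_MODE, 2, 0 };
		printf("HDMI alias without AUX: %d\n",
		       toy_is_dp_dual_mode(&hdmi_no_aux, 1, 2));  /* 0 */
		return 0;
	}
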
@ -4463,21 +4463,11 @@ static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	enum drm_connector_status status = connector->status;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);

	if (intel_dp->is_mst) {
		/* MST devices are disconnected from a monitor POV */
		intel_dp_unset_edid(intel_dp);
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DP;
		return connector_status_disconnected;
	}

	/* If full detect is not performed yet, do a full detect */
	if (!intel_dp->detect_done)
		status = intel_dp_long_pulse(intel_dp->attached_connector);
@ -358,7 +358,7 @@ vlv_update_plane(struct drm_plane *dplane,
	int plane = intel_plane->plane;
	u32 sprctl;
	u32 sprsurf_offset, linear_offset;
	unsigned int rotation = dplane->state->rotation;
	unsigned int rotation = plane_state->base.rotation;
	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
	int crtc_x = plane_state->base.dst.x1;
	int crtc_y = plane_state->base.dst.y1;

@ -280,7 +280,8 @@ struct common_child_dev_config {
	u8 dp_support:1;
	u8 tmds_support:1;
	u8 support_reserved:5;
	u8 not_common3[12];
	u8 aux_channel;
	u8 not_common3[11];
	u8 iboost_level;
} __packed;
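
The layout change is deliberately size-neutral: one byte is carved out of the not_common3 pad and named aux_channel, so every later field keeps its offset. A compile-time check of that invariant, with the surrounding fields elided as an assumption of the sketch:

	#include <stdint.h>

	struct toy_old {
		uint8_t not_common3[12];
		uint8_t iboost_level;
	} __attribute__((packed));

	struct toy_new {
		uint8_t aux_channel;
		uint8_t not_common3[11];
		uint8_t iboost_level;
	} __attribute__((packed));

	_Static_assert(sizeof(struct toy_old) == sizeof(struct toy_new),
		       "naming a pad byte must not change the VBT layout");

	int main(void) { return 0; }
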

@ -80,6 +80,7 @@ static void mtk_ovl_enable_vblank(struct mtk_ddp_comp *comp,
					  ddp_comp);

	priv->crtc = crtc;
	writel(0x0, comp->regs + DISP_REG_OVL_INTSTA);
	writel_relaxed(OVL_FME_CPL_INT, comp->regs + DISP_REG_OVL_INTEN);
}
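
The added INTSTA write clears pending status before the interrupt is unmasked. Assuming the usual latched-status semantics (a line fires whenever status AND enable is non-zero), a toy model of why the order matters:

	#include <stdio.h>

	/* Toy interrupt block: status bits latch until cleared. Clearing
	 * INTSTA before writing INTEN, as the hunk does, prevents a stale
	 * completion bit from firing the moment the mask opens. */
	struct toy_irq { unsigned int intsta, inten; };

	static int toy_irq_pending(const struct toy_irq *r)
	{
		return (r->intsta & r->inten) != 0;
	}

	int main(void)
	{
		struct toy_irq r = { .intsta = 0x1, .inten = 0 };  /* stale bit */

		r.intsta = 0;   /* writel(0x0, ... INTSTA) */
		r.inten = 0x1;  /* writel_relaxed(OVL_FME_CPL_INT, ... INTEN) */
		printf("spurious irq: %d\n", toy_irq_pending(&r));  /* 0 */
		return 0;
	}
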

@ -432,11 +432,16 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
	unsigned long pll_rate;
	unsigned int factor;

	/* keep pll_rate within the TVDPLL's valid range (1 GHz to 2 GHz) */
	pix_rate = 1000UL * mode->clock;
	if (mode->clock <= 74000)
	if (mode->clock <= 27000)
		factor = 16 * 3;
	else if (mode->clock <= 84000)
		factor = 8 * 3;
	else
	else if (mode->clock <= 167000)
		factor = 4 * 3;
	else
		factor = 2 * 3;
	pll_rate = pix_rate * factor;

	dev_dbg(dpi->dev, "Want PLL %lu Hz, pixel clock %lu Hz\n",
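
The new four-step factor table is easy to sanity-check in isolation: for any common pixel clock, the chosen multiplier lands the PLL in the 1 to 2 GHz window the comment describes. A standalone copy of the table with a few test clocks:

	#include <stdio.h>

	/* The factor table from the hunk: multiply the pixel clock (kHz,
	 * as in mode->clock) so the TVDPLL rate lands in 1-2 GHz. */
	static unsigned int toy_pll_factor(int clock_khz)
	{
		if (clock_khz <= 27000)
			return 16 * 3;
		else if (clock_khz <= 84000)
			return 8 * 3;
		else if (clock_khz <= 167000)
			return 4 * 3;
		else
			return 2 * 3;
	}

	int main(void)
	{
		const int clocks[] = { 27000, 74250, 148500, 297000 };
		for (int i = 0; i < 4; i++)
			printf("%6d kHz -> PLL %lu Hz\n", clocks[i],
			       1000UL * clocks[i] * toy_pll_factor(clocks[i]));
		return 0;
	}
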
@ -1133,12 +1133,6 @@ static int mtk_hdmi_output_set_display_mode(struct mtk_hdmi *hdmi,
	phy_power_on(hdmi->phy);
	mtk_hdmi_aud_output_config(hdmi, mode);

	mtk_hdmi_setup_audio_infoframe(hdmi);
	mtk_hdmi_setup_avi_infoframe(hdmi, mode);
	mtk_hdmi_setup_spd_infoframe(hdmi, "mediatek", "On-chip HDMI");
	if (mode->flags & DRM_MODE_FLAG_3D_MASK)
		mtk_hdmi_setup_vendor_specific_infoframe(hdmi, mode);

	mtk_hdmi_hw_vid_black(hdmi, false);
	mtk_hdmi_hw_aud_unmute(hdmi);
	mtk_hdmi_hw_send_av_unmute(hdmi);
@ -1401,6 +1395,16 @@ static void mtk_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
	hdmi->powered = true;
}

static void mtk_hdmi_send_infoframe(struct mtk_hdmi *hdmi,
				    struct drm_display_mode *mode)
{
	mtk_hdmi_setup_audio_infoframe(hdmi);
	mtk_hdmi_setup_avi_infoframe(hdmi, mode);
	mtk_hdmi_setup_spd_infoframe(hdmi, "mediatek", "On-chip HDMI");
	if (mode->flags & DRM_MODE_FLAG_3D_MASK)
		mtk_hdmi_setup_vendor_specific_infoframe(hdmi, mode);
}

static void mtk_hdmi_bridge_enable(struct drm_bridge *bridge)
{
	struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
@ -1409,6 +1413,7 @@ static void mtk_hdmi_bridge_enable(struct drm_bridge *bridge)
	clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]);
	clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PIXEL]);
	phy_power_on(hdmi->phy);
	mtk_hdmi_send_infoframe(hdmi, &hdmi->mode);

	hdmi->enabled = true;
}
@ -265,6 +265,9 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
	struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
	unsigned int pre_div;
	unsigned int div;
	unsigned int pre_ibias;
	unsigned int hdmi_ibias;
	unsigned int imp_en;

	dev_dbg(hdmi_phy->dev, "%s: %lu Hz, parent: %lu Hz\n", __func__,
		rate, parent_rate);
@ -298,18 +301,31 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
			  (0x1 << PLL_BR_SHIFT),
			  RG_HDMITX_PLL_BP | RG_HDMITX_PLL_BC |
			  RG_HDMITX_PLL_BR);
	mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, RG_HDMITX_PRD_IMP_EN);
	if (rate < 165000000) {
		mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3,
					RG_HDMITX_PRD_IMP_EN);
		pre_ibias = 0x3;
		imp_en = 0x0;
		hdmi_ibias = hdmi_phy->ibias;
	} else {
		mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON3,
				      RG_HDMITX_PRD_IMP_EN);
		pre_ibias = 0x6;
		imp_en = 0xf;
		hdmi_ibias = hdmi_phy->ibias_up;
	}
	mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON4,
			  (0x3 << PRD_IBIAS_CLK_SHIFT) |
			  (0x3 << PRD_IBIAS_D2_SHIFT) |
			  (0x3 << PRD_IBIAS_D1_SHIFT) |
			  (0x3 << PRD_IBIAS_D0_SHIFT),
			  (pre_ibias << PRD_IBIAS_CLK_SHIFT) |
			  (pre_ibias << PRD_IBIAS_D2_SHIFT) |
			  (pre_ibias << PRD_IBIAS_D1_SHIFT) |
			  (pre_ibias << PRD_IBIAS_D0_SHIFT),
			  RG_HDMITX_PRD_IBIAS_CLK |
			  RG_HDMITX_PRD_IBIAS_D2 |
			  RG_HDMITX_PRD_IBIAS_D1 |
			  RG_HDMITX_PRD_IBIAS_D0);
	mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON3,
			  (0x0 << DRV_IMP_EN_SHIFT), RG_HDMITX_DRV_IMP_EN);
			  (imp_en << DRV_IMP_EN_SHIFT),
			  RG_HDMITX_DRV_IMP_EN);
	mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6,
			  (hdmi_phy->drv_imp_clk << DRV_IMP_CLK_SHIFT) |
			  (hdmi_phy->drv_imp_d2 << DRV_IMP_D2_SHIFT) |
@ -318,12 +334,14 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
			  RG_HDMITX_DRV_IMP_CLK | RG_HDMITX_DRV_IMP_D2 |
			  RG_HDMITX_DRV_IMP_D1 | RG_HDMITX_DRV_IMP_D0);
	mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON5,
			  (hdmi_phy->ibias << DRV_IBIAS_CLK_SHIFT) |
			  (hdmi_phy->ibias << DRV_IBIAS_D2_SHIFT) |
			  (hdmi_phy->ibias << DRV_IBIAS_D1_SHIFT) |
			  (hdmi_phy->ibias << DRV_IBIAS_D0_SHIFT),
			  RG_HDMITX_DRV_IBIAS_CLK | RG_HDMITX_DRV_IBIAS_D2 |
			  RG_HDMITX_DRV_IBIAS_D1 | RG_HDMITX_DRV_IBIAS_D0);
			  (hdmi_ibias << DRV_IBIAS_CLK_SHIFT) |
			  (hdmi_ibias << DRV_IBIAS_D2_SHIFT) |
			  (hdmi_ibias << DRV_IBIAS_D1_SHIFT) |
			  (hdmi_ibias << DRV_IBIAS_D0_SHIFT),
			  RG_HDMITX_DRV_IBIAS_CLK |
			  RG_HDMITX_DRV_IBIAS_D2 |
			  RG_HDMITX_DRV_IBIAS_D1 |
			  RG_HDMITX_DRV_IBIAS_D0);
	return 0;
}
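
The essence of the PHY change is a single rate split: below 165 MHz the pre-driver runs with low bias and impedance trim disabled, at or above it with higher bias, full trim, and the ibias_up current. A standalone restatement using only the values visible in the hunk:

	#include <stdio.h>

	struct toy_phy_cfg { unsigned int pre_ibias, imp_en; };

	/* The 165 MHz split from the hunk; 0x3/0x0 and 0x6/0xf are the
	 * exact values it programs. */
	static struct toy_phy_cfg toy_phy_cfg_for(unsigned long rate)
	{
		if (rate < 165000000UL)
			return (struct toy_phy_cfg){ .pre_ibias = 0x3, .imp_en = 0x0 };
		return (struct toy_phy_cfg){ .pre_ibias = 0x6, .imp_en = 0xf };
	}

	int main(void)
	{
		struct toy_phy_cfg lo = toy_phy_cfg_for(74250000UL);
		struct toy_phy_cfg hi = toy_phy_cfg_for(297000000UL);
		printf("74.25 MHz: pre_ibias=%#x imp_en=%#x\n", lo.pre_ibias, lo.imp_en);
		printf("297 MHz:   pre_ibias=%#x imp_en=%#x\n", hi.pre_ibias, hi.imp_en);
		return 0;
	}
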
@ -142,9 +142,9 @@ static int sun4i_drv_bind(struct device *dev)

	/* Create our layers */
	drv->layers = sun4i_layers_init(drm);
	if (!drv->layers) {
	if (IS_ERR(drv->layers)) {
		dev_err(drm->dev, "Couldn't create the planes\n");
		ret = -EINVAL;
		ret = PTR_ERR(drv->layers);
		goto free_drm;
	}

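
The fix depends on the ERR_PTR convention: a failing sun4i_layers_init() returns an error encoded in the pointer itself, never NULL, so the NULL check could never fire and the real errno was discarded. A simplified, userspace-compilable version of that convention (the kernel's real macros live in include/linux/err.h):

	#include <stdio.h>

	#define TOY_MAX_ERRNO 4095UL

	static void *toy_err_ptr(long err) { return (void *)err; }
	static long toy_ptr_err(const void *p) { return (long)p; }
	static int toy_is_err(const void *p)
	{
		/* errors occupy the top 4095 pointer values */
		return (unsigned long)p >= (unsigned long)-TOY_MAX_ERRNO;
	}

	int main(void)
	{
		void *layers = toy_err_ptr(-22);  /* as if init failed with -EINVAL */
		if (toy_is_err(layers))
			printf("propagate %ld instead of a hard-coded -EINVAL\n",
			       toy_ptr_err(layers));
		return 0;
	}
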
@ -152,15 +152,13 @@ static void sun4i_rgb_encoder_enable(struct drm_encoder *encoder)

	DRM_DEBUG_DRIVER("Enabling RGB output\n");

	if (!IS_ERR(tcon->panel)) {
	if (!IS_ERR(tcon->panel))
		drm_panel_prepare(tcon->panel);
		drm_panel_enable(tcon->panel);
	}

	/* encoder->bridge can be NULL; drm_bridge_enable checks for it */
	drm_bridge_enable(encoder->bridge);

	sun4i_tcon_channel_enable(tcon, 0);

	if (!IS_ERR(tcon->panel))
		drm_panel_enable(tcon->panel);
}

static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder)
@ -171,15 +169,13 @@ static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder)

	DRM_DEBUG_DRIVER("Disabling RGB output\n");

	if (!IS_ERR(tcon->panel))
		drm_panel_disable(tcon->panel);

	sun4i_tcon_channel_disable(tcon, 0);

	/* encoder->bridge can be NULL; drm_bridge_disable checks for it */
	drm_bridge_disable(encoder->bridge);

	if (!IS_ERR(tcon->panel)) {
		drm_panel_disable(tcon->panel);
	if (!IS_ERR(tcon->panel))
		drm_panel_unprepare(tcon->panel);
	}
}

static void sun4i_rgb_encoder_mode_set(struct drm_encoder *encoder,
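
After the reorder, teardown is the exact mirror of bring-up: prepare, bridge enable, TCON enable, panel enable on the way up; panel disable, TCON disable, bridge disable, unprepare on the way down. A compact statement of that invariant, with the step names taken from the hunks:

	#include <stdio.h>

	static const char *enable_seq[]  = { "panel_prepare", "bridge_enable",
					     "tcon_enable", "panel_enable" };
	static const char *disable_seq[] = { "panel_disable", "tcon_disable",
					     "bridge_disable", "panel_unprepare" };

	int main(void)
	{
		/* disable_seq undoes enable_seq in reverse order */
		for (int i = 0; i < 4; i++)
			printf("up: %-15s down: %s\n",
			       enable_seq[i], disable_seq[3 - i]);
		return 0;
	}
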
@ -59,7 +59,6 @@ config I2C_CHARDEV

config I2C_MUX
	tristate "I2C bus multiplexing support"
	depends on HAS_IOMEM
	help
	  Say Y here if you want the I2C core to support the ability to
	  handle multiplexed I2C bus topologies, by presenting each