Merge branch 'linus' into x86/asm, to pick up fixes

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Author: Ingo Molnar
Date:   2016-06-11 11:25:50 +02:00
Commit: 50c0587eed
268 changed files with 1978 additions and 1393 deletions

View File

@ -3086,6 +3086,7 @@ M: Stephen Boyd <sboyd@codeaurora.org>
L: linux-clk@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/clk/linux.git
S: Maintained
F: Documentation/devicetree/bindings/clock/
F: drivers/clk/
X: drivers/clk/clkdev.c
F: include/linux/clk-pr*
@ -8008,6 +8009,7 @@ Q: http://patchwork.kernel.org/project/linux-wireless/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next.git
S: Maintained
F: Documentation/devicetree/bindings/net/wireless/
F: drivers/net/wireless/
NETXEN (1/10) GbE SUPPORT

View File

@ -61,7 +61,7 @@ config RWSEM_GENERIC_SPINLOCK
def_bool y
config ARCH_DISCONTIGMEM_ENABLE
def_bool y
def_bool n
config ARCH_FLATMEM_ENABLE
def_bool y
@ -186,9 +186,6 @@ if SMP
config ARC_HAS_COH_CACHES
def_bool n
config ARC_HAS_REENTRANT_IRQ_LV2
def_bool n
config ARC_MCIP
bool "ARConnect Multicore IP (MCIP) Support "
depends on ISA_ARCV2
@ -366,25 +363,10 @@ config NODES_SHIFT
if ISA_ARCOMPACT
config ARC_COMPACT_IRQ_LEVELS
bool "ARCompact IRQ Priorities: High(2)/Low(1)"
bool "Setup Timer IRQ as high Priority"
default n
# Timer HAS to be high priority, for any other high priority config
select ARC_IRQ3_LV2
# if SMP, LV2 enabled ONLY if ARC implementation has LV2 re-entrancy
depends on !SMP || ARC_HAS_REENTRANT_IRQ_LV2
if ARC_COMPACT_IRQ_LEVELS
config ARC_IRQ3_LV2
bool
config ARC_IRQ5_LV2
bool
config ARC_IRQ6_LV2
bool
endif #ARC_COMPACT_IRQ_LEVELS
depends on !SMP
config ARC_FPU_SAVE_RESTORE
bool "Enable FPU state persistence across context switch"
@ -407,11 +389,6 @@ config ARC_HAS_LLSC
default y
depends on !ARC_CANT_LLSC
config ARC_STAR_9000923308
bool "Workaround for llock/scond livelock"
default n
depends on ISA_ARCV2 && SMP && ARC_HAS_LLSC
config ARC_HAS_SWAPE
bool "Insn: SWAPE (endian-swap)"
default y
@ -471,7 +448,7 @@ config LINUX_LINK_BASE
config HIGHMEM
bool "High Memory Support"
select DISCONTIGMEM
select ARCH_DISCONTIGMEM_ENABLE
help
With ARC 2G:2G address split, only upper 2G is directly addressable by
kernel. Enable this to potentially allow access to rest of 2G and PAE

View File

@ -127,7 +127,7 @@ libs-y += arch/arc/lib/ $(LIBGCC)
boot := arch/arc/boot
#default target for make without any arguements.
#default target for make without any arguments.
KBUILD_IMAGE := bootpImage
all: $(KBUILD_IMAGE)

View File

@ -23,8 +23,6 @@
/ {
clock-frequency = <500000000>; /* 500 MHZ */
soc100 {
bus-frequency = <166666666>;

View File

@ -23,8 +23,6 @@
/ {
clock-frequency = <500000000>; /* 500 MHZ */
soc100 {
bus-frequency = <166666666>;

View File

@ -15,7 +15,6 @@
/ {
compatible = "snps,arc";
clock-frequency = <750000000>; /* 750 MHZ */
#address-cells = <1>;
#size-cells = <1>;

View File

@ -14,7 +14,6 @@
/ {
compatible = "snps,arc";
clock-frequency = <90000000>;
#address-cells = <1>;
#size-cells = <1>;

View File

@ -14,7 +14,6 @@
/ {
compatible = "snps,arc";
clock-frequency = <90000000>;
#address-cells = <1>;
#size-cells = <1>;

View File

@ -18,7 +18,6 @@
/ {
compatible = "ezchip,arc-nps";
clock-frequency = <83333333>; /* 83.333333 MHZ */
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&intc>;

View File

@ -11,7 +11,6 @@
/ {
compatible = "snps,nsim";
clock-frequency = <80000000>; /* 80 MHZ */
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&core_intc>;

View File

@ -11,7 +11,6 @@
/ {
compatible = "snps,nsimosci";
clock-frequency = <20000000>; /* 20 MHZ */
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&core_intc>;

View File

@ -11,7 +11,6 @@
/ {
compatible = "snps,nsimosci_hs";
clock-frequency = <20000000>; /* 20 MHZ */
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&core_intc>;

View File

@ -11,7 +11,6 @@
/ {
compatible = "snps,nsimosci_hs";
clock-frequency = <5000000>; /* 5 MHZ */
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&core_intc>;

View File

@ -13,7 +13,6 @@
/ {
compatible = "snps,arc";
clock-frequency = <80000000>; /* 80 MHZ */
#address-cells = <1>;
#size-cells = <1>;
chosen { };

View File

@ -8,7 +8,6 @@
/ {
compatible = "snps,arc";
clock-frequency = <80000000>; /* 80 MHZ */
#address-cells = <1>;
#size-cells = <1>;
chosen { };

View File

@ -8,7 +8,6 @@
/ {
compatible = "snps,arc";
clock-frequency = <80000000>; /* 80 MHZ */
#address-cells = <1>;
#size-cells = <1>;
chosen { };

View File

@ -14,7 +14,6 @@
/ {
compatible = "snps,arc";
clock-frequency = <50000000>;
#address-cells = <1>;
#size-cells = <1>;

View File

@ -15,7 +15,6 @@
/ {
compatible = "snps,arc";
clock-frequency = <50000000>;
#address-cells = <1>;
#size-cells = <1>;

View File

@ -25,50 +25,17 @@
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
#ifdef CONFIG_ARC_STAR_9000923308
#define SCOND_FAIL_RETRY_VAR_DEF \
unsigned int delay = 1, tmp; \
#define SCOND_FAIL_RETRY_ASM \
" bz 4f \n" \
" ; --- scond fail delay --- \n" \
" mov %[tmp], %[delay] \n" /* tmp = delay */ \
"2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \
" sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \
" rol %[delay], %[delay] \n" /* delay *= 2 */ \
" b 1b \n" /* start over */ \
"4: ; --- success --- \n" \
#define SCOND_FAIL_RETRY_VARS \
,[delay] "+&r" (delay),[tmp] "=&r" (tmp) \
#else /* !CONFIG_ARC_STAR_9000923308 */
#define SCOND_FAIL_RETRY_VAR_DEF
#define SCOND_FAIL_RETRY_ASM \
" bnz 1b \n" \
#define SCOND_FAIL_RETRY_VARS
#endif
#define ATOMIC_OP(op, c_op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
unsigned int val; \
SCOND_FAIL_RETRY_VAR_DEF \
unsigned int val; \
\
__asm__ __volatile__( \
"1: llock %[val], [%[ctr]] \n" \
" " #asm_op " %[val], %[val], %[i] \n" \
" scond %[val], [%[ctr]] \n" \
" \n" \
SCOND_FAIL_RETRY_ASM \
\
" bnz 1b \n" \
: [val] "=&r" (val) /* Early clobber to prevent reg reuse */ \
SCOND_FAIL_RETRY_VARS \
: [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \
[i] "ir" (i) \
: "cc"); \
@ -77,8 +44,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned int val; \
SCOND_FAIL_RETRY_VAR_DEF \
unsigned int val; \
\
/* \
* Explicit full memory barrier needed before/after as \
@ -90,11 +56,8 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
"1: llock %[val], [%[ctr]] \n" \
" " #asm_op " %[val], %[val], %[i] \n" \
" scond %[val], [%[ctr]] \n" \
" \n" \
SCOND_FAIL_RETRY_ASM \
\
" bnz 1b \n" \
: [val] "=&r" (val) \
SCOND_FAIL_RETRY_VARS \
: [ctr] "r" (&v->counter), \
[i] "ir" (i) \
: "cc"); \
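For reference, with the STAR 9000923308 workaround gone, ATOMIC_OP(add, +=, add) expands to roughly the plain llock/scond retry loop below. This is a sketch assembled from the macro body above, not a verbatim copy of generated code:

static inline void atomic_add(int i, atomic_t *v)
{
	unsigned int val;

	__asm__ __volatile__(
	"1:	llock   %[val], [%[ctr]]	\n"
	"	add     %[val], %[val], %[i]	\n"
	"	scond   %[val], [%[ctr]]	\n"
	"	bnz     1b			\n"
	: [val] "=&r" (val)		/* early clobber, see comment above */
	: [ctr] "r" (&v->counter),	/* llock needs register-direct addressing */
	  [i] "ir" (i)
	: "cc");
}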

View File

@ -76,8 +76,8 @@
* We need to be a bit more cautious here. What if a kernel bug in
* L1 ISR, caused SP to go whaco (some small value which looks like
* USER stk) and then we take L2 ISR.
* Above brlo alone would treat it as a valid L1-L2 sceanrio
* instead of shouting alound
* Above brlo alone would treat it as a valid L1-L2 scenario
* instead of shouting around
* The only feasible way is to make sure this L2 happened in
* L1 prelogue ONLY i.e. ilink2 is less than a pre-set marker in
* L1 ISR before it switches stack

View File

@ -83,7 +83,7 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
local_flush_tlb_all();
/*
* Above checke for rollover of 8 bit ASID in 32 bit container.
* Above check for rollover of 8 bit ASID in 32 bit container.
* If the container itself wrapped around, set it to a non zero
* "generation" to distinguish from no context
*/

View File

@ -47,7 +47,7 @@
* Page Tables are purely for Linux VM's consumption and the bits below are
* suited to that (uniqueness). Hence some are not implemented in the TLB and
* some have different value in TLB.
* e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible becoz they live in
* e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live in
* seperate PD0 and PD1, which combined forms a translation entry)
* while for PTE perspective, they are 8 and 9 respectively
* with MMU v3: Most bits (except SHARED) represent the exact hardware pos

View File

@ -78,7 +78,7 @@ struct task_struct;
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp)
/*
* Where abouts of Task's sp, fp, blink when it was last seen in kernel mode.
* Where about of Task's sp, fp, blink when it was last seen in kernel mode.
* Look in process.c for details of kernel stack layout
*/
#define TSK_K_ESP(tsk) (tsk->thread.ksp)

View File

@ -86,7 +86,7 @@ static inline const char *arc_platform_smp_cpuinfo(void)
* (1) These insn were introduced only in 4.10 release. So for older released
* support needed.
*
* (2) In a SMP setup, the LLOCK/SCOND atomiticity across CPUs needs to be
* (2) In a SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
* gaurantted by the platform (not something which core handles).
* Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
* disabling for atomicity.

View File

@ -20,11 +20,6 @@
#ifdef CONFIG_ARC_HAS_LLSC
/*
* A normal LLOCK/SCOND based system, w/o need for livelock workaround
*/
#ifndef CONFIG_ARC_STAR_9000923308
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned int val;
@ -238,293 +233,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
smp_mb();
}
#else /* CONFIG_ARC_STAR_9000923308 */
/*
* HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping
* coherency transactions in the SCU. The exclusive line state keeps rotating
* among contenting cores leading to a never ending cycle. So break the cycle
* by deferring the retry of failed exclusive access (SCOND). The actual delay
* needed is function of number of contending cores as well as the unrelated
* coherency traffic from other cores. To keep the code simple, start off with
* small delay of 1 which would suffice most cases and in case of contention
* double the delay. Eventually the delay is sufficient such that the coherency
* pipeline is drained, thus a subsequent exclusive access would succeed.
*/
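In plain C, the deferred-retry scheme described above is just exponential backoff around a failed exclusive store. A rough user-space sketch of the idea, assuming C11 atomics stand in for the llock/scond pair (this is not the kernel code, which does it in inline asm via the SCOND_FAIL_RETRY_* macros below):

#include <stdatomic.h>

static void backoff_add(atomic_uint *ctr, unsigned int i)
{
	unsigned int delay = 1;
	unsigned int old = atomic_load(ctr);

	/* the weak CAS plays the role of llock/scond and may fail spuriously */
	while (!atomic_compare_exchange_weak(ctr, &old, old + i)) {
		for (volatile unsigned int t = delay; t; t--)
			;		/* spin, letting the coherency pipeline drain */
		delay *= 2;		/* double the wait after every failure */
	}
}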
#define SCOND_FAIL_RETRY_VAR_DEF \
unsigned int delay, tmp; \
#define SCOND_FAIL_RETRY_ASM \
" ; --- scond fail delay --- \n" \
" mov %[tmp], %[delay] \n" /* tmp = delay */ \
"2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \
" sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \
" rol %[delay], %[delay] \n" /* delay *= 2 */ \
" b 1b \n" /* start over */ \
" \n" \
"4: ; --- done --- \n" \
#define SCOND_FAIL_RETRY_VARS \
,[delay] "=&r" (delay), [tmp] "=&r" (tmp) \
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned int val;
SCOND_FAIL_RETRY_VAR_DEF;
smp_mb();
__asm__ __volatile__(
"0: mov %[delay], 1 \n"
"1: llock %[val], [%[slock]] \n"
" breq %[val], %[LOCKED], 0b \n" /* spin while LOCKED */
" scond %[LOCKED], [%[slock]] \n" /* acquire */
" bz 4f \n" /* done */
" \n"
SCOND_FAIL_RETRY_ASM
: [val] "=&r" (val)
SCOND_FAIL_RETRY_VARS
: [slock] "r" (&(lock->slock)),
[LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
: "memory", "cc");
smp_mb();
}
/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned int val, got_it = 0;
SCOND_FAIL_RETRY_VAR_DEF;
smp_mb();
__asm__ __volatile__(
"0: mov %[delay], 1 \n"
"1: llock %[val], [%[slock]] \n"
" breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */
" scond %[LOCKED], [%[slock]] \n" /* acquire */
" bz.d 4f \n"
" mov.z %[got_it], 1 \n" /* got it */
" \n"
SCOND_FAIL_RETRY_ASM
: [val] "=&r" (val),
[got_it] "+&r" (got_it)
SCOND_FAIL_RETRY_VARS
: [slock] "r" (&(lock->slock)),
[LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
: "memory", "cc");
smp_mb();
return got_it;
}
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
smp_mb();
lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
smp_mb();
}
/*
* Read-write spinlocks, allowing multiple readers but only one writer.
* Unfair locking as Writers could be starved indefinitely by Reader(s)
*/
static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned int val;
SCOND_FAIL_RETRY_VAR_DEF;
smp_mb();
/*
* zero means writer holds the lock exclusively, deny Reader.
* Otherwise grant lock to first/subseq reader
*
* if (rw->counter > 0) {
* rw->counter--;
* ret = 1;
* }
*/
__asm__ __volatile__(
"0: mov %[delay], 1 \n"
"1: llock %[val], [%[rwlock]] \n"
" brls %[val], %[WR_LOCKED], 0b\n" /* <= 0: spin while write locked */
" sub %[val], %[val], 1 \n" /* reader lock */
" scond %[val], [%[rwlock]] \n"
" bz 4f \n" /* done */
" \n"
SCOND_FAIL_RETRY_ASM
: [val] "=&r" (val)
SCOND_FAIL_RETRY_VARS
: [rwlock] "r" (&(rw->counter)),
[WR_LOCKED] "ir" (0)
: "memory", "cc");
smp_mb();
}
/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
unsigned int val, got_it = 0;
SCOND_FAIL_RETRY_VAR_DEF;
smp_mb();
__asm__ __volatile__(
"0: mov %[delay], 1 \n"
"1: llock %[val], [%[rwlock]] \n"
" brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */
" sub %[val], %[val], 1 \n" /* counter-- */
" scond %[val], [%[rwlock]] \n"
" bz.d 4f \n"
" mov.z %[got_it], 1 \n" /* got it */
" \n"
SCOND_FAIL_RETRY_ASM
: [val] "=&r" (val),
[got_it] "+&r" (got_it)
SCOND_FAIL_RETRY_VARS
: [rwlock] "r" (&(rw->counter)),
[WR_LOCKED] "ir" (0)
: "memory", "cc");
smp_mb();
return got_it;
}
static inline void arch_write_lock(arch_rwlock_t *rw)
{
unsigned int val;
SCOND_FAIL_RETRY_VAR_DEF;
smp_mb();
/*
* If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
* deny writer. Otherwise if unlocked grant to writer
* Hence the claim that Linux rwlocks are unfair to writers.
* (can be starved for an indefinite time by readers).
*
* if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
* rw->counter = 0;
* ret = 1;
* }
*/
__asm__ __volatile__(
"0: mov %[delay], 1 \n"
"1: llock %[val], [%[rwlock]] \n"
" brne %[val], %[UNLOCKED], 0b \n" /* while !UNLOCKED spin */
" mov %[val], %[WR_LOCKED] \n"
" scond %[val], [%[rwlock]] \n"
" bz 4f \n"
" \n"
SCOND_FAIL_RETRY_ASM
: [val] "=&r" (val)
SCOND_FAIL_RETRY_VARS
: [rwlock] "r" (&(rw->counter)),
[UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
[WR_LOCKED] "ir" (0)
: "memory", "cc");
smp_mb();
}
/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
unsigned int val, got_it = 0;
SCOND_FAIL_RETRY_VAR_DEF;
smp_mb();
__asm__ __volatile__(
"0: mov %[delay], 1 \n"
"1: llock %[val], [%[rwlock]] \n"
" brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */
" mov %[val], %[WR_LOCKED] \n"
" scond %[val], [%[rwlock]] \n"
" bz.d 4f \n"
" mov.z %[got_it], 1 \n" /* got it */
" \n"
SCOND_FAIL_RETRY_ASM
: [val] "=&r" (val),
[got_it] "+&r" (got_it)
SCOND_FAIL_RETRY_VARS
: [rwlock] "r" (&(rw->counter)),
[UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
[WR_LOCKED] "ir" (0)
: "memory", "cc");
smp_mb();
return got_it;
}
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
unsigned int val;
smp_mb();
/*
* rw->counter++;
*/
__asm__ __volatile__(
"1: llock %[val], [%[rwlock]] \n"
" add %[val], %[val], 1 \n"
" scond %[val], [%[rwlock]] \n"
" bnz 1b \n"
" \n"
: [val] "=&r" (val)
: [rwlock] "r" (&(rw->counter))
: "memory", "cc");
smp_mb();
}
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
unsigned int val;
smp_mb();
/*
* rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
*/
__asm__ __volatile__(
"1: llock %[val], [%[rwlock]] \n"
" scond %[UNLOCKED], [%[rwlock]]\n"
" bnz 1b \n"
" \n"
: [val] "=&r" (val)
: [rwlock] "r" (&(rw->counter)),
[UNLOCKED] "r" (__ARCH_RW_LOCK_UNLOCKED__)
: "memory", "cc");
smp_mb();
}
#undef SCOND_FAIL_RETRY_VAR_DEF
#undef SCOND_FAIL_RETRY_ASM
#undef SCOND_FAIL_RETRY_VARS
#endif /* CONFIG_ARC_STAR_9000923308 */
#else /* !CONFIG_ARC_HAS_LLSC */
static inline void arch_spin_lock(arch_spinlock_t *lock)

View File

@ -103,7 +103,7 @@ static inline __attribute_const__ struct thread_info *current_thread_info(void)
/*
* _TIF_ALLWORK_MASK includes SYSCALL_TRACE, but we don't need it.
* SYSCALL_TRACE is anways seperately/unconditionally tested right after a
* SYSCALL_TRACE is anyway seperately/unconditionally tested right after a
* syscall, so all that reamins to be tested is _TIF_WORK_MASK
*/

View File

@ -32,7 +32,7 @@
#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
/*
* Algorthmically, for __user_ok() we want do:
* Algorithmically, for __user_ok() we want do:
* (start < TASK_SIZE) && (start+len < TASK_SIZE)
* where TASK_SIZE could either be retrieved from thread_info->addr_limit or
* emitted directly in code.

View File

@ -74,7 +74,7 @@
__tmp ^ __in; \
})
#elif (ARC_BSWAP_TYPE == 2) /* Custom single cycle bwap instruction */
#elif (ARC_BSWAP_TYPE == 2) /* Custom single cycle bswap instruction */
#define __arch_swab32(x) \
({ \

View File

@ -91,27 +91,13 @@ VECTOR mem_service ; 0x8, Mem exception (0x1)
VECTOR instr_service ; 0x10, Instrn Error (0x2)
; ******************** Device ISRs **********************
#ifdef CONFIG_ARC_IRQ3_LV2
#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
VECTOR handle_interrupt_level2
#else
VECTOR handle_interrupt_level1
#endif
VECTOR handle_interrupt_level1
#ifdef CONFIG_ARC_IRQ5_LV2
VECTOR handle_interrupt_level2
#else
VECTOR handle_interrupt_level1
#endif
#ifdef CONFIG_ARC_IRQ6_LV2
VECTOR handle_interrupt_level2
#else
VECTOR handle_interrupt_level1
#endif
.rept 25
.rept 28
VECTOR handle_interrupt_level1 ; Other devices
.endr

View File

@ -28,10 +28,8 @@ void arc_init_IRQ(void)
{
int level_mask = 0;
/* setup any high priority Interrupts (Level2 in ARCompact jargon) */
level_mask |= IS_ENABLED(CONFIG_ARC_IRQ3_LV2) << 3;
level_mask |= IS_ENABLED(CONFIG_ARC_IRQ5_LV2) << 5;
level_mask |= IS_ENABLED(CONFIG_ARC_IRQ6_LV2) << 6;
/* Is timer high priority Interrupt (Level2 in ARCompact jargon) */
level_mask |= IS_ENABLED(CONFIG_ARC_COMPACT_IRQ_LEVELS) << TIMER0_IRQ;
/*
* Write to register, even if no LV2 IRQs configured to reset it

View File

@ -108,7 +108,7 @@ static void arc_perf_event_update(struct perf_event *event,
int64_t delta = new_raw_count - prev_raw_count;
/*
* We don't afaraid of hwc->prev_count changing beneath our feet
* We aren't afraid of hwc->prev_count changing beneath our feet
* because there's no way for us to re-enter this function anytime.
*/
local64_set(&hwc->prev_count, new_raw_count);

View File

@ -392,7 +392,7 @@ void __init setup_arch(char **cmdline_p)
/*
* If we are here, it is established that @uboot_arg didn't
* point to DT blob. Instead if u-boot says it is cmdline,
* Appent to embedded DT cmdline.
* append to embedded DT cmdline.
* setup_machine_fdt() would have populated @boot_command_line
*/
if (uboot_tag == 1) {

View File

@ -34,7 +34,7 @@
* -ViXS were still seeing crashes when using insmod to load drivers.
* It turned out that the code to change Execute permssions for TLB entries
* of user was not guarded for interrupts (mod_tlb_permission)
* This was cauing TLB entries to be overwritten on unrelated indexes
* This was causing TLB entries to be overwritten on unrelated indexes
*
* Vineetg: July 15th 2008: Bug #94183
* -Exception happens in Delay slot of a JMP, and before user space resumes,

View File

@ -276,7 +276,7 @@ static int tlb_stats_open(struct inode *inode, struct file *file)
return 0;
}
/* called on user read(): display the couters */
/* called on user read(): display the counters */
static ssize_t tlb_stats_output(struct file *file, /* file descriptor */
char __user *user_buf, /* user buffer */
size_t len, /* length of buffer */

View File

@ -215,7 +215,7 @@ slc_chk:
* ------------------
* This ver of MMU supports variable page sizes (1k-16k): although Linux will
* only support 8k (default), 16k and 4k.
* However from hardware perspective, smaller page sizes aggrevate aliasing
* However from hardware perspective, smaller page sizes aggravate aliasing
* meaning more vaddr bits needed to disambiguate the cache-line-op ;
* the existing scheme of piggybacking won't work for certain configurations.
* Two new registers IC_PTAG and DC_PTAG inttoduced.
@ -302,7 +302,7 @@ void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
/*
* This is technically for MMU v4, using the MMU v3 programming model
* Special work for HS38 aliasing I-cache configuratino with PAE40
* Special work for HS38 aliasing I-cache configuration with PAE40
* - upper 8 bits of paddr need to be written into PTAG_HI
* - (and needs to be written before the lower 32 bits)
* Note that PTAG_HI is hoisted outside the line loop
@ -936,7 +936,7 @@ void arc_cache_init(void)
ic->ver, CONFIG_ARC_MMU_VER);
/*
* In MMU v4 (HS38x) the alising icache config uses IVIL/PTAG
* In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
* pair to provide vaddr/paddr respectively, just as in MMU v3
*/
if (is_isa_arcv2() && ic->alias)

View File

@ -10,7 +10,7 @@
* DMA Coherent API Notes
*
* I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
* implemented by accessintg it using a kernel virtual address, with
* implemented by accessing it using a kernel virtual address, with
* Cache bit off in the TLB entry.
*
* The default DMA address == Phy address which is 0x8000_0000 based.

View File

@ -547,7 +547,7 @@ static struct clk *ve_spc_clk_register(struct device *cpu_dev)
init.name = dev_name(cpu_dev);
init.ops = &clk_spc_ops;
init.flags = CLK_IS_ROOT | CLK_GET_RATE_NOCACHE;
init.flags = CLK_GET_RATE_NOCACHE;
init.num_parents = 0;
return devm_clk_register(cpu_dev, &spc->hw);

View File

@ -221,7 +221,7 @@ static bool soc_has_mclk_mux0_canin(void)
/* convenience wrappers around the common clk API */
static inline struct clk *mpc512x_clk_fixed(const char *name, int rate)
{
return clk_register_fixed_rate(NULL, name, NULL, CLK_IS_ROOT, rate);
return clk_register_fixed_rate(NULL, name, NULL, 0, rate);
}
static inline struct clk *mpc512x_clk_factor(

View File

@ -172,7 +172,7 @@ static int spufs_arch_write_note(struct spu_context *ctx, int i,
if (rc < 0)
goto out;
skip = roundup(cprm->file->f_pos - total + sz, 4) - cprm->file->f_pos;
skip = roundup(cprm->pos - total + sz, 4) - cprm->pos;
if (!dump_skip(cprm, skip))
goto Eio;
out:

View File

@ -162,6 +162,9 @@ isoimage: $(obj)/bzImage
for i in lib lib64 share end ; do \
if [ -f /usr/$$i/syslinux/isolinux.bin ] ; then \
cp /usr/$$i/syslinux/isolinux.bin $(obj)/isoimage ; \
if [ -f /usr/$$i/syslinux/ldlinux.c32 ]; then \
cp /usr/$$i/syslinux/ldlinux.c32 $(obj)/isoimage ; \
fi ; \
break ; \
fi ; \
if [ $$i = end ] ; then exit 1 ; fi ; \

View File

@ -714,7 +714,7 @@ static void cleanup_rapl_pmus(void)
int i;
for (i = 0; i < rapl_pmus->maxpkg; i++)
kfree(rapl_pmus->pmus + i);
kfree(rapl_pmus->pmus[i]);
kfree(rapl_pmus);
}
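The subtle part of this fix: assuming rapl_pmus->pmus is an array of pointers to individually allocated per-package objects (which the corrected line implies), "pmus + i" is the address of the i-th slot inside the rapl_pmus allocation itself, never something returned by the allocator, so freeing it corrupts the heap and leaks the real entries. A minimal user-space analogue of the corrected loop:

#include <stdlib.h>

struct pkg { int id; };

struct pmus_box {
	int maxpkg;
	struct pkg *pmus[];	/* each entry is malloc'd separately */
};

static void cleanup(struct pmus_box *box)
{
	for (int i = 0; i < box->maxpkg; i++)
		free(box->pmus[i]);	/* frees the i-th object */
		/* free(box->pmus + i) would hand the allocator a pointer into box */
	free(box);
}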

View File

@ -2868,27 +2868,10 @@ static struct intel_uncore_type bdx_uncore_cbox = {
.format_group = &hswep_uncore_cbox_format_group,
};
static struct intel_uncore_type bdx_uncore_sbox = {
.name = "sbox",
.num_counters = 4,
.num_boxes = 4,
.perf_ctr_bits = 48,
.event_ctl = HSWEP_S0_MSR_PMON_CTL0,
.perf_ctr = HSWEP_S0_MSR_PMON_CTR0,
.event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
.box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL,
.msr_offset = HSWEP_SBOX_MSR_OFFSET,
.ops = &hswep_uncore_sbox_msr_ops,
.format_group = &hswep_uncore_sbox_format_group,
};
#define BDX_MSR_UNCORE_SBOX 3
static struct intel_uncore_type *bdx_msr_uncores[] = {
&bdx_uncore_ubox,
&bdx_uncore_cbox,
&hswep_uncore_pcu,
&bdx_uncore_sbox,
NULL,
};
@ -2897,10 +2880,6 @@ void bdx_uncore_cpu_init(void)
if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
uncore_msr_uncores = bdx_msr_uncores;
/* BDX-DE doesn't have SBOX */
if (boot_cpu_data.x86_model == 86)
uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
}
static struct intel_uncore_type bdx_uncore_ha = {

View File

@ -0,0 +1,68 @@
#ifndef _ASM_X86_INTEL_FAMILY_H
#define _ASM_X86_INTEL_FAMILY_H
/*
* "Big Core" Processors (Branded as Core, Xeon, etc...)
*
* The "_X" parts are generally the EP and EX Xeons, or the
* "Extreme" ones, like Broadwell-E.
*
* Things ending in "2" are usually because we have no better
* name for them. There's no processor called "WESTMERE2".
*/
#define INTEL_FAM6_CORE_YONAH 0x0E
#define INTEL_FAM6_CORE2_MEROM 0x0F
#define INTEL_FAM6_CORE2_MEROM_L 0x16
#define INTEL_FAM6_CORE2_PENRYN 0x17
#define INTEL_FAM6_CORE2_DUNNINGTON 0x1D
#define INTEL_FAM6_NEHALEM 0x1E
#define INTEL_FAM6_NEHALEM_EP 0x1A
#define INTEL_FAM6_NEHALEM_EX 0x2E
#define INTEL_FAM6_WESTMERE 0x25
#define INTEL_FAM6_WESTMERE2 0x1F
#define INTEL_FAM6_WESTMERE_EP 0x2C
#define INTEL_FAM6_WESTMERE_EX 0x2F
#define INTEL_FAM6_SANDYBRIDGE 0x2A
#define INTEL_FAM6_SANDYBRIDGE_X 0x2D
#define INTEL_FAM6_IVYBRIDGE 0x3A
#define INTEL_FAM6_IVYBRIDGE_X 0x3E
#define INTEL_FAM6_HASWELL_CORE 0x3C
#define INTEL_FAM6_HASWELL_X 0x3F
#define INTEL_FAM6_HASWELL_ULT 0x45
#define INTEL_FAM6_HASWELL_GT3E 0x46
#define INTEL_FAM6_BROADWELL_CORE 0x3D
#define INTEL_FAM6_BROADWELL_XEON_D 0x56
#define INTEL_FAM6_BROADWELL_GT3E 0x47
#define INTEL_FAM6_BROADWELL_X 0x4F
#define INTEL_FAM6_SKYLAKE_MOBILE 0x4E
#define INTEL_FAM6_SKYLAKE_DESKTOP 0x5E
#define INTEL_FAM6_SKYLAKE_X 0x55
#define INTEL_FAM6_KABYLAKE_MOBILE 0x8E
#define INTEL_FAM6_KABYLAKE_DESKTOP 0x9E
/* "Small Core" Processors (Atom) */
#define INTEL_FAM6_ATOM_PINEVIEW 0x1C
#define INTEL_FAM6_ATOM_LINCROFT 0x26
#define INTEL_FAM6_ATOM_PENWELL 0x27
#define INTEL_FAM6_ATOM_CLOVERVIEW 0x35
#define INTEL_FAM6_ATOM_CEDARVIEW 0x36
#define INTEL_FAM6_ATOM_SILVERMONT1 0x37 /* BayTrail/BYT / Valleyview */
#define INTEL_FAM6_ATOM_SILVERMONT2 0x4D /* Avaton/Rangely */
#define INTEL_FAM6_ATOM_AIRMONT 0x4C /* CherryTrail / Braswell */
#define INTEL_FAM6_ATOM_MERRIFIELD1 0x4A /* Tangier */
#define INTEL_FAM6_ATOM_MERRIFIELD2 0x5A /* Annidale */
#define INTEL_FAM6_ATOM_GOLDMONT 0x5C
#define INTEL_FAM6_ATOM_DENVERTON 0x5F /* Goldmont Microserver */
/* Xeon Phi */
#define INTEL_FAM6_XEON_PHI_KNL 0x57 /* Knights Landing */
#endif /* _ASM_X86_INTEL_FAMILY_H */
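These constants are meant to replace raw model numbers at call sites. A sketch of the usual consumption pattern, mirroring the ICPU() match table that sb_edac gains later in this merge; the driver name and table contents here are invented for illustration:

#include <linux/errno.h>
#include <linux/init.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>

static const struct x86_cpu_id example_cpu_ids[] = {
	/* vendor, family, model, feature, driver_data */
	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_HASWELL_X,   0, 0 },
	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_BROADWELL_X, 0, 0 },
	{ }
};

static int __init example_init(void)
{
	if (!x86_match_cpu(example_cpu_ids))
		return -ENODEV;	/* not one of the supported models */
	/* ... */
	return 0;
}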

View File

@ -2588,8 +2588,8 @@ static struct resource * __init ioapic_setup_resources(void)
res[num].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i);
mem += IOAPIC_RESOURCE_NAME_SIZE;
ioapics[i].iomem_res = &res[num];
num++;
ioapics[i].iomem_res = res;
}
ioapic_resources = res;

View File

@ -674,14 +674,14 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
u64 value;
/* re-enable TopologyExtensions if switched off by BIOS */
if ((c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
if ((c->x86_model >= 0x10) && (c->x86_model <= 0x6f) &&
!cpu_has(c, X86_FEATURE_TOPOEXT)) {
if (msr_set_bit(0xc0011005, 54) > 0) {
rdmsrl(0xc0011005, value);
if (value & BIT_64(54)) {
set_cpu_cap(c, X86_FEATURE_TOPOEXT);
pr_info(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
}
}
}

View File

@ -96,6 +96,12 @@ static inline void cond_local_irq_disable(struct pt_regs *regs)
local_irq_disable();
}
/*
* In IST context, we explicitly disable preemption. This serves two
* purposes: it makes it much less likely that we would accidentally
* schedule in IST context and it will force a warning if we somehow
* manage to schedule by accident.
*/
void ist_enter(struct pt_regs *regs)
{
if (user_mode(regs)) {
@ -110,13 +116,7 @@ void ist_enter(struct pt_regs *regs)
rcu_nmi_enter();
}
/*
* We are atomic because we're on the IST stack; or we're on
* x86_32, in which case we still shouldn't schedule; or we're
* on x86_64 and entered from user mode, in which case we're
* still atomic unless ist_begin_non_atomic is called.
*/
preempt_count_add(HARDIRQ_OFFSET);
preempt_disable();
/* This code is a bit fragile. Test it. */
RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work");
@ -124,7 +124,7 @@ void ist_enter(struct pt_regs *regs)
void ist_exit(struct pt_regs *regs)
{
preempt_count_sub(HARDIRQ_OFFSET);
preempt_enable_no_resched();
if (!user_mode(regs))
rcu_nmi_exit();
@ -155,7 +155,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
BUG_ON((unsigned long)(current_top_of_stack() -
current_stack_pointer()) >= THREAD_SIZE);
preempt_count_sub(HARDIRQ_OFFSET);
preempt_enable_no_resched();
}
/**
@ -165,7 +165,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
*/
void ist_end_non_atomic(void)
{
preempt_count_add(HARDIRQ_OFFSET);
preempt_disable();
}
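For orientation, ist_enter()/ist_exit() bracket the body of IST exception handlers, and ist_begin_non_atomic()/ist_end_non_atomic() open a schedulable window inside one. A schematic caller (the handler name and body are illustrative, not quoted from this file):

dotraplinkage void do_example_ist_trap(struct pt_regs *regs, long error_code)
{
	ist_enter(regs);	/* preemption now disabled; RCU told about the NMI-like entry */

	/*
	 * handle the exception; ist_begin_non_atomic()/ist_end_non_atomic()
	 * may wrap any work that needs to sleep when we came from user mode
	 */

	ist_exit(regs);
}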
static nokprobe_inline int

View File

@ -1051,7 +1051,7 @@ static int __init acpi_bus_init(void)
* Maybe EC region is required at bus_scan/acpi_get_devices. So it
* is necessary to enable it as early as possible.
*/
acpi_boot_ec_enable();
acpi_ec_dsdt_probe();
printk(KERN_INFO PREFIX "Interpreter enabled\n");

View File

@ -1446,10 +1446,30 @@ ec_parse_io_ports(struct acpi_resource *resource, void *context)
return AE_OK;
}
int __init acpi_boot_ec_enable(void)
static const struct acpi_device_id ec_device_ids[] = {
{"PNP0C09", 0},
{"", 0},
};
int __init acpi_ec_dsdt_probe(void)
{
if (!boot_ec)
acpi_status status;
if (boot_ec)
return 0;
/*
* Finding EC from DSDT if there is no ECDT EC available. When this
* function is invoked, ACPI tables have been fully loaded, we can
* walk namespace now.
*/
boot_ec = make_acpi_ec();
if (!boot_ec)
return -ENOMEM;
status = acpi_get_devices(ec_device_ids[0].id,
ec_parse_device, boot_ec, NULL);
if (ACPI_FAILURE(status) || !boot_ec->handle)
return -ENODEV;
if (!ec_install_handlers(boot_ec)) {
first_ec = boot_ec;
return 0;
@ -1457,11 +1477,6 @@ int __init acpi_boot_ec_enable(void)
return -EFAULT;
}
static const struct acpi_device_id ec_device_ids[] = {
{"PNP0C09", 0},
{"", 0},
};
#if 0
/*
* Some EC firmware variations refuses to respond QR_EC when SCI_EVT is not

View File

@ -181,7 +181,7 @@ typedef int (*acpi_ec_query_func) (void *data);
int acpi_ec_init(void);
int acpi_ec_ecdt_probe(void);
int acpi_boot_ec_enable(void);
int acpi_ec_dsdt_probe(void);
void acpi_ec_block_transactions(void);
void acpi_ec_unblock_transactions(void);
void acpi_ec_unblock_transactions_early(void);

View File

@ -175,6 +175,7 @@ config COMMON_CLK_KEYSTONE
config COMMON_CLK_NXP
def_bool COMMON_CLK && (ARCH_LPC18XX || ARCH_LPC32XX)
select REGMAP_MMIO if ARCH_LPC32XX
select MFD_SYSCON if ARCH_LPC18XX
---help---
Support for clock providers on NXP platforms.

View File

@ -180,15 +180,15 @@ static int pic32mzda_clk_probe(struct platform_device *pdev)
/* register fixed rate clocks */
clks[POSCCLK] = clk_register_fixed_rate(&pdev->dev, "posc_clk", NULL,
CLK_IS_ROOT, 24000000);
0, 24000000);
clks[FRCCLK] = clk_register_fixed_rate(&pdev->dev, "frc_clk", NULL,
CLK_IS_ROOT, 8000000);
0, 8000000);
clks[BFRCCLK] = clk_register_fixed_rate(&pdev->dev, "bfrc_clk", NULL,
CLK_IS_ROOT, 8000000);
0, 8000000);
clks[LPRCCLK] = clk_register_fixed_rate(&pdev->dev, "lprc_clk", NULL,
CLK_IS_ROOT, 32000);
0, 32000);
clks[UPLLCLK] = clk_register_fixed_rate(&pdev->dev, "usbphy_clk", NULL,
CLK_IS_ROOT, 24000000);
0, 24000000);
/* fixed rate (optional) clock */
if (of_find_property(np, "microchip,pic32mzda-sosc", NULL)) {
pr_info("pic32-clk: dt requests SOSC.\n");

View File

@ -1460,6 +1460,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
intel_pstate_clear_update_util_hook(policy->cpu);
pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
policy->cpuinfo.max_freq, policy->max);
cpu = all_cpu_data[0];
if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
policy->max < policy->cpuinfo.max_freq &&
@ -1495,13 +1498,13 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
limits->max_sysfs_pct);
limits->max_perf_pct = max(limits->min_policy_pct,
limits->max_perf_pct);
limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
/* Make sure min_perf_pct <= max_perf_pct */
limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
limits->min_perf = div_fp(limits->min_perf_pct, 100);
limits->max_perf = div_fp(limits->max_perf_pct, 100);
limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
out:
intel_pstate_set_update_util_hook(policy->cpu);
@ -1558,8 +1561,11 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
/* cpuinfo and default policy values */
policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
policy->cpuinfo.max_freq =
cpu->pstate.turbo_pstate * cpu->pstate.scaling;
update_turbo_state();
policy->cpuinfo.max_freq = limits->turbo_disabled ?
cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
policy->cpuinfo.max_freq *= cpu->pstate.scaling;
intel_pstate_init_acpi_perf_limits(policy);
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
cpumask_set_cpu(policy->cpu, policy->cpus);
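As a worked example of the new cpuinfo.max_freq computation: assuming pstate.scaling of 100000 kHz (typical for Core parts), max_pstate = 30 and turbo_pstate = 35, the policy now advertises 3,500,000 kHz when turbo is available but only 3,000,000 kHz when limits->turbo_disabled is set, instead of unconditionally reporting the turbo value as before.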

View File

@ -565,7 +565,8 @@ void edac_mc_reset_delay_period(unsigned long value)
list_for_each(item, &mc_devices) {
mci = list_entry(item, struct mem_ctl_info, link);
edac_mod_work(&mci->work, value);
if (mci->op_state == OP_RUNNING_POLL)
edac_mod_work(&mci->work, value);
}
mutex_unlock(&mem_ctls_mutex);
}

View File

@ -239,8 +239,11 @@ static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = {
{ 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc },
};
#define RIR_RNK_TGT(reg) GET_BITFIELD(reg, 16, 19)
#define RIR_OFFSET(reg) GET_BITFIELD(reg, 2, 14)
#define RIR_RNK_TGT(type, reg) (((type) == BROADWELL) ? \
GET_BITFIELD(reg, 20, 23) : GET_BITFIELD(reg, 16, 19))
#define RIR_OFFSET(type, reg) (((type) == HASWELL || (type) == BROADWELL) ? \
GET_BITFIELD(reg, 2, 15) : GET_BITFIELD(reg, 2, 14))
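Assuming GET_BITFIELD(reg, lo, hi) extracts bits lo through hi inclusive, as it is defined elsewhere in sb_edac.c, the update widens the rank-interleave offset to bits 2-15 on Haswell and Broadwell and moves the Broadwell rank target to bits 20-23. A standalone restatement of that helper for reference:

#include <stdint.h>

/* sketch of sb_edac's GET_BITFIELD(): bits lo..hi (inclusive) of v */
static inline uint64_t get_bitfield(uint64_t v, unsigned int lo, unsigned int hi)
{
	return (v >> lo) & ((1ULL << (hi - lo + 1)) - 1);
}
/* e.g. Broadwell rank target: get_bitfield(reg, 20, 23) */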
/* Device 16, functions 2-7 */
@ -326,6 +329,7 @@ struct pci_id_descr {
struct pci_id_table {
const struct pci_id_descr *descr;
int n_devs;
enum type type;
};
struct sbridge_dev {
@ -394,9 +398,14 @@ static const struct pci_id_descr pci_dev_descr_sbridge[] = {
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR, 0) },
};
#define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) }
#define PCI_ID_TABLE_ENTRY(A, T) { \
.descr = A, \
.n_devs = ARRAY_SIZE(A), \
.type = T \
}
static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge),
PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge, SANDY_BRIDGE),
{0,} /* 0 terminated list. */
};
@ -463,7 +472,7 @@ static const struct pci_id_descr pci_dev_descr_ibridge[] = {
};
static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge),
PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge, IVY_BRIDGE),
{0,} /* 0 terminated list. */
};
@ -536,7 +545,7 @@ static const struct pci_id_descr pci_dev_descr_haswell[] = {
};
static const struct pci_id_table pci_dev_descr_haswell_table[] = {
PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell),
PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell, HASWELL),
{0,} /* 0 terminated list. */
};
@ -580,7 +589,7 @@ static const struct pci_id_descr pci_dev_descr_knl[] = {
};
static const struct pci_id_table pci_dev_descr_knl_table[] = {
PCI_ID_TABLE_ENTRY(pci_dev_descr_knl),
PCI_ID_TABLE_ENTRY(pci_dev_descr_knl, KNIGHTS_LANDING),
{0,}
};
@ -648,7 +657,7 @@ static const struct pci_id_descr pci_dev_descr_broadwell[] = {
};
static const struct pci_id_table pci_dev_descr_broadwell_table[] = {
PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell),
PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell, BROADWELL),
{0,} /* 0 terminated list. */
};
@ -1894,14 +1903,14 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
pci_read_config_dword(pvt->pci_tad[i],
rir_offset[j][k],
&reg);
tmp_mb = RIR_OFFSET(reg) << 6;
tmp_mb = RIR_OFFSET(pvt->info.type, reg) << 6;
gb = div_u64_rem(tmp_mb, 1024, &mb);
edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
i, j, k,
gb, (mb*1000)/1024,
((u64)tmp_mb) << 20L,
(u32)RIR_RNK_TGT(reg),
(u32)RIR_RNK_TGT(pvt->info.type, reg),
reg);
}
}
@ -2234,7 +2243,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
pci_read_config_dword(pvt->pci_tad[ch_add + base_ch],
rir_offset[n_rir][idx],
&reg);
*rank = RIR_RNK_TGT(reg);
*rank = RIR_RNK_TGT(pvt->info.type, reg);
edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
n_rir,
@ -3357,12 +3366,12 @@ fail0:
#define ICPU(model, table) \
{ X86_VENDOR_INTEL, 6, model, 0, (unsigned long)&table }
/* Order here must match "enum type" */
static const struct x86_cpu_id sbridge_cpuids[] = {
ICPU(0x2d, pci_dev_descr_sbridge_table), /* SANDY_BRIDGE */
ICPU(0x3e, pci_dev_descr_ibridge_table), /* IVY_BRIDGE */
ICPU(0x3f, pci_dev_descr_haswell_table), /* HASWELL */
ICPU(0x4f, pci_dev_descr_broadwell_table), /* BROADWELL */
ICPU(0x56, pci_dev_descr_broadwell_table), /* BROADWELL-DE */
ICPU(0x57, pci_dev_descr_knl_table), /* KNIGHTS_LANDING */
{ }
};
@ -3398,7 +3407,7 @@ static int sbridge_probe(const struct x86_cpu_id *id)
mc, mc + 1, num_mc);
sbridge_dev->mc = mc++;
rc = sbridge_register_mci(sbridge_dev, id - sbridge_cpuids);
rc = sbridge_register_mci(sbridge_dev, ptable->type);
if (unlikely(rc < 0))
goto fail1;
}

View File

@ -174,6 +174,7 @@ static __init void reserve_regions(void)
{
efi_memory_desc_t *md;
u64 paddr, npages, size;
int resv;
if (efi_enabled(EFI_DBG))
pr_info("Processing EFI memory map:\n");
@ -190,12 +191,14 @@ static __init void reserve_regions(void)
paddr = md->phys_addr;
npages = md->num_pages;
resv = is_reserve_region(md);
if (efi_enabled(EFI_DBG)) {
char buf[64];
pr_info(" 0x%012llx-0x%012llx %s",
pr_info(" 0x%012llx-0x%012llx %s%s\n",
paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1,
efi_md_typeattr_format(buf, sizeof(buf), md));
efi_md_typeattr_format(buf, sizeof(buf), md),
resv ? "*" : "");
}
memrange_efi_to_native(&paddr, &npages);
@ -204,14 +207,9 @@ static __init void reserve_regions(void)
if (is_normal_ram(md))
early_init_dt_add_memory_arch(paddr, size);
if (is_reserve_region(md)) {
if (resv)
memblock_mark_nomap(paddr, size);
if (efi_enabled(EFI_DBG))
pr_cont("*");
}
if (efi_enabled(EFI_DBG))
pr_cont("\n");
}
set_bit(EFI_MEMMAP, &efi.flags);

View File

@ -799,6 +799,7 @@ struct amdgpu_ring {
unsigned cond_exe_offs;
u64 cond_exe_gpu_addr;
volatile u32 *cond_exe_cpu_addr;
int vmid;
};
/*
@ -936,7 +937,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr,
uint32_t gds_base, uint32_t gds_size,
uint32_t gws_base, uint32_t gws_size,
uint32_t oa_base, uint32_t oa_size);
uint32_t oa_base, uint32_t oa_size,
bool vmid_switch);
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,

View File

@ -696,6 +696,17 @@ static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
return result;
}
static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type)
{
CGS_FUNC_ADEV;
if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) {
release_firmware(adev->pm.fw);
return 0;
}
/* cannot release other firmware because they are not created by cgs */
return -EINVAL;
}
static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
enum cgs_ucode_id type,
struct cgs_firmware_info *info)
@ -1125,6 +1136,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
amdgpu_cgs_pm_query_clock_limits,
amdgpu_cgs_set_camera_voltages,
amdgpu_cgs_get_firmware_info,
amdgpu_cgs_rel_firmware,
amdgpu_cgs_set_powergating_state,
amdgpu_cgs_set_clockgating_state,
amdgpu_cgs_get_active_displays_info,

View File

@ -827,8 +827,10 @@ static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
*/
static void amdgpu_atombios_fini(struct amdgpu_device *adev)
{
if (adev->mode_info.atom_context)
if (adev->mode_info.atom_context) {
kfree(adev->mode_info.atom_context->scratch);
kfree(adev->mode_info.atom_context->iio);
}
kfree(adev->mode_info.atom_context);
adev->mode_info.atom_context = NULL;
kfree(adev->mode_info.atom_card_info);
@ -1325,6 +1327,11 @@ static int amdgpu_fini(struct amdgpu_device *adev)
adev->ip_block_status[i].valid = false;
}
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (adev->ip_blocks[i].funcs->late_fini)
adev->ip_blocks[i].funcs->late_fini((void *)adev);
}
return 0;
}
@ -1513,8 +1520,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
amdgpu_atombios_has_gpu_virtualization_table(adev);
/* Post card if necessary */
if (!amdgpu_card_posted(adev) ||
adev->virtualization.supports_sr_iov) {
if (!amdgpu_card_posted(adev)) {
if (!adev->bios) {
dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
return -EINVAL;

View File

@ -122,6 +122,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
bool skip_preamble, need_ctx_switch;
unsigned patch_offset = ~0;
struct amdgpu_vm *vm;
int vmid = 0, old_vmid = ring->vmid;
struct fence *hwf;
uint64_t ctx;
@ -135,9 +136,11 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
if (job) {
vm = job->vm;
ctx = job->ctx;
vmid = job->vm_id;
} else {
vm = NULL;
ctx = 0;
vmid = 0;
}
if (!ring->ready) {
@ -163,7 +166,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
r = amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr,
job->gds_base, job->gds_size,
job->gws_base, job->gws_size,
job->oa_base, job->oa_size);
job->oa_base, job->oa_size,
(ring->current_ctx == ctx) && (old_vmid != vmid));
if (r) {
amdgpu_ring_undo(ring);
return r;
@ -180,7 +184,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
need_ctx_switch = ring->current_ctx != ctx;
for (i = 0; i < num_ibs; ++i) {
ib = &ibs[i];
/* drop preamble IBs if we don't have a context switch */
if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && skip_preamble)
continue;
@ -188,6 +191,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0,
need_ctx_switch);
need_ctx_switch = false;
ring->vmid = vmid;
}
if (ring->funcs->emit_hdp_invalidate)
@ -198,6 +202,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
dev_err(adev->dev, "failed to emit fence (%d)\n", r);
if (job && job->vm_id)
amdgpu_vm_reset_id(adev, job->vm_id);
ring->vmid = old_vmid;
amdgpu_ring_undo(ring);
return r;
}

View File

@ -183,13 +183,6 @@ static int amdgpu_pp_sw_fini(void *handle)
if (ret)
return ret;
#ifdef CONFIG_DRM_AMD_POWERPLAY
if (adev->pp_enabled) {
amdgpu_pm_sysfs_fini(adev);
amd_powerplay_fini(adev->powerplay.pp_handle);
}
#endif
return ret;
}
@ -223,6 +216,22 @@ static int amdgpu_pp_hw_fini(void *handle)
return ret;
}
static void amdgpu_pp_late_fini(void *handle)
{
#ifdef CONFIG_DRM_AMD_POWERPLAY
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->pp_enabled) {
amdgpu_pm_sysfs_fini(adev);
amd_powerplay_fini(adev->powerplay.pp_handle);
}
if (adev->powerplay.ip_funcs->late_fini)
adev->powerplay.ip_funcs->late_fini(
adev->powerplay.pp_handle);
#endif
}
static int amdgpu_pp_suspend(void *handle)
{
int ret = 0;
@ -311,6 +320,7 @@ const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
.sw_fini = amdgpu_pp_sw_fini,
.hw_init = amdgpu_pp_hw_init,
.hw_fini = amdgpu_pp_hw_fini,
.late_fini = amdgpu_pp_late_fini,
.suspend = amdgpu_pp_suspend,
.resume = amdgpu_pp_resume,
.is_idle = amdgpu_pp_is_idle,

View File

@ -343,6 +343,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
ring->ring = NULL;
ring->ring_obj = NULL;
amdgpu_wb_free(ring->adev, ring->cond_exe_offs);
amdgpu_wb_free(ring->adev, ring->fence_offs);
amdgpu_wb_free(ring->adev, ring->rptr_offs);
amdgpu_wb_free(ring->adev, ring->wptr_offs);

View File

@ -115,6 +115,7 @@ int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
return r;
}
r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
memset(sa_manager->cpu_ptr, 0, sa_manager->size);
amdgpu_bo_unreserve(sa_manager->bo);
return r;
}

View File

@ -253,19 +253,20 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
int r;
if (adev->uvd.vcpu_bo == NULL)
return 0;
kfree(adev->uvd.saved_bo);
amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);
r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
if (!r) {
amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
amdgpu_bo_unpin(adev->uvd.vcpu_bo);
amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
}
if (adev->uvd.vcpu_bo) {
r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
if (!r) {
amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
amdgpu_bo_unpin(adev->uvd.vcpu_bo);
amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
}
amdgpu_bo_unref(&adev->uvd.vcpu_bo);
amdgpu_bo_unref(&adev->uvd.vcpu_bo);
}
amdgpu_ring_fini(&adev->uvd.ring);

View File

@ -298,7 +298,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr,
uint32_t gds_base, uint32_t gds_size,
uint32_t gws_base, uint32_t gws_size,
uint32_t oa_base, uint32_t oa_size)
uint32_t oa_base, uint32_t oa_size,
bool vmid_switch)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
@ -312,8 +313,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring,
int r;
if (ring->funcs->emit_pipeline_sync && (
pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed ||
ring->type == AMDGPU_RING_TYPE_COMPUTE))
pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed || vmid_switch))
amdgpu_ring_emit_pipeline_sync(ring);
if (ring->funcs->emit_vm_flush &&

View File

@ -6221,6 +6221,9 @@ static int ci_dpm_sw_fini(void *handle)
ci_dpm_fini(adev);
mutex_unlock(&adev->pm.mutex);
release_firmware(adev->pm.fw);
adev->pm.fw = NULL;
return 0;
}

View File

@ -66,6 +66,16 @@ MODULE_FIRMWARE("radeon/mullins_sdma1.bin");
u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
static void cik_sdma_free_microcode(struct amdgpu_device *adev)
{
int i;
for (i = 0; i < adev->sdma.num_instances; i++) {
release_firmware(adev->sdma.instance[i].fw);
adev->sdma.instance[i].fw = NULL;
}
}
/*
* sDMA - System DMA
* Starting with CIK, the GPU has new asynchronous
@ -419,6 +429,8 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
/* Initialize the ring buffer's read and write pointers */
WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);
/* set the wb address whether it's enabled or not */
WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
@ -446,7 +458,12 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
ring->ready = true;
}
cik_sdma_enable(adev, true);
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
r = amdgpu_ring_test_ring(ring);
if (r) {
ring->ready = false;
@ -529,8 +546,8 @@ static int cik_sdma_start(struct amdgpu_device *adev)
if (r)
return r;
/* unhalt the MEs */
cik_sdma_enable(adev, true);
/* halt the engine before programing */
cik_sdma_enable(adev, false);
/* start the gfx rings and rlc compute queues */
r = cik_sdma_gfx_resume(adev);
@ -998,6 +1015,7 @@ static int cik_sdma_sw_fini(void *handle)
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
cik_sdma_free_microcode(adev);
return 0;
}

View File

@ -72,6 +72,11 @@ static int fiji_dpm_sw_init(void *handle)
static int fiji_dpm_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
release_firmware(adev->pm.fw);
adev->pm.fw = NULL;
return 0;
}

View File

@ -991,6 +991,22 @@ out:
return err;
}
static void gfx_v7_0_free_microcode(struct amdgpu_device *adev)
{
release_firmware(adev->gfx.pfp_fw);
adev->gfx.pfp_fw = NULL;
release_firmware(adev->gfx.me_fw);
adev->gfx.me_fw = NULL;
release_firmware(adev->gfx.ce_fw);
adev->gfx.ce_fw = NULL;
release_firmware(adev->gfx.mec_fw);
adev->gfx.mec_fw = NULL;
release_firmware(adev->gfx.mec2_fw);
adev->gfx.mec2_fw = NULL;
release_firmware(adev->gfx.rlc_fw);
adev->gfx.rlc_fw = NULL;
}
/**
* gfx_v7_0_tiling_mode_table_init - init the hw tiling table
*
@ -4489,6 +4505,7 @@ static int gfx_v7_0_sw_fini(void *handle)
gfx_v7_0_cp_compute_fini(adev);
gfx_v7_0_rlc_fini(adev);
gfx_v7_0_mec_fini(adev);
gfx_v7_0_free_microcode(adev);
return 0;
}

View File

@ -836,6 +836,26 @@ err1:
return r;
}
static void gfx_v8_0_free_microcode(struct amdgpu_device *adev) {
release_firmware(adev->gfx.pfp_fw);
adev->gfx.pfp_fw = NULL;
release_firmware(adev->gfx.me_fw);
adev->gfx.me_fw = NULL;
release_firmware(adev->gfx.ce_fw);
adev->gfx.ce_fw = NULL;
release_firmware(adev->gfx.rlc_fw);
adev->gfx.rlc_fw = NULL;
release_firmware(adev->gfx.mec_fw);
adev->gfx.mec_fw = NULL;
if ((adev->asic_type != CHIP_STONEY) &&
(adev->asic_type != CHIP_TOPAZ))
release_firmware(adev->gfx.mec2_fw);
adev->gfx.mec2_fw = NULL;
kfree(adev->gfx.rlc.register_list_format);
}
static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
@ -1983,7 +2003,7 @@ static int gfx_v8_0_sw_fini(void *handle)
gfx_v8_0_rlc_fini(adev);
kfree(adev->gfx.rlc.register_list_format);
gfx_v8_0_free_microcode(adev);
return 0;
}
@ -3974,11 +3994,15 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
amdgpu_ring_write(ring, 0x3a00161a);
amdgpu_ring_write(ring, 0x0000002e);
break;
case CHIP_TOPAZ:
case CHIP_CARRIZO:
amdgpu_ring_write(ring, 0x00000002);
amdgpu_ring_write(ring, 0x00000000);
break;
case CHIP_TOPAZ:
amdgpu_ring_write(ring, adev->gfx.config.num_rbs == 1 ?
0x00000000 : 0x00000002);
amdgpu_ring_write(ring, 0x00000000);
break;
case CHIP_STONEY:
amdgpu_ring_write(ring, 0x00000000);
amdgpu_ring_write(ring, 0x00000000);

View File

@ -72,6 +72,11 @@ static int iceland_dpm_sw_init(void *handle)
static int iceland_dpm_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
release_firmware(adev->pm.fw);
adev->pm.fw = NULL;
return 0;
}

View File

@ -105,6 +105,15 @@ static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev)
}
}
static void sdma_v2_4_free_microcode(struct amdgpu_device *adev)
{
int i;
for (i = 0; i < adev->sdma.num_instances; i++) {
release_firmware(adev->sdma.instance[i].fw);
adev->sdma.instance[i].fw = NULL;
}
}
/**
* sdma_v2_4_init_microcode - load ucode images from disk
*
@ -461,6 +470,8 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
/* Initialize the ring buffer's read and write pointers */
WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);
/* set the wb address whether it's enabled or not */
WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
@ -489,7 +500,11 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
ring->ready = true;
}
sdma_v2_4_enable(adev, true);
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
r = amdgpu_ring_test_ring(ring);
if (r) {
ring->ready = false;
@ -580,8 +595,8 @@ static int sdma_v2_4_start(struct amdgpu_device *adev)
return -EINVAL;
}
/* unhalt the MEs */
sdma_v2_4_enable(adev, true);
/* halt the engine before programing */
sdma_v2_4_enable(adev, false);
/* start the gfx rings and rlc compute queues */
r = sdma_v2_4_gfx_resume(adev);
@ -1012,6 +1027,7 @@ static int sdma_v2_4_sw_fini(void *handle)
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
sdma_v2_4_free_microcode(adev);
return 0;
}

View File

@ -236,6 +236,15 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
}
}
static void sdma_v3_0_free_microcode(struct amdgpu_device *adev)
{
int i;
for (i = 0; i < adev->sdma.num_instances; i++) {
release_firmware(adev->sdma.instance[i].fw);
adev->sdma.instance[i].fw = NULL;
}
}
/**
* sdma_v3_0_init_microcode - load ucode images from disk
*
@ -672,6 +681,8 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
/* Initialize the ring buffer's read and write pointers */
WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);
/* set the wb address whether it's enabled or not */
WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
@ -711,7 +722,15 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
ring->ready = true;
}
/* unhalt the MEs */
sdma_v3_0_enable(adev, true);
/* enable sdma ring preemption */
sdma_v3_0_ctx_switch_enable(adev, true);
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
r = amdgpu_ring_test_ring(ring);
if (r) {
ring->ready = false;
@ -804,10 +823,9 @@ static int sdma_v3_0_start(struct amdgpu_device *adev)
}
}
/* unhalt the MEs */
sdma_v3_0_enable(adev, true);
/* enable sdma ring preemption */
sdma_v3_0_ctx_switch_enable(adev, true);
/* disble sdma engine before programing it */
sdma_v3_0_ctx_switch_enable(adev, false);
sdma_v3_0_enable(adev, false);
/* start the gfx rings and rlc compute queues */
r = sdma_v3_0_gfx_resume(adev);
@ -1247,6 +1265,7 @@ static int sdma_v3_0_sw_fini(void *handle)
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
sdma_v3_0_free_microcode(adev);
return 0;
}

View File

@ -71,6 +71,11 @@ static int tonga_dpm_sw_init(void *handle)
static int tonga_dpm_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
release_firmware(adev->pm.fw);
adev->pm.fw = NULL;
return 0;
}

View File

@ -157,6 +157,7 @@ struct amd_ip_funcs {
int (*hw_init)(void *handle);
/* tears down the hw state */
int (*hw_fini)(void *handle);
void (*late_fini)(void *handle);
/* handles IP specific hw/sw changes for suspend */
int (*suspend)(void *handle);
/* handles IP specific hw/sw changes for resume */

View File

@ -581,6 +581,9 @@ typedef int (*cgs_get_firmware_info)(struct cgs_device *cgs_device,
enum cgs_ucode_id type,
struct cgs_firmware_info *info);
typedef int (*cgs_rel_firmware)(struct cgs_device *cgs_device,
enum cgs_ucode_id type);
typedef int(*cgs_set_powergating_state)(struct cgs_device *cgs_device,
enum amd_ip_block_type block_type,
enum amd_powergating_state state);
@ -645,6 +648,7 @@ struct cgs_ops {
cgs_set_camera_voltages_t set_camera_voltages;
/* Firmware Info */
cgs_get_firmware_info get_firmware_info;
cgs_rel_firmware rel_firmware;
/* cg pg interface*/
cgs_set_powergating_state set_powergating_state;
cgs_set_clockgating_state set_clockgating_state;
@ -738,6 +742,8 @@ struct cgs_device
CGS_CALL(set_camera_voltages,dev,mask,voltages)
#define cgs_get_firmware_info(dev, type, info) \
CGS_CALL(get_firmware_info, dev, type, info)
#define cgs_rel_firmware(dev, type) \
CGS_CALL(rel_firmware, dev, type)
#define cgs_set_powergating_state(dev, block_type, state) \
CGS_CALL(set_powergating_state, dev, block_type, state)
#define cgs_set_clockgating_state(dev, block_type, state) \

View File

@ -73,11 +73,14 @@ static int pp_sw_init(void *handle)
ret = hwmgr->hwmgr_func->backend_init(hwmgr);
if (ret)
goto err;
goto err1;
pr_info("amdgpu: powerplay initialized\n");
return 0;
err1:
if (hwmgr->pptable_func->pptable_fini)
hwmgr->pptable_func->pptable_fini(hwmgr);
err:
pr_err("amdgpu: powerplay initialization failed\n");
return ret;
@ -100,6 +103,9 @@ static int pp_sw_fini(void *handle)
if (hwmgr->hwmgr_func->backend_fini != NULL)
ret = hwmgr->hwmgr_func->backend_fini(hwmgr);
if (hwmgr->pptable_func->pptable_fini)
hwmgr->pptable_func->pptable_fini(hwmgr);
return ret;
}

View File

@ -58,9 +58,6 @@ static void pem_fini(struct pp_eventmgr *eventmgr)
pem_unregister_interrupts(eventmgr);
pem_handle_event(eventmgr, AMD_PP_EVENT_UNINITIALIZE, &event_data);
if (eventmgr != NULL)
kfree(eventmgr);
}
int eventmgr_init(struct pp_instance *handle)

View File

@ -1830,7 +1830,7 @@ static uint16_t fiji_find_closest_vddci(struct pp_hwmgr *hwmgr, uint16_t vddci)
PP_ASSERT_WITH_CODE(false,
"VDDCI is larger than max VDDCI in VDDCI Voltage Table!",
return vddci_table->entries[i].value);
return vddci_table->entries[i-1].value);
}
static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,

View File

@ -93,6 +93,13 @@ int hwmgr_fini(struct pp_hwmgr *hwmgr)
if (hwmgr == NULL || hwmgr->ps == NULL)
return -EINVAL;
/* do hwmgr finish*/
kfree(hwmgr->backend);
kfree(hwmgr->start_thermal_controller.function_list);
kfree(hwmgr->set_temperature_range.function_list);
kfree(hwmgr->ps);
kfree(hwmgr);
return 0;
@ -462,7 +469,7 @@ uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, u
PP_ASSERT_WITH_CODE(false,
"VDDCI is larger than max VDDCI in VDDCI Voltage Table!",
return vddci_table->entries[i].value);
return vddci_table->entries[i-1].value);
}
int phm_find_boot_level(void *table,

View File

@ -286,7 +286,7 @@ int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr)
if (polaris10_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
(uint8_t *)&data->power_tune_table,
sizeof(struct SMU74_Discrete_PmFuses), data->sram_end))
(sizeof(struct SMU74_Discrete_PmFuses) - 92), data->sram_end))
PP_ASSERT_WITH_CODE(false,
"Attempt to download PmFuseTable Failed!",
return -EINVAL);

View File

@ -2847,27 +2847,6 @@ static int tonga_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
}
}
/* Initialize Vddc DPM table based on allow Vddc values. And populate corresponding std values. */
for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].vddc;
/* tonga_hwmgr->dpm_table.VddcTable.dpm_levels[i].param1 = stdVoltageTable->entries[i].Leakage; */
/* param1 is for corresponding std voltage */
data->dpm_table.vddc_table.dpm_levels[i].enabled = 1;
}
data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
if (NULL != allowed_vdd_mclk_table) {
/* Initialize Vddci DPM table based on allow Mclk values */
for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
data->dpm_table.vdd_ci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].vddci;
data->dpm_table.vdd_ci_table.dpm_levels[i].enabled = 1;
data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].mvdd;
data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1;
}
data->dpm_table.vdd_ci_table.count = allowed_vdd_mclk_table->count;
data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
}
/* setup PCIE gen speed levels*/
tonga_setup_default_pcie_tables(hwmgr);

View File

@ -1040,48 +1040,44 @@ int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
struct phm_ppt_v1_information *pp_table_information =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
if (NULL != hwmgr->soft_pp_table) {
kfree(hwmgr->soft_pp_table);
if (NULL != hwmgr->soft_pp_table)
hwmgr->soft_pp_table = NULL;
}
if (NULL != pp_table_information->vdd_dep_on_sclk)
pp_table_information->vdd_dep_on_sclk = NULL;
kfree(pp_table_information->vdd_dep_on_sclk);
pp_table_information->vdd_dep_on_sclk = NULL;
if (NULL != pp_table_information->vdd_dep_on_mclk)
pp_table_information->vdd_dep_on_mclk = NULL;
kfree(pp_table_information->vdd_dep_on_mclk);
pp_table_information->vdd_dep_on_mclk = NULL;
if (NULL != pp_table_information->valid_mclk_values)
pp_table_information->valid_mclk_values = NULL;
kfree(pp_table_information->valid_mclk_values);
pp_table_information->valid_mclk_values = NULL;
if (NULL != pp_table_information->valid_sclk_values)
pp_table_information->valid_sclk_values = NULL;
kfree(pp_table_information->valid_sclk_values);
pp_table_information->valid_sclk_values = NULL;
if (NULL != pp_table_information->vddc_lookup_table)
pp_table_information->vddc_lookup_table = NULL;
kfree(pp_table_information->vddc_lookup_table);
pp_table_information->vddc_lookup_table = NULL;
if (NULL != pp_table_information->vddgfx_lookup_table)
pp_table_information->vddgfx_lookup_table = NULL;
kfree(pp_table_information->vddgfx_lookup_table);
pp_table_information->vddgfx_lookup_table = NULL;
if (NULL != pp_table_information->mm_dep_table)
pp_table_information->mm_dep_table = NULL;
kfree(pp_table_information->mm_dep_table);
pp_table_information->mm_dep_table = NULL;
if (NULL != pp_table_information->cac_dtp_table)
pp_table_information->cac_dtp_table = NULL;
kfree(pp_table_information->cac_dtp_table);
pp_table_information->cac_dtp_table = NULL;
if (NULL != hwmgr->dyn_state.cac_dtp_table)
hwmgr->dyn_state.cac_dtp_table = NULL;
kfree(hwmgr->dyn_state.cac_dtp_table);
hwmgr->dyn_state.cac_dtp_table = NULL;
if (NULL != pp_table_information->ppm_parameter_table)
pp_table_information->ppm_parameter_table = NULL;
kfree(pp_table_information->ppm_parameter_table);
pp_table_information->ppm_parameter_table = NULL;
if (NULL != pp_table_information->pcie_table)
pp_table_information->pcie_table = NULL;
kfree(pp_table_information->pcie_table);
pp_table_information->pcie_table = NULL;
if (NULL != hwmgr->pptable) {
kfree(hwmgr->pptable);
hwmgr->pptable = NULL;
}
kfree(hwmgr->pptable);
hwmgr->pptable = NULL;
return result;
}
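
The teardown hunks above converge on one idiom: because kfree() accepts NULL, the defensive checks are dropped and each table pointer is freed unconditionally and then cleared. A tiny sketch of that idiom (the double pointer is illustrative; 'field' stands for any of the pp_table_information members):

#include <linux/slab.h>

static void example_free_field(void **field)
{
	kfree(*field);		/* kfree(NULL) is a no-op, no check needed */
	*field = NULL;		/* avoid leaving a dangling pointer behind */
}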

View File

@ -1006,10 +1006,16 @@ static int fiji_smu_init(struct pp_smumgr *smumgr)
static int fiji_smu_fini(struct pp_smumgr *smumgr)
{
struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle);
if (smumgr->backend) {
kfree(smumgr->backend);
smumgr->backend = NULL;
}
cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
return 0;
}

View File

@ -469,6 +469,7 @@ int polaris10_smu_fini(struct pp_smumgr *smumgr)
kfree(smumgr->backend);
smumgr->backend = NULL;
}
cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
return 0;
}

View File

@ -81,6 +81,7 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
int smum_fini(struct pp_smumgr *smumgr)
{
kfree(smumgr->device);
kfree(smumgr);
return 0;
}

View File

@ -328,10 +328,17 @@ int tonga_write_smc_sram_dword(struct pp_smumgr *smumgr,
static int tonga_smu_fini(struct pp_smumgr *smumgr)
{
struct tonga_smumgr *priv = (struct tonga_smumgr *)(smumgr->backend);
smu_free_memory(smumgr->device, (void *)priv->smu_buffer.handle);
smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle);
if (smumgr->backend != NULL) {
kfree(smumgr->backend);
smumgr->backend = NULL;
}
cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
return 0;
}

View File

@ -42,9 +42,10 @@ static const struct regmap_config fsl_dcu_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.cache_type = REGCACHE_RBTREE,
.cache_type = REGCACHE_FLAT,
.volatile_reg = fsl_dcu_drm_is_volatile_reg,
.max_register = 0x11fc,
};
static int fsl_dcu_drm_irq_init(struct drm_device *dev)

View File

@ -408,7 +408,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
}
adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo);
if (!adreno_gpu->memptrs) {
if (IS_ERR(adreno_gpu->memptrs)) {
dev_err(drm->dev, "could not vmap memptrs\n");
return -ENOMEM;
}

View File

@ -159,6 +159,10 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
dev->mode_config.fb_base = paddr;
fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo);
if (IS_ERR(fbi->screen_base)) {
ret = PTR_ERR(fbi->screen_base);
goto fail_unlock;
}
fbi->screen_size = fbdev->bo->size;
fbi->fix.smem_start = paddr;
fbi->fix.smem_len = fbdev->bo->size;

View File

@ -398,6 +398,8 @@ void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
return ERR_CAST(pages);
msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
VM_MAP, pgprot_writecombine(PAGE_KERNEL));
if (msm_obj->vaddr == NULL)
return ERR_PTR(-ENOMEM);
}
return msm_obj->vaddr;
}
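
This hunk makes msm_gem_vaddr_locked() report a failed vmap() as ERR_PTR(-ENOMEM) rather than NULL, and the earlier adreno/fbdev/rd/ringbuffer hunks switch its callers to the matching checks. A caller-side sketch of the convention (example_map() and 'out' are illustrative):

#include <linux/err.h>

static int example_map(struct drm_gem_object *obj, void **out)
{
	void *vaddr = msm_gem_vaddr_locked(obj);

	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);	/* e.g. -ENOMEM when the vmap fails */

	*out = vaddr;
	return 0;
}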

View File

@ -40,12 +40,14 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
submit->dev = dev;
submit->gpu = gpu;
submit->fence = NULL;
submit->pid = get_pid(task_pid(current));
/* initially, until copy_from_user() and bo lookup succeeds: */
submit->nr_bos = 0;
submit->nr_cmds = 0;
INIT_LIST_HEAD(&submit->node);
INIT_LIST_HEAD(&submit->bo_list);
ww_acquire_init(&submit->ticket, &reservation_ww_class);
@ -75,6 +77,11 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
void __user *userptr =
u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));
/* make sure we don't have garbage flags, in case we hit
* error path before flags is initialized:
*/
submit->bos[i].flags = 0;
ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
if (ret) {
ret = -EFAULT;

View File

@ -312,6 +312,9 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
struct msm_gem_object *obj = submit->bos[idx].obj;
const char *buf = msm_gem_vaddr_locked(&obj->base);
if (IS_ERR(buf))
continue;
buf += iova - submit->bos[idx].iova;
rd_write_section(rd, RD_GPUADDR,

View File

@ -40,6 +40,10 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
}
ring->start = msm_gem_vaddr_locked(ring->bo);
if (IS_ERR(ring->start)) {
ret = PTR_ERR(ring->start);
goto fail;
}
ring->end = ring->start + (size / 4);
ring->cur = ring->start;

View File

@ -16,9 +16,9 @@ enum nvkm_devidx {
NVKM_SUBDEV_MC,
NVKM_SUBDEV_BUS,
NVKM_SUBDEV_TIMER,
NVKM_SUBDEV_INSTMEM,
NVKM_SUBDEV_FB,
NVKM_SUBDEV_LTC,
NVKM_SUBDEV_INSTMEM,
NVKM_SUBDEV_MMU,
NVKM_SUBDEV_BAR,
NVKM_SUBDEV_PMU,

View File

@ -25,7 +25,8 @@ u16 nvbios_outp_match(struct nvkm_bios *, u16 type, u16 mask,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_outp *);
struct nvbios_ocfg {
u16 match;
u8 proto;
u8 flags;
u16 clkcmp[2];
};
@ -33,7 +34,7 @@ u16 nvbios_ocfg_entry(struct nvkm_bios *, u16 outp, u8 idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
u16 nvbios_ocfg_parse(struct nvkm_bios *, u16 outp, u8 idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *);
u16 nvbios_ocfg_match(struct nvkm_bios *, u16 outp, u16 type,
u16 nvbios_ocfg_match(struct nvkm_bios *, u16 outp, u8 proto, u8 flags,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *);
u16 nvbios_oclk_match(struct nvkm_bios *, u16 cmp, u32 khz);
#endif

View File

@ -552,6 +552,7 @@ nouveau_fbcon_init(struct drm_device *dev)
if (ret)
goto fini;
fbcon->helper.fbdev->pixmap.buf_align = 4;
return 0;
fini:

View File

@ -82,7 +82,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
uint32_t fg;
uint32_t bg;
uint32_t dsize;
uint32_t width;
uint32_t *data = (uint32_t *)image->data;
int ret;
@ -93,9 +92,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
if (ret)
return ret;
width = ALIGN(image->width, 8);
dsize = ALIGN(width * image->height, 32) >> 5;
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
fg = ((uint32_t *) info->pseudo_palette)[image->fg_color];
@ -111,10 +107,11 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
((image->dx + image->width) & 0xffff));
OUT_RING(chan, bg);
OUT_RING(chan, fg);
OUT_RING(chan, (image->height << 16) | width);
OUT_RING(chan, (image->height << 16) | image->width);
OUT_RING(chan, (image->height << 16) | image->width);
OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
dsize = ALIGN(image->width * image->height, 32) >> 5;
while (dsize) {
int iter_len = dsize > 128 ? 128 : dsize;

View File

@ -95,7 +95,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
struct nouveau_fbdev *nfbdev = info->par;
struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
struct nouveau_channel *chan = drm->channel;
uint32_t width, dwords, *data = (uint32_t *)image->data;
uint32_t dwords, *data = (uint32_t *)image->data;
uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
uint32_t *palette = info->pseudo_palette;
int ret;
@ -107,9 +107,6 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
if (ret)
return ret;
width = ALIGN(image->width, 32);
dwords = (width * image->height) >> 5;
BEGIN_NV04(chan, NvSub2D, 0x0814, 2);
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
@ -128,6 +125,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
OUT_RING(chan, 0);
OUT_RING(chan, image->dy);
dwords = ALIGN(image->width * image->height, 32) >> 5;
while (dwords) {
int push = dwords > 2047 ? 2047 : dwords;

View File

@ -95,7 +95,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
struct nouveau_fbdev *nfbdev = info->par;
struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
struct nouveau_channel *chan = drm->channel;
uint32_t width, dwords, *data = (uint32_t *)image->data;
uint32_t dwords, *data = (uint32_t *)image->data;
uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
uint32_t *palette = info->pseudo_palette;
int ret;
@ -107,9 +107,6 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
if (ret)
return ret;
width = ALIGN(image->width, 32);
dwords = (width * image->height) >> 5;
BEGIN_NVC0(chan, NvSub2D, 0x0814, 2);
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
@ -128,6 +125,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
OUT_RING (chan, 0);
OUT_RING (chan, image->dy);
dwords = ALIGN(image->width * image->height, 32) >> 5;
while (dwords) {
int push = dwords > 2047 ? 2047 : dwords;

View File

@ -18,6 +18,7 @@ nvkm-y += nvkm/engine/disp/piornv50.o
nvkm-y += nvkm/engine/disp/sornv50.o
nvkm-y += nvkm/engine/disp/sorg94.o
nvkm-y += nvkm/engine/disp/sorgf119.o
nvkm-y += nvkm/engine/disp/sorgm107.o
nvkm-y += nvkm/engine/disp/sorgm200.o
nvkm-y += nvkm/engine/disp/dport.o

Some files were not shown because too many files have changed in this diff