ARC changes for 3.17
Merge tag 'arc-v3.17-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc

Pull ARC changes from Vineet Gupta:
 "Mostly cleanup/refactoring in core intc, cache flush, IPI send..."

* tag 'arc-v3.17-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc:
  mm, arc: remove obsolete pagefault oom killer comment
  ARC: help gcc elide icache helper for !SMP
  ARC: move common ops for line/full cache into helpers
  ARC: cache boot reporting updates
  ARC: [intc] mask/unmask can be hidden again
  ARC: [plat-arcfpga] No need for init_irq hack
  ARC: [intc] don't mask all IRQ by default
  ARC: prune extra header includes from smp.c
  ARC: update some comments
  ARC: [SMP] unify cpu private IRQ requests (TIMER/IPI)
This commit is contained in: commit e853ccf08b
@@ -296,7 +296,7 @@ struct cpuinfo_arc_mmu {
 };
 
 struct cpuinfo_arc_cache {
-	unsigned int sz, line_len, assoc, ver;
+	unsigned int sz_k:8, line_len:8, assoc:4, ver:4, alias:1, vipt:1, pad:6;
 };
 
 struct cpuinfo_arc_ccm {
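The reworked cache descriptor above packs its fields into bitfields whose widths sum to 32 bits (8 + 8 + 4 + 4 + 1 + 1 + 6), so the whole descriptor now fits in a single word. A minimal stand-alone sketch of that layout with an illustrative size check (the mirror struct and the assert are not part of this diff):

#include <stdint.h>

/* Mirror of the new cpuinfo_arc_cache layout: size in KB, line length in
 * bytes, associativity, version, plus the alias/vipt flags and padding. */
struct cache_desc {
	unsigned int sz_k:8, line_len:8, assoc:4, ver:4, alias:1, vipt:1, pad:6;
};

/* 8 + 8 + 4 + 4 + 1 + 1 + 6 = 32 bits -> one 32-bit word. */
_Static_assert(sizeof(struct cache_desc) == sizeof(uint32_t),
	       "descriptor expected to pack into one word");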
@@ -16,9 +16,13 @@
 #define TIMER0_IRQ	3
 #define TIMER1_IRQ	4
 
+#include <linux/interrupt.h>
 #include <asm-generic/irq.h>
 
 extern void arc_init_IRQ(void);
 void arc_local_timer_setup(void);
+void arc_request_percpu_irq(int irq, int cpu,
+			    irqreturn_t (*isr)(int irq, void *dev),
+			    const char *irq_nm, void *percpu_dev);
 
 #endif
@@ -131,24 +131,6 @@ static inline int arch_irqs_disabled(void)
 	return arch_irqs_disabled_flags(arch_local_save_flags());
 }
 
-static inline void arch_mask_irq(unsigned int irq)
-{
-	unsigned int ienb;
-
-	ienb = read_aux_reg(AUX_IENABLE);
-	ienb &= ~(1 << irq);
-	write_aux_reg(AUX_IENABLE, ienb);
-}
-
-static inline void arch_unmask_irq(unsigned int irq)
-{
-	unsigned int ienb;
-
-	ienb = read_aux_reg(AUX_IENABLE);
-	ienb |= (1 << irq);
-	write_aux_reg(AUX_IENABLE, ienb);
-}
-
 #else
 
 #ifdef CONFIG_TRACE_IRQFLAGS
@@ -19,21 +19,16 @@
 
 /*
  * Early Hardware specific Interrupt setup
+ * -Platform independent, needed for each CPU (not foldable into init_IRQ)
  * -Called very early (start_kernel -> setup_arch -> setup_processor)
- * -Platform Independent (must for any ARC700)
- * -Needed for each CPU (hence not foldable into init_IRQ)
  *
  * what it does ?
- * -Disable all IRQs (on CPU side)
  * -Optionally, setup the High priority Interrupts as Level 2 IRQs
  */
 void arc_init_IRQ(void)
 {
 	int level_mask = 0;
 
-	/* Disable all IRQs: enable them as devices request */
-	write_aux_reg(AUX_IENABLE, 0);
-
 	/* setup any high priority Interrupts (Level2 in ARCompact jargon) */
 	level_mask |= IS_ENABLED(CONFIG_ARC_IRQ3_LV2) << 3;
 	level_mask |= IS_ENABLED(CONFIG_ARC_IRQ5_LV2) << 5;
@@ -60,20 +55,28 @@ void arc_init_IRQ(void)
  * below, per IRQ.
  */
 
-static void arc_mask_irq(struct irq_data *data)
+static void arc_irq_mask(struct irq_data *data)
 {
-	arch_mask_irq(data->irq);
+	unsigned int ienb;
+
+	ienb = read_aux_reg(AUX_IENABLE);
+	ienb &= ~(1 << data->irq);
+	write_aux_reg(AUX_IENABLE, ienb);
 }
 
-static void arc_unmask_irq(struct irq_data *data)
+static void arc_irq_unmask(struct irq_data *data)
 {
-	arch_unmask_irq(data->irq);
+	unsigned int ienb;
+
+	ienb = read_aux_reg(AUX_IENABLE);
+	ienb |= (1 << data->irq);
+	write_aux_reg(AUX_IENABLE, ienb);
 }
 
 static struct irq_chip onchip_intc = {
 	.name = "ARC In-core Intc",
-	.irq_mask = arc_mask_irq,
-	.irq_unmask = arc_unmask_irq,
+	.irq_mask = arc_irq_mask,
+	.irq_unmask = arc_irq_unmask,
 };
 
 static int arc_intc_domain_map(struct irq_domain *d, unsigned int irq,
@@ -150,6 +153,32 @@ void arch_do_IRQ(unsigned int irq, struct pt_regs *regs)
 	set_irq_regs(old_regs);
 }
 
+void arc_request_percpu_irq(int irq, int cpu,
+			    irqreturn_t (*isr)(int irq, void *dev),
+			    const char *irq_nm,
+			    void *percpu_dev)
+{
+	/* Boot cpu calls request, all call enable */
+	if (!cpu) {
+		int rc;
+
+		/*
+		 * These 2 calls are essential to making percpu IRQ APIs work
+		 * Ideally these details could be hidden in irq chip map function
+		 * but the issue is IPIs IRQs being static (non-DT) and platform
+		 * specific, so we can't identify them there.
+		 */
+		irq_set_percpu_devid(irq);
+		irq_modify_status(irq, IRQ_NOAUTOEN, 0);	/* @irq, @clr, @set */
+
+		rc = request_percpu_irq(irq, isr, irq_nm, percpu_dev);
+		if (rc)
+			panic("Percpu IRQ request failed for %d\n", irq);
+	}
+
+	enable_percpu_irq(irq, 0);
+}
+
 /*
  * arch_local_irq_enable - Enable interrupts.
  *
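The new helper centralizes the boot-CPU vs. secondary-CPU split: CPU 0 performs the one-time request_percpu_irq() (plus the percpu-devid/NOAUTOEN setup), and every CPU, including CPU 0, enables its private copy of the line. A minimal sketch of how a caller uses it, modeled on the timer and IPI call sites later in this series (the IRQ number, handler name and per-cpu cookie below are hypothetical):

#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <asm/irq.h>

#define MY_PERCPU_IRQ	5			/* hypothetical IRQ number */

static DEFINE_PER_CPU(int, my_dev);		/* per-cpu cookie handed to the ISR */

static irqreturn_t my_percpu_isr(int irq, void *dev)
{
	/* per-cpu interrupt handling would go here */
	return IRQ_HANDLED;
}

/* Called on each CPU as it is brought up (boot CPU included) */
static void my_percpu_irq_setup(int cpu)
{
	arc_request_percpu_irq(MY_PERCPU_IRQ, cpu, my_percpu_isr,
			       "my-percpu-irq", per_cpu_ptr(&my_dev, cpu));
}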
@@ -12,23 +12,15 @@
  * -- Initial Write (Borrowed heavily from ARM)
  */
 
-#include <linux/module.h>
-#include <linux/init.h>
 #include <linux/spinlock.h>
 #include <linux/sched.h>
 #include <linux/interrupt.h>
 #include <linux/profile.h>
-#include <linux/errno.h>
-#include <linux/err.h>
 #include <linux/mm.h>
 #include <linux/cpu.h>
-#include <linux/smp.h>
 #include <linux/irq.h>
-#include <linux/delay.h>
 #include <linux/atomic.h>
-#include <linux/percpu.h>
 #include <linux/cpumask.h>
-#include <linux/spinlock_types.h>
 #include <linux/reboot.h>
 #include <asm/processor.h>
 #include <asm/setup.h>
@@ -136,7 +128,7 @@ void start_kernel_secondary(void)
 	pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);
 
 	if (machine_desc->init_smp)
-		machine_desc->init_smp(smp_processor_id());
+		machine_desc->init_smp(cpu);
 
 	arc_local_timer_setup();
 
@@ -338,18 +330,11 @@ irqreturn_t do_IPI(int irq, void *dev_id)
  */
 static DEFINE_PER_CPU(int, ipi_dev);
 
-static struct irqaction arc_ipi_irq = {
-	.name = "IPI Interrupt",
-	.flags = IRQF_PERCPU,
-	.handler = do_IPI,
-};
-
 int smp_ipi_irq_setup(int cpu, int irq)
 {
-	if (!cpu)
-		return setup_irq(irq, &arc_ipi_irq);
-	else
-		arch_unmask_irq(irq);
+	int *dev = per_cpu_ptr(&ipi_dev, cpu);
+
+	arc_request_percpu_irq(irq, cpu, do_IPI, "IPI Interrupt", dev);
 
 	return 0;
 }
@@ -144,12 +144,12 @@ static struct clocksource arc_counter = {
 /********** Clock Event Device *********/
 
 /*
- * Arm the timer to interrupt after @limit cycles
+ * Arm the timer to interrupt after @cycles
  * The distinction for oneshot/periodic is done in arc_event_timer_ack() below
  */
-static void arc_timer_event_setup(unsigned int limit)
+static void arc_timer_event_setup(unsigned int cycles)
 {
-	write_aux_reg(ARC_REG_TIMER0_LIMIT, limit);
+	write_aux_reg(ARC_REG_TIMER0_LIMIT, cycles);
 	write_aux_reg(ARC_REG_TIMER0_CNT, 0);	/* start from 0 */
 
 	write_aux_reg(ARC_REG_TIMER0_CTRL, TIMER_CTRL_IE | TIMER_CTRL_NH);
@@ -168,6 +168,10 @@ static void arc_clkevent_set_mode(enum clock_event_mode mode,
 {
 	switch (mode) {
 	case CLOCK_EVT_MODE_PERIODIC:
+		/*
+		 * At X Hz, 1 sec = 1000ms -> X cycles;
+		 * 10ms -> X / 100 cycles
+		 */
 		arc_timer_event_setup(arc_get_core_freq() / HZ);
 		break;
 	case CLOCK_EVT_MODE_ONESHOT:
@@ -210,12 +214,6 @@ static irqreturn_t timer_irq_handler(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static struct irqaction arc_timer_irq = {
-	.name = "Timer0 (clock-evt-dev)",
-	.flags = IRQF_TIMER | IRQF_PERCPU,
-	.handler = timer_irq_handler,
-};
-
 /*
  * Setup the local event timer for @cpu
  */
@@ -228,15 +226,9 @@ void arc_local_timer_setup()
 	clockevents_config_and_register(evt, arc_get_core_freq(),
 					0, ARC_TIMER_MAX);
 
-	/*
-	 * setup the per-cpu timer IRQ handler - for all cpus
-	 * For non boot CPU explicitly unmask at intc
-	 * setup_irq() -> .. -> irq_startup() already does this on boot-cpu
-	 */
-	if (!cpu)
-		setup_irq(TIMER0_IRQ, &arc_timer_irq);
-	else
-		arch_unmask_irq(TIMER0_IRQ);
+	/* setup the per-cpu timer IRQ handler - for all cpus */
+	arc_request_percpu_irq(TIMER0_IRQ, cpu, timer_irq_handler,
+			       "Timer0 (per-cpu-tick)", evt);
 }
 
 /*
@@ -77,21 +77,19 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len)
 {
 	int n = 0;
 
-#define PR_CACHE(p, enb, str) \
-{ \
+#define PR_CACHE(p, cfg, str) \
 	if (!(p)->ver) \
 		n += scnprintf(buf + n, len - n, str"\t\t: N/A\n"); \
 	else \
 		n += scnprintf(buf + n, len - n, \
-			str"\t\t: (%uK) VIPT, %dway set-asc, %ub Line %s\n", \
-			TO_KB((p)->sz), (p)->assoc, (p)->line_len, \
-			enb ? "" : "DISABLED (kernel-build)"); \
-}
+			str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n", \
+			(p)->sz_k, (p)->assoc, (p)->line_len, \
+			(p)->vipt ? "VIPT" : "PIPT", \
+			(p)->alias ? " aliasing" : "", \
+			IS_ENABLED(cfg) ? "" : " (not used)");
 
-	PR_CACHE(&cpuinfo_arc700[c].icache, IS_ENABLED(CONFIG_ARC_HAS_ICACHE),
-		"I-Cache");
-	PR_CACHE(&cpuinfo_arc700[c].dcache, IS_ENABLED(CONFIG_ARC_HAS_DCACHE),
-		"D-Cache");
+	PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
+	PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");
 
 	return buf;
 }
@@ -116,20 +114,31 @@ void read_decode_cache_bcr(void)
 	p_ic = &cpuinfo_arc700[cpu].icache;
 	READ_BCR(ARC_REG_IC_BCR, ibcr);
 
+	if (!ibcr.ver)
+		goto dc_chk;
+
 	BUG_ON(ibcr.config != 3);
 	p_ic->assoc = 2;		/* Fixed to 2w set assoc */
 	p_ic->line_len = 8 << ibcr.line_len;
-	p_ic->sz = 0x200 << ibcr.sz;
+	p_ic->sz_k = 1 << (ibcr.sz - 1);
 	p_ic->ver = ibcr.ver;
+	p_ic->vipt = 1;
+	p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;
 
+dc_chk:
 	p_dc = &cpuinfo_arc700[cpu].dcache;
 	READ_BCR(ARC_REG_DC_BCR, dbcr);
 
+	if (!dbcr.ver)
+		return;
+
 	BUG_ON(dbcr.config != 2);
 	p_dc->assoc = 4;		/* Fixed to 4w set assoc */
 	p_dc->line_len = 16 << dbcr.line_len;
-	p_dc->sz = 0x200 << dbcr.sz;
+	p_dc->sz_k = 1 << (dbcr.sz - 1);
 	p_dc->ver = dbcr.ver;
+	p_dc->vipt = 1;
+	p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
 }
 
 /*
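The alias flag computed above simply asks whether one cache way spans more than a page: sz_k / assoc / TO_KB(PAGE_SIZE) > 1. A worked example with illustrative numbers (not taken from this diff): a 32 KB, 2-way cache with 8 KB pages has a 16 KB way, 32/2/8 = 2 > 1, so it can alias; a 16 KB, 4-way cache with the same page size has a 4 KB way and cannot. The same rule as a tiny stand-alone check:

#include <stdio.h>

int main(void)
{
	unsigned int page_k = 8;		/* 8 KB pages (illustrative) */
	unsigned int sz_k = 32, assoc = 2;	/* 32 KB, 2-way cache (illustrative) */
	unsigned int alias = sz_k / assoc / page_k > 1;

	printf("way = %u KB, page = %u KB, alias = %u\n",
	       sz_k / assoc, page_k, alias);	/* way = 16 KB -> alias = 1 */
	return 0;
}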
@@ -142,14 +151,16 @@ void read_decode_cache_bcr(void)
 void arc_cache_init(void)
 {
 	unsigned int __maybe_unused cpu = smp_processor_id();
-	struct cpuinfo_arc_cache __maybe_unused *ic, __maybe_unused *dc;
 	char str[256];
 
 	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
 
-#ifdef CONFIG_ARC_HAS_ICACHE
-	ic = &cpuinfo_arc700[cpu].icache;
-	if (ic->ver) {
+	if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
+		struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
+
+		if (!ic->ver)
+			panic("cache support enabled but non-existent cache\n");
+
 		if (ic->line_len != L1_CACHE_BYTES)
 			panic("ICache line [%d] != kernel Config [%d]",
 			      ic->line_len, L1_CACHE_BYTES);
@@ -158,26 +169,26 @@ void arc_cache_init(void)
 			panic("Cache ver [%d] doesn't match MMU ver [%d]\n",
 			      ic->ver, CONFIG_ARC_MMU_VER);
 	}
-#endif
 
-#ifdef CONFIG_ARC_HAS_DCACHE
-	dc = &cpuinfo_arc700[cpu].dcache;
-	if (dc->ver) {
-		unsigned int dcache_does_alias;
+	if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
+		struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
+		int handled;
 
+		if (!dc->ver)
+			panic("cache support enabled but non-existent cache\n");
+
 		if (dc->line_len != L1_CACHE_BYTES)
 			panic("DCache line [%d] != kernel Config [%d]",
 			      dc->line_len, L1_CACHE_BYTES);
 
 		/* check for D-Cache aliasing */
-		dcache_does_alias = (dc->sz / dc->assoc) > PAGE_SIZE;
+		handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
 
-		if (dcache_does_alias && !cache_is_vipt_aliasing())
+		if (dc->alias && !handled)
 			panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
-		else if (!dcache_does_alias && cache_is_vipt_aliasing())
+		else if (!dc->alias && handled)
 			panic("Don't need CONFIG_ARC_CACHE_VIPT_ALIASING\n");
 	}
-#endif
 }
 
 #define OP_INV	0x1
@@ -255,10 +266,32 @@ static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr,
  * Machine specific helpers for Entire D-Cache or Per Line ops
  */
 
-static inline void wait_for_flush(void)
+static unsigned int __before_dc_op(const int op)
 {
-	while (read_aux_reg(ARC_REG_DC_CTRL) & DC_CTRL_FLUSH_STATUS)
-		;
+	unsigned int reg = reg;
+
+	if (op == OP_FLUSH_N_INV) {
+		/* Dcache provides 2 cmd: FLUSH or INV
+		 * INV inturn has sub-modes: DISCARD or FLUSH-BEFORE
+		 * flush-n-inv is achieved by INV cmd but with IM=1
+		 * So toggle INV sub-mode depending on op request and default
+		 */
+		reg = read_aux_reg(ARC_REG_DC_CTRL);
+		write_aux_reg(ARC_REG_DC_CTRL, reg | DC_CTRL_INV_MODE_FLUSH)
+			;
+	}
+
+	return reg;
 }
 
+static void __after_dc_op(const int op, unsigned int reg)
+{
+	if (op & OP_FLUSH)	/* flush / flush-n-inv both wait */
+		while (read_aux_reg(ARC_REG_DC_CTRL) & DC_CTRL_FLUSH_STATUS);
+
+	/* Switch back to default Invalidate mode */
+	if (op == OP_FLUSH_N_INV)
+		write_aux_reg(ARC_REG_DC_CTRL, reg & ~DC_CTRL_INV_MODE_FLUSH);
+}
+
 /*
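These two helpers factor out the common bracket around any D-cache operation: set the flush-before-invalidate mode up front when the op is flush-n-inv, then afterwards wait for the flush to complete and restore the default invalidate mode. A condensed sketch of the calling pattern, mirroring what __dc_entire_op()/__dc_line_op() do in the hunks below (the wrapper function name is illustrative):

static void example_dc_line_op(unsigned long paddr, unsigned long vaddr,
			       unsigned long sz, const int op)
{
	unsigned int ctrl;

	ctrl = __before_dc_op(op);			/* switch INV sub-mode if flush-n-inv */
	__cache_line_loop(paddr, vaddr, sz, op);	/* issue the per-line cache commands */
	__after_dc_op(op, ctrl);			/* wait for flush, restore sub-mode */
}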
@@ -269,18 +302,10 @@ static inline void wait_for_flush(void)
  */
 static inline void __dc_entire_op(const int cacheop)
 {
-	unsigned int tmp = tmp;
+	unsigned int ctrl_reg;
 	int aux;
 
-	if (cacheop == OP_FLUSH_N_INV) {
-		/* Dcache provides 2 cmd: FLUSH or INV
-		 * INV inturn has sub-modes: DISCARD or FLUSH-BEFORE
-		 * flush-n-inv is achieved by INV cmd but with IM=1
-		 * Default INV sub-mode is DISCARD, which needs to be toggled
-		 */
-		tmp = read_aux_reg(ARC_REG_DC_CTRL);
-		write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH);
-	}
+	ctrl_reg = __before_dc_op(cacheop);
 
 	if (cacheop & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
 		aux = ARC_REG_DC_IVDC;
@@ -289,12 +314,7 @@ static inline void __dc_entire_op(const int cacheop)
 
 	write_aux_reg(aux, 0x1);
 
-	if (cacheop & OP_FLUSH)	/* flush / flush-n-inv both wait */
-		wait_for_flush();
-
-	/* Switch back the DISCARD ONLY Invalidate mode */
-	if (cacheop == OP_FLUSH_N_INV)
-		write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH);
+	__after_dc_op(cacheop, ctrl_reg);
 }
 
 /* For kernel mappings cache operation: index is same as paddr */
@@ -306,29 +326,16 @@ static inline void __dc_entire_op(const int cacheop)
 static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
 				unsigned long sz, const int cacheop)
 {
-	unsigned long flags, tmp = tmp;
+	unsigned long flags;
+	unsigned int ctrl_reg;
 
 	local_irq_save(flags);
 
-	if (cacheop == OP_FLUSH_N_INV) {
-		/*
-		 * Dcache provides 2 cmd: FLUSH or INV
-		 * INV inturn has sub-modes: DISCARD or FLUSH-BEFORE
-		 * flush-n-inv is achieved by INV cmd but with IM=1
-		 * Default INV sub-mode is DISCARD, which needs to be toggled
-		 */
-		tmp = read_aux_reg(ARC_REG_DC_CTRL);
-		write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH);
-	}
+	ctrl_reg = __before_dc_op(cacheop);
 
 	__cache_line_loop(paddr, vaddr, sz, cacheop);
 
-	if (cacheop & OP_FLUSH)	/* flush / flush-n-inv both wait */
-		wait_for_flush();
-
-	/* Switch back the DISCARD ONLY Invalidate mode */
-	if (cacheop == OP_FLUSH_N_INV)
-		write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH);
+	__after_dc_op(cacheop, ctrl_reg);
 
 	local_irq_restore(flags);
 }
@@ -389,8 +396,16 @@ static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
 /***********************************************************
  * Machine specific helper for per line I-Cache invalidate.
  */
-static void __ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr,
-				unsigned long sz)
+
+static inline void __ic_entire_inv(void)
+{
+	write_aux_reg(ARC_REG_IC_IVIC, 1);
+	read_aux_reg(ARC_REG_IC_CTRL);	/* blocks */
+}
+
+static inline void
+__ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr,
+			  unsigned long sz)
 {
 	unsigned long flags;
 
@@ -399,30 +414,39 @@ static void __ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr,
 	local_irq_restore(flags);
 }
 
-static inline void __ic_entire_inv(void)
-{
-	write_aux_reg(ARC_REG_IC_IVIC, 1);
-	read_aux_reg(ARC_REG_IC_CTRL);	/* blocks */
-}
+#ifndef CONFIG_SMP
 
-struct ic_line_inv_vaddr_ipi {
+#define __ic_line_inv_vaddr(p, v, s)	__ic_line_inv_vaddr_local(p, v, s)
+
+#else
+
+struct ic_inv_args {
 	unsigned long paddr, vaddr;
 	int sz;
 };
 
 static void __ic_line_inv_vaddr_helper(void *info)
 {
-	struct ic_line_inv_vaddr_ipi *ic_inv = (struct ic_line_inv_vaddr_ipi*) info;
+	struct ic_inv *ic_inv_args = (struct ic_inv_args *) info;
 
 	__ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
 }
 
 static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
 				unsigned long sz)
 {
-	struct ic_line_inv_vaddr_ipi ic_inv = { paddr, vaddr , sz};
+	struct ic_inv_args ic_inv = {
+		.paddr = paddr,
+		.vaddr = vaddr,
+		.sz = sz
+	};
 
 	on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
 }
-#else
 
+#endif /* CONFIG_SMP */
+
+#else /* !CONFIG_ARC_HAS_ICACHE */
 
 #define __ic_entire_inv()
 #define __ic_line_inv_vaddr(pstart, vstart, sz)
@@ -159,7 +159,6 @@ good_area:
 		return;
 	}
 
-	/* TBD: switch to pagefault_out_of_memory() */
 	if (fault & VM_FAULT_OOM)
 		goto out_of_memory;
 	else if (fault & VM_FAULT_SIGBUS)
@@ -220,9 +220,9 @@ ex_saved_reg1:
 
 .macro CONV_PTE_TO_TLB
 	and r3, r0, PTE_BITS_RWX	; r w x
-	lsl r2, r3, 3			; r w x 0 0 0
+	lsl r2, r3, 3			; r w x 0 0 0 (GLOBAL, kernel only)
 	and.f 0, r0, _PAGE_GLOBAL
-	or.z r2, r2, r3			; r w x r w x
+	or.z r2, r2, r3			; r w x r w x (!GLOBAL, user page)
 
 	and r3, r0, PTE_BITS_NON_RWX_IN_PD1 ; Extract PFN+cache bits from PTE
 	or  r3, r3, r2
@@ -8,5 +8,5 @@
 
 KBUILD_CFLAGS	+= -Iarch/arc/plat-arcfpga/include
 
-obj-y := platform.o irq.o
+obj-y := platform.o
 obj-$(CONFIG_ISS_SMP_EXTN) += smp.o
@@ -24,6 +24,4 @@
 #define IDU_INTERRUPT_0	16
 #endif
 
-extern void __init plat_fpga_init_IRQ(void);
-
 #endif
@@ -1,25 +0,0 @@
-/*
- * ARC FPGA Platform IRQ hookups
- *
- * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/interrupt.h>
-#include <plat/irq.h>
-
-void __init plat_fpga_init_IRQ(void)
-{
-	/*
-	 * SMP Hack because UART IRQ hardwired to cpu0 (boot-cpu) but if the
-	 * request_irq() comes from any other CPU, the low level IRQ unamsking
-	 * essential for getting Interrupts won't be enabled on cpu0, locking
-	 * up the UART state machine.
-	 */
-#ifdef CONFIG_SMP
-	arch_unmask_irq(UART0_IRQ);
-#endif
-}
@@ -57,7 +57,6 @@ MACHINE_START(ANGEL4, "angel4")
 	.dt_compat	= aa4_compat,
 	.init_early	= plat_fpga_early_init,
 	.init_machine	= plat_fpga_populate_dev,
-	.init_irq	= plat_fpga_init_IRQ,
 #ifdef CONFIG_ISS_SMP_EXTN
 	.init_smp	= iss_model_init_smp,
 #endif
@@ -72,7 +71,6 @@ MACHINE_START(ML509, "ml509")
 	.dt_compat	= ml509_compat,
 	.init_early	= plat_fpga_early_init,
 	.init_machine	= plat_fpga_populate_dev,
-	.init_irq	= plat_fpga_init_IRQ,
 #ifdef CONFIG_SMP
 	.init_smp	= iss_model_init_smp,
 #endif
@@ -87,5 +85,4 @@ MACHINE_START(NSIMOSCI, "nsimosci")
 	.dt_compat	= nsimosci_compat,
 	.init_early	= NULL,
 	.init_machine	= plat_fpga_populate_dev,
-	.init_irq	= NULL,
 MACHINE_END