sh: Preparation for uncached jumps through PMB.
Presently most of the 29-bit physical parts do P1/P2 segmentation
with a 1:1 cached/uncached mapping, jumping between the two to
control the caching behaviour.

This provides the basic infrastructure to maintain this behaviour on
32-bit physical parts that don't map P1/P2 at all, using a shiny new
linker section and corresponding fixmap entry.

Signed-off-by: Stuart Menefy <stuart.menefy@st.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
commit cbaa118ecf
parent 325df7f204
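
For orientation (annotation, not part of the patch): on 29-bit parts, P1 (cached) and P2 (uncached) are fixed 512MB windows onto the same physical memory, which is why the old jump_to_P2() could simply OR in the constant 0x20000000. A minimal user-space sketch of the relationship the patch generalizes; uncached_alias() is a made-up helper name:

#include <stdio.h>

#define P1SEG	0x80000000UL	/* 512MB cached window            */
#define P2SEG	0xa0000000UL	/* same physical memory, uncached */

/* On 29-bit parts the offset is the fixed segment distance; the patch
 * turns it into the runtime variable cached_to_uncached so that
 * 32-bit physical parts can supply their own value. */
static unsigned long cached_to_uncached = P2SEG - P1SEG; /* 0x20000000 */

/* made-up helper: rewrite a cached address into its uncached alias */
static unsigned long uncached_alias(unsigned long cached)
{
	return cached + cached_to_uncached;
}

int main(void)
{
	printf("%#lx -> %#lx\n", P1SEG + 0x1000, uncached_alias(P1SEG + 0x1000));
	return 0;
}
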
@@ -64,11 +64,11 @@ static void __init speculative_execution_init(void)
  * Generic first-level cache init
  */
 #ifdef CONFIG_SUPERH32
-static void __init cache_init(void)
+static void __uses_jump_to_uncached cache_init(void)
 {
 	unsigned long ccr, flags;
 
-	jump_to_P2();
+	jump_to_uncached();
 	ccr = ctrl_inl(CCR);
 
 	/*
@@ -145,7 +145,7 @@ static void __init cache_init(void)
 #endif
 
 	ctrl_outl(flags, CCR);
-	back_to_P1();
+	back_to_cached();
 }
 #else
 #define cache_init()	do { } while (0)
@@ -16,11 +16,11 @@
 #include <asm/cache.h>
 #include <asm/io.h>
 
-int __init detect_cpu_and_cache_system(void)
+int __uses_jump_to_uncached detect_cpu_and_cache_system(void)
 {
 	unsigned long addr0, addr1, data0, data1, data2, data3;
 
-	jump_to_P2();
+	jump_to_uncached();
 	/*
 	 * Check if the entry shadows or not.
 	 * When shadowed, it's 128-entry system.
@@ -48,7 +48,7 @@ int __init detect_cpu_and_cache_system(void)
 	ctrl_outl(data0&~SH_CACHE_VALID, addr0);
 	ctrl_outl(data2&~SH_CACHE_VALID, addr1);
 
-	back_to_P1();
+	back_to_cached();
 
 	boot_cpu_data.dcache.ways = 4;
 	boot_cpu_data.dcache.entry_shift = 4;
@@ -43,6 +43,15 @@ SECTIONS
   NOTES
   RO_DATA(PAGE_SIZE)
 
+  /*
+   * Code which must be executed uncached and the associated data
+   */
+  . = ALIGN(PAGE_SIZE);
+  __uncached_start = .;
+  .uncached.text : { *(.uncached.text) }
+  .uncached.data : { *(.uncached.data) }
+  __uncached_end = .;
+
   . = ALIGN(THREAD_SIZE);
   .data : {			/* Data */
 	*(.data.init_task)
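
The page-aligned window above is the heart of the 32-bit scheme: everything that must run uncached is collected between __uncached_start and __uncached_end, so a single fixmap entry can remap it. Functions opt in through the section attribute this patch defines for SUPERH32 (shown in a later hunk); a standalone sketch:

/* Sketch only: the attribute matches the definition added later in
 * this patch, and the symbols match the linker script above.
 * poke_hardware() is a made-up example function. */
#define __uses_jump_to_uncached \
	__attribute__ ((__section__ (".uncached.text")))

extern char __uncached_start, __uncached_end;

void __uses_jump_to_uncached poke_hardware(void)
{
	/* emitted into .uncached.text, i.e. placed somewhere in
	 * [&__uncached_start, &__uncached_end) at link time */
}
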
@@ -22,7 +22,8 @@ enum cache_type {
 	CACHE_TYPE_UNIFIED,
 };
 
-static int cache_seq_show(struct seq_file *file, void *iter)
+static int __uses_jump_to_uncached cache_seq_show(struct seq_file *file,
+						  void *iter)
 {
 	unsigned int cache_type = (unsigned int)file->private;
 	struct cache_info *cache;
@@ -34,11 +35,11 @@ static int cache_seq_show(struct seq_file *file, void *iter)
 	 * Go uncached immediately so we don't skew the results any
 	 * more than we already are..
 	 */
-	jump_to_P2();
+	jump_to_uncached();
 
 	ccr = ctrl_inl(CCR);
 	if ((ccr & CCR_CACHE_ENABLE) == 0) {
-		back_to_P1();
+		back_to_cached();
 
 		seq_printf(file, "disabled\n");
 		return 0;
@@ -104,7 +105,7 @@ static int cache_seq_show(struct seq_file *file, void *iter)
 		addrstart += cache->way_incr;
 	}
 
-	back_to_P1();
+	back_to_cached();
 
 	return 0;
 }
@@ -190,7 +190,7 @@ void flush_icache_range(unsigned long start, unsigned long end)
  * .. which happens to be the same behavior as flush_icache_range().
  * So, we simply flush out a line.
  */
-void flush_cache_sigtramp(unsigned long addr)
+void __uses_jump_to_uncached flush_cache_sigtramp(unsigned long addr)
 {
 	unsigned long v, index;
 	unsigned long flags;
@@ -205,13 +205,13 @@ void flush_cache_sigtramp(unsigned long addr)
 		      (v & boot_cpu_data.icache.entry_mask);
 
 	local_irq_save(flags);
-	jump_to_P2();
+	jump_to_uncached();
 
 	for (i = 0; i < boot_cpu_data.icache.ways;
 	     i++, index += boot_cpu_data.icache.way_incr)
 		ctrl_outl(0, index);	/* Clear out Valid-bit */
 
-	back_to_P1();
+	back_to_cached();
 	wmb();
 	local_irq_restore(flags);
 }
@@ -256,12 +256,12 @@ void flush_dcache_page(struct page *page)
 }
 
 /* TODO: Selective icache invalidation through IC address array.. */
-static inline void flush_icache_all(void)
+static inline void __uses_jump_to_uncached flush_icache_all(void)
 {
 	unsigned long flags, ccr;
 
 	local_irq_save(flags);
-	jump_to_P2();
+	jump_to_uncached();
 
 	/* Flush I-cache */
 	ccr = ctrl_inl(CCR);
@@ -269,11 +269,11 @@ static inline void flush_icache_all(void)
 	ctrl_outl(ccr, CCR);
 
 	/*
-	 * back_to_P1() will take care of the barrier for us, don't add
+	 * back_to_cached() will take care of the barrier for us, don't add
 	 * another one!
 	 */
 
-	back_to_P1();
+	back_to_cached();
 	local_irq_restore(flags);
 }
 
@@ -71,7 +71,7 @@ void flush_icache_range(unsigned long start, unsigned long end)
 /*
  * Writeback&Invalidate the D-cache of the page
  */
-static void __flush_dcache_page(unsigned long phys)
+static void __uses_jump_to_uncached __flush_dcache_page(unsigned long phys)
 {
 	unsigned long ways, waysize, addrstart;
 	unsigned long flags;
@@ -92,7 +92,7 @@ static void __flush_dcache_page(unsigned long phys)
 	 * possible.
 	 */
 	local_irq_save(flags);
-	jump_to_P2();
+	jump_to_uncached();
 
 	ways = current_cpu_data.dcache.ways;
 	waysize = current_cpu_data.dcache.sets;
@@ -118,7 +118,7 @@ static void __flush_dcache_page(unsigned long phys)
 		addrstart += current_cpu_data.dcache.way_incr;
 	} while (--ways);
 
-	back_to_P1();
+	back_to_cached();
 	local_irq_restore(flags);
 }
 
@@ -132,15 +132,15 @@ void flush_dcache_page(struct page *page)
 		__flush_dcache_page(PHYSADDR(page_address(page)));
 }
 
-void flush_cache_all(void)
+void __uses_jump_to_uncached flush_cache_all(void)
 {
 	unsigned long flags;
 
 	local_irq_save(flags);
-	jump_to_P2();
+	jump_to_uncached();
 
 	cache_wback_all();
-	back_to_P1();
+	back_to_cached();
 	local_irq_restore(flags);
 }
 
@@ -23,6 +23,7 @@
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 pgd_t swapper_pg_dir[PTRS_PER_PGD];
+unsigned long cached_to_uncached = 0;
 
 void show_mem(void)
 {
@@ -99,7 +100,8 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 
 	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
 
-	flush_tlb_one(get_asid(), addr);
+	if (cached_to_uncached)
+		flush_tlb_one(get_asid(), addr);
 }
 
 /*
@@ -164,6 +166,18 @@ void __init paging_init(void)
 	}
 
 	free_area_init_nodes(max_zone_pfns);
+
+	/* Set up the uncached fixmap */
+	set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start));
+
+#ifdef CONFIG_29BIT
+	/*
+	 * Handle trivial transitions between cached and uncached
+	 * segments, making use of the 1:1 mapping relationship in
+	 * 512MB lowmem.
+	 */
+	cached_to_uncached = P2SEG - P1SEG;
+#endif
 }
 
 static struct kcore_list kcore_mem, kcore_vmalloc;
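
Two details in the hunks above are worth spelling out: set_pte_phys() now skips the TLB flush while cached_to_uncached is still zero, because flush_tlb_one() itself goes through jump_to_uncached() and cannot work before the offset is valid (and the FIX_UNCACHED slot is being mapped through this very path); and only the 29-bit case assigns the offset here. How a 32-bit physical part derives it is left to later wiring; a hedged sketch of the obvious computation follows, an assumption rather than code from this commit:

/* Assumption, not code from this commit: on a PMB-based 32-bit part
 * the uncached alias of the .uncached section is wherever the
 * FIX_UNCACHED slot was mapped, so the offset is the distance between
 * the two. setup_uncached_offset() is a made-up name;
 * fix_to_virt()/FIX_UNCACHED come from <asm/fixmap.h>, the externs
 * from <asm/sections.h> and <asm/system.h> as modified by this patch. */
#include <asm/fixmap.h>
#include <asm/sections.h>
#include <asm/system.h>

static void __init setup_uncached_offset(void)
{
#ifdef CONFIG_29BIT
	cached_to_uncached = P2SEG - P1SEG;	/* trivial 1:1 alias */
#else
	cached_to_uncached = fix_to_virt(FIX_UNCACHED) -
			     (unsigned long)&__uncached_start;
#endif
}
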
@@ -163,18 +163,18 @@ repeat:
 	return 0;
 }
 
-int set_pmb_entry(struct pmb_entry *pmbe)
+int __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe)
 {
 	int ret;
 
-	jump_to_P2();
+	jump_to_uncached();
 	ret = __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &pmbe->entry);
-	back_to_P1();
+	back_to_cached();
 
 	return ret;
 }
 
-void clear_pmb_entry(struct pmb_entry *pmbe)
+void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe)
 {
 	unsigned int entry = pmbe->entry;
 	unsigned long addr;
@@ -188,7 +188,7 @@ void clear_pmb_entry(struct pmb_entry *pmbe)
 		    entry >= NR_PMB_ENTRIES))
 		return;
 
-	jump_to_P2();
+	jump_to_uncached();
 
 	/* Clear V-bit */
 	addr = mk_pmb_addr(entry);
@@ -197,7 +197,7 @@ void clear_pmb_entry(struct pmb_entry *pmbe)
 	addr = mk_pmb_data(entry);
 	ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);
 
-	back_to_P1();
+	back_to_cached();
 
 	clear_bit(entry, &pmb_map);
 }
@@ -302,7 +302,7 @@ static void pmb_cache_ctor(struct kmem_cache *cachep, void *pmb)
 	pmbe->entry = PMB_NO_ENTRY;
 }
 
-static int __init pmb_init(void)
+static int __uses_jump_to_uncached pmb_init(void)
 {
 	unsigned int nr_entries = ARRAY_SIZE(pmb_init_map);
 	unsigned int entry, i;
@@ -312,7 +312,7 @@ static int __init pmb_init(void)
 	pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry), 0,
 				      SLAB_PANIC, pmb_cache_ctor);
 
-	jump_to_P2();
+	jump_to_uncached();
 
 	/*
 	 * Ordering is important, P2 must be mapped in the PMB before we
@@ -335,7 +335,7 @@ static int __init pmb_init(void)
 	i |= MMUCR_TI;
 	ctrl_outl(i, MMUCR);
 
-	back_to_P1();
+	back_to_cached();
 
 	return 0;
 }
@@ -79,7 +79,8 @@ void update_mmu_cache(struct vm_area_struct * vma,
 	local_irq_restore(flags);
 }
 
-void local_flush_tlb_one(unsigned long asid, unsigned long page)
+void __uses_jump_to_uncached local_flush_tlb_one(unsigned long asid,
+						 unsigned long page)
 {
 	unsigned long addr, data;
 
@@ -91,7 +92,7 @@ void local_flush_tlb_one(unsigned long asid, unsigned long page)
 	 */
 	addr = MMU_UTLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT;
 	data = page | asid; /* VALID bit is off */
-	jump_to_P2();
+	jump_to_uncached();
 	ctrl_outl(data, addr);
-	back_to_P1();
+	back_to_cached();
 }
@@ -49,6 +49,7 @@ enum fixed_addresses {
 #define FIX_N_COLOURS 16
 	FIX_CMAP_BEGIN,
 	FIX_CMAP_END = FIX_CMAP_BEGIN + FIX_N_COLOURS,
+	FIX_UNCACHED,
 #ifdef CONFIG_HIGHMEM
 	FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
 	FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
@@ -4,6 +4,7 @@
 #include <asm-generic/sections.h>
 
 extern long __machvec_start, __machvec_end;
+extern char __uncached_start, __uncached_end;
 extern char _ebss[];
 
 #endif /* __ASM_SH_SECTIONS_H */
@@ -144,6 +144,8 @@ extern unsigned int instruction_size(unsigned int insn);
 #define instruction_size(insn)	(4)
 #endif
 
+extern unsigned long cached_to_uncached;
+
 /* XXX
  * disable hlt during certain critical i/o operations
  */
@@ -58,29 +58,31 @@ do {							\
 	last = __last;					\
 } while (0)
 
+#define __uses_jump_to_uncached	__attribute__ ((__section__ (".uncached.text")))
+
 /*
- * Jump to P2 area.
- * When handling TLB or caches, we need to do it from P2 area.
+ * Jump to uncached area.
+ * When handling TLB or caches, we need to do it from an uncached area.
  */
-#define jump_to_P2()			\
+#define jump_to_uncached()			\
 do {						\
 	unsigned long __dummy;			\
-	__asm__ __volatile__(		\
-		"mov.l	1f, %0\n\t"	\
-		"or	%1, %0\n\t"	\
-		"jmp	@%0\n\t"	\
-		" nop\n\t"		\
-		".balign 4\n"		\
-		"1:	.long 2f\n"	\
-		"2:"			\
-		: "=&r" (__dummy)	\
-		: "r" (0x20000000));	\
+						\
+	__asm__ __volatile__(			\
+		"mova	1f, %0\n\t"		\
+		"add	%1, %0\n\t"		\
+		"jmp	@%0\n\t"		\
+		" nop\n\t"			\
+		".balign 4\n"			\
+		"1:"				\
+		: "=&z" (__dummy)		\
+		: "r" (cached_to_uncached));	\
 } while (0)
 
 /*
- * Back to P1 area.
+ * Back to cached area.
  */
-#define back_to_P1()				\
+#define back_to_cached()			\
 do {						\
 	unsigned long __dummy;			\
 	ctrl_barrier();				\
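
Annotation on the asm change above (not part of the patch): the old sequence loaded an absolute address (mov.l 1f, %0 fetching the literal .long 2f) and OR'd in the hard-wired P1-to-P2 distance, both of which assume the fixed 29-bit layout. The replacement computes the target relative to the PC and adds the runtime offset instead; since SH's mova can only write to r0, the output constraint tightens from "=&r" to "=&z" (r0). A restatement with the reasoning as comments:

#define jump_to_uncached()					\
do {								\
	unsigned long __dummy;					\
								\
	__asm__ __volatile__(					\
		"mova	1f, %0\n\t"	/* r0 = PC-relative address of 1: */ \
		"add	%1, %0\n\t"	/* r0 += cached_to_uncached */	\
		"jmp	@%0\n\t"	/* resume at the uncached alias */ \
		" nop\n\t"		/* branch delay slot */		\
		".balign 4\n"		/* mova needs a 4-byte aligned target */ \
		"1:"							\
		: "=&z" (__dummy)	/* "z" = r0, mova's only destination */ \
		: "r" (cached_to_uncached));				\
} while (0)
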
@@ -32,8 +32,9 @@ do {							\
 			 &next->thread);		\
 } while (0)
 
-/* No segmentation.. */
-#define jump_to_P2()	do { } while (0)
-#define back_to_P1()	do { } while (0)
+#define __uses_jump_to_uncached
+
+#define jump_to_uncached()	do { } while (0)
+#define back_to_cached()	do { } while (0)
 
 #endif /* __ASM_SH_SYSTEM_64_H */
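
Taken together, the conversion gives every caller one uniform shape, sketched below from the hunks above:

/* The pattern every converted function now follows; frob_cache_regs()
 * is a made-up name. The body is built into .uncached.text via
 * __uses_jump_to_uncached, so its uncached alias exists even on
 * 32-bit physical parts. */
static void __uses_jump_to_uncached frob_cache_regs(void)
{
	unsigned long flags;

	local_irq_save(flags);
	jump_to_uncached();

	/* ... poke CCR / TLB / PMB address arrays safely here ... */

	back_to_cached();	/* supplies the needed ctrl_barrier() */
	local_irq_restore(flags);
}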