Merge tag 'riscv-for-linus-6.4-mw1' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux

Pull RISC-V updates from Palmer Dabbelt:

 - Support for runtime detection of the Svnapot extension

 - Support for Zicboz when clearing pages

 - We've moved to GENERIC_ENTRY

 - Support for !MMU on rv32 systems

 - The linear region is now mapped via huge pages

 - Support for building relocatable kernels

 - Support for the hwprobe interface

 - Various fixes and cleanups throughout the tree

* tag 'riscv-for-linus-6.4-mw1' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux: (57 commits)
  RISC-V: hwprobe: Explicity check for -1 in vdso init
  RISC-V: hwprobe: There can only be one first
  riscv: Allow to downgrade paging mode from the command line
  dt-bindings: riscv: add sv57 mmu-type
  RISC-V: hwprobe: Remove __init on probe_vendor_features()
  riscv: Use --emit-relocs in order to move .rela.dyn in init
  riscv: Check relocations at compile time
  powerpc: Move script to check relocations at compile time in scripts/
  riscv: Introduce CONFIG_RELOCATABLE
  riscv: Move .rela.dyn outside of init to avoid empty relocations
  riscv: Prepare EFI header for relocatable kernels
  riscv: Unconditionnally select KASAN_VMALLOC if KASAN
  riscv: Fix ptdump when KASAN is enabled
  riscv: Fix EFI stub usage of KASAN instrumented strcmp function
  riscv: Move DTB_EARLY_BASE_VA to the kernel address space
  riscv: Rework kasan population functions
  riscv: Split early and final KASAN population functions
  riscv: Use PUD/P4D/PGD pages for the linear mapping
  riscv: Move the linear mapping creation in its own function
  riscv: Get rid of riscv_pfn_base variable
  ...
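One of the items above, the hwprobe interface, is a new syscall for querying
ISA and CPU information from userspace. A minimal sketch of a caller, assuming
the uapi names introduced by this series (struct riscv_hwprobe, the
RISCV_HWPROBE_KEY_* constants, and __NR_riscv_hwprobe) and the documented
convention that a zero-sized NULL cpuset means "all online CPUs":

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/hwprobe.h>	/* struct riscv_hwprobe, RISCV_HWPROBE_* keys */

int main(void)
{
	struct riscv_hwprobe pairs[] = {
		{ .key = RISCV_HWPROBE_KEY_MVENDORID },
		{ .key = RISCV_HWPROBE_KEY_IMA_EXT_0 },
	};

	/* cpu_count == 0 and cpus == NULL: ask about all online CPUs. */
	if (syscall(__NR_riscv_hwprobe, pairs, 2, 0, NULL, 0))
		return 1;

	printf("mvendorid:      0x%llx\n", (unsigned long long)pairs[0].value);
	printf("IMA extensions: 0x%llx\n", (unsigned long long)pairs[1].value);
	return 0;
}

The kernel fills in the value field of each pair, and reports keys it does not
recognize by setting the key to -1, which is what the "Explicity check for -1
in vdso init" fix in the shortlog above is concerned with.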
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 SiFive
 */

#include <linux/of.h>
#include <asm/cacheflush.h>

#ifdef CONFIG_SMP

#include <asm/sbi.h>

static void ipi_remote_fence_i(void *info)
{
	return local_flush_icache_all();
}

/*
 * Flush the instruction cache on all harts: fence.i only orders the local
 * hart, so after the local flush every other hart is asked to do the same,
 * either through the SBI or through a direct IPI.
 */
void flush_icache_all(void)
{
	local_flush_icache_all();

	if (IS_ENABLED(CONFIG_RISCV_SBI) && !riscv_use_ipi_for_rfence())
		sbi_remote_fence_i(NULL);
	else
		on_each_cpu(ipi_remote_fence_i, NULL, 1);
}
EXPORT_SYMBOL(flush_icache_all);

/*
 * Performs an icache flush for the given MM context.  RISC-V has no direct
 * mechanism for instruction cache shoot downs, so instead we send an IPI that
 * informs the remote harts they need to flush their local instruction caches.
 * To avoid pathologically slow behavior in a common case (a bunch of
 * single-hart processes on a many-hart machine, ie 'make -j') we avoid the
 * IPIs for harts that are not currently executing a MM context and instead
 * schedule a deferred local instruction cache flush to be performed before
 * execution resumes on each hart.
 */
void flush_icache_mm(struct mm_struct *mm, bool local)
{
	unsigned int cpu;
	cpumask_t others, *mask;

	preempt_disable();

	/* Mark every hart's icache as needing a flush for this MM. */
	mask = &mm->context.icache_stale_mask;
	cpumask_setall(mask);
	/* Flush this hart's I$ now, and mark it as flushed. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, mask);
	local_flush_icache_all();

	/*
	 * Flush the I$ of other harts concurrently executing, and mark them as
	 * flushed.
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
	local |= cpumask_empty(&others);
	if (mm == current->active_mm && local) {
		/*
		 * It's assumed that at least one strongly ordered operation is
		 * performed on this hart between setting a hart's cpumask bit
		 * and scheduling this MM context on that hart.  Sending an SBI
		 * remote message will do this, but in the case where no
		 * messages are sent we still need to order this hart's writes
		 * with flush_icache_deferred().
		 */
		smp_mb();
	} else if (IS_ENABLED(CONFIG_RISCV_SBI) &&
		   !riscv_use_ipi_for_rfence()) {
		sbi_remote_fence_i(&others);
	} else {
		on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
	}

	preempt_enable();
}
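
/*
 * Illustrative sketch (not built): the consumer of icache_stale_mask that
 * the comment above refers to.  The real flush_icache_deferred() lives in
 * the riscv context-switch code; this only shows the deferred-flush idea.
 */
#if 0
static void flush_icache_deferred_sketch(struct mm_struct *mm, unsigned int cpu)
{
	cpumask_t *mask = &mm->context.icache_stale_mask;

	/* A flush was requested while this hart wasn't running @mm. */
	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		/*
		 * Pairs with the smp_mb() in flush_icache_mm(), so the new
		 * instructions are visible before user code resumes here.
		 */
		smp_mb();
		local_flush_icache_all();
	}
}
#endif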

#endif /* CONFIG_SMP */

#ifdef CONFIG_MMU
/*
 * Lazily flush the icache when an executable mapping is installed.
 * PG_dcache_clean records that the flush has already been done for this
 * page, so repeated faults on the same page don't flush again until the
 * page's contents change (flush_dcache_page() clears the bit).
 */
void flush_icache_pte(pte_t pte)
{
	struct page *page = pte_page(pte);

	/*
	 * HugeTLB pages are always fully mapped, so only setting head page's
	 * PG_dcache_clean flag is enough.
	 */
	if (PageHuge(page))
		page = compound_head(page);

	if (!test_bit(PG_dcache_clean, &page->flags)) {
		flush_icache_all();
		set_bit(PG_dcache_clean, &page->flags);
	}
}
#endif /* CONFIG_MMU */
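
/*
 * For context: flush_icache_pte() is reached from the riscv set_pte_at()
 * path when a present, executable PTE is installed.  Roughly (illustrative
 * sketch, not the verbatim pgtable.h code):
 */
#if 0
static inline void set_pte_at_sketch(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pteval)
{
	if (pte_present(pteval) && pte_exec(pteval))
		flush_icache_pte(pteval);

	set_pte(ptep, pteval);
}
#endif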

unsigned int riscv_cbom_block_size;
EXPORT_SYMBOL_GPL(riscv_cbom_block_size);

unsigned int riscv_cboz_block_size;
EXPORT_SYMBOL_GPL(riscv_cboz_block_size);

static void cbo_get_block_size(struct device_node *node,
			       const char *name, u32 *block_size,
			       unsigned long *first_hartid)
{
	unsigned long hartid;
	u32 val;

	if (riscv_of_processor_hartid(node, &hartid))
		return;

	if (of_property_read_u32(node, name, &val))
		return;

	if (!*block_size) {
		*block_size = val;
		*first_hartid = hartid;
	} else if (*block_size != val) {
		pr_warn("%s mismatched between harts %lu and %lu\n",
			name, *first_hartid, hartid);
	}
}
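
/*
 * Illustrative device-tree fragment that the helper above parses.  The
 * property names come from the riscv cpu bindings; the values are made up:
 *
 *	cpu@0 {
 *		compatible = "riscv";
 *		riscv,cbom-block-size = <64>;
 *		riscv,cboz-block-size = <64>;
 *	};
 */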

void riscv_init_cbo_blocksizes(void)
{
	unsigned long cbom_hartid, cboz_hartid;
	u32 cbom_block_size = 0, cboz_block_size = 0;
	struct device_node *node;

	for_each_of_cpu_node(node) {
		/* set block-size for cbom and/or cboz extension if available */
		cbo_get_block_size(node, "riscv,cbom-block-size",
				   &cbom_block_size, &cbom_hartid);
		cbo_get_block_size(node, "riscv,cboz-block-size",
				   &cboz_block_size, &cboz_hartid);
	}

	if (cbom_block_size)
		riscv_cbom_block_size = cbom_block_size;

	if (cboz_block_size)
		riscv_cboz_block_size = cboz_block_size;
}
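
/*
 * Illustrative sketch (not built): how riscv_cboz_block_size feeds the
 * Zicboz-based page clearing mentioned in the merge above.  The kernel's
 * real implementation is assembly with runtime (ALTERNATIVE) patching;
 * this assumes an assembler that accepts the cbo.zero mnemonic and a hart
 * that implements Zicboz.
 */
#if 0
static void clear_page_zicboz_sketch(void *page)
{
	unsigned int step = riscv_cboz_block_size;
	void *end = page + PAGE_SIZE;

	/* cbo.zero zeroes one cache block per instruction. */
	for (; page < end; page += step)
		asm volatile("cbo.zero (%0)" : : "r"(page) : "memory");
}
#endif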