ARM: cacheflush: add synchronization helpers for mixed cache state accesses
Algorithms used by the MCPM layer rely on state variables which are accessed while the cache is either active or inactive, depending on the code path and the active state. This patch introduces generic cache maintenance helpers to provide the necessary cache synchronization for such state variables to always hit main memory in an ordered way. Signed-off-by: Nicolas Pitre <nico@linaro.org> Acked-by: Russell King <rmk+kernel@arm.linux.org.uk> Acked-by: Dave Martin <dave.martin@linaro.org>
This commit is contained in:
parent
6210d421c2
commit
0c91e7e07e
@ -363,4 +363,79 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
|
||||
flush_cache_all();
|
||||
}
|
||||
|
||||
/*
 * Memory synchronization helpers for mixed cached vs non cached accesses.
 *
 * Some synchronization algorithms have to set states in memory with the
 * cache enabled or disabled depending on the code path.  It is crucial
 * to always ensure proper cache maintenance to update main memory right
 * away in that case.
 *
 * Any cached write must be followed by a cache clean operation.
 * Any cached read must be preceded by a cache invalidate operation.
 * Yet, in the read case, a cache flush i.e. atomic clean+invalidate
 * operation is needed to avoid discarding possible concurrent writes to the
 * accessed memory.
 *
 * Also, in order to prevent a cached writer from interfering with an
 * adjacent non-cached writer, each state variable must be located to
 * a separate cache line.
 */

/*
 * This needs to be >= the max cache writeback size of all
 * supported platforms included in the current kernel configuration.
 * This is used to align state variables to their own cache lines.
 */
#define __CACHE_WRITEBACK_ORDER 6  /* guessed from existing platforms */
#define __CACHE_WRITEBACK_GRANULE (1 << __CACHE_WRITEBACK_ORDER)

/*
 * There is no __cpuc_clean_dcache_area but we use it anyway for
 * code intent clarity, and alias it to __cpuc_flush_dcache_area.
 */
#define __cpuc_clean_dcache_area __cpuc_flush_dcache_area
/*
 * Make preceding writes to *p performed by this CPU visible to
 * subsequent reads performed by other CPUs.
 *
 * Cleans the inner data cache over [p, p + size) first, then cleans
 * the matching physical range from the outer cache, so dirty data
 * propagates all the way out to main memory.
 */
static inline void __sync_cache_range_w(volatile void *p, size_t size)
{
	char *start = (char *)p;

	/* Clean inner cache first so dirty lines reach the outer cache ... */
	__cpuc_clean_dcache_area(start, size);
	/* ... then push them from the outer cache to main memory. */
	outer_clean_range(__pa(start), __pa(start + size));
}
/*
 * Make preceding writes to *p performed by other CPUs visible to
 * subsequent reads performed by this CPU.  Flush (atomic clean +
 * invalidate) operations are used rather than plain invalidates so
 * that data being written concurrently by another CPU is never
 * discarded.
 */
static inline void __sync_cache_range_r(volatile void *p, size_t size)
{
	char *start = (char *)p;

#ifdef CONFIG_OUTER_CACHE
	if (outer_cache.flush_range) {
		/*
		 * Dirty data migrated from other CPUs into our inner cache
		 * must be cleaned out safely before the outer cache is
		 * flushed:
		 */
		__cpuc_clean_dcache_area(start, size);

		/* Clean and invalidate stale data for *p from the outer ... */
		outer_flush_range(__pa(start), __pa(start + size));
	}
#endif

	/* ... and the inner cache: */
	__cpuc_flush_dcache_area(start, size);
}
|
||||
/*
 * Publish this CPU's preceding writes to *ptr: clean the cache lines
 * covering the object out to main memory.  The object must live on
 * its own cache line(s) to avoid interference from adjacent writers.
 *
 * Fix: parenthesize the macro argument in the expansion (CERT PRE01-C)
 * so expression arguments cannot change meaning when substituted.
 */
#define sync_cache_w(ptr) __sync_cache_range_w((ptr), sizeof *(ptr))

/*
 * Observe other CPUs' preceding writes to *ptr: flush the covering
 * cache lines before reading so stale cached data is not returned.
 */
#define sync_cache_r(ptr) __sync_cache_range_r((ptr), sizeof *(ptr))
|
||||
|
||||
#endif
|
||||
|
Loading…
Reference in New Issue
Block a user