ARM: 7755/1: handle user space mapped pages in flush_kernel_dcache_page
Commit f8b63c1 made flush_kernel_dcache_page a no-op assuming that the pages it needs to handle are kernel mapped only. However, for example when doing direct I/O, pages with user space mappings may occur.

Thus, continue to do lazy flushing if there are no user space mappings. Otherwise, flush the kernel cache lines directly.

Signed-off-by: Simon Baatz <gmbnomis@gmail.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: <stable@vger.kernel.org> # 3.2+
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
commit 1bc39742aa (parent 049be07053)
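For context, the caller pattern this fix matters for is a driver or filesystem writing into a page that may also be mapped into user space (for example a direct I/O buffer) through the page's kernel mapping, then calling flush_kernel_dcache_page() so that the user mapping does not see stale data on VIVT or aliasing VIPT caches. The sketch below is illustrative only and is not part of this commit; copy_into_user_mapped_page() and its parameters are hypothetical.

#include <linux/highmem.h>      /* kmap(), kunmap(), flush_kernel_dcache_page() */
#include <linux/mm.h>
#include <linux/string.h>

/*
 * Hypothetical helper (not from this commit): copy received data into a
 * page that may also have user space mappings.  With the old no-op
 * flush_kernel_dcache_page(), the dirty kernel cache lines were never
 * pushed out, so a user mapping on an aliasing cache could read stale data.
 */
static void copy_into_user_mapped_page(struct page *page,
                                       const void *src, size_t len)
{
        void *kaddr = kmap(page);               /* pin a kernel mapping */

        memcpy(kaddr, src, len);                /* write via the kernel alias */
        flush_kernel_dcache_page(page);         /* flush dirty kernel cache lines */
        kunmap(page);
}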
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
@@ -320,9 +320,7 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
 }
 
 #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
-static inline void flush_kernel_dcache_page(struct page *page)
-{
-}
+extern void flush_kernel_dcache_page(struct page *);
 
 #define flush_dcache_mmap_lock(mapping) \
         spin_lock_irq(&(mapping)->tree_lock)
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
@@ -300,6 +300,39 @@ void flush_dcache_page(struct page *page)
 }
 EXPORT_SYMBOL(flush_dcache_page);
 
+/*
+ * Ensure cache coherency for the kernel mapping of this page. We can
+ * assume that the page is pinned via kmap.
+ *
+ * If the page only exists in the page cache and there are no user
+ * space mappings, this is a no-op since the page was already marked
+ * dirty at creation. Otherwise, we need to flush the dirty kernel
+ * cache lines directly.
+ */
+void flush_kernel_dcache_page(struct page *page)
+{
+        if (cache_is_vivt() || cache_is_vipt_aliasing()) {
+                struct address_space *mapping;
+
+                mapping = page_mapping(page);
+
+                if (!mapping || mapping_mapped(mapping)) {
+                        void *addr;
+
+                        addr = page_address(page);
+                        /*
+                         * kmap_atomic() doesn't set the page virtual
+                         * address for highmem pages, and
+                         * kunmap_atomic() takes care of cache
+                         * flushing already.
+                         */
+                        if (!IS_ENABLED(CONFIG_HIGHMEM) || addr)
+                                __cpuc_flush_dcache_area(addr, PAGE_SIZE);
+                }
+        }
+}
+EXPORT_SYMBOL(flush_kernel_dcache_page);
+
 /*
  * Flush an anonymous page so that users of get_user_pages()
  * can safely access the data. The expected sequence is:
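A note on the page_address() check in the hunk above: kmap_atomic() does not set the page's virtual address for highmem pages, and kunmap_atomic() already performs the necessary cache maintenance, so skipping the flush in that case is safe. A minimal sketch of that caller pattern follows, assuming a hypothetical patch_page_atomic() helper (the memcpy and its offsets are illustrative, not from this commit).

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

/*
 * Hypothetical helper (not from this commit): write through a temporary
 * atomic mapping.  For a highmem page with no permanent kmap,
 * page_address(page) is NULL inside flush_kernel_dcache_page(), so the
 * flush there is skipped and kunmap_atomic() below does the cache
 * maintenance instead, as the comment in the hunk above notes.
 */
static void patch_page_atomic(struct page *page, const void *src,
                              size_t off, size_t len)
{
        void *kaddr = kmap_atomic(page);

        memcpy(kaddr + off, src, len);
        flush_kernel_dcache_page(page);         /* no-op if page has no kernel va */
        kunmap_atomic(kaddr);                   /* flushes the temporary mapping */
}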