sh: Assume new page cache pages have dirty dcache lines.
This follows the ARM change c01778001a
("ARM: 6379/1: Assume new page cache pages have dirty D-cache"), which is
adopted here for the same rationale:
There are places in Linux where writes to newly allocated page
cache pages happen without a subsequent call to flush_dcache_page()
(several PIO drivers, including the USB HCDs). This patch changes the
meaning of PG_arch_1 to PG_dcache_clean and always flushes the
D-cache for a newly mapped page in update_mmu_cache().

This addresses issues seen when executing binaries from MMC, as well as
with other HCDs that don't explicitly do cache management for their
pipe-in buffers.
Requested-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
commit 55661fc1f1
parent 22a5b566c8
@@ -96,7 +96,7 @@ void kmap_coherent_init(void);
 void *kmap_coherent(struct page *page, unsigned long addr);
 void kunmap_coherent(void *kvaddr);
 
-#define PG_dcache_dirty PG_arch_1
+#define PG_dcache_clean PG_arch_1
 
 void cpu_cache_init(void);
 
@@ -114,7 +114,7 @@ static void sh4_flush_dcache_page(void *arg)
 	struct address_space *mapping = page_mapping(page);
 
 	if (mapping && !mapping_mapped(mapping))
-		set_bit(PG_dcache_dirty, &page->flags);
+		clear_bit(PG_dcache_clean, &page->flags);
 	else
 #endif
 		flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
@@ -239,7 +239,7 @@ static void sh4_flush_cache_page(void *args)
 	 * another ASID than the current one.
 	 */
 	map_coherent = (current_cpu_data.dcache.n_aliases &&
-			!test_bit(PG_dcache_dirty, &page->flags) &&
+			test_bit(PG_dcache_clean, &page->flags) &&
 			page_mapped(page));
 	if (map_coherent)
 		vaddr = kmap_coherent(page, address);
@@ -139,7 +139,7 @@ static void sh7705_flush_dcache_page(void *arg)
 	struct address_space *mapping = page_mapping(page);
 
 	if (mapping && !mapping_mapped(mapping))
-		set_bit(PG_dcache_dirty, &page->flags);
+		clear_bit(PG_dcache_clean, &page->flags);
 	else
 		__flush_dcache_page(__pa(page_address(page)));
 }
@@ -60,14 +60,14 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 		       unsigned long len)
 {
 	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
-	    !test_bit(PG_dcache_dirty, &page->flags)) {
+	    test_bit(PG_dcache_clean, &page->flags)) {
 		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 		memcpy(vto, src, len);
 		kunmap_coherent(vto);
 	} else {
 		memcpy(dst, src, len);
 		if (boot_cpu_data.dcache.n_aliases)
-			set_bit(PG_dcache_dirty, &page->flags);
+			clear_bit(PG_dcache_clean, &page->flags);
 	}
 
 	if (vma->vm_flags & VM_EXEC)
@@ -79,14 +79,14 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
 			 unsigned long len)
 {
 	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
-	    !test_bit(PG_dcache_dirty, &page->flags)) {
+	    test_bit(PG_dcache_clean, &page->flags)) {
 		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 		memcpy(dst, vfrom, len);
 		kunmap_coherent(vfrom);
 	} else {
 		memcpy(dst, src, len);
 		if (boot_cpu_data.dcache.n_aliases)
-			set_bit(PG_dcache_dirty, &page->flags);
+			clear_bit(PG_dcache_clean, &page->flags);
 	}
 }
 
@@ -98,7 +98,7 @@ void copy_user_highpage(struct page *to, struct page *from,
 	vto = kmap_atomic(to, KM_USER1);
 
 	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
-	    !test_bit(PG_dcache_dirty, &from->flags)) {
+	    test_bit(PG_dcache_clean, &from->flags)) {
 		vfrom = kmap_coherent(from, vaddr);
 		copy_page(vto, vfrom);
 		kunmap_coherent(vfrom);
@@ -141,7 +141,7 @@ void __update_cache(struct vm_area_struct *vma,
 
 	page = pfn_to_page(pfn);
 	if (pfn_valid(pfn)) {
-		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
+		int dirty = !test_and_set_bit(PG_dcache_clean, &page->flags);
 		if (dirty)
 			__flush_purge_region(page_address(page), PAGE_SIZE);
 	}
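The __update_cache() hunk above is where "always flushes the D-cache for a
newly mapped page in update_mmu_cache()" actually lands: on sh,
update_mmu_cache() is a thin wrapper that hands the freshly installed PTE to
__update_cache() and __update_tlb(). A sketch of that wrapper, reconstructed
from arch/sh/include/asm/pgtable.h of this era (the exact signature is an
assumption; it is not part of this diff):

/* Assumed sh wrapper, not part of this diff: makes __update_cache()
 * run for every newly installed mapping. */
static inline void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		 pte_t *ptep)
{
	pte_t pte = *ptep;

	__update_cache(vma, address, pte);	/* flush if !PG_dcache_clean */
	__update_tlb(vma, address, pte);
}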
@@ -153,7 +153,7 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
 
 	if (pages_do_alias(addr, vmaddr)) {
 		if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
-		    !test_bit(PG_dcache_dirty, &page->flags)) {
+		    test_bit(PG_dcache_clean, &page->flags)) {
 			void *kaddr;
 
 			kaddr = kmap_coherent(page, vmaddr);
@@ -34,7 +34,7 @@ void *kmap_coherent(struct page *page, unsigned long addr)
 	enum fixed_addresses idx;
 	unsigned long vaddr;
 
-	BUG_ON(test_bit(PG_dcache_dirty, &page->flags));
+	BUG_ON(!test_bit(PG_dcache_clean, &page->flags));
 
 	pagefault_disable();
 
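Read together, the copy_to_user_page() hunk and this strengthened BUG_ON()
pin down the caller contract for kmap_coherent() under the new flag: only a
page with PG_dcache_clean set may be mapped through the coherent fixmap;
every other path must use the ordinary kernel mapping and clear the bit so
the next update_mmu_cache() flushes. In condensed form (assembled from the
hunks above, not new API):

	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    test_bit(PG_dcache_clean, &page->flags)) {
		/* Safe: the BUG_ON() in kmap_coherent() is satisfied
		 * and the D-cache holds no stale lines for this page. */
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent(vto);
	} else {
		/* Fall back to the kernel mapping and record that the
		 * page is no longer known clean. */
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			clear_bit(PG_dcache_clean, &page->flags);
	}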