/*
 * arch/sh/mm/pg-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2007  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

#define CACHE_ALIAS (current_cpu_data.dcache.alias_mask)
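
/*
 * The SH-4 D-cache is virtually indexed, so once the per-way cache size
 * exceeds the page size, two virtual mappings of the same physical page
 * can land in different cache lines (aliases). alias_mask selects the
 * virtual address bits that pick the cache "colour"; mappings whose
 * addresses agree in those bits share cache lines and stay coherent
 * without explicit flushing.
 */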
#define kmap_get_fixmap_pte(vaddr) \
	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))

static pte_t *kmap_coherent_pte;

void __init kmap_coherent_init(void)
{
	unsigned long vaddr;

	/* cache the first coherent kmap pte */
	vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
	kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
}
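
/*
 * kmap_coherent() maps a user page into a fixmap slot whose virtual
 * address has the same cache colour as the user address the page is
 * (or will be) mapped at. Accesses through the temporary mapping then
 * hit the same cache lines the user sees, so no flush is needed when
 * the mapping is torn down. Any stale TLB entry for the slot is shot
 * down before it is reused.
 */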
static inline void *kmap_coherent(struct page *page, unsigned long addr)
{
	enum fixed_addresses idx;
	unsigned long vaddr, flags;
	pte_t pte;

	inc_preempt_count();

	idx = (addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT;
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, PAGE_KERNEL);

	local_irq_save(flags);
	flush_tlb_one(get_asid(), vaddr);
	local_irq_restore(flags);

	update_mmu_cache(NULL, vaddr, pte);

	set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);

	return (void *)vaddr;
}

static inline void kunmap_coherent(struct page *page)
{
	dec_preempt_count();
	preempt_check_resched();
}
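
/*
 * The pair is used kmap_atomic()-style around short copies, e.g.:
 *
 *	void *p = kmap_coherent(page, vaddr);
 *	memcpy(p, src, len);
 *	kunmap_coherent(p);
 *
 * as in copy_to_user_page() below. Preemption stays disabled between
 * the two calls since the fixmap slot is tied to this mapping until it
 * is recycled by the next kmap_coherent().
 */
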
/*
 * clear_user_page
 * @to: P1 address
 * @address: U0 address to be mapped
 * @page: page (virt_to_page(to))
 */
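/*
 * The page is cleared through its kernel (P1) address. If that address
 * and the user address it will be mapped at differ in the alias bits,
 * the cleared lines are written back so that the user's differently
 * coloured view reads the zeroed data from memory.
 */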
void clear_user_page(void *to, unsigned long address, struct page *page)
{
	__set_bit(PG_mapped, &page->flags);

	clear_page(to);
	if ((((address & PAGE_MASK) ^ (unsigned long)to) & CACHE_ALIAS))
		__flush_wback_region(to, PAGE_SIZE);
}

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	void *vto;

	__set_bit(PG_mapped, &page->flags);

	vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
	memcpy(vto, src, len);
	kunmap_coherent(vto);

	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}
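
/*
 * copy_from_user_page() is the read-side counterpart of
 * copy_to_user_page(); both are used by the core VM (for instance by
 * access_process_vm() for ptrace) to move data between a kernel buffer
 * and a user page. The user page is mapped colour-matched so the copy
 * observes the cache contents the user sees.
 */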
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	void *vfrom;

	__set_bit(PG_mapped, &page->flags);

	vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
	memcpy(dst, vfrom, len);
	kunmap_coherent(vfrom);
}
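
/*
 * copy_user_highpage() copies a page that will be mapped to user space
 * at @vaddr, as on the copy-on-write path. The source is mapped
 * colour-matched via kmap_coherent(), while the destination goes
 * through an ordinary kmap_atomic() slot and is therefore written back
 * afterwards if its kernel address aliases with @vaddr.
 */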
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	__set_bit(PG_mapped, &to->flags);

	vto = kmap_atomic(to, KM_USER1);
	vfrom = kmap_coherent(from, vaddr);
	copy_page(vto, vfrom);
	kunmap_coherent(vfrom);

	if (((vaddr ^ (unsigned long)vto) & CACHE_ALIAS))
		__flush_wback_region(vto, PAGE_SIZE);

	kunmap_atomic(vto, KM_USER1);
	/* Make sure this page is cleared on other CPU's too before using it */
	smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);

/*
 * For SH-4, we have our own implementation for ptep_get_and_clear
 */
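/*
 * Tearing down a PTE is also where the PG_mapped hint is retired: once
 * the page is no longer writably mapped anywhere, clearing the bit
 * means the flush-on-first-map logic runs again the next time the page
 * is mapped to user space.
 */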
pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte_clear(mm, addr, ptep);
	if (!pte_not_present(pte)) {
		unsigned long pfn = pte_pfn(pte);
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);
			struct address_space *mapping = page_mapping(page);
			if (!mapping || !mapping_writably_mapped(mapping))
				__clear_bit(PG_mapped, &page->flags);
		}
	}
	return pte;
}