2008-10-31 05:37:09 +03:00
/*
 * Copyright © 2008 Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 */
# include <asm/iomap.h>
2009-01-24 01:14:21 +03:00
# include <asm/pat.h>
2008-10-31 05:37:09 +03:00
# include <linux/module.h>
2009-04-01 02:23:25 +04:00
# include <linux/highmem.h>
2008-10-31 05:37:09 +03:00
2009-07-10 20:57:35 +04:00
static int is_io_mapping_possible ( resource_size_t base , unsigned long size )
2009-02-25 04:35:12 +03:00
{
2009-04-03 03:44:38 +04:00
# if !defined(CONFIG_X86_PAE) && defined(CONFIG_PHYS_ADDR_T_64BIT)
2009-02-25 04:35:12 +03:00
/* There is no way to map greater than 1 << 32 address without PAE */
if ( base + size > 0x100000000ULL )
return 0 ;
2009-02-28 16:09:27 +03:00
# endif
2009-02-25 04:35:12 +03:00
return 1 ;
}
2009-07-10 20:57:35 +04:00
int iomap_create_wc ( resource_size_t base , unsigned long size , pgprot_t * prot )
{
unsigned long flag = _PAGE_CACHE_WC ;
int ret ;
if ( ! is_io_mapping_possible ( base , size ) )
return - EINVAL ;
ret = io_reserve_memtype ( base , base + size , & flag ) ;
if ( ret )
return ret ;
* prot = __pgprot ( __PAGE_KERNEL | flag ) ;
return 0 ;
}
EXPORT_SYMBOL_GPL ( iomap_create_wc ) ;
2010-10-27 01:21:51 +04:00
void iomap_free ( resource_size_t base , unsigned long size )
2009-07-10 20:57:35 +04:00
{
io_free_memtype ( base , base + size ) ;
}
EXPORT_SYMBOL_GPL ( iomap_free ) ;
2009-02-25 04:35:12 +03:00
2010-10-27 01:21:51 +04:00
void * kmap_atomic_prot_pfn ( unsigned long pfn , pgprot_t prot )
2008-10-31 05:37:09 +03:00
{
unsigned long vaddr ;
2010-10-27 01:21:51 +04:00
int idx , type ;
2008-10-31 05:37:09 +03:00
pagefault_disable ( ) ;
2010-10-27 01:21:51 +04:00
type = kmap_atomic_idx_push ( ) ;
2009-03-13 05:20:49 +03:00
idx = type + KM_TYPE_NR * smp_processor_id ( ) ;
vaddr = __fix_to_virt ( FIX_KMAP_BEGIN + idx ) ;
set_pte ( kmap_pte - idx , pfn_pte ( pfn , prot ) ) ;
arch_flush_lazy_mmu_mode ( ) ;
return ( void * ) vaddr ;
}
/*
2010-10-27 01:21:51 +04:00
* Map ' pfn ' using protections ' prot '
2008-10-31 05:37:09 +03:00
*/
2010-09-05 00:56:43 +04:00
void __iomem *
2010-10-27 01:21:51 +04:00
iomap_atomic_prot_pfn ( unsigned long pfn , pgprot_t prot )
2008-10-31 05:37:09 +03:00
{
2009-01-24 01:14:21 +03:00
/*
* For non - PAT systems , promote PAGE_KERNEL_WC to PAGE_KERNEL_UC_MINUS .
* PAGE_KERNEL_WC maps to PWT , which translates to uncached if the
* MTRR is UC or WC . UC_MINUS gets the real intention , of the
* user , which is " WC if the MTRR is WC, UC if you can't do that. "
*/
if ( ! pat_enabled & & pgprot_val ( prot ) = = pgprot_val ( PAGE_KERNEL_WC ) )
prot = PAGE_KERNEL_UC_MINUS ;
2010-10-27 01:21:51 +04:00
return ( void __force __iomem * ) kmap_atomic_prot_pfn ( pfn , prot ) ;
2008-10-31 05:37:09 +03:00
}
EXPORT_SYMBOL_GPL ( iomap_atomic_prot_pfn ) ;
void
2010-10-27 01:21:51 +04:00
iounmap_atomic ( void __iomem * kvaddr )
2008-10-31 05:37:09 +03:00
{
unsigned long vaddr = ( unsigned long ) kvaddr & PAGE_MASK ;
2010-10-27 01:21:51 +04:00
if ( vaddr > = __fix_to_virt ( FIX_KMAP_END ) & &
vaddr < = __fix_to_virt ( FIX_KMAP_BEGIN ) ) {
int idx , type ;
2010-10-28 02:32:58 +04:00
type = kmap_atomic_idx ( ) ;
2010-10-27 01:21:51 +04:00
idx = type + KM_TYPE_NR * smp_processor_id ( ) ;
# ifdef CONFIG_DEBUG_HIGHMEM
WARN_ON_ONCE ( vaddr ! = __fix_to_virt ( FIX_KMAP_BEGIN + idx ) ) ;
# endif
/*
* Force other mappings to Oops if they ' ll try to access this
* pte without first remap it . Keeping stale mappings around
* is a bad idea also , in case the page changes cacheability
* attributes or becomes a protected page in a hypervisor .
*/
2008-10-31 05:37:09 +03:00
kpte_clear_flush ( kmap_pte - idx , vaddr ) ;
2010-10-28 02:32:58 +04:00
kmap_atomic_idx_pop ( ) ;
2010-10-27 01:21:51 +04:00
}
2008-10-31 05:37:09 +03:00
pagefault_enable ( ) ;
}
EXPORT_SYMBOL_GPL ( iounmap_atomic ) ;