// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright © 2008 Ingo Molnar
 */
#include <asm/iomap.h>
#include <asm/pat.h>
#include <linux/export.h>
#include <linux/highmem.h>

static int is_io_mapping_possible(resource_size_t base, unsigned long size)
{
#if !defined(CONFIG_X86_PAE) && defined(CONFIG_PHYS_ADDR_T_64BIT)
	/* There is no way to map an address above 1 << 32 without PAE */
	if (base + size > 0x100000000ULL)
		return 0;
#endif
	return 1;
}

int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_WC;
	int ret;

	if (!is_io_mapping_possible(base, size))
		return -EINVAL;

	ret = memtype_reserve_io(base, base + size, &pcm);
	if (ret)
		return ret;

	*prot = __pgprot(__PAGE_KERNEL | cachemode2protval(pcm));
	/* Filter out unsupported __PAGE_KERNEL* bits: */
	pgprot_val(*prot) &= __default_kernel_pte_mask;

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_create_wc);
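
/*
 * Illustrative sketch (assumed usage, not code from this file): a caller
 * typically reserves the range once at setup time, keeps the returned
 * protection value for its later per-page mappings, and drops the
 * reservation again with iomap_free() below. 'base' and 'size' here are
 * hypothetical:
 *
 *	pgprot_t prot;
 *	int ret;
 *
 *	ret = iomap_create_wc(base, size, &prot);
 *	if (ret)
 *		return ret;
 *	... map pages with iomap_atomic_prot_pfn(pfn, prot) ...
 *	iomap_free(base, size);
 */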

void iomap_free(resource_size_t base, unsigned long size)
{
	memtype_free_io(base, base + size);
}
EXPORT_SYMBOL_GPL(iomap_free);
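
/*
 * Install a temporary fixmap mapping of 'pfn' with the given protection.
 * Preemption and page faults are disabled here and stay disabled until the
 * mapping is torn down again (see iounmap_atomic() below), so the mapping
 * may only be used from atomic context.
 */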
void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
	unsigned long vaddr;
	int idx, type;

	preempt_disable();
	pagefault_disable();

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}

/*
 * Map 'pfn' using protections 'prot'
 */
void __iomem *
iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
	/*
	 * For non-PAT systems, translate a non-WB request to UC- just in
	 * case the caller set the PWT bit in prot directly without using
	 * pgprot_writecombine(). UC- translates to uncached if the MTRR
	 * is UC or WC. UC- gets the real intention of the user, which is
	 * "WC if the MTRR is WC, UC if you can't do that."
	 */
	if (!pat_enabled() && pgprot2cachemode(prot) != _PAGE_CACHE_MODE_WB)
		prot = __pgprot(__PAGE_KERNEL |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));

	/* Filter out unsupported __PAGE_KERNEL* bits: */
	pgprot_val(prot) &= __default_kernel_pte_mask;

	return (void __force __iomem *)kmap_atomic_prot_pfn(pfn, prot);
}
EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
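
/*
 * Illustrative sketch (assumed usage, not code from this file): map/unmap
 * are paired, and the window between them runs with page faults and
 * preemption disabled, so it must not sleep. 'pfn', 'val' and 'offset' are
 * hypothetical:
 *
 *	void __iomem *vaddr;
 *
 *	vaddr = iomap_atomic_prot_pfn(pfn, prot);
 *	writel(val, vaddr + offset);
 *	iounmap_atomic(vaddr);
 *
 * 'prot' would normally come from iomap_create_wc() above or from
 * pgprot_writecombine(), rather than from setting PWT bits by hand.
 */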

void
iounmap_atomic(void __iomem *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
		int idx, type;

		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
		/*
		 * Force other mappings to Oops if they'll try to access this
		 * pte without first remapping it.  Keeping stale mappings
		 * around is a bad idea also, in case the page changes
		 * cacheability attributes or becomes a protected page in a
		 * hypervisor.
		 */
		kpte_clear_flush(kmap_pte - idx, vaddr);
		kmap_atomic_idx_pop();
	}

	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(iounmap_atomic);