io-mapping: Cleanup atomic iomap
Switch the atomic iomap implementation over to kmap_local and stick the
preempt/pagefault mechanics into the generic code similar to the
kmap_atomic variants.

Rename the x86 map function in preparation for a non-atomic variant.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linuxfoundation.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: https://lore.kernel.org/r/20201103095858.625310005@linutronix.de
parent 3c1016b53c
commit 351191ad55
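For orientation, here is a condensed sketch of the shape the code takes after this change, mirroring the hunks below; the prot filtering, PAT handling, BUG_ON check and full prototypes of the real functions are trimmed:

/* x86 side: only performs the mapping, no scheduling-state changes. */
void __iomem *__iomap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
{
	return (void __force __iomem *)__kmap_local_pfn_prot(pfn, prot);
}

/* Generic io-mapping wrappers: the preempt/pagefault disable and enable
 * now live here, matching how the kmap_atomic() variants are wrapped. */
static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping, unsigned long offset)
{
	preempt_disable();
	pagefault_disable();
	return __iomap_local_pfn_prot(PHYS_PFN(mapping->base + offset),
				      mapping->prot);
}

static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
	kunmap_local_indexed((void __force *)vaddr);
	pagefault_enable();
	preempt_enable();
}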
--- a/arch/x86/include/asm/iomap.h
+++ b/arch/x86/include/asm/iomap.h
@@ -13,14 +13,7 @@
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
-void __iomem *iomap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot);
+void __iomem *__iomap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
 
-static inline void iounmap_atomic(void __iomem *vaddr)
-{
-	kunmap_local_indexed((void __force *)vaddr);
-	pagefault_enable();
-	preempt_enable();
-}
-
 int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
 
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -44,7 +44,7 @@ void iomap_free(resource_size_t base, unsigned long size)
 }
 EXPORT_SYMBOL_GPL(iomap_free);
 
-void __iomem *iomap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot)
+void __iomem *__iomap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
 {
 	/*
 	 * For non-PAT systems, translate non-WB request to UC- just in
@@ -60,8 +60,6 @@ void __iomem *iomap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot)
 	/* Filter out unsupported __PAGE_KERNEL* bits: */
 	pgprot_val(prot) &= __default_kernel_pte_mask;
 
-	preempt_disable();
-	pagefault_disable();
 	return (void __force __iomem *)__kmap_local_pfn_prot(pfn, prot);
 }
-EXPORT_SYMBOL_GPL(iomap_atomic_pfn_prot);
+EXPORT_SYMBOL_GPL(__iomap_local_pfn_prot);
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -69,13 +69,17 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping,
 
 	BUG_ON(offset >= mapping->size);
 	phys_addr = mapping->base + offset;
-	return iomap_atomic_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
+	preempt_disable();
+	pagefault_disable();
+	return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
 }
 
 static inline void
 io_mapping_unmap_atomic(void __iomem *vaddr)
 {
-	iounmap_atomic(vaddr);
+	kunmap_local_indexed((void __force *)vaddr);
+	pagefault_enable();
+	preempt_enable();
 }
 
 static inline void __iomem *
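For context, a minimal, hypothetical consumer-side sketch of the atomic io-mapping API this patch reworks; the BAR address, size, offset and buffer names (bar_base, bar_size, page_offset, buf, len) are made up for illustration and error handling is abbreviated:

/* Hypothetical driver setup: create a write-combining io_mapping over a BAR. */
struct io_mapping *map = io_mapping_create_wc(bar_base, bar_size);
if (!map)
	return -ENOMEM;

/* Atomic map: the caller must not sleep until the unmap, since the
 * generic wrapper disables preemption and pagefaults around the mapping. */
void __iomem *vaddr = io_mapping_map_atomic_wc(map, page_offset);
memcpy_toio(vaddr, buf, len);
io_mapping_unmap_atomic(vaddr);

io_mapping_free(map);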