/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include <linux/mm.h>
#include <linux/io-mapping.h>

#include "i915_drv.h"
# include "i915_mm.h"
struct remap_pfn {
	struct mm_struct *mm;
	unsigned long pfn;
	pgprot_t prot;

	struct sgt_iter sgt;
	resource_size_t iobase;
};
#define use_dma(io) ((io) != -1)
static inline unsigned long sgt_pfn(const struct remap_pfn *r)
{
	if (use_dma(r->iobase))
		return (r->sgt.dma + r->sgt.curr + r->iobase) >> PAGE_SHIFT;
	else
		return r->sgt.pfn + (r->sgt.curr >> PAGE_SHIFT);
}
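
/*
 * Illustrative arithmetic (a sketch, not part of the original source): in
 * PFN mode (iobase == -1), a chunk whose first page frame is 0x1000 with
 * sgt.curr at the third page (curr == 2 * PAGE_SIZE) yields pfn 0x1002.
 * In DMA mode the chunk's bus address, the byte offset into the chunk and
 * the caller-supplied iobase are summed first, and only then shifted down
 * to a page frame number.
 */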

static int remap_sg(pte_t *pte, unsigned long addr, void *data)
{
	struct remap_pfn *r = data;

	if (GEM_WARN_ON(!r->sgt.sgp))
		return -EINVAL;

	/* Special PTE are not associated with any struct page */
	set_pte_at(r->mm, addr, pte,
		   pte_mkspecial(pfn_pte(sgt_pfn(r), r->prot)));
	r->pfn++; /* track insertions in case we need to unwind later */

	r->sgt.curr += PAGE_SIZE;
	if (r->sgt.curr >= r->sgt.max)
		r->sgt = __sgt_iter(__sg_next(r->sgt.sgp), use_dma(r->iobase));

	return 0;
}
#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)

#if IS_ENABLED(CONFIG_X86)
static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
{
	struct remap_pfn *r = data;

	/* Special PTE are not associated with any struct page */
	set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot)));
	r->pfn++;

	return 0;
}
/**
 * remap_io_mapping - remap an IO mapping to userspace
 * @vma: user vma to map to
 * @addr: target user address to start at
 * @pfn: physical address of kernel memory
 * @size: size of map area
 * @iomap: the source io_mapping
 *
 * Note: this is only safe if the mm semaphore is held when called.
 */
int remap_io_mapping(struct vm_area_struct *vma,
		     unsigned long addr, unsigned long pfn, unsigned long size,
		     struct io_mapping *iomap)
{
	struct remap_pfn r;
	int err;

	GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);

	/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
	r.mm = vma->vm_mm;
	r.pfn = pfn;
	r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) |
			  (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK));

	err = apply_to_page_range(r.mm, addr, size, remap_pfn, &r);
	if (unlikely(err)) {
		zap_vma_ptes(vma, addr, (r.pfn - pfn) << PAGE_SHIFT);
		return err;
	}

	return 0;
}
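
/*
 * Usage sketch (illustrative only, not taken from this file): a fault
 * handler that has already set VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP on
 * the vma and holds the mm semaphore might insert an aperture view roughly
 * as follows; ggtt and aperture_offset are assumed names for illustration:
 *
 *	err = remap_io_mapping(area, area->vm_start,
 *			       (ggtt->gmadr.start + aperture_offset) >> PAGE_SHIFT,
 *			       area->vm_end - area->vm_start,
 *			       &ggtt->iomap);
 */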
#endif
/**
 * remap_io_sg - remap an IO mapping to userspace
 * @vma: user vma to map to
 * @addr: target user address to start at
 * @size: size of map area
 * @sgl: Start sg entry
 * @iobase: Use stored dma address offset by this address or pfn if -1
 *
 * Note: this is only safe if the mm semaphore is held when called.
 */
int remap_io_sg(struct vm_area_struct *vma,
		unsigned long addr, unsigned long size,
		struct scatterlist *sgl, resource_size_t iobase)
{
	struct remap_pfn r = {
		.mm = vma->vm_mm,
		.prot = vma->vm_page_prot,
		.sgt = __sgt_iter(sgl, use_dma(iobase)),
		.iobase = iobase,
	};
	int err;

	/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
	GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);

	if (!use_dma(iobase))
		flush_cache_range(vma, addr, size);

	err = apply_to_page_range(r.mm, addr, size, remap_sg, &r);
	if (unlikely(err)) {
		zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT);
		return err;
	}

	return 0;
}
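
/*
 * Usage sketch (illustrative only, not taken from this file): remapping an
 * object's backing store into a user vma by CPU pfn, i.e. without a DMA
 * offset, might look roughly like this, where obj is an assumed GEM object
 * pointer whose pages are already pinned:
 *
 *	err = remap_io_sg(area, area->vm_start,
 *			  area->vm_end - area->vm_start,
 *			  obj->mm.pages->sgl, -1);
 *
 * Passing a real resource address instead of -1 makes sgt_pfn() use the
 * stored DMA addresses offset by that base.
 */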