/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/highmem.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>

#include <drm/drm_cache.h>

#include "gt/intel_gt.h"

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_gem_region.h"
#include "i915_gem_tiling.h"
#include "i915_scatterlist.h"
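/*
 * Replace the shmem backing store with a single physically contiguous,
 * DMA-coherent allocation sized to the next power of two of the object,
 * copying (and clflushing) each page currently present in shmem. On
 * success the object is described by a one-entry sg_table whose "page"
 * pointer is the CPU address of the coherent buffer.
 */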
static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct scatterlist *sg;
	struct sg_table *st;
	dma_addr_t dma;
	void *vaddr;
	void *dst;
	int i;

	if (GEM_WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	/*
	 * Always aligning to the object size, allows a single allocation
	 * to handle all possible callers, and given typical object sizes,
	 * the alignment of the buddy allocation will naturally match.
	 */
	vaddr = dma_alloc_coherent(obj->base.dev->dev,
				   roundup_pow_of_two(obj->base.size),
				   &dma, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_pci;

	if (sg_alloc_table(st, 1, GFP_KERNEL))
		goto err_st;

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

	sg_assign_page(sg, (struct page *)vaddr);
	sg_dma_address(sg) = dma;
	sg_dma_len(sg) = obj->base.size;

	dst = vaddr;
	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		void *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			goto err_st;

		src = kmap_atomic(page);
		memcpy(dst, src, PAGE_SIZE);
		drm_clflush_virt_range(dst, PAGE_SIZE);
		kunmap_atomic(src);

		put_page(page);
		dst += PAGE_SIZE;
	}

	intel_gt_chipset_flush(to_gt(i915));

	/* We're no longer struct page backed */
	obj->mem_flags &= ~I915_BO_FLAG_STRUCT_PAGE;
	__i915_gem_object_set_pages(obj, st, sg->length);

	return 0;

err_st:
	kfree(st);
err_pci:
	dma_free_coherent(obj->base.dev->dev,
			  roundup_pow_of_two(obj->base.size),
			  vaddr, dma);
	return -ENOMEM;
}
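/*
 * Tear down the physically contiguous backing store. If the object was
 * marked dirty, each page is first flushed and copied back into the shmem
 * filp so the contents survive, then the sg_table and the coherent buffer
 * are released.
 */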
void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	dma_addr_t dma = sg_dma_address(pages->sgl);
	void *vaddr = sg_page(pages->sgl);

	__i915_gem_object_release_shmem(obj, pages, false);

	if (obj->mm.dirty) {
		struct address_space *mapping = obj->base.filp->f_mapping;
		void *src = vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(src, PAGE_SIZE);
			memcpy(dst, src, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->mm.madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);

			src += PAGE_SIZE;
		}
		obj->mm.dirty = false;
	}

	sg_free_table(pages);
	kfree(pages);

	dma_free_coherent(obj->base.dev->dev,
			  roundup_pow_of_two(obj->base.size),
			  vaddr, dma);
}
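/*
 * pwrite fast path for phys objects: copy user data straight into the
 * coherent buffer, clflush the written range and flush the chipset so
 * any consumer of the physical pages (e.g. legacy cursor scanout)
 * observes the update.
 */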
int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
				const struct drm_i915_gem_pwrite *args)
{
	void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	int err;

	err = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	/*
	 * We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);

	if (copy_from_user(vaddr, user_data, args->size))
		return -EFAULT;

	drm_clflush_virt_range(vaddr, args->size);
	intel_gt_chipset_flush(to_gt(i915));

	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
	return 0;
}
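/*
 * pread counterpart: clflush the requested range of the coherent buffer
 * before copying it out, so userspace reads memory contents rather than
 * stale cachelines.
 */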
int i915_gem_object_pread_phys(struct drm_i915_gem_object *obj,
			       const struct drm_i915_gem_pread *args)
{
	void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);
	int err;

	err = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	drm_clflush_virt_range(vaddr, args->size);
	if (copy_to_user(user_data, vaddr, args->size))
		return -EFAULT;

	return 0;
}
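/*
 * Swap the object's shmem pages for the contiguous phys backing store.
 * The new pages are perma-pinned; the old shmem pages are released on
 * success, or restored if the phys allocation fails.
 */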
static int i915_gem_object_shmem_to_phys(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	int err;

	pages = __i915_gem_object_unset_pages(obj);

	err = i915_gem_object_get_pages_phys(obj);
	if (err)
		goto err_xfer;

	/* Perma-pin (until release) the physical set of pages */
	__i915_gem_object_pin_pages(obj);

	if (!IS_ERR_OR_NULL(pages))
		i915_gem_object_put_pages_shmem(obj, pages);

	i915_gem_object_release_memory_region(obj);
	return 0;

err_xfer:
	if (!IS_ERR_OR_NULL(pages)) {
		unsigned int sg_page_sizes = i915_sg_dma_sizes(pages->sgl);

		__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
	}
	return err;
}
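/*
 * i915_gem_object_attach_phys() is the entry point used when a caller
 * (typically the legacy i845/i865 cursor code) needs the object backed
 * by physically contiguous memory with at least @align alignment. The
 * object must be a shmem object, unpinned, unmapped and not purgeable;
 * anything still bound to the GTT is unbound first.
 */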
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
{
	int err;

	assert_object_held(obj);

	if (align > obj->base.size)
		return -EINVAL;

	if (!i915_gem_object_is_shmem(obj))
		return -EINVAL;

	if (!i915_gem_object_has_struct_page(obj))
		return 0;

	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (err)
		return err;

	if (obj->mm.madv != I915_MADV_WILLNEED)
		return -EFAULT;

	if (i915_gem_object_has_tiling_quirk(obj))
		return -EFAULT;

	if (obj->mm.mapping || i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;

	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		drm_dbg(obj->base.dev,
			"Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	return i915_gem_object_shmem_to_phys(obj);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_phys.c"
#endif