/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/pagevec.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>

#include <drm/drm_cache.h>

#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_gem_tiling.h"
#include "i915_gemfs.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"

/*
 * Move pages to appropriate lru and release the pagevec, decrementing the
 * ref count of those pages.
 */
static void check_release_pagevec(struct pagevec *pvec)
{
	check_move_unevictable_pages(pvec);
	__pagevec_release(pvec);
	cond_resched();
}

void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping,
			 bool dirty, bool backup)
{
	struct sgt_iter sgt_iter;
	struct pagevec pvec;
	struct page *page;

	mapping_clear_unevictable(mapping);

	pagevec_init(&pvec);
	for_each_sgt_page(page, sgt_iter, st) {
		if (dirty)
			set_page_dirty(page);

		if (backup)
			mark_page_accessed(page);

		if (!pagevec_add(&pvec, page))
			check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		check_release_pagevec(&pvec);

	sg_free_table(st);
}

int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
			 size_t size, struct intel_memory_region *mr,
			 struct address_space *mapping,
			 unsigned int max_segment)
{
	const unsigned long page_count = size / PAGE_SIZE;
	unsigned long i;
	struct scatterlist *sg;
	struct page *page;
	unsigned long last_pfn = 0;	/* suppress gcc warning */
	gfp_t noreclaim;
	int ret;

	/*
	 * If there's no chance of allocating enough pages for the whole
	 * object, bail early.
	 */
	if (size > resource_size(&mr->region))
		return -ENOMEM;

	if (sg_alloc_table(st, page_count, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Get the list of pages out of our struct file.  They'll be pinned
	 * at this point until we release them.
	 *
	 * Fail silently without starting the shrinker
	 */
	mapping_set_unevictable(mapping);
	noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
	noreclaim |= __GFP_NORETRY | __GFP_NOWARN;

	sg = st->sgl;
	st->nents = 0;
	for (i = 0; i < page_count; i++) {
		const unsigned int shrink[] = {
			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND,
			0,
		}, *s = shrink;
		gfp_t gfp = noreclaim;

		do {
			cond_resched();
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
			if (!IS_ERR(page))
				break;

			if (!*s) {
				ret = PTR_ERR(page);
				goto err_sg;
			}

			i915_gem_shrink(NULL, i915, 2 * page_count, NULL, *s++);

			/*
			 * We've tried hard to allocate the memory by reaping
			 * our own buffer, now let the real VM do its job and
			 * go down in flames if truly OOM.
			 *
			 * However, since graphics tend to be disposable,
			 * defer the oom here by reporting the ENOMEM back
			 * to userspace.
			 */
			if (!*s) {
				/* reclaim and warn, but no oom */
				gfp = mapping_gfp_mask(mapping);

				/*
				 * Our bo are always dirty and so we require
				 * kswapd to reclaim our pages (direct reclaim
				 * does not effectively begin pageout of our
				 * buffers on its own). However, direct reclaim
				 * only waits for kswapd when under allocation
				 * congestion. So as a result __GFP_RECLAIM is
				 * unreliable and fails to actually reclaim our
				 * dirty pages -- unless you try over and over
				 * again with !__GFP_NORETRY. However, we still
				 * want to fail this allocation rather than
				 * trigger the out-of-memory killer and for
				 * this we want __GFP_RETRY_MAYFAIL.
				 */
				gfp |= __GFP_RETRY_MAYFAIL;
			}
		} while (1);

		if (!i ||
		    sg->length >= max_segment ||
		    page_to_pfn(page) != last_pfn + 1) {
			if (i)
				sg = sg_next(sg);

			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);

		/* Check that the i965g/gm workaround works. */
		GEM_BUG_ON(gfp & __GFP_DMA32 && last_pfn >= 0x00100000UL);
	}
	if (sg) /* loop terminated early; short sg table */
		sg_mark_end(sg);

	/* Trim unused sg entries to avoid wasting memory. */
	i915_sg_trim(st);

	return 0;
err_sg:
	sg_mark_end(sg);
	if (sg != st->sgl) {
		shmem_sg_free_table(st, mapping, false, false);
	} else {
		mapping_clear_unevictable(mapping);
		sg_free_table(st);
	}

	/*
	 * shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	return ret;
}
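
/*
 * Illustrative sketch, not part of the driver: the do/while loop above is an
 * escalation ladder. Unrolled for a single page index "i", with mapping, i915
 * and page_count as in shmem_sg_alloc_table(), it behaves roughly like:
 *
 *	gfp_t gfp = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM) |
 *		    __GFP_NORETRY | __GFP_NOWARN;
 *
 *	page = shmem_read_mapping_page_gfp(mapping, i, gfp); // cheap attempt
 *	if (IS_ERR(page)) {
 *		// reap our own buffers before leaning on the VM
 *		i915_gem_shrink(NULL, i915, 2 * page_count, NULL,
 *				I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
 *		// final attempt: allow reclaim, but fail rather than OOM-kill
 *		gfp = mapping_gfp_mask(mapping) | __GFP_RETRY_MAYFAIL;
 *		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
 *	}
 */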

static int shmem_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_memory_region *mem = obj->mm.region;
	struct address_space *mapping = obj->base.filp->f_mapping;
	const unsigned long page_count = obj->base.size / PAGE_SIZE;
	unsigned int max_segment = i915_sg_segment_size();
	struct sg_table *st;
	struct sgt_iter sgt_iter;
	struct page *page;
	int ret;

	/*
	 * Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
	GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);

rebuild_st:
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	ret = shmem_sg_alloc_table(i915, st, obj->base.size, mem, mapping,
				   max_segment);
	if (ret)
		goto err_st;

	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
		/*
		 * DMA remapping failed? One possible cause is that
		 * it could not reserve enough large entries, asking
		 * for PAGE_SIZE chunks instead may be helpful.
		 */
		if (max_segment > PAGE_SIZE) {
			for_each_sgt_page(page, sgt_iter, st)
				put_page(page);
			sg_free_table(st);
			kfree(st);

			max_segment = PAGE_SIZE;
			goto rebuild_st;
		} else {
			dev_warn(i915->drm.dev,
				 "Failed to DMA remap %lu pages\n",
				 page_count);
			goto err_pages;
		}
	}

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj, st);

	if (i915_gem_object_can_bypass_llc(obj))
		obj->cache_dirty = true;

	__i915_gem_object_set_pages(obj, st, i915_sg_dma_sizes(st->sgl));

	return 0;

err_pages:
	shmem_sg_free_table(st, mapping, false, false);
	/*
	 * shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
err_st:
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	kfree(st);

	return ret;
}
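
/*
 * Illustrative sketch, not part of the driver: shmem_get_pages() is not
 * called directly, it is the ->get_pages() hook run the first time the
 * backing store is pinned. A caller wanting the pages resident typically
 * looks something like:
 *
 *	obj = i915_gem_object_create_shmem(i915, SZ_64K);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *
 *	err = i915_gem_object_pin_pages_unlocked(obj); // ends up here
 *	if (err)
 *		goto out_put;
 *
 *	... use the object's sg_table ...
 *
 *	i915_gem_object_unpin_pages(obj);
 * out_put:
 *	i915_gem_object_put(obj);
 */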

static int
shmem_truncate(struct drm_i915_gem_object *obj)
{
	/*
	 * Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
	obj->mm.madv = __I915_MADV_PURGED;
	obj->mm.pages = ERR_PTR(-EFAULT);

	return 0;
}

void __shmem_writeback(size_t size, struct address_space *mapping)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = SWAP_CLUSTER_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1,
	};
	unsigned long i;

	/*
	 * Leave mmappings intact (GTT will have been revoked on unbinding,
	 * leaving only CPU mmappings around) and add those pages to the LRU
	 * instead of invoking writeback so they are aged and paged out
	 * as normal.
	 */

	/* Begin writeback on each dirty page */
	for (i = 0; i < size >> PAGE_SHIFT; i++) {
		struct page *page;

		page = find_lock_page(mapping, i);
		if (!page)
			continue;

		if (!page_mapped(page) && clear_page_dirty_for_io(page)) {
			int ret;

			SetPageReclaim(page);
			ret = mapping->a_ops->writepage(page, &wbc);
			if (!PageWriteback(page))
				ClearPageReclaim(page);
			if (!ret)
				goto put;
		}
		unlock_page(page);
put:
		put_page(page);
	}
}

static void
shmem_writeback(struct drm_i915_gem_object *obj)
{
	__shmem_writeback(obj->base.size, obj->base.filp->f_mapping);
}

static int shmem_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
{
	switch (obj->mm.madv) {
	case I915_MADV_DONTNEED:
		return i915_gem_object_truncate(obj);
	case __I915_MADV_PURGED:
		return 0;
	}

	if (flags & I915_GEM_OBJECT_SHRINK_WRITEBACK)
		shmem_writeback(obj);

	return 0;
}
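
/*
 * Illustrative sketch, not part of the driver: the madv state consumed by
 * shmem_shrink() is set from userspace. Marking a buffer purgeable, so the
 * shrinker may truncate it under memory pressure, looks roughly like:
 *
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = handle,		// GEM handle of the shmem object
 *		.madv = I915_MADV_DONTNEED,	// or I915_MADV_WILLNEED to keep contents
 *	};
 *
 *	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MADVISE, &madv) == 0 &&
 *	    !madv.retained)
 *		;	// backing pages were already purged; contents are gone
 */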

void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				struct sg_table *pages,
				bool needs_clflush)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);

	if (obj->mm.madv == I915_MADV_DONTNEED)
		obj->mm.dirty = false;

	if (needs_clflush &&
	    (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
	    !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
		drm_clflush_sg(pages);

	__start_cpu_write(obj);
	/*
	 * On non-LLC platforms, force the flush-on-acquire if this is ever
	 * swapped-in. Our async flush path is not trustworthy enough yet (and
	 * happens in the wrong order), and with some tricks it's conceivable
	 * for userspace to change the cache-level to I915_CACHE_NONE after the
	 * pages are swapped-in, and since execbuf binds the object before doing
	 * the async flush, we have a race window.
	 */
	if (!HAS_LLC(i915))
		obj->cache_dirty = true;
}

void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
	__i915_gem_object_release_shmem(obj, pages, true);

	i915_gem_gtt_finish_pages(obj, pages);

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj, pages);

	shmem_sg_free_table(pages, file_inode(obj->base.filp)->i_mapping,
			    obj->mm.dirty, obj->mm.madv == I915_MADV_WILLNEED);
	kfree(pages);
	obj->mm.dirty = false;
}

static void
shmem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
	if (likely(i915_gem_object_has_struct_page(obj)))
		i915_gem_object_put_pages_shmem(obj, pages);
	else
		i915_gem_object_put_pages_phys(obj, pages);
}

static int
shmem_pwrite(struct drm_i915_gem_object *obj,
	     const struct drm_i915_gem_pwrite *arg)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	char __user *user_data = u64_to_user_ptr(arg->data_ptr);
	u64 remain, offset;
	unsigned int pg;

	/* Caller already validated user args */
	GEM_BUG_ON(!access_ok(user_data, arg->size));

	if (!i915_gem_object_has_struct_page(obj))
		return i915_gem_object_pwrite_phys(obj, arg);

	/*
	 * Before we instantiate/pin the backing store for our use, we
	 * can prepopulate the shmemfs filp efficiently using a write into
	 * the pagecache. We avoid the penalty of instantiating all the
	 * pages, important if the user is just writing to a few and never
	 * uses the object on the GPU, and using a direct write into shmemfs
	 * allows it to avoid the cost of retrieving a page (either swapin
	 * or clearing-before-use) before it is overwritten.
	 */
	if (i915_gem_object_has_pages(obj))
		return -ENODEV;

	if (obj->mm.madv != I915_MADV_WILLNEED)
		return -EFAULT;

	/*
	 * Before the pages are instantiated the object is treated as being
	 * in the CPU domain. The pages will be clflushed as required before
	 * use, and we can freely write into the pages directly. If userspace
	 * races pwrite with any other operation; corruption will ensue -
	 * that is userspace's prerogative!
	 */
	remain = arg->size;
	offset = arg->offset;
	pg = offset_in_page(offset);

	do {
		unsigned int len, unwritten;
		struct page *page;
		void *data, *vaddr;
		int err;
		char c;

		len = PAGE_SIZE - pg;
		if (len > remain)
			len = remain;

		/* Prefault the user page to reduce potential recursion */
		err = __get_user(c, user_data);
		if (err)
			return err;

		err = __get_user(c, user_data + len - 1);
		if (err)
			return err;

		err = pagecache_write_begin(obj->base.filp, mapping,
					    offset, len, 0,
					    &page, &data);
		if (err < 0)
			return err;

		vaddr = kmap_atomic(page);
		unwritten = __copy_from_user_inatomic(vaddr + pg,
						      user_data,
						      len);
		kunmap_atomic(vaddr);

		err = pagecache_write_end(obj->base.filp, mapping,
					  offset, len, len - unwritten,
					  page, data);
		if (err < 0)
			return err;

		/* We don't handle -EFAULT, leave it to the caller to check */
		if (unwritten)
			return -ENODEV;

		remain -= len;
		user_data += len;
		offset += len;
		pg = 0;
	} while (remain);

	return 0;
}
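
/*
 * Illustrative sketch, not part of the driver: the prepopulation path above
 * is reached through the pwrite ioctl before the object has ever been pinned.
 * From userspace, filling a freshly created buffer looks roughly like:
 *
 *	struct drm_i915_gem_pwrite pwrite = {
 *		.handle = handle,		// from DRM_IOCTL_I915_GEM_CREATE
 *		.offset = 0,
 *		.size = sizeof(payload),
 *		.data_ptr = (__u64)(uintptr_t)payload,
 *	};
 *
 *	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite))
 *		err(1, "pwrite");
 */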

static int
shmem_pread(struct drm_i915_gem_object *obj,
	    const struct drm_i915_gem_pread *arg)
{
	if (!i915_gem_object_has_struct_page(obj))
		return i915_gem_object_pread_phys(obj, arg);

	return -ENODEV;
}

static void shmem_release(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_has_struct_page(obj))
		i915_gem_object_release_memory_region(obj);

	fput(obj->base.filp);
}

const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
	.name = "i915_gem_object_shmem",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,

	.get_pages = shmem_get_pages,
	.put_pages = shmem_put_pages,
	.truncate = shmem_truncate,
	.shrink = shmem_shrink,

	.pwrite = shmem_pwrite,
	.pread = shmem_pread,

	.release = shmem_release,
};

static int __create_shmem(struct drm_i915_private *i915,
			  struct drm_gem_object *obj,
			  resource_size_t size)
{
	unsigned long flags = VM_NORESERVE;
	struct file *filp;

	drm_gem_private_object_init(&i915->drm, obj, size);

	if (i915->mm.gemfs)
		filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
						 flags);
	else
		filp = shmem_file_setup("i915", size, flags);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;
	return 0;
}

static int shmem_object_init(struct intel_memory_region *mem,
			     struct drm_i915_gem_object *obj,
			     resource_size_t size,
			     resource_size_t page_size,
			     unsigned int flags)
{
	static struct lock_class_key lock_class;
	struct drm_i915_private *i915 = mem->i915;
	struct address_space *mapping;
	unsigned int cache_level;
	gfp_t mask;
	int ret;

	ret = __create_shmem(i915, &obj->base, size);
	if (ret)
		return ret;

	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
	if (IS_I965GM(i915) || IS_I965G(i915)) {
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

	mapping = obj->base.filp->f_mapping;
	mapping_set_gfp_mask(mapping, mask);
	GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));

	i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, 0);
	obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;

	if (HAS_LLC(i915))
		/* On some devices, we can have the GPU use the LLC (the CPU
		 * cache) for about a 10% performance improvement
		 * compared to uncached. Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache. This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		cache_level = I915_CACHE_LLC;
	else
		cache_level = I915_CACHE_NONE;

	i915_gem_object_set_cache_coherency(obj, cache_level);

	i915_gem_object_init_memory_region(obj, mem);

	return 0;
}
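
/*
 * Illustrative sketch, not part of the driver: the cache_level chosen above
 * feeds i915_gem_object_set_cache_coherency(), which decides whether CPU
 * writes must be clflushed before the GPU may observe them. In the release
 * path earlier in this file that turns into roughly:
 *
 *	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
 *		drm_clflush_sg(pages);	// non-LLC: flush before GPU access
 *	// LLC platforms are read-coherent, so no flush is needed
 */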

struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
			     resource_size_t size)
{
	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
					     size, 0, 0);
}

/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
				       const void *data, resource_size_t size)
{
	struct drm_i915_gem_object *obj;
	struct file *file;
	resource_size_t offset;
	int err;

	GEM_WARN_ON(IS_DGFX(dev_priv));

	obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE));
	if (IS_ERR(obj))
		return obj;

	GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);

	file = obj->base.filp;
	offset = 0;
	do {
		unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
		struct page *page;
		void *pgdata, *vaddr;

		err = pagecache_write_begin(file, file->f_mapping,
					    offset, len, 0,
					    &page, &pgdata);
		if (err < 0)
			goto fail;

		vaddr = kmap(page);
		memcpy(vaddr, data, len);
		kunmap(page);

		err = pagecache_write_end(file, file->f_mapping,
					  offset, len, len,
					  page, pgdata);
		if (err < 0)
			goto fail;

		size -= len;
		data += len;
		offset += len;
	} while (size);

	return obj;

fail:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}
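
/*
 * Illustrative sketch, not part of the driver: this helper is handy for
 * wrapping a blob that already lives in kernel memory, e.g. a firmware image
 * that must be handed to the GPU. Under that assumption, a caller might do:
 *
 *	const struct firmware *fw;
 *
 *	if (request_firmware(&fw, fw_name, i915->drm.dev))
 *		return -ENOENT;
 *
 *	obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size);
 *	release_firmware(fw);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 */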

static int init_shmem(struct intel_memory_region *mem)
{
	int err;

	err = i915_gemfs_init(mem->i915);
	if (err) {
		DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n",
			 err);
	}

	intel_memory_region_set_name(mem, "system");

	return 0; /* Don't error, we can simply fallback to the kernel mnt */
}

static int release_shmem(struct intel_memory_region *mem)
{
	i915_gemfs_fini(mem->i915);
	return 0;
}

static const struct intel_memory_region_ops shmem_region_ops = {
	.init = init_shmem,
	.release = release_shmem,
	.init_object = shmem_object_init,
};

struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915,
						 u16 type, u16 instance)
{
	return intel_memory_region_create(i915, 0,
					  totalram_pages() << PAGE_SHIFT,
					  PAGE_SIZE, 0, 0,
					  type, instance,
					  &shmem_region_ops);
}

bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj)
{
	return obj->ops == &i915_gem_shmem_ops;
}