// SPDX-License-Identifier: MIT
/*
* Copyright © 2019 Intel Corporation
*/

#include <linux/prandom.h>

#include "intel_memory_region.h"
#include "i915_drv.h"
#include "i915_ttm_buddy_manager.h"

static const struct {
	u16 class;
	u16 instance;
} intel_region_map[] = {
	[INTEL_REGION_SMEM] = {
		.class = INTEL_MEMORY_SYSTEM,
		.instance = 0,
	},
	[INTEL_REGION_LMEM_0] = {
		.class = INTEL_MEMORY_LOCAL,
		.instance = 0,
	},
	[INTEL_REGION_STOLEN_SMEM] = {
		.class = INTEL_MEMORY_STOLEN_SYSTEM,
		.instance = 0,
	},
	[INTEL_REGION_STOLEN_LMEM] = {
		.class = INTEL_MEMORY_STOLEN_LOCAL,
		.instance = 0,
	},
};
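
/*
 * The table above is indexed by region id: intel_memory_regions_hw_probe()
 * below walks it in order and only instantiates the entries for which the
 * device reports HAS_REGION(i915, BIT(i)).
 */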

static int __iopagetest(struct intel_memory_region *mem,
			u8 __iomem *va, int pagesize,
			u8 value, resource_size_t offset,
			const void *caller)
{
	int byte = prandom_u32_max(pagesize);
	u8 result[3];

	memset_io(va, value, pagesize); /* or GPF! */
	wmb();

	result[0] = ioread8(va);
	result[1] = ioread8(va + byte);
	result[2] = ioread8(va + pagesize - 1);
	if (memchr_inv(result, value, sizeof(result))) {
		dev_err(mem->i915->drm.dev,
			"Failed to read back from memory region:%pR at [%pa + %pa] for %ps; wrote %x, read (%x, %x, %x)\n",
			&mem->region, &mem->io_start, &offset, caller,
			value, result[0], result[1], result[2]);
		return -EINVAL;
	}

	return 0;
}

static int iopagetest(struct intel_memory_region *mem,
		      resource_size_t offset,
		      const void *caller)
{
	const u8 val[] = { 0x0, 0xa5, 0xc3, 0xf0 };
	void __iomem *va;
	int err;
	int i;

	va = ioremap_wc(mem->io_start + offset, PAGE_SIZE);
	if (!va) {
		dev_err(mem->i915->drm.dev,
			"Failed to ioremap memory region [%pa + %pa] for %ps\n",
			&mem->io_start, &offset, caller);
		return -EFAULT;
	}

	for (i = 0; i < ARRAY_SIZE(val); i++) {
		err = __iopagetest(mem, va, PAGE_SIZE, val[i], offset, caller);
		if (err)
			break;

		err = __iopagetest(mem, va, PAGE_SIZE, ~val[i], offset, caller);
		if (err)
			break;
	}

	iounmap(va);
	return err;
}

static resource_size_t random_page(resource_size_t last)
{
	/* Limited to low 44b (16TiB), but should suffice for a spot check */
	return (resource_size_t)prandom_u32_max(last >> PAGE_SHIFT) << PAGE_SHIFT;
}
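
/*
 * Note on the "44b" above: prandom_u32_max() takes and returns a u32, so
 * random_page() picks among at most 2^32 page indices; with 4KiB pages
 * (PAGE_SHIFT == 12) that reaches offsets up to 2^(32+12) = 2^44 bytes,
 * i.e. 16TiB. The index is widened to resource_size_t before the shift so
 * offsets above 4GiB are not truncated to 32 bits.
 */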

static int iomemtest(struct intel_memory_region *mem,
		     bool test_all,
		     const void *caller)
{
	resource_size_t last, page;
	int err;

	if (mem->io_size < PAGE_SIZE)
		return 0;

	last = mem->io_size - PAGE_SIZE;

	/*
	 * Quick test to check read/write access to the iomap (backing store).
	 *
	 * Write a byte, read it back. If the iomapping fails, we expect
	 * a GPF preventing further execution. If the backing store does not
	 * exist, the read back will return garbage. We check a couple of pages,
	 * the first and last of the specified region to confirm the backing
	 * store + iomap does cover the entire memory region; and we check
	 * a random offset within as a quick spot check for bad memory.
	 */
	if (test_all) {
		for (page = 0; page <= last; page += PAGE_SIZE) {
			err = iopagetest(mem, page, caller);
			if (err)
				return err;
		}
	} else {
		err = iopagetest(mem, 0, caller);
		if (err)
			return err;

		err = iopagetest(mem, last, caller);
		if (err)
			return err;

		err = iopagetest(mem, random_page(last), caller);
		if (err)
			return err;
	}

	return 0;
}

struct intel_memory_region *
intel_memory_region_lookup(struct drm_i915_private *i915,
			   u16 class, u16 instance)
{
	struct intel_memory_region *mr;
	int id;

	/* XXX: consider maybe converting to an rb tree at some point */
	for_each_memory_region(mr, i915, id) {
		if (mr->type == class && mr->instance == instance)
			return mr;
	}

	return NULL;
}
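
/*
 * Example (illustrative only): resolving a (class, instance) pair, e.g.
 * one supplied by userspace, would look like:
 *
 *	mr = intel_memory_region_lookup(i915, INTEL_MEMORY_LOCAL, 0);
 *	if (!mr)
 *		return -EINVAL;
 */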

struct intel_memory_region *
intel_memory_region_by_type(struct drm_i915_private *i915,
			    enum intel_memory_type mem_type)
{
	struct intel_memory_region *mr;
	int id;

	for_each_memory_region(mr, i915, id)
		if (mr->type == mem_type)
			return mr;

	return NULL;
}

/**
 * intel_memory_region_reserve - Reserve a memory range
 * @mem: The region for which we want to reserve a range.
 * @offset: Start of the range to reserve.
 * @size: The size of the range to reserve.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_memory_region_reserve(struct intel_memory_region *mem,
				resource_size_t offset,
				resource_size_t size)
{
	struct ttm_resource_manager *man = mem->region_private;

	GEM_BUG_ON(mem->is_range_manager);

	return i915_ttm_buddy_man_reserve(man, offset, size);
}
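
/*
 * Example (illustrative; fw_base and fw_size are hypothetical): carving a
 * range out of a region so the buddy allocator never hands it out, e.g. an
 * area claimed by firmware:
 *
 *	err = intel_memory_region_reserve(mem, fw_base, fw_size);
 *	if (err)
 *		drm_err(&i915->drm, "Failed to reserve range: %d\n", err);
 */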

void intel_memory_region_debug(struct intel_memory_region *mr,
			       struct drm_printer *printer)
{
	drm_printf(printer, "%s: ", mr->name);

	if (mr->region_private)
		ttm_resource_manager_debug(mr->region_private, printer);
	else
		drm_printf(printer, "total:%pa, available:%pa bytes\n",
			   &mr->total, &mr->avail);
}

static int intel_memory_region_memtest(struct intel_memory_region *mem,
				       void *caller)
{
	struct drm_i915_private *i915 = mem->i915;
	int err = 0;

	if (!mem->io_start)
		return 0;

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) || i915->params.memtest)
		err = iomemtest(mem, i915->params.memtest, caller);

	return err;
}
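
/*
 * Note: the test above is opt-in. CONFIG_DRM_I915_DEBUG_GEM builds always
 * run at least the first/last/random spot check, while i915->params.memtest
 * (set via the i915.memtest module parameter) upgrades it to sweeping every
 * page of the io region.
 */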

struct intel_memory_region *
intel_memory_region_create(struct drm_i915_private *i915,
			   resource_size_t start,
			   resource_size_t size,
			   resource_size_t min_page_size,
			   resource_size_t io_start,
			   resource_size_t io_size,
			   u16 type,
			   u16 instance,
			   const struct intel_memory_region_ops *ops)
{
	struct intel_memory_region *mem;
	int err;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	mem->i915 = i915;
	mem->region = (struct resource)DEFINE_RES_MEM(start, size);
	mem->io_start = io_start;
	mem->io_size = io_size;
	mem->min_page_size = min_page_size;
	mem->ops = ops;
	mem->total = size;
	mem->avail = mem->total;
	mem->type = type;
	mem->instance = instance;

	mutex_init(&mem->objects.lock);
	INIT_LIST_HEAD(&mem->objects.list);

	if (ops->init) {
		err = ops->init(mem);
		if (err)
			goto err_free;
	}

	err = intel_memory_region_memtest(mem, (void *)_RET_IP_);
	if (err)
		goto err_release;

	return mem;

err_release:
	if (mem->ops->release)
		mem->ops->release(mem);
err_free:
	kfree(mem);
	return ERR_PTR(err);
}
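
/*
 * Example (illustrative; lmem_size, lmem_io_start, lmem_io_size and
 * lmem_ops are hypothetical): a backend creating a local-memory region
 * from probed hardware ranges might do:
 *
 *	mem = intel_memory_region_create(i915, 0, lmem_size, PAGE_SIZE,
 *					 lmem_io_start, lmem_io_size,
 *					 INTEL_MEMORY_LOCAL, 0, &lmem_ops);
 *	if (IS_ERR(mem))
 *		return mem;
 */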

void intel_memory_region_set_name(struct intel_memory_region *mem,
				  const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(mem->name, sizeof(mem->name), fmt, ap);
	va_end(ap);
}
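
/*
 * Example (illustrative): callers typically name a region right after
 * creating it, using the printf-style format:
 *
 *	intel_memory_region_set_name(mem, "local%u", mem->instance);
 */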

void intel_memory_region_destroy(struct intel_memory_region *mem)
{
	int ret = 0;

	if (mem->ops->release)
		ret = mem->ops->release(mem);

	GEM_WARN_ON(!list_empty_careful(&mem->objects.list));
	mutex_destroy(&mem->objects.lock);
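
	/*
	 * A non-zero return from ops->release() is taken to mean the backend
	 * has deferred the final takedown (presumably because objects are
	 * still outstanding) and remains responsible for freeing @mem, so
	 * only free it here on a clean release.
	 */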
	if (!ret)
		kfree(mem);
}

/* Global memory region registration -- only slight layer inversions! */

int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *mem = ERR_PTR(-ENODEV);
		u16 type, instance;

		if (!HAS_REGION(i915, BIT(i)))
			continue;

		type = intel_region_map[i].class;
		instance = intel_region_map[i].instance;
		switch (type) {
		case INTEL_MEMORY_SYSTEM:
			if (IS_DGFX(i915))
				mem = i915_gem_ttm_system_setup(i915, type,
								instance);
			else
				mem = i915_gem_shmem_setup(i915, type,
							   instance);
			break;
		case INTEL_MEMORY_STOLEN_LOCAL:
			mem = i915_gem_stolen_lmem_setup(i915, type, instance);
			if (!IS_ERR(mem))
				i915->mm.stolen_region = mem;
			break;
		case INTEL_MEMORY_STOLEN_SYSTEM:
			mem = i915_gem_stolen_smem_setup(i915, type, instance);
			if (!IS_ERR(mem))
				i915->mm.stolen_region = mem;
			break;
		default:
			continue;
		}

		if (IS_ERR(mem)) {
			err = PTR_ERR(mem);
			drm_err(&i915->drm,
				"Failed to setup region(%d) type=%d\n",
				err, type);
			goto out_cleanup;
		}

		mem->id = i;
		i915->mm.regions[i] = mem;
	}

	return 0;

out_cleanup:
	intel_memory_regions_driver_release(i915);
	return err;
}

void intel_memory_regions_driver_release(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *region =
			fetch_and_zero(&i915->mm.regions[i]);

		if (region)
			intel_memory_region_destroy(region);
	}
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_memory_region.c"
#include "selftests/mock_region.c"
#endif