/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017 Intel Corporation
 */
#include <linux/prime_numbers.h>

#include "gt/intel_engine_pm.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_ring.h"

#include "i915_selftest.h"
#include "selftests/i915_random.h"
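
/*
 * Shared state for each coherency method: the object under test and the
 * engine used by any GPU access path.
 */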
struct context {
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
};
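
/*
 * Write one dword through a CPU (kmap) mapping, performing the clflushes
 * requested by the object's cache-domain tracking.
 */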
static int cpu_set(struct context *ctx, unsigned long offset, u32 v)
{
	unsigned int needs_clflush;
	struct page *page;
	void *map;
	u32 *cpu;
	int err;

	i915_gem_object_lock(ctx->obj, NULL);
	err = i915_gem_object_prepare_write(ctx->obj, &needs_clflush);
	if (err)
		goto out;

	page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT);
	map = kmap_atomic(page);
	cpu = map + offset_in_page(offset);

	if (needs_clflush & CLFLUSH_BEFORE)
		drm_clflush_virt_range(cpu, sizeof(*cpu));

	*cpu = v;

	if (needs_clflush & CLFLUSH_AFTER)
		drm_clflush_virt_range(cpu, sizeof(*cpu));

	kunmap_atomic(map);
	i915_gem_object_finish_access(ctx->obj);

out:
	i915_gem_object_unlock(ctx->obj);
	return err;
}

static int cpu_get(struct context *ctx, unsigned long offset, u32 *v)
{
	unsigned int needs_clflush;
	struct page *page;
	void *map;
	u32 *cpu;
	int err;

	i915_gem_object_lock(ctx->obj, NULL);
	err = i915_gem_object_prepare_read(ctx->obj, &needs_clflush);
	if (err)
		goto out;

	page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT);
	map = kmap_atomic(page);
	cpu = map + offset_in_page(offset);

	if (needs_clflush & CLFLUSH_BEFORE)
		drm_clflush_virt_range(cpu, sizeof(*cpu));

	*v = *cpu;

	kunmap_atomic(map);
	i915_gem_object_finish_access(ctx->obj);

out:
	i915_gem_object_unlock(ctx->obj);
	return err;
}
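
/*
 * Access the dword through a GGTT iomap of the object: move the object into
 * the GTT domain, pin a mappable GGTT vma and use iowrite32()/ioread32().
 */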
static int gtt_set(struct context *ctx, unsigned long offset, u32 v)
{
	struct i915_vma *vma;
	u32 __iomem *map;
	int err = 0;

	i915_gem_object_lock(ctx->obj, NULL);
	err = i915_gem_object_set_to_gtt_domain(ctx->obj, true);
	i915_gem_object_unlock(ctx->obj);
	if (err)
		return err;

	vma = i915_gem_object_ggtt_pin(ctx->obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_gt_pm_get(vma->vm->gt);

	map = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto out_rpm;
	}

	iowrite32(v, &map[offset / sizeof(*map)]);
	i915_vma_unpin_iomap(vma);

out_rpm:
	intel_gt_pm_put(vma->vm->gt);
	return err;
}

static int gtt_get(struct context *ctx, unsigned long offset, u32 *v)
{
	struct i915_vma *vma;
	u32 __iomem *map;
	int err = 0;

	i915_gem_object_lock(ctx->obj, NULL);
	err = i915_gem_object_set_to_gtt_domain(ctx->obj, false);
	i915_gem_object_unlock(ctx->obj);
	if (err)
		return err;

	vma = i915_gem_object_ggtt_pin(ctx->obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_gt_pm_get(vma->vm->gt);

	map = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto out_rpm;
	}

	*v = ioread32(&map[offset / sizeof(*map)]);
	i915_vma_unpin_iomap(vma);

out_rpm:
	intel_gt_pm_put(vma->vm->gt);
	return err;
}
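
/*
 * Access the dword via a write-combining CPU mapping of the whole object;
 * the write path flushes the dirtied range before unpinning the map.
 */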
static int wc_set(struct context *ctx, unsigned long offset, u32 v)
{
	u32 *map;
	int err;

	i915_gem_object_lock(ctx->obj, NULL);
	err = i915_gem_object_set_to_wc_domain(ctx->obj, true);
	i915_gem_object_unlock(ctx->obj);
	if (err)
		return err;

	map = i915_gem_object_pin_map_unlocked(ctx->obj, I915_MAP_WC);
	if (IS_ERR(map))
		return PTR_ERR(map);

	map[offset / sizeof(*map)] = v;
	__i915_gem_object_flush_map(ctx->obj, offset, sizeof(*map));
	i915_gem_object_unpin_map(ctx->obj);

	return 0;
}

static int wc_get(struct context *ctx, unsigned long offset, u32 *v)
{
	u32 *map;
	int err;

	i915_gem_object_lock(ctx->obj, NULL);
	err = i915_gem_object_set_to_wc_domain(ctx->obj, false);
	i915_gem_object_unlock(ctx->obj);
	if (err)
		return err;

	map = i915_gem_object_pin_map_unlocked(ctx->obj, I915_MAP_WC);
	if (IS_ERR(map))
		return PTR_ERR(map);

	*v = map[offset / sizeof(*map)];
	i915_gem_object_unpin_map(ctx->obj);

	return 0;
}
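
/*
 * Write the dword from the GPU: build a kernel request that emits a single
 * MI_STORE_DWORD_IMM to the dword's GGTT address, and track the object as
 * written by that request.
 */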
static int gpu_set(struct context *ctx, unsigned long offset, u32 v)
{
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 *cs;
	int err;

	vma = i915_gem_object_ggtt_pin(ctx->obj, NULL, 0, 0, 0);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	i915_gem_object_lock(ctx->obj, NULL);
	err = i915_gem_object_set_to_gtt_domain(ctx->obj, true);
	if (err)
		goto out_unlock;

	rq = intel_engine_create_kernel_request(ctx->engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto out_unpin;
	}

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto out_rq;
	}
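
	/*
	 * The encoding of MI_STORE_DWORD_IMM varies with the GPU generation:
	 * gen8+ takes a 64-bit address split across two dwords, gen4-gen7 a
	 * 32-bit GGTT address with a zero upper dword, and older gens a
	 * virtual address, padded with MI_NOOP so every branch emits the
	 * same four dwords.
	 */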
	if (GRAPHICS_VER(ctx->engine->i915) >= 8) {
		*cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
		*cs++ = lower_32_bits(i915_ggtt_offset(vma) + offset);
		*cs++ = upper_32_bits(i915_ggtt_offset(vma) + offset);
		*cs++ = v;
	} else if (GRAPHICS_VER(ctx->engine->i915) >= 4) {
		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*cs++ = 0;
		*cs++ = i915_ggtt_offset(vma) + offset;
		*cs++ = v;
	} else {
		*cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
		*cs++ = i915_ggtt_offset(vma) + offset;
		*cs++ = v;
		*cs++ = MI_NOOP;
	}
	intel_ring_advance(rq, cs);

	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);

out_rq:
	i915_request_add(rq);
out_unpin:
	i915_vma_unpin(vma);
out_unlock:
	i915_gem_object_unlock(ctx->obj);

	return err;
}

static bool always_valid(struct context *ctx)
{
	return true;
}

static bool needs_fence_registers(struct context *ctx)
{
	struct intel_gt *gt = ctx->engine->gt;

	if (intel_gt_is_wedged(gt))
		return false;

	return gt->ggtt->num_fences;
}

static bool needs_mi_store_dword(struct context *ctx)
{
	if (intel_gt_is_wedged(ctx->engine->gt))
		return false;

	return intel_engine_can_store_dword(ctx->engine);
}
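
/*
 * Table of access methods: a NULL set/get skips that direction, and valid()
 * gates each method on the running hardware.
 */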
static const struct igt_coherency_mode {
	const char *name;
	int (*set)(struct context *ctx, unsigned long offset, u32 v);
	int (*get)(struct context *ctx, unsigned long offset, u32 *v);
	bool (*valid)(struct context *ctx);
} igt_coherency_mode[] = {
	{ "cpu", cpu_set, cpu_get, always_valid },
	{ "gtt", gtt_set, gtt_get, needs_fence_registers },
	{ "wc", wc_set, wc_get, always_valid },
	{ "gpu", gpu_set, NULL, needs_mi_store_dword },
	{ },
};
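
/*
 * Pick one uabi engine at random: count them all, then walk the list again
 * to the chosen index.
 */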
static struct intel_engine_cs *
random_engine(struct drm_i915_private *i915, struct rnd_state *prng)
{
	struct intel_engine_cs *engine;
	unsigned int count;

	count = 0;
	for_each_uabi_engine(engine, i915)
		count++;

	count = i915_prandom_u32_max_state(count, prng);
	for_each_uabi_engine(engine, i915)
		if (count-- == 0)
			return engine;

	return NULL;
}

static int igt_gem_coherency(void *arg)
{
	const unsigned int ncachelines = PAGE_SIZE / 64;
	struct drm_i915_private *i915 = arg;
	const struct igt_coherency_mode *read, *write, *over;
	unsigned long count, n;
	u32 *offsets, *values;
	I915_RND_STATE(prng);
	struct context ctx;
	int err = 0;

	/*
	 * We repeatedly write, overwrite and read from a sequence of
	 * cachelines in order to try and detect incoherency (unflushed writes
	 * from either the CPU or GPU). Each setter/getter uses our cache
	 * domain API which should prevent incoherency.
	 */

	offsets = kmalloc_array(ncachelines, 2 * sizeof(u32), GFP_KERNEL);
	if (!offsets)
		return -ENOMEM;
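
	/* One dword per cacheline, staggered to cover each dword slot in turn. */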
	for (count = 0; count < ncachelines; count++)
		offsets[count] = count * 64 + 4 * (count % 16);

	values = offsets + ncachelines;

	ctx.engine = random_engine(i915, &prng);
	if (!ctx.engine) {
		err = -ENODEV;
		goto out_free;
	}
	pr_info("%s: using %s\n", __func__, ctx.engine->name);
	intel_engine_pm_get(ctx.engine);
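
	/*
	 * Exercise every combination: an "over" method plants a stale value,
	 * a "write" method stores the expected value, and a "read" method
	 * checks it back, over an increasing (prime) number of cachelines.
	 */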
	for (over = igt_coherency_mode; over->name; over++) {
		if (!over->set)
			continue;

		if (!over->valid(&ctx))
			continue;

		for (write = igt_coherency_mode; write->name; write++) {
			if (!write->set)
				continue;

			if (!write->valid(&ctx))
				continue;

			for (read = igt_coherency_mode; read->name; read++) {
				if (!read->get)
					continue;

				if (!read->valid(&ctx))
					continue;

				for_each_prime_number_from(count, 1, ncachelines) {
					ctx.obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
					if (IS_ERR(ctx.obj)) {
						err = PTR_ERR(ctx.obj);
						goto out_pm;
					}

					i915_random_reorder(offsets, ncachelines, &prng);
					for (n = 0; n < count; n++)
						values[n] = prandom_u32_state(&prng);

					for (n = 0; n < count; n++) {
						err = over->set(&ctx, offsets[n], ~values[n]);
						if (err) {
							pr_err("Failed to set stale value[%ld/%ld] in object using %s, err=%d\n",
							       n, count, over->name, err);
							goto put_object;
						}
					}

					for (n = 0; n < count; n++) {
						err = write->set(&ctx, offsets[n], values[n]);
						if (err) {
							pr_err("Failed to set value[%ld/%ld] in object using %s, err=%d\n",
							       n, count, write->name, err);
							goto put_object;
						}
					}

					for (n = 0; n < count; n++) {
						u32 found;

						err = read->get(&ctx, offsets[n], &found);
						if (err) {
							pr_err("Failed to get value[%ld/%ld] in object using %s, err=%d\n",
							       n, count, read->name, err);
							goto put_object;
						}

						if (found != values[n]) {
							pr_err("Value[%ld/%ld] mismatch, (overwrite with %s) wrote [%s] %x read [%s] %x (inverse %x), at offset %x\n",
							       n, count, over->name,
							       write->name, values[n],
							       read->name, found,
							       ~values[n], offsets[n]);
							err = -EINVAL;
							goto put_object;
						}
					}

					i915_gem_object_put(ctx.obj);
				}
			}
		}
	}
out_pm:
	intel_engine_pm_put(ctx.engine);
out_free:
	kfree(offsets);
	return err;

put_object:
	i915_gem_object_put(ctx.obj);
	goto out_pm;
}

int i915_gem_coherency_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_gem_coherency),
	};

	return i915_subtests(tests, i915);
}