/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/sched/mm.h>
#include <linux/sort.h>
#include <linux/string_helpers.h>

#include <drm/drm_debugfs.h>

#include "gem/i915_gem_context.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_buffer_pool.h"
#include "gt/intel_gt_clock_utils.h"
#include "gt/intel_gt_debugfs.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_pm_debugfs.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_rc6.h"
#include "gt/intel_reset.h"
#include "gt/intel_rps.h"
#include "gt/intel_sseu_debugfs.h"

#include "i915_debugfs.h"
#include "i915_debugfs_params.h"
#include "i915_irq.h"
# include "i915_scheduler.h"
2022-02-14 22:13:42 -08:00
# include "intel_mchbar_regs.h"
2019-04-05 14:00:15 +03:00
# include "intel_pm.h"
2019-01-16 15:33:04 +00:00
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}

static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct drm_printer p = drm_seq_file_printer(m);

	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915));

	intel_device_info_print_static(INTEL_INFO(i915), &p);
	intel_device_info_print_runtime(RUNTIME_INFO(i915), &p);
	i915_print_iommu_status(i915, &p);
	intel_gt_info_print(&to_gt(i915)->info, &p);
	intel_driver_caps_print(&i915->caps, &p);

	kernel_param_lock(THIS_MODULE);
	i915_params_dump(&i915->params, &p);
	kernel_param_unlock(THIS_MODULE);

	return 0;
}
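
/*
 * Single-character object flags used by i915_debugfs_describe_obj() below:
 * the tiling mode ('X', 'Y' or ' '), 'g' when the object currently has an
 * outstanding userfault mapping (obj->userfault_count), and 'M' when its
 * backing pages are kept kmapped via obj->mm.mapping.
 */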
static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (i915_gem_object_get_tiling(obj)) {
	default:
	case I915_TILING_NONE: return ' ';
	case I915_TILING_X: return 'X';
	case I915_TILING_Y: return 'Y';
	}
}

static char get_global_flag(struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->userfault_count) ? 'g' : ' ';
}

static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
	return obj->mm.mapping ? 'M' : ' ';
}
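
/*
 * stringify_page_sizes() returns a static string for a single GTT page size;
 * for a mask of several sizes it composes them into @buf (for example
 * "2M, 64K") and trims the trailing separator before returning the buffer.
 */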
static const char *
stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
{
	size_t x = 0;

	switch (page_sizes) {
	case 0:
		return "";
	case I915_GTT_PAGE_SIZE_4K:
		return "4K";
	case I915_GTT_PAGE_SIZE_64K:
		return "64K";
	case I915_GTT_PAGE_SIZE_2M:
		return "2M";
	default:
		if (!buf)
			return "M";

		if (page_sizes & I915_GTT_PAGE_SIZE_2M)
			x += snprintf(buf + x, len - x, "2M, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_64K)
			x += snprintf(buf + x, len - x, "64K, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_4K)
			x += snprintf(buf + x, len - x, "4K, ");

		buf[x - 2] = '\0';

		return buf;
	}
}

static const char *stringify_vma_type(const struct i915_vma *vma)
{
	if (i915_vma_is_ggtt(vma))
		return "ggtt";

	if (i915_vma_is_dpt(vma))
		return "dpt";

	return "ppgtt";
}

static const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	case I915_CACHE_WT: return " WT";
	default: return "";
	}
}
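
/*
 * i915_debugfs_describe_obj - emit a one-line summary of a GEM object: the
 * flag characters from the helpers above, size, read/write domains, cache
 * mode, and every VMA currently bound for the object (with its GGTT view
 * where applicable).
 */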
void
i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_vma *vma;
	int pin_count = 0;

	seq_printf(m, "%pK: %c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);

	spin_lock(&obj->vma.lock);
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		spin_unlock(&obj->vma.lock);

		if (i915_vma_is_pinned(vma))
			pin_count++;

		seq_printf(m, " (%s offset: %08llx, size: %08llx, pages: %s",
			   stringify_vma_type(vma),
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->resource->page_sizes_gtt,
						NULL, 0));
		if (i915_vma_is_ggtt(vma) || i915_vma_is_dpt(vma)) {
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, src_stride=%u, dst_stride=%u, offset=%u), (%ux%u, src_stride=%u, dst_stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].src_stride,
					   vma->ggtt_view.rotated.plane[0].dst_stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].src_stride,
					   vma->ggtt_view.rotated.plane[1].dst_stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			case I915_GGTT_VIEW_REMAPPED:
				seq_printf(m, ", remapped [(%ux%u, src_stride=%u, dst_stride=%u, offset=%u), (%ux%u, src_stride=%u, dst_stride=%u, offset=%u)]",
					   vma->ggtt_view.remapped.plane[0].width,
					   vma->ggtt_view.remapped.plane[0].height,
					   vma->ggtt_view.remapped.plane[0].src_stride,
					   vma->ggtt_view.remapped.plane[0].dst_stride,
					   vma->ggtt_view.remapped.plane[0].offset,
					   vma->ggtt_view.remapped.plane[1].width,
					   vma->ggtt_view.remapped.plane[1].height,
					   vma->ggtt_view.remapped.plane[1].src_stride,
					   vma->ggtt_view.remapped.plane[1].dst_stride,
					   vma->ggtt_view.remapped.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d", vma->fence->id);
		seq_puts(m, ")");

		spin_lock(&obj->vma.lock);
	}
	spin_unlock(&obj->vma.lock);

	seq_printf(m, " (pinned x %d)", pin_count);
	if (i915_gem_object_is_stolen(obj))
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
	if (i915_gem_object_is_framebuffer(obj))
		seq_printf(m, " (fb)");
}

static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct drm_printer p = drm_seq_file_printer(m);
	struct intel_memory_region *mr;
	enum intel_region_id id;

	seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n",
		   i915->mm.shrink_count,
		   atomic_read(&i915->mm.free_count),
		   i915->mm.shrink_memory);
	for_each_memory_region(mr, i915, id)
		intel_memory_region_debug(mr, &p);

	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
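/*
 * GPU error capture: i915_error_state exposes the first error captured by the
 * driver (a write clears it via i915_reset_error_state()), while i915_gpu_info
 * takes a fresh coredump of all engines on open.  Both share gpu_state_read()
 * and gpu_state_release() for the actual I/O.
 */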
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *pos)
{
	struct i915_gpu_coredump *error;
	ssize_t ret;
	void *buf;

	error = file->private_data;
	if (!error)
		return 0;

	/* Bounce buffer required because of kernfs __user API convenience. */
	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = i915_gpu_coredump_copy_to_buffer(error, buf, *pos, count);
	if (ret <= 0)
		goto out;

	if (!copy_to_user(ubuf, buf, ret))
		*pos += ret;
	else
		ret = -EFAULT;

out:
	kfree(buf);
	return ret;
}

static int gpu_state_release(struct inode *inode, struct file *file)
{
	i915_gpu_coredump_put(file->private_data);

	return 0;
}

static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct i915_gpu_coredump *gpu;
	intel_wakeref_t wakeref;

	gpu = NULL;
	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		gpu = i915_gpu_coredump(to_gt(i915), ALL_ENGINES);
	if (IS_ERR(gpu))
		return PTR_ERR(gpu);

	file->private_data = gpu;
	return 0;
}

static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};

static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_gpu_coredump *error = filp->private_data;

	if (!error)
		return 0;

	drm_dbg(&error->i915->drm, "Resetting error state\n");
	i915_reset_error_state(error->i915);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct i915_gpu_coredump *error;

	error = i915_first_error_state(inode->i_private);
	if (IS_ERR(error))
		return PTR_ERR(error);

	file->private_data = error;

	return 0;
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
#endif

static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_gt *gt = to_gt(i915);
	struct drm_printer p = drm_seq_file_printer(m);

	intel_gt_pm_frequency_dump(gt, &p);

	return 0;
}

static const char *swizzle_string(unsigned swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}
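
/*
 * i915_swizzle_info reports the bit6 swizzling mode used for X/Y tiled GGTT
 * access and, on platforms old enough to still use swizzling, dumps the
 * memory-controller registers that detect_bit_6_swizzle() bases its decision
 * on.
 */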
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uncore *uncore = &dev_priv->uncore;
	intel_wakeref_t wakeref;

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(to_gt(dev_priv)->ggtt->bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(to_gt(dev_priv)->ggtt->bit_6_swizzle_y));

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	/* On BDW+, swizzling is not used. See detect_bit_6_swizzle() */
	if (GRAPHICS_VER(dev_priv) >= 8 || IS_VALLEYVIEW(dev_priv))
		return 0;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	if (IS_GRAPHICS_VER(dev_priv, 3, 4)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   intel_uncore_read(uncore, DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   intel_uncore_read(uncore, DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   intel_uncore_read16(uncore, C0DRB3_BW));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   intel_uncore_read16(uncore, C1DRB3_BW));
	} else if (GRAPHICS_VER(dev_priv) >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   intel_uncore_read(uncore, TILECTL));
		if (GRAPHICS_VER(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   intel_uncore_read(uncore, GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   intel_uncore_read(uncore, ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   intel_uncore_read(uncore, DISP_ARB_CTL));
	}

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &to_gt(dev_priv)->rps;

	seq_printf(m, "RPS enabled? %s\n",
		   str_yes_no(intel_rps_is_enabled(rps)));
	seq_printf(m, "RPS active? %s\n",
		   str_yes_no(intel_rps_is_active(rps)));
	seq_printf(m, "GPU busy? %s\n", str_yes_no(to_gt(dev_priv)->awake));
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
seq_printf ( m , " Interactive? %d \n " , READ_ONCE ( rps - > power . interactive ) ) ;
2018-10-02 12:32:21 +01:00
seq_printf ( m , " Frequency requested %d, actual %d \n " ,
2019-10-24 22:16:41 +01:00
intel_gpu_freq ( rps , rps - > cur_freq ) ,
2019-12-13 20:37:35 +02:00
intel_rps_read_actual_frequency ( rps ) ) ;
2016-08-15 09:49:33 +01:00
seq_printf ( m , " min hard:%d, soft:%d; max soft:%d, hard:%d \n " ,
2019-10-24 22:16:41 +01:00
intel_gpu_freq ( rps , rps - > min_freq ) ,
intel_gpu_freq ( rps , rps - > min_freq_softlimit ) ,
intel_gpu_freq ( rps , rps - > max_freq_softlimit ) ,
intel_gpu_freq ( rps , rps - > max_freq ) ) ;
2016-08-15 09:49:33 +01:00
seq_printf ( m , " idle:%d, efficient:%d, boost:%d \n " ,
2019-10-24 22:16:41 +01:00
intel_gpu_freq ( rps , rps - > idle_freq ) ,
intel_gpu_freq ( rps , rps - > efficient_freq ) ,
intel_gpu_freq ( rps , rps - > boost_freq ) ) ;
2016-04-26 19:29:41 +02:00
2020-12-31 09:31:49 +00:00
seq_printf ( m , " Wait boosts: %d \n " , READ_ONCE ( rps - > boosts ) ) ;
2015-04-07 16:20:32 +01:00
2013-07-04 11:02:07 -07:00
return 0 ;
}

static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);

	if (!HAS_RUNTIME_PM(dev_priv))
		seq_puts(m, "Runtime power management not supported\n");

	seq_printf(m, "Runtime power status: %s\n",
		   str_enabled_disabled(!dev_priv->power_domains.init_wakeref));

	seq_printf(m, "GPU idle: %s\n", str_yes_no(!to_gt(dev_priv)->awake));
	seq_printf(m, "IRQs disabled: %s\n",
		   str_yes_no(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
atomic_read ( & dev_priv - > drm . dev - > power . usage_count ) ) ;
2015-06-15 12:52:28 +01:00
# else
seq_printf ( m , " Device Power Management (CONFIG_PM) disabled \n " ) ;
# endif
2016-04-03 14:14:21 +01:00
seq_printf ( m , " PCI device power state: %s [%d] \n " ,
2016-08-22 13:32:44 +03:00
pci_power_name ( pdev - > current_state ) ,
pdev - > current_state ) ;
2013-08-19 13:18:10 -03:00
2019-01-14 14:21:09 +00:00
if ( IS_ENABLED ( CONFIG_DRM_I915_DEBUG_RUNTIME_PM ) ) {
struct drm_printer p = drm_seq_file_printer ( m ) ;
2019-06-13 16:21:53 -07:00
print_intel_runtime_pm_wakeref ( & dev_priv - > runtime_pm , & p ) ;
2019-01-14 14:21:09 +00:00
}
2013-08-20 10:29:23 +01:00
return 0 ;
}

static int i915_engine_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	struct drm_printer p;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	seq_printf(m, "GT awake? %s [%d], %llums\n",
		   str_yes_no(to_gt(i915)->awake),
		   atomic_read(&to_gt(i915)->wakeref.count),
		   ktime_to_ms(intel_gt_get_awake_time(to_gt(i915))));
	seq_printf(m, "CS timestamp frequency: %u Hz, %d ns\n",
		   to_gt(i915)->clock_frequency,
		   to_gt(i915)->clock_period_ns);

	p = drm_seq_file_printer(m);
	for_each_uabi_engine(engine, i915)
		intel_engine_dump(engine, &p, "%s\n", engine->name);

	intel_gt_show_timelines(to_gt(i915), &p, i915_request_show_with_schedule);

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	return 0;
}

static int i915_wa_registers(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_engine_cs *engine;

	for_each_uabi_engine(engine, i915) {
		const struct i915_wa_list *wal = &engine->ctx_wa_list;
		const struct i915_wa *wa;
		unsigned int count;

		count = wal->count;
		if (!count)
			continue;

		seq_printf(m, "%s: Workarounds applied: %u\n",
			   engine->name, count);

		for (wa = wal->list; count--; wa++)
			seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
				   i915_mmio_reg_offset(wa->reg),
				   wa->set, wa->clr);

		seq_printf(m, "\n");
	}

	return 0;
}
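
/*
 * i915_wedged forwards to the gt-level debugfs reset helpers: reads report
 * the wedged/reset state via intel_gt_debugfs_reset_show() and writes request
 * a reset via intel_gt_debugfs_reset_store().
 */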
static int i915_wedged_get(void *data, u64 *val)
{
	struct drm_i915_private *i915 = data;

	return intel_gt_debugfs_reset_show(to_gt(i915), val);
}

static int i915_wedged_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	return intel_gt_debugfs_reset_store(to_gt(i915), val);
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");

static int
i915_perf_noa_delay_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	/*
	 * This would lead to infinite waits as we're doing timestamp
	 * difference on the CS with only 32 bits.
	 */
	if (intel_gt_ns_to_clock_interval(to_gt(i915), val) > U32_MAX)
		return -EINVAL;

	atomic64_set(&i915->perf.noa_programming_delay, val);
	return 0;
}

static int
i915_perf_noa_delay_get(void *data, u64 *val)
{
	struct drm_i915_private *i915 = data;

	*val = atomic64_read(&i915->perf.noa_programming_delay);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_perf_noa_delay_fops,
			i915_perf_noa_delay_get,
			i915_perf_noa_delay_set,
			"%llu\n");
#define DROP_UNBOUND	BIT(0)
#define DROP_BOUND	BIT(1)
#define DROP_RETIRE	BIT(2)
#define DROP_ACTIVE	BIT(3)
#define DROP_FREED	BIT(4)
#define DROP_SHRINK_ALL	BIT(5)
#define DROP_IDLE	BIT(6)
#define DROP_RESET_ACTIVE	BIT(7)
#define DROP_RESET_SEQNO	BIT(8)
#define DROP_RCU	BIT(9)
#define DROP_ALL (DROP_UNBOUND	| \
		  DROP_BOUND	| \
		  DROP_RETIRE	| \
		  DROP_ACTIVE	| \
		  DROP_FREED	| \
		  DROP_SHRINK_ALL | \
		  DROP_IDLE	| \
		  DROP_RESET_ACTIVE | \
		  DROP_RESET_SEQNO | \
		  DROP_RCU)

static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}

static int
gt_drop_caches(struct intel_gt *gt, u64 val)
{
	int ret;

	if (val & DROP_RESET_ACTIVE &&
	    wait_for(intel_engines_are_idle(gt), I915_IDLE_ENGINES_TIMEOUT))
		intel_gt_set_wedged(gt);

	if (val & DROP_RETIRE)
		intel_gt_retire_requests(gt);

	if (val & (DROP_IDLE | DROP_ACTIVE)) {
		ret = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
		if (ret)
			return ret;
	}

	if (val & DROP_IDLE) {
		ret = intel_gt_pm_wait_for_idle(gt);
		if (ret)
			return ret;
	}

	if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(gt))
		intel_gt_handle_error(gt, ALL_ENGINES, 0, NULL);

	if (val & DROP_FREED)
		intel_gt_flush_buffer_pool(gt);

	return 0;
}
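
/*
 * i915_drop_caches_set() handles writes to i915_gem_drop_caches: the GT-level
 * drops in gt_drop_caches() run first, the shrinker passes then run inside
 * fs_reclaim/memalloc_noreclaim annotations to mimic direct reclaim, and
 * finally DROP_RCU/DROP_FREED wait for an RCU grace period and drain the
 * freed-object list.
 */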
static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	unsigned int flags;
	int ret;

	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
		  val, val & DROP_ALL);

	ret = gt_drop_caches(to_gt(i915), val);
	if (ret)
		return ret;

	fs_reclaim_acquire(GFP_KERNEL);
	flags = memalloc_noreclaim_save();

	if (val & DROP_BOUND)
		i915_gem_shrink(NULL, i915, LONG_MAX, NULL, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(NULL, i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);

	if (val & DROP_SHRINK_ALL)
		i915_gem_shrink_all(i915);
	memalloc_noreclaim_restore(flags);
	fs_reclaim_release(GFP_KERNEL);

	if (val & DROP_RCU)
		rcu_barrier();

	if (val & DROP_FREED)
		i915_gem_drain_freed_objects(i915);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");

static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_gt *gt = to_gt(i915);

	return intel_sseu_status(m, gt);
}
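
/*
 * i915_forcewake_user: opening the file takes a user forcewake reference via
 * intel_gt_pm_debugfs_forcewake_user_open() and the release drops it again,
 * so keeping the file open should keep the GT powered up and its registers
 * accessible for external debug tools.
 */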
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;

	return intel_gt_pm_debugfs_forcewake_user_open(to_gt(i915));
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;

	return intel_gt_pm_debugfs_forcewake_user_release(to_gt(i915));
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};

static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)

static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_perf_noa_delay", &i915_perf_noa_delay_fops},
	{"i915_wedged", &i915_wedged_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
};
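
/*
 * Register everything above with debugfs: the per-parameter nodes from
 * i915_debugfs_params(), the i915_forcewake_user handle, the writable control
 * files in i915_debugfs_files[] and the read-only info nodes in
 * i915_debugfs_list[], all under the DRM minor's debugfs directory (typically
 * /sys/kernel/debug/dri/<minor>/).
 */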
void i915_debugfs_register(struct drm_i915_private *dev_priv)
{
	struct drm_minor *minor = dev_priv->drm.primary;
	int i;

	i915_debugfs_params(dev_priv);

	debugfs_create_file("i915_forcewake_user", S_IRUSR, minor->debugfs_root,
			    to_i915(minor->dev), &i915_forcewake_fops);
	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		debugfs_create_file(i915_debugfs_files[i].name,
				    S_IRUGO | S_IWUSR,
				    minor->debugfs_root,
				    to_i915(minor->dev),
				    i915_debugfs_files[i].fops);
	}

	drm_debugfs_create_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES,
				 minor->debugfs_root, minor);
}