2017-10-04 18:13:41 +00:00
/*
 * Copyright © 2014-2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"
/* Ring the Gen8+ doorbell register that raises a host-to-GuC interrupt. */
static void gen8_guc_raise_irq(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	I915_WRITE(GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
}
/*
 * Return the i-th MMIO scratch register used for the host/GuC send
 * protocol. The register bank must have been set up by
 * intel_guc_init_send_regs() first.
 */
static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
{
	GEM_BUG_ON(!guc->send_regs.base);
	GEM_BUG_ON(!guc->send_regs.count);
	GEM_BUG_ON(i >= guc->send_regs.count);

	/* Registers are consecutive dwords starting at send_regs.base. */
	return _MMIO(guc->send_regs.base + 4 * i);
}
void intel_guc_init_send_regs ( struct intel_guc * guc )
{
struct drm_i915_private * dev_priv = guc_to_i915 ( guc ) ;
enum forcewake_domains fw_domains = 0 ;
unsigned int i ;
guc - > send_regs . base = i915_mmio_reg_offset ( SOFT_SCRATCH ( 0 ) ) ;
guc - > send_regs . count = SOFT_SCRATCH_COUNT - 1 ;
for ( i = 0 ; i < guc - > send_regs . count ; i + + ) {
fw_domains | = intel_uncore_forcewake_for_reg ( dev_priv ,
guc_send_reg ( guc , i ) ,
FW_REG_READ | FW_REG_WRITE ) ;
}
guc - > send_regs . fw_domains = fw_domains ;
}
void intel_guc_init_early ( struct intel_guc * guc )
{
2017-12-06 13:53:11 +00:00
intel_guc_fw_init_early ( guc ) ;
2017-10-04 18:13:41 +00:00
intel_guc_ct_init_early ( & guc - > ct ) ;
2018-03-14 14:45:39 +00:00
intel_guc_log_init_early ( & guc - > log ) ;
2017-10-04 18:13:41 +00:00
mutex_init ( & guc - > send_mutex ) ;
2018-03-19 10:53:36 +01:00
spin_lock_init ( & guc - > irq_lock ) ;
2017-10-04 18:13:41 +00:00
guc - > send = intel_guc_send_nop ;
2018-03-26 19:48:22 +00:00
guc - > handler = intel_guc_to_host_event_handler_nop ;
2017-10-04 18:13:41 +00:00
guc - > notify = gen8_guc_raise_irq ;
}
2017-12-13 23:13:47 +01:00
int intel_guc_init_wq ( struct intel_guc * guc )
{
struct drm_i915_private * dev_priv = guc_to_i915 ( guc ) ;
/*
* GuC log buffer flush work item has to do register access to
* send the ack to GuC and this work item , if not synced before
* suspend , can potentially get executed after the GFX device is
* suspended .
* By marking the WQ as freezable , we don ' t have to bother about
* flushing of this work item from the suspend hooks , the pending
* work item if any will be either executed before the suspend
* or scheduled later on resume . This way the handling of work
* item can be kept same between system suspend & rpm suspend .
*/
2018-03-19 10:53:42 +01:00
guc - > log . relay . flush_wq =
alloc_ordered_workqueue ( " i915-guc_log " ,
WQ_HIGHPRI | WQ_FREEZABLE ) ;
if ( ! guc - > log . relay . flush_wq ) {
drm/i915/guc: Fix lockdep due to log relay channel handling under struct_mutex
This patch fixes lockdep issue due to circular locking dependency of
struct_mutex, i_mutex_key, mmap_sem, relay_channels_mutex.
For GuC log relay channel we create debugfs file that requires i_mutex_key
lock and we are doing that under struct_mutex. So we introduced newer
dependency as:
&dev->struct_mutex --> &sb->s_type->i_mutex_key#3 --> &mm->mmap_sem
However, there is dependency from mmap_sem to struct_mutex. Hence we
separate the relay create/destroy operation from under struct_mutex.
Also added runtime check of relay buffer status.
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
======================================================
WARNING: possible circular locking dependency detected
4.15.0-rc6-CI-Patchwork_7614+ #1 Not tainted
------------------------------------------------------
debugfs_test/1388 is trying to acquire lock:
(&dev->struct_mutex){+.+.}, at: [<00000000d5e1d915>] i915_mutex_lock_interruptible+0x47/0x130 [i915]
but task is already holding lock:
(&mm->mmap_sem){++++}, at: [<0000000029a9c131>] __do_page_fault+0x106/0x560
which lock already depends on the new lock.
the existing dependency chain (in reverse order) is:
-> #3 (&mm->mmap_sem){++++}:
_copy_to_user+0x1e/0x70
filldir+0x8c/0xf0
dcache_readdir+0xeb/0x160
iterate_dir+0xdc/0x140
SyS_getdents+0xa0/0x130
entry_SYSCALL_64_fastpath+0x1c/0x89
-> #2 (&sb->s_type->i_mutex_key#3){++++}:
start_creating+0x59/0x110
__debugfs_create_file+0x2e/0xe0
relay_create_buf_file+0x62/0x80
relay_late_setup_files+0x84/0x250
guc_log_late_setup+0x4f/0x110 [i915]
i915_guc_log_register+0x32/0x40 [i915]
i915_driver_load+0x7b6/0x1720 [i915]
i915_pci_probe+0x2e/0x90 [i915]
pci_device_probe+0x9c/0x120
driver_probe_device+0x2a3/0x480
__driver_attach+0xd9/0xe0
bus_for_each_dev+0x57/0x90
bus_add_driver+0x168/0x260
driver_register+0x52/0xc0
do_one_initcall+0x39/0x150
do_init_module+0x56/0x1ef
load_module+0x231c/0x2d70
SyS_finit_module+0xa5/0xe0
entry_SYSCALL_64_fastpath+0x1c/0x89
-> #1 (relay_channels_mutex){+.+.}:
relay_open+0x12c/0x2b0
intel_guc_log_runtime_create+0xab/0x230 [i915]
intel_guc_init+0x81/0x120 [i915]
intel_uc_init+0x29/0xa0 [i915]
i915_gem_init+0x182/0x530 [i915]
i915_driver_load+0xaa9/0x1720 [i915]
i915_pci_probe+0x2e/0x90 [i915]
pci_device_probe+0x9c/0x120
driver_probe_device+0x2a3/0x480
__driver_attach+0xd9/0xe0
bus_for_each_dev+0x57/0x90
bus_add_driver+0x168/0x260
driver_register+0x52/0xc0
do_one_initcall+0x39/0x150
do_init_module+0x56/0x1ef
load_module+0x231c/0x2d70
SyS_finit_module+0xa5/0xe0
entry_SYSCALL_64_fastpath+0x1c/0x89
-> #0 (&dev->struct_mutex){+.+.}:
__mutex_lock+0x81/0x9b0
i915_mutex_lock_interruptible+0x47/0x130 [i915]
i915_gem_fault+0x201/0x790 [i915]
__do_fault+0x15/0x70
__handle_mm_fault+0x677/0xdc0
handle_mm_fault+0x14f/0x2f0
__do_page_fault+0x2d1/0x560
page_fault+0x4c/0x60
other info that might help us debug this:
Chain exists of:
&dev->struct_mutex --> &sb->s_type->i_mutex_key#3 --> &mm->mmap_sem
Possible unsafe locking scenario:
CPU0 CPU1
---- ----
lock(&mm->mmap_sem);
lock(&sb->s_type->i_mutex_key#3);
lock(&mm->mmap_sem);
lock(&dev->struct_mutex);
*** DEADLOCK ***
1 lock held by debugfs_test/1388:
#0: (&mm->mmap_sem){++++}, at: [<0000000029a9c131>] __do_page_fault+0x106/0x560
stack backtrace:
CPU: 2 PID: 1388 Comm: debugfs_test Not tainted 4.15.0-rc6-CI-Patchwork_7614+ #1
Hardware name: To Be Filled By O.E.M. To Be Filled By O.E.M./J4205-ITX, BIOS P1.10 09/29/2016
Call Trace:
dump_stack+0x5f/0x86
print_circular_bug.isra.18+0x1d0/0x2c0
__lock_acquire+0x14ae/0x1b60
? lock_acquire+0xaf/0x200
lock_acquire+0xaf/0x200
? i915_mutex_lock_interruptible+0x47/0x130 [i915]
__mutex_lock+0x81/0x9b0
? i915_mutex_lock_interruptible+0x47/0x130 [i915]
? i915_mutex_lock_interruptible+0x47/0x130 [i915]
? i915_mutex_lock_interruptible+0x47/0x130 [i915]
i915_mutex_lock_interruptible+0x47/0x130 [i915]
? __pm_runtime_resume+0x4f/0x80
i915_gem_fault+0x201/0x790 [i915]
__do_fault+0x15/0x70
? _raw_spin_unlock+0x29/0x40
__handle_mm_fault+0x677/0xdc0
handle_mm_fault+0x14f/0x2f0
__do_page_fault+0x2d1/0x560
? page_fault+0x36/0x60
page_fault+0x4c/0x60
v2: Added lock protection to guc->log.runtime.relay_chan (Chris)
Fixed locking inside guc_flush_logs uncovered by new lockdep.
v3: Locking guc_read_update_log_buffer entirely with relay_lock. (Chris)
Prepared intel_guc_init_early. Moved relay_lock inside relay_create
relay_destroy, relay_file_create, guc_read_update_log_buffer. (Michal)
Removed struct_mutex lock around guc_log_flush and removed usage
of guc_log_has_relay() from runtime_create path as it needs
struct_mutex lock.
v4: Handle NULL relay sub buffer pointer earlier in read_update_log_buffer
(Chris). Fixed comment suffix **/. (Michal)
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=104693
Testcase: igt/debugfs_test/read_all_entries # with enable_guc=1 and guc_log_level=1
Signed-off-by: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Marta Lofstedt <marta.lofstedt@intel.com>
Cc: Michal Winiarski <michal.winiarski@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/1516808821-3638-3-git-send-email-sagar.a.kamble@intel.com
2018-01-24 21:16:58 +05:30
DRM_ERROR ( " Couldn't allocate workqueue for GuC log \n " ) ;
2017-12-13 23:13:47 +01:00
return - ENOMEM ;
drm/i915/guc: Fix lockdep due to log relay channel handling under struct_mutex
This patch fixes lockdep issue due to circular locking dependency of
struct_mutex, i_mutex_key, mmap_sem, relay_channels_mutex.
For GuC log relay channel we create debugfs file that requires i_mutex_key
lock and we are doing that under struct_mutex. So we introduced newer
dependency as:
&dev->struct_mutex --> &sb->s_type->i_mutex_key#3 --> &mm->mmap_sem
However, there is dependency from mmap_sem to struct_mutex. Hence we
separate the relay create/destroy operation from under struct_mutex.
Also added runtime check of relay buffer status.
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
======================================================
WARNING: possible circular locking dependency detected
4.15.0-rc6-CI-Patchwork_7614+ #1 Not tainted
------------------------------------------------------
debugfs_test/1388 is trying to acquire lock:
(&dev->struct_mutex){+.+.}, at: [<00000000d5e1d915>] i915_mutex_lock_interruptible+0x47/0x130 [i915]
but task is already holding lock:
(&mm->mmap_sem){++++}, at: [<0000000029a9c131>] __do_page_fault+0x106/0x560
which lock already depends on the new lock.
the existing dependency chain (in reverse order) is:
-> #3 (&mm->mmap_sem){++++}:
_copy_to_user+0x1e/0x70
filldir+0x8c/0xf0
dcache_readdir+0xeb/0x160
iterate_dir+0xdc/0x140
SyS_getdents+0xa0/0x130
entry_SYSCALL_64_fastpath+0x1c/0x89
-> #2 (&sb->s_type->i_mutex_key#3){++++}:
start_creating+0x59/0x110
__debugfs_create_file+0x2e/0xe0
relay_create_buf_file+0x62/0x80
relay_late_setup_files+0x84/0x250
guc_log_late_setup+0x4f/0x110 [i915]
i915_guc_log_register+0x32/0x40 [i915]
i915_driver_load+0x7b6/0x1720 [i915]
i915_pci_probe+0x2e/0x90 [i915]
pci_device_probe+0x9c/0x120
driver_probe_device+0x2a3/0x480
__driver_attach+0xd9/0xe0
bus_for_each_dev+0x57/0x90
bus_add_driver+0x168/0x260
driver_register+0x52/0xc0
do_one_initcall+0x39/0x150
do_init_module+0x56/0x1ef
load_module+0x231c/0x2d70
SyS_finit_module+0xa5/0xe0
entry_SYSCALL_64_fastpath+0x1c/0x89
-> #1 (relay_channels_mutex){+.+.}:
relay_open+0x12c/0x2b0
intel_guc_log_runtime_create+0xab/0x230 [i915]
intel_guc_init+0x81/0x120 [i915]
intel_uc_init+0x29/0xa0 [i915]
i915_gem_init+0x182/0x530 [i915]
i915_driver_load+0xaa9/0x1720 [i915]
i915_pci_probe+0x2e/0x90 [i915]
pci_device_probe+0x9c/0x120
driver_probe_device+0x2a3/0x480
__driver_attach+0xd9/0xe0
bus_for_each_dev+0x57/0x90
bus_add_driver+0x168/0x260
driver_register+0x52/0xc0
do_one_initcall+0x39/0x150
do_init_module+0x56/0x1ef
load_module+0x231c/0x2d70
SyS_finit_module+0xa5/0xe0
entry_SYSCALL_64_fastpath+0x1c/0x89
-> #0 (&dev->struct_mutex){+.+.}:
__mutex_lock+0x81/0x9b0
i915_mutex_lock_interruptible+0x47/0x130 [i915]
i915_gem_fault+0x201/0x790 [i915]
__do_fault+0x15/0x70
__handle_mm_fault+0x677/0xdc0
handle_mm_fault+0x14f/0x2f0
__do_page_fault+0x2d1/0x560
page_fault+0x4c/0x60
other info that might help us debug this:
Chain exists of:
&dev->struct_mutex --> &sb->s_type->i_mutex_key#3 --> &mm->mmap_sem
Possible unsafe locking scenario:
CPU0 CPU1
---- ----
lock(&mm->mmap_sem);
lock(&sb->s_type->i_mutex_key#3);
lock(&mm->mmap_sem);
lock(&dev->struct_mutex);
*** DEADLOCK ***
1 lock held by debugfs_test/1388:
#0: (&mm->mmap_sem){++++}, at: [<0000000029a9c131>] __do_page_fault+0x106/0x560
stack backtrace:
CPU: 2 PID: 1388 Comm: debugfs_test Not tainted 4.15.0-rc6-CI-Patchwork_7614+ #1
Hardware name: To Be Filled By O.E.M. To Be Filled By O.E.M./J4205-ITX, BIOS P1.10 09/29/2016
Call Trace:
dump_stack+0x5f/0x86
print_circular_bug.isra.18+0x1d0/0x2c0
__lock_acquire+0x14ae/0x1b60
? lock_acquire+0xaf/0x200
lock_acquire+0xaf/0x200
? i915_mutex_lock_interruptible+0x47/0x130 [i915]
__mutex_lock+0x81/0x9b0
? i915_mutex_lock_interruptible+0x47/0x130 [i915]
? i915_mutex_lock_interruptible+0x47/0x130 [i915]
? i915_mutex_lock_interruptible+0x47/0x130 [i915]
i915_mutex_lock_interruptible+0x47/0x130 [i915]
? __pm_runtime_resume+0x4f/0x80
i915_gem_fault+0x201/0x790 [i915]
__do_fault+0x15/0x70
? _raw_spin_unlock+0x29/0x40
__handle_mm_fault+0x677/0xdc0
handle_mm_fault+0x14f/0x2f0
__do_page_fault+0x2d1/0x560
? page_fault+0x36/0x60
page_fault+0x4c/0x60
v2: Added lock protection to guc->log.runtime.relay_chan (Chris)
Fixed locking inside guc_flush_logs uncovered by new lockdep.
v3: Locking guc_read_update_log_buffer entirely with relay_lock. (Chris)
Prepared intel_guc_init_early. Moved relay_lock inside relay_create
relay_destroy, relay_file_create, guc_read_update_log_buffer. (Michal)
Removed struct_mutex lock around guc_log_flush and removed usage
of guc_log_has_relay() from runtime_create path as it needs
struct_mutex lock.
v4: Handle NULL relay sub buffer pointer earlier in read_update_log_buffer
(Chris). Fixed comment suffix **/. (Michal)
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=104693
Testcase: igt/debugfs_test/read_all_entries # with enable_guc=1 and guc_log_level=1
Signed-off-by: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Marta Lofstedt <marta.lofstedt@intel.com>
Cc: Michal Winiarski <michal.winiarski@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/1516808821-3638-3-git-send-email-sagar.a.kamble@intel.com
2018-01-24 21:16:58 +05:30
}
2017-12-13 23:13:47 +01:00
/*
* Even though both sending GuC action , and adding a new workitem to
* GuC workqueue are serialized ( each with its own locking ) , since
* we ' re using mutliple engines , it ' s possible that we ' re going to
* issue a preempt request with two ( or more - each for different
* engine ) workitems in GuC queue . In this situation , GuC may submit
* all of them , which will make us very confused .
* Our preemption contexts may even already be complete - before we
* even had the chance to sent the preempt action to GuC ! . Rather
* than introducing yet another lock , we can just use ordered workqueue
* to make sure we ' re always sending a single preemption request with a
* single workitem .
*/
if ( HAS_LOGICAL_RING_PREEMPTION ( dev_priv ) & &
USES_GUC_SUBMISSION ( dev_priv ) ) {
guc - > preempt_wq = alloc_ordered_workqueue ( " i915-guc_preempt " ,
WQ_HIGHPRI ) ;
if ( ! guc - > preempt_wq ) {
2018-03-19 10:53:42 +01:00
destroy_workqueue ( guc - > log . relay . flush_wq ) ;
drm/i915/guc: Fix lockdep due to log relay channel handling under struct_mutex
This patch fixes lockdep issue due to circular locking dependency of
struct_mutex, i_mutex_key, mmap_sem, relay_channels_mutex.
For GuC log relay channel we create debugfs file that requires i_mutex_key
lock and we are doing that under struct_mutex. So we introduced newer
dependency as:
&dev->struct_mutex --> &sb->s_type->i_mutex_key#3 --> &mm->mmap_sem
However, there is dependency from mmap_sem to struct_mutex. Hence we
separate the relay create/destroy operation from under struct_mutex.
Also added runtime check of relay buffer status.
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
======================================================
WARNING: possible circular locking dependency detected
4.15.0-rc6-CI-Patchwork_7614+ #1 Not tainted
------------------------------------------------------
debugfs_test/1388 is trying to acquire lock:
(&dev->struct_mutex){+.+.}, at: [<00000000d5e1d915>] i915_mutex_lock_interruptible+0x47/0x130 [i915]
but task is already holding lock:
(&mm->mmap_sem){++++}, at: [<0000000029a9c131>] __do_page_fault+0x106/0x560
which lock already depends on the new lock.
the existing dependency chain (in reverse order) is:
-> #3 (&mm->mmap_sem){++++}:
_copy_to_user+0x1e/0x70
filldir+0x8c/0xf0
dcache_readdir+0xeb/0x160
iterate_dir+0xdc/0x140
SyS_getdents+0xa0/0x130
entry_SYSCALL_64_fastpath+0x1c/0x89
-> #2 (&sb->s_type->i_mutex_key#3){++++}:
start_creating+0x59/0x110
__debugfs_create_file+0x2e/0xe0
relay_create_buf_file+0x62/0x80
relay_late_setup_files+0x84/0x250
guc_log_late_setup+0x4f/0x110 [i915]
i915_guc_log_register+0x32/0x40 [i915]
i915_driver_load+0x7b6/0x1720 [i915]
i915_pci_probe+0x2e/0x90 [i915]
pci_device_probe+0x9c/0x120
driver_probe_device+0x2a3/0x480
__driver_attach+0xd9/0xe0
bus_for_each_dev+0x57/0x90
bus_add_driver+0x168/0x260
driver_register+0x52/0xc0
do_one_initcall+0x39/0x150
do_init_module+0x56/0x1ef
load_module+0x231c/0x2d70
SyS_finit_module+0xa5/0xe0
entry_SYSCALL_64_fastpath+0x1c/0x89
-> #1 (relay_channels_mutex){+.+.}:
relay_open+0x12c/0x2b0
intel_guc_log_runtime_create+0xab/0x230 [i915]
intel_guc_init+0x81/0x120 [i915]
intel_uc_init+0x29/0xa0 [i915]
i915_gem_init+0x182/0x530 [i915]
i915_driver_load+0xaa9/0x1720 [i915]
i915_pci_probe+0x2e/0x90 [i915]
pci_device_probe+0x9c/0x120
driver_probe_device+0x2a3/0x480
__driver_attach+0xd9/0xe0
bus_for_each_dev+0x57/0x90
bus_add_driver+0x168/0x260
driver_register+0x52/0xc0
do_one_initcall+0x39/0x150
do_init_module+0x56/0x1ef
load_module+0x231c/0x2d70
SyS_finit_module+0xa5/0xe0
entry_SYSCALL_64_fastpath+0x1c/0x89
-> #0 (&dev->struct_mutex){+.+.}:
__mutex_lock+0x81/0x9b0
i915_mutex_lock_interruptible+0x47/0x130 [i915]
i915_gem_fault+0x201/0x790 [i915]
__do_fault+0x15/0x70
__handle_mm_fault+0x677/0xdc0
handle_mm_fault+0x14f/0x2f0
__do_page_fault+0x2d1/0x560
page_fault+0x4c/0x60
other info that might help us debug this:
Chain exists of:
&dev->struct_mutex --> &sb->s_type->i_mutex_key#3 --> &mm->mmap_sem
Possible unsafe locking scenario:
CPU0 CPU1
---- ----
lock(&mm->mmap_sem);
lock(&sb->s_type->i_mutex_key#3);
lock(&mm->mmap_sem);
lock(&dev->struct_mutex);
*** DEADLOCK ***
1 lock held by debugfs_test/1388:
#0: (&mm->mmap_sem){++++}, at: [<0000000029a9c131>] __do_page_fault+0x106/0x560
stack backtrace:
CPU: 2 PID: 1388 Comm: debugfs_test Not tainted 4.15.0-rc6-CI-Patchwork_7614+ #1
Hardware name: To Be Filled By O.E.M. To Be Filled By O.E.M./J4205-ITX, BIOS P1.10 09/29/2016
Call Trace:
dump_stack+0x5f/0x86
print_circular_bug.isra.18+0x1d0/0x2c0
__lock_acquire+0x14ae/0x1b60
? lock_acquire+0xaf/0x200
lock_acquire+0xaf/0x200
? i915_mutex_lock_interruptible+0x47/0x130 [i915]
__mutex_lock+0x81/0x9b0
? i915_mutex_lock_interruptible+0x47/0x130 [i915]
? i915_mutex_lock_interruptible+0x47/0x130 [i915]
? i915_mutex_lock_interruptible+0x47/0x130 [i915]
i915_mutex_lock_interruptible+0x47/0x130 [i915]
? __pm_runtime_resume+0x4f/0x80
i915_gem_fault+0x201/0x790 [i915]
__do_fault+0x15/0x70
? _raw_spin_unlock+0x29/0x40
__handle_mm_fault+0x677/0xdc0
handle_mm_fault+0x14f/0x2f0
__do_page_fault+0x2d1/0x560
? page_fault+0x36/0x60
page_fault+0x4c/0x60
v2: Added lock protection to guc->log.runtime.relay_chan (Chris)
Fixed locking inside guc_flush_logs uncovered by new lockdep.
v3: Locking guc_read_update_log_buffer entirely with relay_lock. (Chris)
Prepared intel_guc_init_early. Moved relay_lock inside relay_create
relay_destroy, relay_file_create, guc_read_update_log_buffer. (Michal)
Removed struct_mutex lock around guc_log_flush and removed usage
of guc_log_has_relay() from runtime_create path as it needs
struct_mutex lock.
v4: Handle NULL relay sub buffer pointer earlier in read_update_log_buffer
(Chris). Fixed comment suffix **/. (Michal)
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=104693
Testcase: igt/debugfs_test/read_all_entries # with enable_guc=1 and guc_log_level=1
Signed-off-by: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Marta Lofstedt <marta.lofstedt@intel.com>
Cc: Michal Winiarski <michal.winiarski@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/1516808821-3638-3-git-send-email-sagar.a.kamble@intel.com
2018-01-24 21:16:58 +05:30
DRM_ERROR ( " Couldn't allocate workqueue for GuC "
" preemption \n " ) ;
2017-12-13 23:13:47 +01:00
return - ENOMEM ;
}
}
return 0 ;
}
void intel_guc_fini_wq ( struct intel_guc * guc )
{
struct drm_i915_private * dev_priv = guc_to_i915 ( guc ) ;
if ( HAS_LOGICAL_RING_PREEMPTION ( dev_priv ) & &
USES_GUC_SUBMISSION ( dev_priv ) )
destroy_workqueue ( guc - > preempt_wq ) ;
2018-03-19 10:53:42 +01:00
destroy_workqueue ( guc - > log . relay . flush_wq ) ;
2017-12-13 23:13:47 +01:00
}
2017-12-13 23:13:46 +01:00
static int guc_shared_data_create ( struct intel_guc * guc )
{
struct i915_vma * vma ;
void * vaddr ;
vma = intel_guc_allocate_vma ( guc , PAGE_SIZE ) ;
if ( IS_ERR ( vma ) )
return PTR_ERR ( vma ) ;
vaddr = i915_gem_object_pin_map ( vma - > obj , I915_MAP_WB ) ;
if ( IS_ERR ( vaddr ) ) {
i915_vma_unpin_and_release ( & vma ) ;
return PTR_ERR ( vaddr ) ;
}
guc - > shared_data = vma ;
guc - > shared_data_vaddr = vaddr ;
return 0 ;
}
static void guc_shared_data_destroy ( struct intel_guc * guc )
{
i915_gem_object_unpin_map ( guc - > shared_data - > obj ) ;
i915_vma_unpin_and_release ( & guc - > shared_data ) ;
}
int intel_guc_init ( struct intel_guc * guc )
{
struct drm_i915_private * dev_priv = guc_to_i915 ( guc ) ;
int ret ;
ret = guc_shared_data_create ( guc ) ;
if ( ret )
return ret ;
GEM_BUG_ON ( ! guc - > shared_data ) ;
2018-03-14 14:45:39 +00:00
ret = intel_guc_log_create ( & guc - > log ) ;
2018-01-02 13:20:24 -08:00
if ( ret )
goto err_shared ;
ret = intel_guc_ads_create ( guc ) ;
if ( ret )
goto err_log ;
GEM_BUG_ON ( ! guc - > ads_vma ) ;
2017-12-13 23:13:46 +01:00
/* We need to notify the guc whenever we change the GGTT */
i915_ggtt_enable_guc ( dev_priv ) ;
return 0 ;
2018-01-02 13:20:24 -08:00
err_log :
2018-03-14 14:45:39 +00:00
intel_guc_log_destroy ( & guc - > log ) ;
2018-01-02 13:20:24 -08:00
err_shared :
guc_shared_data_destroy ( guc ) ;
return ret ;
2017-12-13 23:13:46 +01:00
}
void intel_guc_fini ( struct intel_guc * guc )
{
struct drm_i915_private * dev_priv = guc_to_i915 ( guc ) ;
i915_ggtt_disable_guc ( dev_priv ) ;
2018-01-02 13:20:24 -08:00
intel_guc_ads_destroy ( guc ) ;
2018-03-14 14:45:39 +00:00
intel_guc_log_destroy ( & guc - > log ) ;
2017-12-13 23:13:46 +01:00
guc_shared_data_destroy ( guc ) ;
}
2018-06-04 16:19:41 +02:00
static u32 guc_ctl_debug_flags ( struct intel_guc * guc )
2018-01-11 15:24:40 +00:00
{
2018-06-04 16:19:41 +02:00
u32 level = intel_guc_log_get_level ( & guc - > log ) ;
2018-03-19 10:53:45 +01:00
u32 flags = 0 ;
2018-01-11 15:24:40 +00:00
2018-03-20 12:55:17 +01:00
if ( ! GUC_LOG_LEVEL_IS_ENABLED ( level ) )
2018-03-19 10:53:45 +01:00
flags | = GUC_LOG_DEFAULT_DISABLED ;
2018-03-20 12:55:17 +01:00
if ( ! GUC_LOG_LEVEL_IS_VERBOSE ( level ) )
2018-03-19 10:53:45 +01:00
flags | = GUC_LOG_DISABLED ;
else
flags | = GUC_LOG_LEVEL_TO_VERBOSITY ( level ) < <
GUC_LOG_VERBOSITY_SHIFT ;
2018-01-11 15:24:40 +00:00
2018-06-04 16:19:42 +02:00
if ( USES_GUC_SUBMISSION ( guc_to_i915 ( guc ) ) ) {
u32 ads = intel_guc_ggtt_offset ( guc , guc - > ads_vma )
> > PAGE_SHIFT ;
flags | = ads < < GUC_ADS_ADDR_SHIFT | GUC_ADS_ENABLED ;
}
2018-03-19 10:53:45 +01:00
return flags ;
2018-01-11 15:24:40 +00:00
}
2018-06-04 16:19:43 +02:00
static u32 guc_ctl_feature_flags ( struct intel_guc * guc )
{
u32 flags = 0 ;
flags | = GUC_CTL_VCS2_ENABLED ;
if ( USES_GUC_SUBMISSION ( guc_to_i915 ( guc ) ) )
flags | = GUC_CTL_KERNEL_SUBMISSIONS ;
else
flags | = GUC_CTL_DISABLE_SCHEDULER ;
return flags ;
}
2018-06-04 16:19:44 +02:00
2018-06-04 16:19:45 +02:00
static u32 guc_ctl_ctxinfo_flags ( struct intel_guc * guc )
{
u32 flags = 0 ;
if ( USES_GUC_SUBMISSION ( guc_to_i915 ( guc ) ) ) {
u32 ctxnum , base ;
base = intel_guc_ggtt_offset ( guc , guc - > stage_desc_pool ) ;
ctxnum = GUC_MAX_STAGE_DESCRIPTORS / 16 ;
base > > = PAGE_SHIFT ;
flags | = ( base < < GUC_CTL_BASE_ADDR_SHIFT ) |
( ctxnum < < GUC_CTL_CTXNUM_IN16_SHIFT ) ;
}
return flags ;
}
2018-06-04 16:19:44 +02:00
static u32 guc_ctl_log_params_flags ( struct intel_guc * guc )
{
u32 offset = intel_guc_ggtt_offset ( guc , guc - > log . vma ) > > PAGE_SHIFT ;
u32 flags ;
/* each allocated unit is a page */
flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL |
( GUC_LOG_CRASH_PAGES < < GUC_LOG_CRASH_SHIFT ) |
( GUC_LOG_DPC_PAGES < < GUC_LOG_DPC_SHIFT ) |
( GUC_LOG_ISR_PAGES < < GUC_LOG_ISR_SHIFT ) |
( offset < < GUC_LOG_BUF_ADDR_SHIFT ) ;
return flags ;
}
2017-10-16 14:47:11 +00:00
/*
* Initialise the GuC parameter block before starting the firmware
* transfer . These parameters are read by the firmware on startup
* and cannot be changed thereafter .
*/
void intel_guc_init_params ( struct intel_guc * guc )
{
struct drm_i915_private * dev_priv = guc_to_i915 ( guc ) ;
u32 params [ GUC_CTL_MAX_DWORDS ] ;
int i ;
2017-10-16 14:47:12 +00:00
memset ( params , 0 , sizeof ( params ) ) ;
2017-10-16 14:47:11 +00:00
/*
* GuC ARAT increment is 10 ns . GuC default scheduler quantum is one
* second . This ARAR is calculated by :
* Scheduler - Quantum - in - ns / ARAT - increment - in - ns = 1000000000 / 10
*/
params [ GUC_CTL_ARAT_HIGH ] = 0 ;
params [ GUC_CTL_ARAT_LOW ] = 100000000 ;
params [ GUC_CTL_WA ] | = GUC_CTL_WA_UK_BY_DRIVER ;
2018-06-04 16:19:43 +02:00
params [ GUC_CTL_FEATURE ] = guc_ctl_feature_flags ( guc ) ;
2018-06-04 16:19:44 +02:00
params [ GUC_CTL_LOG_PARAMS ] = guc_ctl_log_params_flags ( guc ) ;
2018-06-04 16:19:41 +02:00
params [ GUC_CTL_DEBUG ] = guc_ctl_debug_flags ( guc ) ;
2018-06-04 16:19:45 +02:00
params [ GUC_CTL_CTXINFO ] = guc_ctl_ctxinfo_flags ( guc ) ;
2017-10-16 14:47:11 +00:00
/*
* All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and
* they are power context saved so it ' s ok to release forcewake
* when we are done here and take it again at xfer time .
*/
intel_uncore_forcewake_get ( dev_priv , FORCEWAKE_BLITTER ) ;
I915_WRITE ( SOFT_SCRATCH ( 0 ) , 0 ) ;
for ( i = 0 ; i < GUC_CTL_MAX_DWORDS ; i + + )
I915_WRITE ( SOFT_SCRATCH ( 1 + i ) , params [ i ] ) ;
intel_uncore_forcewake_put ( dev_priv , FORCEWAKE_BLITTER ) ;
}
2018-03-26 19:48:20 +00:00
int intel_guc_send_nop ( struct intel_guc * guc , const u32 * action , u32 len ,
u32 * response_buf , u32 response_buf_size )
2017-10-04 18:13:41 +00:00
{
WARN ( 1 , " Unexpected send: action=%#x \n " , * action ) ;
return - ENODEV ;
}
/*
 * Placeholder for guc->handler when no event transport is available:
 * warn only. Installed by intel_guc_init_early().
 */
void intel_guc_to_host_event_handler_nop(struct intel_guc *guc)
{
	WARN(1, "Unexpected event: no suitable handler\n");
}
2017-10-04 18:13:41 +00:00
/*
* This function implements the MMIO based host to GuC interface .
*/
2018-03-26 19:48:20 +00:00
int intel_guc_send_mmio ( struct intel_guc * guc , const u32 * action , u32 len ,
u32 * response_buf , u32 response_buf_size )
2017-10-04 18:13:41 +00:00
{
struct drm_i915_private * dev_priv = guc_to_i915 ( guc ) ;
u32 status ;
int i ;
int ret ;
GEM_BUG_ON ( ! len ) ;
GEM_BUG_ON ( len > guc - > send_regs . count ) ;
2018-03-26 19:48:18 +00:00
/* We expect only action code */
GEM_BUG_ON ( * action & ~ INTEL_GUC_MSG_CODE_MASK ) ;
2017-10-04 18:13:41 +00:00
/* If CT is available, we expect to use MMIO only during init/fini */
GEM_BUG_ON ( HAS_GUC_CT ( dev_priv ) & &
* action ! = INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER & &
* action ! = INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER ) ;
mutex_lock ( & guc - > send_mutex ) ;
intel_uncore_forcewake_get ( dev_priv , guc - > send_regs . fw_domains ) ;
for ( i = 0 ; i < len ; i + + )
I915_WRITE ( guc_send_reg ( guc , i ) , action [ i ] ) ;
POSTING_READ ( guc_send_reg ( guc , i - 1 ) ) ;
intel_guc_notify ( guc ) ;
/*
* No GuC command should ever take longer than 10 ms .
* Fast commands should still complete in 10u s .
*/
ret = __intel_wait_for_register_fw ( dev_priv ,
guc_send_reg ( guc , 0 ) ,
2018-03-26 19:48:18 +00:00
INTEL_GUC_MSG_TYPE_MASK ,
INTEL_GUC_MSG_TYPE_RESPONSE < <
INTEL_GUC_MSG_TYPE_SHIFT ,
2017-10-04 18:13:41 +00:00
10 , 10 , & status ) ;
2018-03-26 19:48:18 +00:00
/* If GuC explicitly returned an error, convert it to -EIO */
if ( ! ret & & ! INTEL_GUC_MSG_IS_RESPONSE_SUCCESS ( status ) )
ret = - EIO ;
2017-10-04 18:13:41 +00:00
2018-03-26 19:48:18 +00:00
if ( ret ) {
2018-05-28 17:16:18 +00:00
DRM_ERROR ( " MMIO: GuC action %#x failed with error %d %#x \n " ,
action [ 0 ] , ret , status ) ;
2018-03-26 19:48:21 +00:00
goto out ;
2017-10-04 18:13:41 +00:00
}
2018-03-26 19:48:21 +00:00
if ( response_buf ) {
int count = min ( response_buf_size , guc - > send_regs . count - 1 ) ;
for ( i = 0 ; i < count ; i + + )
response_buf [ i ] = I915_READ ( guc_send_reg ( guc , i + 1 ) ) ;
}
/* Use data from the GuC response as our return value */
ret = INTEL_GUC_MSG_TO_DATA ( status ) ;
out :
2017-10-04 18:13:41 +00:00
intel_uncore_forcewake_put ( dev_priv , guc - > send_regs . fw_domains ) ;
mutex_unlock ( & guc - > send_mutex ) ;
return ret ;
}
2018-03-26 19:48:22 +00:00
void intel_guc_to_host_event_handler_mmio ( struct intel_guc * guc )
2018-03-08 16:46:55 +01:00
{
struct drm_i915_private * dev_priv = guc_to_i915 ( guc ) ;
2018-03-19 10:53:36 +01:00
u32 msg , val ;
2018-03-08 16:46:55 +01:00
/*
* Sample the log buffer flush related bits & clear them out now
* itself from the message identity register to minimize the
* probability of losing a flush interrupt , when there are back
* to back flush interrupts .
* There can be a new flush interrupt , for different log buffer
* type ( like for ISR ) , whilst Host is handling one ( for DPC ) .
* Since same bit is used in message register for ISR & DPC , it
* could happen that GuC sets the bit for 2 nd interrupt but Host
* clears out the bit on handling the 1 st interrupt .
*/
2018-03-19 10:53:36 +01:00
spin_lock ( & guc - > irq_lock ) ;
val = I915_READ ( SOFT_SCRATCH ( 15 ) ) ;
msg = val & guc - > msg_enabled_mask ;
I915_WRITE ( SOFT_SCRATCH ( 15 ) , val & ~ msg ) ;
spin_unlock ( & guc - > irq_lock ) ;
2018-03-27 21:41:24 +00:00
intel_guc_to_host_process_recv_msg ( guc , msg ) ;
}
/*
 * Dispatch GuC-to-host notification bits. Currently both the flush-log
 * and crash-dump notifications are routed to the log flush handler.
 */
void intel_guc_to_host_process_recv_msg(struct intel_guc *guc, u32 msg)
{
	/* Make sure to handle only enabled messages */
	msg &= guc->msg_enabled_mask;

	if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
		   INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED))
		intel_guc_log_handle_flush_event(&guc->log);
}
2017-10-04 18:13:41 +00:00
/**
 * intel_guc_sample_forcewake() - tell GuC which forcewake domains to sample
 * @guc: intel_guc structure
 *
 * Return: 0 on success, negative error code on failure (from intel_guc_send).
 */
int intel_guc_sample_forcewake(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	u32 action[2] = { INTEL_GUC_ACTION_SAMPLE_FORCEWAKE, 0 };

	/* WaRsDisableCoarsePowerGating:skl,cnl */
	if (HAS_RC6(dev_priv) && !NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
		/* bit 0 and 1 are for Render and Media domain separately */
		action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
/**
 * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode
 * @guc: intel_guc structure
 * @rsa_offset: rsa offset w.r.t ggtt base of huc vma
 *
 * Triggers a HuC firmware authentication request to the GuC via intel_guc_send
 * INTEL_GUC_ACTION_AUTHENTICATE_HUC interface. This function is invoked by
 * intel_huc_auth().
 *
 * Return: non-zero code on error
 */
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
{
	u32 action[2];

	action[0] = INTEL_GUC_ACTION_AUTHENTICATE_HUC;
	action[1] = rsa_offset;

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
/**
* intel_guc_suspend ( ) - notify GuC entering suspend state
2018-03-02 11:15:49 +00:00
* @ guc : the guc
2017-10-04 18:13:41 +00:00
*/
2018-03-02 11:15:49 +00:00
int intel_guc_suspend ( struct intel_guc * guc )
2017-10-04 18:13:41 +00:00
{
2018-03-02 11:15:49 +00:00
u32 data [ ] = {
INTEL_GUC_ACTION_ENTER_S_STATE ,
GUC_POWER_D1 , /* any value greater than GUC_POWER_D0 */
2018-03-13 17:32:49 -07:00
intel_guc_ggtt_offset ( guc , guc - > shared_data )
2018-03-02 11:15:49 +00:00
} ;
2017-10-04 18:13:41 +00:00
return intel_guc_send ( guc , data , ARRAY_SIZE ( data ) ) ;
}
2017-10-31 15:53:09 -07:00
/**
* intel_guc_reset_engine ( ) - ask GuC to reset an engine
* @ guc : intel_guc structure
* @ engine : engine to be reset
*/
int intel_guc_reset_engine ( struct intel_guc * guc ,
struct intel_engine_cs * engine )
{
u32 data [ 7 ] ;
GEM_BUG_ON ( ! guc - > execbuf_client ) ;
data [ 0 ] = INTEL_GUC_ACTION_REQUEST_ENGINE_RESET ;
data [ 1 ] = engine - > guc_id ;
data [ 2 ] = 0 ;
data [ 3 ] = 0 ;
data [ 4 ] = 0 ;
data [ 5 ] = guc - > execbuf_client - > stage_id ;
2018-03-13 17:32:49 -07:00
data [ 6 ] = intel_guc_ggtt_offset ( guc , guc - > shared_data ) ;
2017-10-31 15:53:09 -07:00
return intel_guc_send ( guc , data , ARRAY_SIZE ( data ) ) ;
}
2017-10-04 18:13:41 +00:00
/**
* intel_guc_resume ( ) - notify GuC resuming from suspend state
2018-03-02 11:15:49 +00:00
* @ guc : the guc
2017-10-04 18:13:41 +00:00
*/
2018-03-02 11:15:49 +00:00
int intel_guc_resume ( struct intel_guc * guc )
2017-10-04 18:13:41 +00:00
{
2018-03-02 11:15:49 +00:00
u32 data [ ] = {
INTEL_GUC_ACTION_EXIT_S_STATE ,
GUC_POWER_D0 ,
2018-03-13 17:32:49 -07:00
intel_guc_ggtt_offset ( guc , guc - > shared_data )
2018-03-02 11:15:49 +00:00
} ;
2017-10-04 18:13:41 +00:00
return intel_guc_send ( guc , data , ARRAY_SIZE ( data ) ) ;
}
2018-03-13 17:32:50 -07:00
/**
 * DOC: GuC Address Space
 *
 * The layout of GuC address space is shown below:
 *
 * ::
 *
 *     +==============> +====================+ <== GUC_GGTT_TOP
 *     ^                |                    |
 *     |                |                    |
 *     |                |        DRAM        |
 *     |                |       Memory       |
 *     |                |                    |
 *    GuC               |                    |
 *  Address  +========> +====================+ <== WOPCM Top
 *   Space   ^          |   HW contexts RSVD |
 *     |     |          |        WOPCM       |
 *     |     |     +==> +--------------------+ <== GuC WOPCM Top
 *     |    GuC    ^    |                    |
 *     |    GGTT   |    |                    |
 *     |    Pin   GuC   |        GuC         |
 *     |    Bias WOPCM  |       WOPCM        |
 *     |     |    Size  |                    |
 *     |     |     |    |                    |
 *     v     v     v    |                    |
 *     +=====+=====+==> +====================+ <== GuC WOPCM Base
 *                      |   Non-GuC WOPCM    |
 *                      |   (HuC/Reserved)   |
 *                      +====================+ <== WOPCM Base
 *
 * The lower part of GuC Address Space [0, ggtt_pin_bias) is mapped to WOPCM
 * while upper part of GuC Address Space [ggtt_pin_bias, GUC_GGTT_TOP) is
 * mapped to DRAM. The value of the GuC ggtt_pin_bias is determined by WOPCM
 * size and actual GuC WOPCM size.
 */
/**
* intel_guc_init_ggtt_pin_bias ( ) - Initialize the GuC ggtt_pin_bias value .
* @ guc : intel_guc structure .
*
* This function will calculate and initialize the ggtt_pin_bias value based on
* overall WOPCM size and GuC WOPCM size .
*/
void intel_guc_init_ggtt_pin_bias ( struct intel_guc * guc )
{
struct drm_i915_private * i915 = guc_to_i915 ( guc ) ;
GEM_BUG_ON ( ! i915 - > wopcm . size ) ;
GEM_BUG_ON ( i915 - > wopcm . size < i915 - > wopcm . guc . base ) ;
guc - > ggtt_pin_bias = i915 - > wopcm . size - i915 - > wopcm . guc . base ;
}
2017-10-04 18:13:41 +00:00
/**
 * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
 * @guc: the guc
 * @size: size of area to allocate (both virtual space and memory)
 *
 * This is a wrapper to create an object for use with the GuC. In order to
 * use it inside the GuC, an object needs to be pinned lifetime, so we allocate
 * both some backing storage and a range inside the Global GTT. We must pin
 * it in the GGTT somewhere other than [0, GuC ggtt_pin_bias) because that
 * range is reserved inside GuC.
 *
 * Return: A i915_vma if successful, otherwise an ERR_PTR.
 */
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create(dev_priv, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, &dev_priv->ggtt.vm, NULL);
	if (IS_ERR(vma))
		goto err_put;

	/* Keep the vma above the GuC-reserved low range of the GGTT. */
	err = i915_vma_pin(vma, 0, PAGE_SIZE,
			   PIN_GLOBAL | PIN_OFFSET_BIAS | guc->ggtt_pin_bias);
	if (err) {
		vma = ERR_PTR(err);
		goto err_put;
	}

	return vma;

err_put:
	/* The object owns the only reference; dropping it frees everything. */
	i915_gem_object_put(obj);
	return vma;
}