/*
 * Copyright © 2016-2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_guc_ct.h"

#ifdef CONFIG_DRM_I915_DEBUG_GUC
#define CT_DEBUG_DRIVER(...)    DRM_DEBUG_DRIVER(__VA_ARGS__)
#else
#define CT_DEBUG_DRIVER(...)    do { } while (0)
#endif

struct ct_request {
        struct list_head link;
        u32 fence;
        u32 status;
        u32 response_len;
        u32 *response_buf;
};

struct ct_incoming_request {
        struct list_head link;
        u32 msg[];
};

enum { CTB_SEND = 0, CTB_RECV = 1 };

enum { CTB_OWNER_HOST = 0 };

static void ct_incoming_request_worker_func(struct work_struct *w);

/**
 * intel_guc_ct_init_early - Initialize CT state without requiring device access
 * @ct: pointer to CT struct
 */
void intel_guc_ct_init_early(struct intel_guc_ct *ct)
{
        /* we're using static channel owners */
        ct->host_channel.owner = CTB_OWNER_HOST;

        spin_lock_init(&ct->lock);
        INIT_LIST_HEAD(&ct->pending_requests);
        INIT_LIST_HEAD(&ct->incoming_requests);
        INIT_WORK(&ct->worker, ct_incoming_request_worker_func);
}

static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
{
        return container_of(ct, struct intel_guc, ct);
}

static inline const char *guc_ct_buffer_type_to_str(u32 type)
{
        switch (type) {
        case INTEL_GUC_CT_BUFFER_TYPE_SEND:
                return "SEND";
        case INTEL_GUC_CT_BUFFER_TYPE_RECV:
                return "RECV";
        default:
                return "<invalid>";
        }
}

static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc,
                                    u32 cmds_addr, u32 size, u32 owner)
{
        CT_DEBUG_DRIVER("CT: desc %p init addr=%#x size=%u owner=%u\n",
                        desc, cmds_addr, size, owner);

        memset(desc, 0, sizeof(*desc));
        desc->addr = cmds_addr;
        desc->size = size;
        desc->owner = owner;
}

static void guc_ct_buffer_desc_reset(struct guc_ct_buffer_desc *desc)
{
        CT_DEBUG_DRIVER("CT: desc %p reset head=%u tail=%u\n",
                        desc, desc->head, desc->tail);

        desc->head = 0;
        desc->tail = 0;
        desc->is_in_error = 0;
}

static int guc_action_register_ct_buffer(struct intel_guc *guc,
                                         u32 desc_addr,
                                         u32 type)
{
        u32 action[] = {
                INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER,
                desc_addr,
                sizeof(struct guc_ct_buffer_desc),
                type
        };
        int err;

        /* Can't use generic send(), CT registration must go over MMIO */
        err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
        if (err)
                DRM_ERROR("CT: register %s buffer failed; err=%d\n",
                          guc_ct_buffer_type_to_str(type), err);
        return err;
}

static int guc_action_deregister_ct_buffer(struct intel_guc *guc,
                                           u32 owner,
                                           u32 type)
{
        u32 action[] = {
                INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER,
                owner,
                type
        };
        int err;

        /* Can't use generic send(), CT deregistration must go over MMIO */
        err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
        if (err)
                DRM_ERROR("CT: deregister %s buffer failed; owner=%d err=%d\n",
                          guc_ct_buffer_type_to_str(type), owner, err);
        return err;
}

static int ctch_init(struct intel_guc *guc,
                     struct intel_guc_ct_channel *ctch)
{
        struct i915_vma *vma;
        void *blob;
        int err;
        int i;

        GEM_BUG_ON(ctch->vma);

        /* We allocate 1 page to hold both descriptors and both buffers.
         *       ___________.....................
         *      |desc (SEND)|                   :
         *      |___________|                   PAGE/4
         *      :___________....................:
         *      |desc (RECV)|                   :
         *      |___________|                   PAGE/4
         *      :_______________________________:
         *      |cmds (SEND)                    |
         *      |                               PAGE/4
         *      |_______________________________|
         *      |cmds (RECV)                    |
         *      |                               PAGE/4
         *      |_______________________________|
         *
         * Each message can use a maximum of 32 dwords and we don't expect to
         * have more than 1 in flight at any time, so we have enough space.
         * Some logic further ahead will rely on the fact that there is only 1
         * page and that it is always mapped, so if the size is changed the
         * other code will need updating as well.
         */

        /* allocate vma */
        vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err_out;
        }
        ctch->vma = vma;

        /* map first page */
        blob = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
        if (IS_ERR(blob)) {
                err = PTR_ERR(blob);
                goto err_vma;
        }
        CT_DEBUG_DRIVER("CT: vma base=%#x\n",
                        intel_guc_ggtt_offset(guc, ctch->vma));

        /* store pointers to desc and cmds */
        for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) {
                GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
                ctch->ctbs[i].desc = blob + PAGE_SIZE/4 * i;
                ctch->ctbs[i].cmds = blob + PAGE_SIZE/4 * i + PAGE_SIZE/2;
        }

        return 0;

err_vma:
        i915_vma_unpin_and_release(&ctch->vma, 0);
err_out:
        CT_DEBUG_DRIVER("CT: channel %d initialization failed; err=%d\n",
                        ctch->owner, err);
        return err;
}
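
/*
 * Worked example of the layout set up above, assuming a 4K page: desc(SEND)
 * sits at blob + 0, desc(RECV) at blob + 1024, cmds(SEND) at blob + 2048 and
 * cmds(RECV) at blob + 3072, each cmds buffer being PAGE_SIZE/4 = 1024 bytes
 * (256 dwords).
 */
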
static void ctch_fini(struct intel_guc *guc,
                      struct intel_guc_ct_channel *ctch)
{
        GEM_BUG_ON(ctch->enabled);

        i915_vma_unpin_and_release(&ctch->vma, I915_VMA_RELEASE_MAP);
}

static int ctch_enable(struct intel_guc *guc,
                       struct intel_guc_ct_channel *ctch)
{
        u32 base;
        int err;
        int i;

        GEM_BUG_ON(!ctch->vma);

        GEM_BUG_ON(ctch->enabled);

        /* vma should be already allocated and mapped */
        base = intel_guc_ggtt_offset(guc, ctch->vma);

        /* (re)initialize descriptors
         * cmds buffers are in the second half of the blob page
         */
        for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) {
                GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
                guc_ct_buffer_desc_init(ctch->ctbs[i].desc,
                                        base + PAGE_SIZE/4 * i + PAGE_SIZE/2,
                                        PAGE_SIZE/4,
                                        ctch->owner);
        }

        /* register buffers, starting with RECV buffer
         * descriptors are in first half of the blob
         */
        err = guc_action_register_ct_buffer(guc,
                                            base + PAGE_SIZE/4 * CTB_RECV,
                                            INTEL_GUC_CT_BUFFER_TYPE_RECV);
        if (unlikely(err))
                goto err_out;

        err = guc_action_register_ct_buffer(guc,
                                            base + PAGE_SIZE/4 * CTB_SEND,
                                            INTEL_GUC_CT_BUFFER_TYPE_SEND);
        if (unlikely(err))
                goto err_deregister;

        ctch->enabled = true;

        return 0;

err_deregister:
        guc_action_deregister_ct_buffer(guc,
                                        ctch->owner,
                                        INTEL_GUC_CT_BUFFER_TYPE_RECV);
err_out:
        DRM_ERROR("CT: can't open channel %d; err=%d\n", ctch->owner, err);
        return err;
}

static void ctch_disable(struct intel_guc *guc,
                         struct intel_guc_ct_channel *ctch)
{
        GEM_BUG_ON(!ctch->enabled);

        ctch->enabled = false;

        guc_action_deregister_ct_buffer(guc,
                                        ctch->owner,
                                        INTEL_GUC_CT_BUFFER_TYPE_SEND);
        guc_action_deregister_ct_buffer(guc,
                                        ctch->owner,
                                        INTEL_GUC_CT_BUFFER_TYPE_RECV);
}

static u32 ctch_get_next_fence(struct intel_guc_ct_channel *ctch)
{
        /* For now it's trivial */
        return ++ctch->next_fence;
}

/**
 * DOC: CTB Host to GuC request
 *
 * Format of the CTB Host to GuC request message is as follows::
 *
 *      +------------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+
 *      |   MESSAGE  |       MESSAGE PAYLOAD                 |
 *      +   HEADER   +---------+---------+---------+---------+
 *      |            |    0    |    1    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+
 *      |  len >= 1  |  FENCE  |     request specific data   |
 *      +------+-----+---------+---------+---------+---------+
 *
 *                   ^-----------------len-----------------^
 */
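
/*
 * Illustrative sketch only (the action code 0x123 and payload are made up,
 * not taken from the GuC ABI): for a hypothetical 2-dword action, ctb_write()
 * below emits len + 1 = 3 dwords into the cmds buffer:
 *
 *      u32 action[] = { 0x123, data };                 // len = 2
 *      header = (2 << GUC_CT_MSG_LEN_SHIFT) |
 *               GUC_CT_MSG_WRITE_FENCE_TO_DESC |
 *               (0x123 << GUC_CT_MSG_ACTION_SHIFT);
 *      cmds: { header, fence, data }                   // len covers FENCE + data
 */
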
static int ctb_write(struct intel_guc_ct_buffer *ctb,
                     const u32 *action,
                     u32 len /* in dwords */,
                     u32 fence,
                     bool want_response)
{
        struct guc_ct_buffer_desc *desc = ctb->desc;
        u32 head = desc->head / 4;      /* in dwords */
        u32 tail = desc->tail / 4;      /* in dwords */
        u32 size = desc->size / 4;      /* in dwords */
        u32 used;                       /* in dwords */
        u32 header;
        u32 *cmds = ctb->cmds;
        unsigned int i;

        GEM_BUG_ON(desc->size % 4);
        GEM_BUG_ON(desc->head % 4);
        GEM_BUG_ON(desc->tail % 4);
        GEM_BUG_ON(tail >= size);

        /*
         * tail == head condition indicates empty. GuC FW does not support
         * using up the entire buffer to get tail == head meaning full.
         */
        if (tail < head)
                used = (size - head) + tail;
        else
                used = tail - head;

        /* make sure there is a space including extra dw for the fence */
        if (unlikely(used + len + 1 >= size))
                return -ENOSPC;

        /*
         * Write the message. The format is the following:
         * DW0: header (including action code)
         * DW1: fence
         * DW2+: action data
         */
        header = (len << GUC_CT_MSG_LEN_SHIFT) |
                 (GUC_CT_MSG_WRITE_FENCE_TO_DESC) |
                 (want_response ? GUC_CT_MSG_SEND_STATUS : 0) |
                 (action[0] << GUC_CT_MSG_ACTION_SHIFT);

        CT_DEBUG_DRIVER("CT: writing %*ph %*ph %*ph\n",
                        4, &header, 4, &fence,
                        4 * (len - 1), &action[1]);

        cmds[tail] = header;
        tail = (tail + 1) % size;

        cmds[tail] = fence;
        tail = (tail + 1) % size;

        for (i = 1; i < len; i++) {
                cmds[tail] = action[i];
                tail = (tail + 1) % size;
        }

        /* now update desc tail (back in bytes) */
        desc->tail = tail * 4;
        GEM_BUG_ON(desc->tail > desc->size);
        return 0;
}
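
/*
 * Worked example of the space check above, assuming a 256-dword cmds buffer:
 * with head = 250 and tail = 10 the buffer has wrapped, so
 * used = (256 - 250) + 10 = 16 dwords. A message with len = 239 would need
 * used + len + 1 = 256 dwords, failing the "used + len + 1 >= size" test with
 * -ENOSPC, because the GuC FW never lets tail fully catch up to head.
 */
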
/**
 * wait_for_ctb_desc_update - Wait for the CT buffer descriptor update.
 * @desc:       buffer descriptor
 * @fence:      response fence
 * @status:     placeholder for status
 *
 * GuC will update CT buffer descriptor with new fence and status
 * after processing the command identified by the fence. Wait for
 * specified fence and then read from the descriptor status of the
 * command.
 *
 * Return:
 * *    0 response received (status is valid)
 * *    -ETIMEDOUT no response within hardcoded timeout
 * *    -EPROTO no response, CT buffer is in error
 */
static int wait_for_ctb_desc_update(struct guc_ct_buffer_desc *desc,
                                    u32 fence,
                                    u32 *status)
{
        int err;

        /*
         * Fast commands should complete in less than 10us, so sample quickly
         * up to that length of time, then switch to a slower sleep-wait loop.
         * No GuC command should ever take longer than 10ms.
         */
#define done (READ_ONCE(desc->fence) == fence)
        err = wait_for_us(done, 10);
        if (err)
                err = wait_for(done, 10);
#undef done

        if (unlikely(err)) {
                DRM_ERROR("CT: fence %u failed; reported fence=%u\n",
                          fence, desc->fence);

                if (WARN_ON(desc->is_in_error)) {
                        /* Something went wrong with the messaging, try to reset
                         * the buffer and hope for the best
                         */
                        guc_ct_buffer_desc_reset(desc);
                        err = -EPROTO;
                }
        }

        *status = desc->status;
        return err;
}

/**
 * wait_for_ct_request_update - Wait for CT request state update.
 * @req:        pointer to pending request
 * @status:     placeholder for status
 *
 * For each sent request, GuC shall send back a CT response message.
 * Our message handler will update status of tracked request once
 * response message with given fence is received. Wait here and
 * check for valid response status value.
 *
 * Return:
 * *    0 response received (status is valid)
 * *    -ETIMEDOUT no response within hardcoded timeout
 */
static int wait_for_ct_request_update(struct ct_request *req, u32 *status)
{
        int err;

        /*
         * Fast commands should complete in less than 10us, so sample quickly
         * up to that length of time, then switch to a slower sleep-wait loop.
         * No GuC command should ever take longer than 10ms.
         */
#define done INTEL_GUC_MSG_IS_RESPONSE(READ_ONCE(req->status))
        err = wait_for_us(done, 10);
        if (err)
                err = wait_for(done, 10);
#undef done

        if (unlikely(err))
                DRM_ERROR("CT: fence %u err %d\n", req->fence, err);

        *status = req->status;
        return err;
}

static int ctch_send(struct intel_guc_ct *ct,
                     struct intel_guc_ct_channel *ctch,
                     const u32 *action,
                     u32 len,
                     u32 *response_buf,
                     u32 response_buf_size,
                     u32 *status)
{
        struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_SEND];
        struct guc_ct_buffer_desc *desc = ctb->desc;
        struct ct_request request;
        unsigned long flags;
        u32 fence;
        int err;

        GEM_BUG_ON(!ctch->enabled);
        GEM_BUG_ON(!len);
        GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
        GEM_BUG_ON(!response_buf && response_buf_size);

        fence = ctch_get_next_fence(ctch);
        request.fence = fence;
        request.status = 0;
        request.response_len = response_buf_size;
        request.response_buf = response_buf;

        spin_lock_irqsave(&ct->lock, flags);
        list_add_tail(&request.link, &ct->pending_requests);
        spin_unlock_irqrestore(&ct->lock, flags);

        err = ctb_write(ctb, action, len, fence, !!response_buf);
        if (unlikely(err))
                goto unlink;

        intel_guc_notify(ct_to_guc(ct));

        if (response_buf)
                err = wait_for_ct_request_update(&request, status);
        else
                err = wait_for_ctb_desc_update(desc, fence, status);
        if (unlikely(err))
                goto unlink;

        if (!INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(*status)) {
                err = -EIO;
                goto unlink;
        }

        if (response_buf) {
                /* There shall be no data in the status */
                WARN_ON(INTEL_GUC_MSG_TO_DATA(request.status));
                /* Return actual response len */
                err = request.response_len;
        } else {
                /* There shall be no response payload */
                WARN_ON(request.response_len);
                /* Return data decoded from the status dword */
                err = INTEL_GUC_MSG_TO_DATA(*status);
        }

unlink:
        spin_lock_irqsave(&ct->lock, flags);
        list_del(&request.link);
        spin_unlock_irqrestore(&ct->lock, flags);

        return err;
}

/*
 * Command Transport (CT) buffer based GuC send function.
 */
static int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len,
                             u32 *response_buf, u32 response_buf_size)
{
        struct intel_guc_ct *ct = &guc->ct;
        struct intel_guc_ct_channel *ctch = &ct->host_channel;
        u32 status = ~0; /* undefined */
        int ret;

        mutex_lock(&guc->send_mutex);

        ret = ctch_send(ct, ctch, action, len, response_buf, response_buf_size,
                        &status);
        if (unlikely(ret < 0)) {
                DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n",
                          action[0], ret, status);
        } else if (unlikely(ret)) {
                CT_DEBUG_DRIVER("CT: send action %#x returned %d (%#x)\n",
                                action[0], ret, ret);
        }

        mutex_unlock(&guc->send_mutex);
        return ret;
}
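
/*
 * Minimal usage sketch (hypothetical action code, not from the GuC ABI):
 * once intel_guc_ct_enable() installs this function as guc->send, a caller
 * can issue an action and optionally collect a response:
 *
 *      u32 action[] = { 0x123, data };
 *      u32 response[8];
 *      int ret = guc->send(guc, action, ARRAY_SIZE(action),
 *                          response, ARRAY_SIZE(response));
 *
 * A negative ret is an error; otherwise it is the response length in dwords
 * (or, with no response buffer, data decoded from the status dword).
 */
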
static inline unsigned int ct_header_get_len(u32 header)
{
        return (header >> GUC_CT_MSG_LEN_SHIFT) & GUC_CT_MSG_LEN_MASK;
}

static inline unsigned int ct_header_get_action(u32 header)
{
        return (header >> GUC_CT_MSG_ACTION_SHIFT) & GUC_CT_MSG_ACTION_MASK;
}

static inline bool ct_header_is_response(u32 header)
{
        return !!(header & GUC_CT_MSG_IS_RESPONSE);
}

static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data)
{
        struct guc_ct_buffer_desc *desc = ctb->desc;
        u32 head = desc->head / 4;      /* in dwords */
        u32 tail = desc->tail / 4;      /* in dwords */
        u32 size = desc->size / 4;      /* in dwords */
        u32 *cmds = ctb->cmds;
        s32 available;                  /* in dwords */
        unsigned int len;
        unsigned int i;

        GEM_BUG_ON(desc->size % 4);
        GEM_BUG_ON(desc->head % 4);
        GEM_BUG_ON(desc->tail % 4);
        GEM_BUG_ON(tail >= size);
        GEM_BUG_ON(head >= size);

        /* tail == head condition indicates empty */
        available = tail - head;
        if (unlikely(available == 0))
                return -ENODATA;

        /* beware of buffer wrap case */
        if (unlikely(available < 0))
                available += size;
        CT_DEBUG_DRIVER("CT: available %d (%u:%u)\n", available, head, tail);
        GEM_BUG_ON(available < 0);

        data[0] = cmds[head];
        head = (head + 1) % size;

        /* message len with header */
        len = ct_header_get_len(data[0]) + 1;
        if (unlikely(len > (u32)available)) {
                DRM_ERROR("CT: incomplete message %*ph %*ph %*ph\n",
                          4, data,
                          4 * (head + available - 1 > size ?
                               size - head : available - 1), &cmds[head],
                          4 * (head + available - 1 > size ?
                               available - 1 - size + head : 0), &cmds[0]);
                return -EPROTO;
        }

        for (i = 1; i < len; i++) {
                data[i] = cmds[head];
                head = (head + 1) % size;
        }
        CT_DEBUG_DRIVER("CT: received %*ph\n", 4 * len, data);

        desc->head = head * 4;
        return 0;
}

/**
 * DOC: CTB GuC to Host response
 *
 * Format of the CTB GuC to Host response message is as follows::
 *
 *      +------------+---------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   [3]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+---------+
 *      |   MESSAGE  |       MESSAGE PAYLOAD                           |
 *      +   HEADER   +---------+---------+---------+---------+---------+
 *      |            |    0    |    1    |    2    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+=========+
 *      |  len >= 2  |  FENCE  |  STATUS |   response specific data    |
 *      +------+-----+---------+---------+---------+---------+---------+
 *
 *                   ^-----------------------len-----------------------^
 */
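
/*
 * Illustrative decode sketch (values made up): for a response message
 * { header, fence, status, data0 } with len = 3, ct_handle_response() below
 * extracts:
 *
 *      fence   = msg[1];
 *      status  = msg[2];       // must satisfy INTEL_GUC_MSG_IS_RESPONSE()
 *      datalen = len - 2;      // 1 dword copied into req->response_buf
 */
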
static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
{
        u32 header = msg[0];
        u32 len = ct_header_get_len(header);
        u32 msglen = len + 1; /* total message length including header */
        u32 fence;
        u32 status;
        u32 datalen;
        struct ct_request *req;
        bool found = false;

        GEM_BUG_ON(!ct_header_is_response(header));
        GEM_BUG_ON(!in_irq());

        /* Response payload shall at least include fence and status */
        if (unlikely(len < 2)) {
                DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg);
                return -EPROTO;
        }

        fence = msg[1];
        status = msg[2];
        datalen = len - 2;

        /* Format of the status follows RESPONSE message */
        if (unlikely(!INTEL_GUC_MSG_IS_RESPONSE(status))) {
                DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg);
                return -EPROTO;
        }

        CT_DEBUG_DRIVER("CT: response fence %u status %#x\n", fence, status);

        spin_lock(&ct->lock);
        list_for_each_entry(req, &ct->pending_requests, link) {
                if (unlikely(fence != req->fence)) {
                        CT_DEBUG_DRIVER("CT: request %u awaits response\n",
                                        req->fence);
                        continue;
                }
                if (unlikely(datalen > req->response_len)) {
                        DRM_ERROR("CT: response %u too long %*ph\n",
                                  req->fence, 4 * msglen, msg);
                        datalen = 0;
                }
                if (datalen)
                        memcpy(req->response_buf, msg + 3, 4 * datalen);
                req->response_len = datalen;
                WRITE_ONCE(req->status, status);
                found = true;
                break;
        }
        spin_unlock(&ct->lock);

        if (!found)
                DRM_ERROR("CT: unsolicited response %*ph\n", 4 * msglen, msg);
        return 0;
}

static void ct_process_request(struct intel_guc_ct *ct,
                               u32 action, u32 len, const u32 *payload)
{
        struct intel_guc *guc = ct_to_guc(ct);
        int ret;

        CT_DEBUG_DRIVER("CT: request %x %*ph\n", action, 4 * len, payload);

        switch (action) {
        case INTEL_GUC_ACTION_DEFAULT:
                ret = intel_guc_to_host_process_recv_msg(guc, payload, len);
                if (unlikely(ret))
                        goto fail_unexpected;
                break;

        default:
fail_unexpected:
                DRM_ERROR("CT: unexpected request %x %*ph\n",
                          action, 4 * len, payload);
                break;
        }
}

static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
{
        unsigned long flags;
        struct ct_incoming_request *request;
        u32 header;
        u32 *payload;
        bool done;

        spin_lock_irqsave(&ct->lock, flags);
        request = list_first_entry_or_null(&ct->incoming_requests,
                                           struct ct_incoming_request, link);
        if (request)
                list_del(&request->link);
        done = !!list_empty(&ct->incoming_requests);
        spin_unlock_irqrestore(&ct->lock, flags);

        if (!request)
                return true;

        header = request->msg[0];
        payload = &request->msg[1];
        ct_process_request(ct,
                           ct_header_get_action(header),
                           ct_header_get_len(header),
                           payload);

        kfree(request);
        return done;
}

static void ct_incoming_request_worker_func(struct work_struct *w)
{
        struct intel_guc_ct *ct = container_of(w, struct intel_guc_ct, worker);
        bool done;

        done = ct_process_incoming_requests(ct);
        if (!done)
                queue_work(system_unbound_wq, &ct->worker);
}

/**
 * DOC: CTB GuC to Host request
 *
 * Format of the CTB GuC to Host request message is as follows::
 *
 *      +------------+---------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   [3]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+---------+
 *      |   MESSAGE  |       MESSAGE PAYLOAD                           |
 *      +   HEADER   +---------+---------+---------+---------+---------+
 *      |            |    0    |    1    |    2    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+=========+
 *      |     len    |            request specific data                |
 *      +------+-----+---------+---------+---------+---------+---------+
 *
 *                   ^-----------------------len-----------------------^
 */
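
/*
 * Illustrative sketch (values made up): an incoming request { header, d0, d1 }
 * with len = 2 is queued by ct_handle_request() below and later dispatched
 * from the worker as:
 *
 *      ct_process_request(ct,
 *                         ct_header_get_action(header),
 *                         ct_header_get_len(header),    // 2
 *                         &request->msg[1]);            // { d0, d1 }
 */
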
static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
{
        u32 header = msg[0];
        u32 len = ct_header_get_len(header);
        u32 msglen = len + 1; /* total message length including header */
        struct ct_incoming_request *request;
        unsigned long flags;

        GEM_BUG_ON(ct_header_is_response(header));

        request = kmalloc(sizeof(*request) + 4 * msglen, GFP_ATOMIC);
        if (unlikely(!request)) {
                DRM_ERROR("CT: dropping request %*ph\n", 4 * msglen, msg);
                return 0; /* XXX: -ENOMEM ? */
        }
        memcpy(request->msg, msg, 4 * msglen);

        spin_lock_irqsave(&ct->lock, flags);
        list_add_tail(&request->link, &ct->incoming_requests);
        spin_unlock_irqrestore(&ct->lock, flags);

        queue_work(system_unbound_wq, &ct->worker);
        return 0;
}

static void ct_process_host_channel(struct intel_guc_ct *ct)
{
        struct intel_guc_ct_channel *ctch = &ct->host_channel;
        struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_RECV];
        u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
        int err = 0;

        if (!ctch->enabled)
                return;

        do {
                err = ctb_read(ctb, msg);
                if (err)
                        break;

                if (ct_header_is_response(msg[0]))
                        err = ct_handle_response(ct, msg);
                else
                        err = ct_handle_request(ct, msg);
        } while (!err);

        if (GEM_WARN_ON(err == -EPROTO)) {
                DRM_ERROR("CT: corrupted message detected!\n");
                ctb->desc->is_in_error = 1;
        }
}

/*
 * When we're communicating with the GuC over CT, GuC uses events
 * to notify us about new messages being posted on the RECV buffer.
 */
static void intel_guc_to_host_event_handler_ct(struct intel_guc *guc)
{
        struct intel_guc_ct *ct = &guc->ct;

        ct_process_host_channel(ct);
}

/**
 * intel_guc_ct_init - Init CT communication
 * @ct: pointer to CT struct
 *
 * Allocate memory required for communication via
 * the CT channel.
 *
 * Return: 0 on success, a negative errno code on failure.
 */
int intel_guc_ct_init(struct intel_guc_ct *ct)
{
        struct intel_guc *guc = ct_to_guc(ct);
        struct intel_guc_ct_channel *ctch = &ct->host_channel;
        int err;

        err = ctch_init(guc, ctch);
        if (unlikely(err)) {
                DRM_ERROR("CT: can't open channel %d; err=%d\n",
                          ctch->owner, err);
                return err;
        }

        GEM_BUG_ON(!ctch->vma);
        return 0;
}

/**
 * intel_guc_ct_fini - Fini CT communication
 * @ct: pointer to CT struct
 *
 * Deallocate memory required for communication via
 * the CT channel.
 */
void intel_guc_ct_fini(struct intel_guc_ct *ct)
{
        struct intel_guc *guc = ct_to_guc(ct);
        struct intel_guc_ct_channel *ctch = &ct->host_channel;

        ctch_fini(guc, ctch);
}

/**
 * intel_guc_ct_enable - Enable buffer based command transport.
 * @ct: pointer to CT struct
 *
 * Return: 0 on success, a negative errno code on failure.
 */
int intel_guc_ct_enable(struct intel_guc_ct *ct)
{
        struct intel_guc *guc = ct_to_guc(ct);
        struct intel_guc_ct_channel *ctch = &ct->host_channel;
        int err;

        if (ctch->enabled)
                return 0;

        err = ctch_enable(guc, ctch);
        if (unlikely(err))
                return err;

        /* Switch into cmd transport buffer based send() */
        guc->send = intel_guc_send_ct;
        guc->handler = intel_guc_to_host_event_handler_ct;
        DRM_INFO("CT: %s\n", enableddisabled(true));
        return 0;
}

/**
 * intel_guc_ct_disable - Disable buffer based command transport.
 * @ct: pointer to CT struct
 */
void intel_guc_ct_disable(struct intel_guc_ct *ct)
{
        struct intel_guc *guc = ct_to_guc(ct);
        struct intel_guc_ct_channel *ctch = &ct->host_channel;

        if (!ctch->enabled)
                return;

        ctch_disable(guc, ctch);

        /* Disable send */
        guc->send = intel_guc_send_nop;
        guc->handler = intel_guc_to_host_event_handler_nop;
        DRM_INFO("CT: %s\n", enableddisabled(false));
}