// SPDX-License-Identifier: GPL-2.0
/*
 * Data Object Exchange
 *	PCIe r6.0, sec 6.30 DOE
 *
 * Copyright (C) 2021 Huawei
 *	Jonathan Cameron <Jonathan.Cameron@huawei.com>
 *
 * Copyright (C) 2022 Intel Corporation
 *	Ira Weiny <ira.weiny@intel.com>
 */
#define dev_fmt(fmt) "DOE: " fmt

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pci-doe.h>
#include <linux/workqueue.h>

#include "pci.h"

#define PCI_DOE_PROTOCOL_DISCOVERY 0

/* Timeout of 1 second from 6.30.2 Operation, PCI Spec r6.0 */
#define PCI_DOE_TIMEOUT HZ
#define PCI_DOE_POLL_INTERVAL	(PCI_DOE_TIMEOUT / 128)

#define PCI_DOE_FLAG_CANCEL	0
#define PCI_DOE_FLAG_DEAD	1

/* Max data object length is 2^18 dwords */
#define PCI_DOE_MAX_LENGTH	(1 << 18)
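
/*
 * A Length value of 0 in DOE Data Object Header 2 encodes this maximum
 * (PCIe r6.0, sec 6.30.1); pci_doe_send_req() and pci_doe_recv_resp()
 * below convert to and from that encoding.
 */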

/**
 * struct pci_doe_mb - State for a single DOE mailbox
 *
 * This state is used to manage a single DOE mailbox capability.  All fields
 * should be considered opaque to the consumers and the structure passed into
 * the helpers below after being created by pci_doe_create_mb().
 *
 * @pdev: PCI device this mailbox belongs to
 * @cap_offset: Capability offset
 * @prots: Array of protocols supported (encoded as long values)
 * @wq: Wait queue for work item
 * @work_queue: Queue of pci_doe_work items
 * @flags: Bit array of PCI_DOE_FLAG_* flags
 */
struct pci_doe_mb {
	struct pci_dev *pdev;
	u16 cap_offset;
	struct xarray prots;

	wait_queue_head_t wq;
	struct workqueue_struct *work_queue;
	unsigned long flags;
};

struct pci_doe_protocol {
	u16 vid;
	u8 type;
};

/**
 * struct pci_doe_task - represents a single query/response
 *
 * @prot: DOE Protocol
 * @request_pl: The request payload
 * @request_pl_sz: Size of the request payload (bytes)
 * @response_pl: The response payload
 * @response_pl_sz: Size of the response payload (bytes)
 * @rv: Return value.  Length of received response or error (bytes)
 * @complete: Called when task is complete
 * @private: Private data for the consumer
 * @work: Used internally by the mailbox
 * @doe_mb: Used internally by the mailbox
 */
struct pci_doe_task {
	struct pci_doe_protocol prot;
	const __le32 *request_pl;
	size_t request_pl_sz;
	__le32 *response_pl;
	size_t response_pl_sz;
	int rv;
	void (*complete)(struct pci_doe_task *task);
	void *private;

	/* initialized by pci_doe_submit_task() */
	struct work_struct work;
	struct pci_doe_mb *doe_mb;
};
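
/*
 * Wait for up to @timeout jiffies while watching for mailbox cancellation:
 * returns -EIO as soon as PCI_DOE_FLAG_CANCEL is set (so callers' poll
 * loops terminate promptly), or 0 once the interval has elapsed.
 */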
static int pci_doe_wait(struct pci_doe_mb *doe_mb, unsigned long timeout)
{
	if (wait_event_timeout(doe_mb->wq,
			       test_bit(PCI_DOE_FLAG_CANCEL, &doe_mb->flags),
			       timeout))
		return -EIO;
	return 0;
}

static void pci_doe_write_ctrl(struct pci_doe_mb *doe_mb, u32 val)
{
	struct pci_dev *pdev = doe_mb->pdev;
	int offset = doe_mb->cap_offset;

	pci_write_config_dword(pdev, offset + PCI_DOE_CTRL, val);
}

static int pci_doe_abort(struct pci_doe_mb *doe_mb)
{
	struct pci_dev *pdev = doe_mb->pdev;
	int offset = doe_mb->cap_offset;
	unsigned long timeout_jiffies;

	pci_dbg(pdev, "[%x] Issuing Abort\n", offset);

	timeout_jiffies = jiffies + PCI_DOE_TIMEOUT;
	pci_doe_write_ctrl(doe_mb, PCI_DOE_CTRL_ABORT);

	do {
		int rc;
		u32 val;

		rc = pci_doe_wait(doe_mb, PCI_DOE_POLL_INTERVAL);
		if (rc)
			return rc;
		pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);

		/* Abort success! */
		if (!FIELD_GET(PCI_DOE_STATUS_ERROR, val) &&
		    !FIELD_GET(PCI_DOE_STATUS_BUSY, val))
			return 0;

	} while (!time_after(jiffies, timeout_jiffies));

	/* Abort has timed out and the MB is dead */
	pci_err(pdev, "[%x] ABORT timed out\n", offset);

	return -EIO;
}
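
/*
 * DOE Data Object Header layout (PCIe r6.0, sec 6.30.1), as written by
 * pci_doe_send_req() below and parsed back by pci_doe_recv_resp():
 *
 *	DW0: [15:0] Vendor ID, [23:16] Data Object Type
 *	DW1: [17:0] Length in dwords, including the two header dwords
 *	     (0 encodes the 2^18 dword maximum)
 *	DW2..: payload
 */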
static int pci_doe_send_req(struct pci_doe_mb *doe_mb,
			    struct pci_doe_task *task)
{
	struct pci_dev *pdev = doe_mb->pdev;
	int offset = doe_mb->cap_offset;
	size_t length, remainder;
	u32 val;
	int i;

	/*
	 * Check the DOE busy bit is not set. If it is set, this could indicate
	 * someone other than Linux (e.g. firmware) is using the mailbox. Note
	 * it is expected that firmware and OS will negotiate access rights via
	 * an, as yet to be defined, method.
	 */
	pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
	if (FIELD_GET(PCI_DOE_STATUS_BUSY, val))
		return -EBUSY;

	if (FIELD_GET(PCI_DOE_STATUS_ERROR, val))
		return -EIO;

	/* Length is 2 DW of header + length of payload in DW */
	length = 2 + DIV_ROUND_UP(task->request_pl_sz, sizeof(__le32));
	if (length > PCI_DOE_MAX_LENGTH)
		return -EIO;
	if (length == PCI_DOE_MAX_LENGTH)
		length = 0;

	/* Write DOE Header */
	val = FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_VID, task->prot.vid) |
		FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, task->prot.type);
	pci_write_config_dword(pdev, offset + PCI_DOE_WRITE, val);
	pci_write_config_dword(pdev, offset + PCI_DOE_WRITE,
			       FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH,
					  length));

	/* Write payload */
	for (i = 0; i < task->request_pl_sz / sizeof(__le32); i++)
		pci_write_config_dword(pdev, offset + PCI_DOE_WRITE,
				       le32_to_cpu(task->request_pl[i]));

	/* Write last payload dword */
	remainder = task->request_pl_sz % sizeof(__le32);
	if (remainder) {
		val = 0;
		memcpy(&val, &task->request_pl[i], remainder);
		le32_to_cpus(&val);
		pci_write_config_dword(pdev, offset + PCI_DOE_WRITE, val);
	}

	pci_doe_write_ctrl(doe_mb, PCI_DOE_CTRL_GO);

	return 0;
}

static bool pci_doe_data_obj_ready(struct pci_doe_mb *doe_mb)
{
	struct pci_dev *pdev = doe_mb->pdev;
	int offset = doe_mb->cap_offset;
	u32 val;

	pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
	if (FIELD_GET(PCI_DOE_STATUS_DATA_OBJECT_READY, val))
		return true;
	return false;
}

static int pci_doe_recv_resp(struct pci_doe_mb *doe_mb, struct pci_doe_task *task)
{
	size_t length, payload_length, remainder, received;
	struct pci_dev *pdev = doe_mb->pdev;
	int offset = doe_mb->cap_offset;
	int i = 0;
	u32 val;

	/* Read the first dword to get the protocol */
	pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
	if ((FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_VID, val) != task->prot.vid) ||
	    (FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, val) != task->prot.type)) {
		dev_err_ratelimited(&pdev->dev, "[%x] expected [VID, Protocol] = [%04x, %02x], got [%04x, %02x]\n",
				    doe_mb->cap_offset, task->prot.vid, task->prot.type,
				    FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_VID, val),
				    FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, val));
		return -EIO;
	}

	pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);
	/* Read the second dword to get the length */
	pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
	pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);

	length = FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH, val);
	/* A value of 0x0 indicates max data object length */
	if (!length)
		length = PCI_DOE_MAX_LENGTH;
	if (length < 2)
		return -EIO;

	/* First 2 dwords have already been read */
	length -= 2;
	received = task->response_pl_sz;
	payload_length = DIV_ROUND_UP(task->response_pl_sz, sizeof(__le32));
	remainder = task->response_pl_sz % sizeof(__le32);

	/* remainder signifies number of data bytes in last payload dword */
	if (!remainder)
		remainder = sizeof(__le32);

	if (length < payload_length) {
		received = length * sizeof(__le32);
		payload_length = length;
		remainder = sizeof(__le32);
	}

	if (payload_length) {
		/* Read all payload dwords except the last */
		for (; i < payload_length - 1; i++) {
			pci_read_config_dword(pdev, offset + PCI_DOE_READ,
					      &val);
			task->response_pl[i] = cpu_to_le32(val);
			pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);
		}

		/* Read last payload dword */
		pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
		cpu_to_le32s(&val);
		memcpy(&task->response_pl[i], &val, remainder);
		/* Prior to the last ack, ensure Data Object Ready */
		if (!pci_doe_data_obj_ready(doe_mb))
			return -EIO;
		pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);
		i++;
	}

	/* Flush excess length */
	for (; i < length; i++) {
		pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
		pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);
	}

	/* Final error check to pick up on any since Data Object Ready */
	pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
	if (FIELD_GET(PCI_DOE_STATUS_ERROR, val))
		return -EIO;

	return received;
}

static void signal_task_complete(struct pci_doe_task *task, int rv)
{
	task->rv = rv;
	destroy_work_on_stack(&task->work);
	task->complete(task);
}

static void signal_task_abort(struct pci_doe_task *task, int rv)
{
	struct pci_doe_mb *doe_mb = task->doe_mb;
	struct pci_dev *pdev = doe_mb->pdev;

	if (pci_doe_abort(doe_mb)) {
		/*
		 * If the device can't process an abort; set the mailbox dead
		 *	- no more submissions
		 */
		pci_err(pdev, "[%x] Abort failed marking mailbox dead\n",
			doe_mb->cap_offset);
		set_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags);
	}
	signal_task_complete(task, rv);
}

static void doe_statemachine_work(struct work_struct *work)
{
	struct pci_doe_task *task = container_of(work, struct pci_doe_task,
						 work);
	struct pci_doe_mb *doe_mb = task->doe_mb;
	struct pci_dev *pdev = doe_mb->pdev;
	int offset = doe_mb->cap_offset;
	unsigned long timeout_jiffies;
	u32 val;
	int rc;

	if (test_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags)) {
		signal_task_complete(task, -EIO);
		return;
	}

	/* Send request */
	rc = pci_doe_send_req(doe_mb, task);
	if (rc) {
		/*
		 * The specification does not provide any guidance on how to
		 * resolve conflicting requests from other entities.
		 * Furthermore, it is likely that busy will not be detected
		 * most of the time. Flag any detection of status busy with an
		 * error.
		 */
		if (rc == -EBUSY)
			dev_err_ratelimited(&pdev->dev, "[%x] busy detected; another entity is sending conflicting requests\n",
					    offset);
		signal_task_abort(task, rc);
		return;
	}

	timeout_jiffies = jiffies + PCI_DOE_TIMEOUT;
	/* Poll for response */
retry_resp:
	pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
	if (FIELD_GET(PCI_DOE_STATUS_ERROR, val)) {
		signal_task_abort(task, -EIO);
		return;
	}

	if (!FIELD_GET(PCI_DOE_STATUS_DATA_OBJECT_READY, val)) {
		if (time_after(jiffies, timeout_jiffies)) {
			signal_task_abort(task, -EIO);
			return;
		}
		rc = pci_doe_wait(doe_mb, PCI_DOE_POLL_INTERVAL);
		if (rc) {
			signal_task_abort(task, rc);
			return;
		}
		goto retry_resp;
	}

	rc = pci_doe_recv_resp(doe_mb, task);
	if (rc < 0) {
		signal_task_abort(task, rc);
		return;
	}

	signal_task_complete(task, rc);
}

static void pci_doe_task_complete(struct pci_doe_task *task)
{
	complete(task->private);
}

static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 capver, u8 *index, u16 *vid,
			     u8 *protocol)
{
	u32 request_pl = FIELD_PREP(PCI_DOE_DATA_OBJECT_DISC_REQ_3_INDEX,
				    *index) |
			 FIELD_PREP(PCI_DOE_DATA_OBJECT_DISC_REQ_3_VER,
				    (capver >= 2) ? 2 : 0);
	__le32 request_pl_le = cpu_to_le32(request_pl);
	__le32 response_pl_le;
	u32 response_pl;
	int rc;

	rc = pci_doe(doe_mb, PCI_VENDOR_ID_PCI_SIG, PCI_DOE_PROTOCOL_DISCOVERY,
		     &request_pl_le, sizeof(request_pl_le),
		     &response_pl_le, sizeof(response_pl_le));
	if (rc < 0)
		return rc;

	if (rc != sizeof(response_pl_le))
		return -EIO;

	response_pl = le32_to_cpu(response_pl_le);
	*vid = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_VID, response_pl);
	*protocol = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_PROTOCOL,
			      response_pl);
	*index = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_NEXT_INDEX,
			   response_pl);

	return 0;
}
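
/*
 * Supported protocols are cached in the @prots xarray as value entries:
 * the Vendor ID occupies bits 23:8 and the protocol type bits 7:0, so
 * each (vid, prot) pair fits in an unsigned long via xa_mk_value().
 */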
static void *pci_doe_xa_prot_entry(u16 vid, u8 prot)
{
	return xa_mk_value((vid << 8) | prot);
}

static int pci_doe_cache_protocols(struct pci_doe_mb *doe_mb)
{
	u8 index = 0;
	u8 xa_idx = 0;
	u32 hdr = 0;

	pci_read_config_dword(doe_mb->pdev, doe_mb->cap_offset, &hdr);

	do {
		int rc;
		u16 vid;
		u8 prot;

		rc = pci_doe_discovery(doe_mb, PCI_EXT_CAP_VER(hdr), &index,
				       &vid, &prot);
		if (rc)
			return rc;

		pci_dbg(doe_mb->pdev,
			"[%x] Found protocol %d vid: %x prot: %x\n",
			doe_mb->cap_offset, xa_idx, vid, prot);

		rc = xa_insert(&doe_mb->prots, xa_idx++,
			       pci_doe_xa_prot_entry(vid, prot), GFP_KERNEL);
		if (rc)
			return rc;
	} while (index);

	return 0;
}

static void pci_doe_cancel_tasks(struct pci_doe_mb *doe_mb)
{
	/* Stop all pending work items from starting */
	set_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags);

	/* Cancel an in progress work item, if necessary */
	set_bit(PCI_DOE_FLAG_CANCEL, &doe_mb->flags);
	wake_up(&doe_mb->wq);
}

/**
 * pci_doe_create_mb() - Create a DOE mailbox object
 *
 * @pdev: PCI device to create the DOE mailbox for
 * @cap_offset: Offset of the DOE mailbox
 *
 * Create a single mailbox object to manage the mailbox protocol at the
 * cap_offset specified.
 *
 * RETURNS: created mailbox object on success
 *	    ERR_PTR(-errno) on failure
 */
static struct pci_doe_mb *pci_doe_create_mb(struct pci_dev *pdev,
					    u16 cap_offset)
{
	struct pci_doe_mb *doe_mb;
	int rc;

	doe_mb = kzalloc(sizeof(*doe_mb), GFP_KERNEL);
	if (!doe_mb)
		return ERR_PTR(-ENOMEM);

	doe_mb->pdev = pdev;
	doe_mb->cap_offset = cap_offset;
	init_waitqueue_head(&doe_mb->wq);
	xa_init(&doe_mb->prots);

	doe_mb->work_queue = alloc_ordered_workqueue("%s %s DOE [%x]", 0,
						     dev_bus_name(&pdev->dev),
						     pci_name(pdev),
						     doe_mb->cap_offset);
	if (!doe_mb->work_queue) {
		pci_err(pdev, "[%x] failed to allocate work queue\n",
			doe_mb->cap_offset);
		rc = -ENOMEM;
		goto err_free;
	}

	/* Reset the mailbox by issuing an abort */
	rc = pci_doe_abort(doe_mb);
	if (rc) {
		pci_err(pdev, "[%x] failed to reset mailbox with abort command: %d\n",
			doe_mb->cap_offset, rc);
		goto err_destroy_wq;
	}

	/*
	 * The state machine and the mailbox should be in sync now;
	 * Use the mailbox to query protocols.
	 */
	rc = pci_doe_cache_protocols(doe_mb);
	if (rc) {
		pci_err(pdev, "[%x] failed to cache protocols: %d\n",
			doe_mb->cap_offset, rc);
		goto err_cancel;
	}

	return doe_mb;

err_cancel:
	pci_doe_cancel_tasks(doe_mb);
	xa_destroy(&doe_mb->prots);
err_destroy_wq:
	destroy_workqueue(doe_mb->work_queue);
err_free:
	kfree(doe_mb);

	return ERR_PTR(rc);
}

/**
 * pci_doe_destroy_mb() - Destroy a DOE mailbox object
 *
 * @doe_mb: DOE mailbox
 *
 * Destroy all internal data structures created for the DOE mailbox.
 */
static void pci_doe_destroy_mb(struct pci_doe_mb *doe_mb)
{
	pci_doe_cancel_tasks(doe_mb);
	xa_destroy(&doe_mb->prots);
	destroy_workqueue(doe_mb->work_queue);
	kfree(doe_mb);
}

/**
 * pci_doe_supports_prot() - Return if the DOE instance supports the given
 *			     protocol
 * @doe_mb: DOE mailbox capability to query
 * @vid: Protocol Vendor ID
 * @type: Protocol type
 *
 * RETURNS: True if the DOE mailbox supports the protocol specified
 */
static bool pci_doe_supports_prot(struct pci_doe_mb *doe_mb, u16 vid, u8 type)
{
	unsigned long index;
	void *entry;

	/* The discovery protocol must always be supported */
	if (vid == PCI_VENDOR_ID_PCI_SIG && type == PCI_DOE_PROTOCOL_DISCOVERY)
		return true;

	xa_for_each(&doe_mb->prots, index, entry)
		if (entry == pci_doe_xa_prot_entry(vid, type))
			return true;

	return false;
}

/**
 * pci_doe_submit_task() - Submit a task to be processed by the state machine
 *
 * @doe_mb: DOE mailbox capability to submit to
 * @task: task to be queued
 *
 * Submit a DOE task (request/response) to the DOE mailbox to be processed.
 * Returns upon queueing the task object.  If the queue is full this function
 * will sleep until there is room in the queue.
 *
 * task->complete will be called when the state machine is done processing this
 * task.
 *
 * @task must be allocated on the stack.
 *
 * Excess data will be discarded.
 *
 * RETURNS: 0 when task has been successfully queued, -ERRNO on error
 */
static int pci_doe_submit_task(struct pci_doe_mb *doe_mb,
			       struct pci_doe_task *task)
{
	if (!pci_doe_supports_prot(doe_mb, task->prot.vid, task->prot.type))
		return -EINVAL;

	if (test_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags))
		return -EIO;

	task->doe_mb = doe_mb;
	INIT_WORK_ONSTACK(&task->work, doe_statemachine_work);
	queue_work(doe_mb->work_queue, &task->work);
	return 0;
}

/**
 * pci_doe() - Perform Data Object Exchange
 *
 * @doe_mb: DOE Mailbox
 * @vendor: Vendor ID
 * @type: Data Object Type
 * @request: Request payload
 * @request_sz: Size of request payload (bytes)
 * @response: Response payload
 * @response_sz: Size of response payload (bytes)
 *
 * Submit @request to @doe_mb and store the @response.
 * The DOE exchange is performed synchronously and may therefore sleep.
 *
 * Payloads are treated as opaque byte streams which are transmitted verbatim,
 * without byte-swapping.  If payloads contain little-endian register values,
 * the caller is responsible for conversion with cpu_to_le32() / le32_to_cpu().
 *
 * For convenience, arbitrary payload sizes are allowed even though PCIe r6.0
 * sec 6.30.1 specifies the Data Object Header 2 "Length" in dwords.  The last
 * (partial) dword is copied with byte granularity and padded with zeroes if
 * necessary.  Callers are thus relieved of using dword-sized bounce buffers.
 *
 * RETURNS: Length of received response or negative errno.
 * Received data in excess of @response_sz is discarded.
 * The length may be smaller than @response_sz and the caller
 * is responsible for checking that.
 */
int pci_doe(struct pci_doe_mb *doe_mb, u16 vendor, u8 type,
	    const void *request, size_t request_sz,
	    void *response, size_t response_sz)
{
	DECLARE_COMPLETION_ONSTACK(c);
	struct pci_doe_task task = {
		.prot.vid = vendor,
		.prot.type = type,
		.request_pl = request,
		.request_pl_sz = request_sz,
		.response_pl = response,
		.response_pl_sz = response_sz,
		.complete = pci_doe_task_complete,
		.private = &c,
	};
	int rc;

	rc = pci_doe_submit_task(doe_mb, &task);
	if (rc)
		return rc;

	wait_for_completion(&c);

	return task.rv;
}
EXPORT_SYMBOL_GPL(pci_doe);
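
/*
 * A minimal usage sketch (not built): how a consumer might drive a
 * synchronous exchange with pci_doe().  EXAMPLE_VID and EXAMPLE_TYPE are
 * hypothetical placeholders for a real protocol's Vendor ID and Data
 * Object Type; they are not defined anywhere in this file.
 */
#if 0
static int example_doe_exchange(struct pci_doe_mb *doe_mb)
{
	__le32 request = cpu_to_le32(0);	/* protocol-specific payload */
	__le32 response[4];
	int rc;

	rc = pci_doe(doe_mb, EXAMPLE_VID, EXAMPLE_TYPE,
		     &request, sizeof(request),
		     response, sizeof(response));
	if (rc < 0)
		return rc;	/* exchange failed or mailbox dead */

	/* rc is the number of response bytes actually received */
	if (rc < (int)sizeof(response[0]))
		return -EIO;	/* response shorter than expected */

	return 0;
}
#endif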

/**
 * pci_find_doe_mailbox() - Find Data Object Exchange mailbox
 *
 * @pdev: PCI device
 * @vendor: Vendor ID
 * @type: Data Object Type
 *
 * Find first DOE mailbox of a PCI device which supports the given protocol.
 *
 * RETURNS: Pointer to the DOE mailbox or NULL if none was found.
 */
struct pci_doe_mb *pci_find_doe_mailbox(struct pci_dev *pdev, u16 vendor,
					u8 type)
{
	struct pci_doe_mb *doe_mb;
	unsigned long index;

	xa_for_each(&pdev->doe_mbs, index, doe_mb)
		if (pci_doe_supports_prot(doe_mb, vendor, type))
			return doe_mb;

	return NULL;
}
EXPORT_SYMBOL_GPL(pci_find_doe_mailbox);
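
/*
 * A minimal probe-time sketch (not built): a driver that speaks a DOE
 * protocol might locate its mailbox like this.  EXAMPLE_VID and
 * EXAMPLE_TYPE are again hypothetical placeholders, and
 * example_doe_exchange() is the sketch above.
 */
#if 0
static int example_probe(struct pci_dev *pdev)
{
	struct pci_doe_mb *doe_mb;

	doe_mb = pci_find_doe_mailbox(pdev, EXAMPLE_VID, EXAMPLE_TYPE);
	if (!doe_mb)
		return -ENODEV;	/* no mailbox supports this protocol */

	/* Mailboxes live for the lifetime of @pdev; no cleanup needed. */
	return example_doe_exchange(doe_mb);
}
#endif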

void pci_doe_init(struct pci_dev *pdev)
{
	struct pci_doe_mb *doe_mb;
	u16 offset = 0;
	int rc;

	xa_init(&pdev->doe_mbs);

	while ((offset = pci_find_next_ext_capability(pdev, offset,
						      PCI_EXT_CAP_ID_DOE))) {
		doe_mb = pci_doe_create_mb(pdev, offset);
		if (IS_ERR(doe_mb)) {
			pci_err(pdev, "[%x] failed to create mailbox: %ld\n",
				offset, PTR_ERR(doe_mb));
			continue;
		}

		rc = xa_insert(&pdev->doe_mbs, offset, doe_mb, GFP_KERNEL);
		if (rc) {
			pci_err(pdev, "[%x] failed to insert mailbox: %d\n",
				offset, rc);
			pci_doe_destroy_mb(doe_mb);
		}
	}
}

void pci_doe_destroy(struct pci_dev *pdev)
{
	struct pci_doe_mb *doe_mb;
	unsigned long index;

	xa_for_each(&pdev->doe_mbs, index, doe_mb)
		pci_doe_destroy_mb(doe_mb);

	xa_destroy(&pdev->doe_mbs);
}

void pci_doe_disconnected(struct pci_dev *pdev)
{
	struct pci_doe_mb *doe_mb;
	unsigned long index;

	xa_for_each(&pdev->doe_mbs, index, doe_mb)
		pci_doe_cancel_tasks(doe_mb);
}