// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"

static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c)
{
	struct idxd_dma_chan *idxd_chan;

	idxd_chan = container_of(c, struct idxd_dma_chan, chan);
	return idxd_chan->wq;
}

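/*
 * Translate a completed (or aborted) descriptor into a dmaengine result,
 * invoke the client's completion callback, and optionally return the
 * descriptor to the wq's free list.
 */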
void idxd_dma_complete_txd(struct idxd_desc *desc,
			   enum idxd_complete_type comp_type,
			   bool free_desc)
{
	struct idxd_device *idxd = desc->wq->idxd;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_result res;
	int complete = 1;

	if (desc->completion->status == DSA_COMP_SUCCESS) {
		res.result = DMA_TRANS_NOERROR;
	} else if (desc->completion->status) {
		if (idxd->request_int_handles && comp_type != IDXD_COMPLETE_ABORT &&
		    desc->completion->status == DSA_COMP_INT_HANDLE_INVAL &&
		    idxd_queue_int_handle_resubmit(desc))
			return;
		res.result = DMA_TRANS_WRITE_FAILED;
	} else if (comp_type == IDXD_COMPLETE_ABORT) {
		res.result = DMA_TRANS_ABORTED;
	} else {
		complete = 0;
	}

	tx = &desc->txd;
	if (complete && tx->cookie) {
		dma_cookie_complete(tx);
		dma_descriptor_unmap(tx);
		dmaengine_desc_get_callback_invoke(tx, &res);
		tx->callback = NULL;
		tx->callback_result = NULL;
	}

	if (free_desc)
		idxd_free_desc(desc->wq, desc);
}

static void op_flag_setup(unsigned long flags, u32 *desc_flags)
{
	*desc_flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR;
	if (flags & DMA_PREP_INTERRUPT)
		*desc_flags |= IDXD_OP_FLAG_RCI;
}

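/* Fill in the fields of a DSA hardware descriptor that are common to all operations. */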
static inline void idxd_prep_desc_common(struct idxd_wq *wq,
					 struct dsa_hw_desc *hw, char opcode,
					 u64 addr_f1, u64 addr_f2, u64 len,
					 u64 compl, u32 flags)
{
	hw->flags = flags;
	hw->opcode = opcode;
	hw->src_addr = addr_f1;
	hw->dst_addr = addr_f2;
	hw->xfer_size = len;
	/*
	 * For dedicated WQ, this field is ignored and HW will use the WQCFG.priv
	 * field instead. This field should be set to 1 for kernel descriptors.
	 */
	hw->priv = 1;
	hw->completion_addr = compl;
}

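/*
 * DMA_INTERRUPT is implemented as a NOOP descriptor; the completion interrupt
 * is requested via the RCI flag set in op_flag_setup().
 */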
static struct dma_async_tx_descriptor *
idxd_dma_prep_interrupt(struct dma_chan *c, unsigned long flags)
{
	struct idxd_wq *wq = to_idxd_wq(c);
	u32 desc_flags;
	struct idxd_desc *desc;

	if (wq->state != IDXD_WQ_ENABLED)
		return NULL;

	op_flag_setup(flags, &desc_flags);
	desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
	if (IS_ERR(desc))
		return NULL;

	idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_NOOP,
			      0, 0, 0, desc->compl_dma, desc_flags);
	desc->txd.flags = flags;
	return &desc->txd;
}

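/*
 * Prepare a memcpy as a DSA MEMMOVE descriptor. Requests larger than the
 * device's max_xfer_bytes are rejected rather than split.
 */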
static struct dma_async_tx_descriptor *
idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
		       dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct idxd_wq *wq = to_idxd_wq(c);
	u32 desc_flags;
	struct idxd_device *idxd = wq->idxd;
	struct idxd_desc *desc;

	if (wq->state != IDXD_WQ_ENABLED)
		return NULL;

	if (len > idxd->max_xfer_bytes)
		return NULL;

	op_flag_setup(flags, &desc_flags);
	desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
	if (IS_ERR(desc))
		return NULL;

	idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_MEMMOVE,
			      dma_src, dma_dest, len, desc->compl_dma,
			      desc_flags);
	desc->txd.flags = flags;
	return &desc->txd;
}

static int idxd_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct idxd_wq *wq = to_idxd_wq(chan);
	struct device *dev = &wq->idxd->pdev->dev;

	idxd_wq_get(wq);
	dev_dbg(dev, "%s: client_count: %d\n", __func__,
		idxd_wq_refcount(wq));
	return 0;
}

static void idxd_dma_free_chan_resources(struct dma_chan *chan)
{
	struct idxd_wq *wq = to_idxd_wq(chan);
	struct device *dev = &wq->idxd->pdev->dev;

	idxd_wq_put(wq);
	dev_dbg(dev, "%s: client_count: %d\n", __func__,
		idxd_wq_refcount(wq));
}

static enum dma_status idxd_dma_tx_status(struct dma_chan *dma_chan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	return DMA_OUT_OF_ORDER;
}

/*
 * issue_pending() does not need to do anything since tx_submit() does the job
 * already.
 */
static void idxd_dma_issue_pending(struct dma_chan *dma_chan)
{
}

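/*
 * Assign a dmaengine cookie and submit the descriptor to the work queue
 * immediately; on failure the descriptor is freed and the error returned.
 */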
static dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct idxd_wq *wq = to_idxd_wq(c);
	dma_cookie_t cookie;
	int rc;
	struct idxd_desc *desc = container_of(tx, struct idxd_desc, txd);

	cookie = dma_cookie_assign(tx);
	rc = idxd_submit_desc(wq, desc);
	if (rc < 0) {
		idxd_free_desc(wq, desc);
		return rc;
	}

	return cookie;
}

static void idxd_dma_release(struct dma_device *device)
{
	struct idxd_dma_dev *idxd_dma = container_of(device, struct idxd_dma_dev, dma);

	kfree(idxd_dma);
}

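/*
 * Allocate the idxd_dma_dev container and register it with the dmaengine
 * core, advertising the capabilities the device supports.
 */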
int idxd_register_dma_device(struct idxd_device *idxd)
{
	struct idxd_dma_dev *idxd_dma;
	struct dma_device *dma;
	struct device *dev = &idxd->pdev->dev;
	int rc;

	idxd_dma = kzalloc_node(sizeof(*idxd_dma), GFP_KERNEL, dev_to_node(dev));
	if (!idxd_dma)
		return -ENOMEM;

	dma = &idxd_dma->dma;
	INIT_LIST_HEAD(&dma->channels);
	dma->dev = dev;

	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
	dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask);
	dma->device_release = idxd_dma_release;

	dma->device_prep_dma_interrupt = idxd_dma_prep_interrupt;
	if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) {
		dma_cap_set(DMA_MEMCPY, dma->cap_mask);
		dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy;
	}

	dma->device_tx_status = idxd_dma_tx_status;
	dma->device_issue_pending = idxd_dma_issue_pending;
	dma->device_alloc_chan_resources = idxd_dma_alloc_chan_resources;
	dma->device_free_chan_resources = idxd_dma_free_chan_resources;

	rc = dma_async_device_register(dma);
	if (rc < 0) {
		kfree(idxd_dma);
		return rc;
	}

	idxd_dma->idxd = idxd;
	/*
	 * This pointer is protected by the refs taken by the dma_chan. It will remain valid
	 * as long as there are outstanding channels.
	 */
	idxd->idxd_dma = idxd_dma;
	return 0;
}

void idxd_unregister_dma_device(struct idxd_device *idxd)
{
	dma_async_device_unregister(&idxd->idxd_dma->dma);
}

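/*
 * Allocate a dma_chan for the wq, initialize the tx descriptors backing it,
 * and register the channel with the dmaengine core. A reference on the wq's
 * conf device pins it for as long as the channel exists.
 */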
static int idxd_register_dma_channel(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct dma_device *dma = &idxd->idxd_dma->dma;
	struct device *dev = &idxd->pdev->dev;
	struct idxd_dma_chan *idxd_chan;
	struct dma_chan *chan;
	int rc, i;

	idxd_chan = kzalloc_node(sizeof(*idxd_chan), GFP_KERNEL, dev_to_node(dev));
	if (!idxd_chan)
		return -ENOMEM;

	chan = &idxd_chan->chan;
	chan->device = dma;
	list_add_tail(&chan->device_node, &dma->channels);

	for (i = 0; i < wq->num_descs; i++) {
		struct idxd_desc *desc = wq->descs[i];

		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = idxd_dma_tx_submit;
	}

	rc = dma_async_device_channel_register(dma, chan);
	if (rc < 0) {
		kfree(idxd_chan);
		return rc;
	}

	wq->idxd_chan = idxd_chan;
	idxd_chan->wq = wq;
	get_device(wq_confdev(wq));

	return 0;
}

static void idxd_unregister_dma_channel(struct idxd_wq *wq)
{
	struct idxd_dma_chan *idxd_chan = wq->idxd_chan;
	struct dma_chan *chan = &idxd_chan->chan;
	struct idxd_dma_dev *idxd_dma = wq->idxd->idxd_dma;

	dma_async_device_channel_unregister(&idxd_dma->dma, chan);
	list_del(&chan->device_node);
	kfree(wq->idxd_chan);
	wq->idxd_chan = NULL;
	put_device(wq_confdev(wq));
}

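/*
 * Driver probe for a wq bound to the "dmaengine" idxd device driver: mark the
 * wq as a kernel wq, enable it, and expose it as a DMA channel.
 */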
static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev)
{
	struct device *dev = &idxd_dev->conf_dev;
	struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
	struct idxd_device *idxd = wq->idxd;
	int rc;

	if (idxd->state != IDXD_DEV_ENABLED)
		return -ENXIO;

	mutex_lock(&wq->wq_lock);
	wq->type = IDXD_WQT_KERNEL;

	rc = drv_enable_wq(wq);
	if (rc < 0) {
		dev_dbg(dev, "Enable wq %d failed: %d\n", wq->id, rc);
		rc = -ENXIO;
		goto err;
	}

	rc = idxd_register_dma_channel(wq);
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_DMA_CHAN_ERR;
		dev_dbg(dev, "Failed to register dma channel\n");
		goto err_dma;
	}

	idxd->cmd_status = 0;
	mutex_unlock(&wq->wq_lock);
	return 0;

err_dma:
	drv_disable_wq(wq);
err:
	wq->type = IDXD_WQT_NONE;
	mutex_unlock(&wq->wq_lock);
	return rc;
}

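/*
 * Driver remove: quiesce outstanding work on the wq, tear down the DMA
 * channel, and disable the wq.
 */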
static void idxd_dmaengine_drv_remove(struct idxd_dev *idxd_dev)
{
	struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);

	mutex_lock(&wq->wq_lock);
	__idxd_wq_quiesce(wq);
	idxd_unregister_dma_channel(wq);
	drv_disable_wq(wq);
	mutex_unlock(&wq->wq_lock);
}

static enum idxd_dev_type dev_types[] = {
	IDXD_DEV_WQ,
	IDXD_DEV_NONE,
};

struct idxd_device_driver idxd_dmaengine_drv = {
	.probe = idxd_dmaengine_drv_probe,
	.remove = idxd_dmaengine_drv_remove,
	.name = "dmaengine",
	.type = dev_types,
};
EXPORT_SYMBOL_GPL(idxd_dmaengine_drv);