/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2016,2017 Advanced Micro Devices, Inc.
 *
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

2017-03-23 20:53:30 +03:00
# include <linux/module.h>
2016-04-18 17:21:44 +03:00
# include <linux/kernel.h>
# include <linux/dmaengine.h>
# include <linux/spinlock.h>
# include <linux/mutex.h>
# include <linux/ccp.h>
# include "ccp-dev.h"
# include "../../dma/dmaengine.h"
# define CCP_DMA_WIDTH(_mask) \
( { \
u64 mask = _mask + 1 ; \
( mask = = 0 ) ? 64 : fls64 ( mask ) ; \
} )
/* The CCP as a DMA provider can be configured for public or private
 * channels. Default is specified in the vdata for the device (PCI ID).
 * This module parameter will override for all channels on all devices:
 *	dma_chan_attr = 0x2 to force all channels public
 *		      = 0x1 to force all channels private
 *		      = 0x0 to defer to the vdata setting
 *		      = any other value: warning, revert to 0x0
 */
static unsigned int dma_chan_attr = CCP_DMA_DFLT;
module_param(dma_chan_attr, uint, 0444);
MODULE_PARM_DESC(dma_chan_attr, "Set DMA channel visibility: 0 (default) = device defaults, 1 = make private, 2 = make public");

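/* Resolve the effective channel visibility for a device: a valid
 * dma_chan_attr module parameter value (set at load time, e.g.
 * dma_chan_attr=0x2) overrides the per-device vdata default; any other
 * value falls back to the vdata setting after a one-time notice.
 */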
static unsigned int ccp_get_dma_chan_attr(struct ccp_device *ccp)
{
	switch (dma_chan_attr) {
	case CCP_DMA_DFLT:
		return ccp->vdata->dma_chan_attr;

	case CCP_DMA_PRIV:
		return DMA_PRIVATE;

	case CCP_DMA_PUB:
		return 0;

	default:
		dev_info_once(ccp->dev, "Invalid value for dma_chan_attr: %d\n",
			      dma_chan_attr);
		return ccp->vdata->dma_chan_attr;
	}
}

static void ccp_free_cmd_resources(struct ccp_device *ccp,
				   struct list_head *list)
{
	struct ccp_dma_cmd *cmd, *ctmp;

	list_for_each_entry_safe(cmd, ctmp, list, entry) {
		list_del(&cmd->entry);
		kmem_cache_free(ccp->dma_cmd_cache, cmd);
	}
}

static void ccp_free_desc_resources(struct ccp_device *ccp,
				    struct list_head *list)
{
	struct ccp_dma_desc *desc, *dtmp;

	list_for_each_entry_safe(desc, dtmp, list, entry) {
		ccp_free_cmd_resources(ccp, &desc->active);
		ccp_free_cmd_resources(ccp, &desc->pending);

		list_del(&desc->entry);
		kmem_cache_free(ccp->dma_desc_cache, desc);
	}
}

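/* dmaengine device_free_chan_resources callback: release every descriptor
 * still queued on the channel, whichever list (complete, active, pending,
 * created) it sits on.
 */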
static void ccp_free_chan_resources(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s - chan=%p\n", __func__, chan);

	spin_lock_irqsave(&chan->lock, flags);

	ccp_free_desc_resources(chan->ccp, &chan->complete);
	ccp_free_desc_resources(chan->ccp, &chan->active);
	ccp_free_desc_resources(chan->ccp, &chan->pending);
	ccp_free_desc_resources(chan->ccp, &chan->created);

	spin_unlock_irqrestore(&chan->lock, flags);
}

static void ccp_cleanup_desc_resources(struct ccp_device *ccp,
				       struct list_head *list)
{
	struct ccp_dma_desc *desc, *dtmp;

	list_for_each_entry_safe_reverse(desc, dtmp, list, entry) {
		if (!async_tx_test_ack(&desc->tx_desc))
			continue;

		dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);

		ccp_free_cmd_resources(ccp, &desc->active);
		ccp_free_cmd_resources(ccp, &desc->pending);

		list_del(&desc->entry);
		kmem_cache_free(ccp->dma_desc_cache, desc);
	}
}

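/* Cleanup tasklet: reap descriptors on the complete list that the client
 * has already acknowledged.
 */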
static void ccp_do_cleanup(unsigned long data)
{
	struct ccp_dma_chan *chan = (struct ccp_dma_chan *)data;
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s - chan=%s\n", __func__,
		dma_chan_name(&chan->dma_chan));

	spin_lock_irqsave(&chan->lock, flags);

	ccp_cleanup_desc_resources(chan->ccp, &chan->complete);

	spin_unlock_irqrestore(&chan->lock, flags);
}

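/* Move the descriptor's next pending command to its active list and hand it
 * to the CCP command queue; an in-progress or backlogged enqueue counts as
 * success.
 */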
static int ccp_issue_next_cmd(struct ccp_dma_desc *desc)
{
	struct ccp_dma_cmd *cmd;
	int ret;

	cmd = list_first_entry(&desc->pending, struct ccp_dma_cmd, entry);
	list_move(&cmd->entry, &desc->active);

	dev_dbg(desc->ccp->dev, "%s - tx %d, cmd=%p\n", __func__,
		desc->tx_desc.cookie, cmd);

	ret = ccp_enqueue_cmd(&cmd->ccp_cmd);
	if (!ret || (ret == -EINPROGRESS) || (ret == -EBUSY))
		return 0;

	dev_dbg(desc->ccp->dev, "%s - error: ret=%d, tx %d, cmd=%p\n", __func__,
		ret, desc->tx_desc.cookie, cmd);

	return ret;
}

static void ccp_free_active_cmd(struct ccp_dma_desc *desc)
{
	struct ccp_dma_cmd *cmd;

	cmd = list_first_entry_or_null(&desc->active, struct ccp_dma_cmd,
				       entry);
	if (!cmd)
		return;

	dev_dbg(desc->ccp->dev, "%s - freeing tx %d cmd=%p\n",
		__func__, desc->tx_desc.cookie, cmd);

	list_del(&cmd->entry);
	kmem_cache_free(desc->ccp->dma_cmd_cache, cmd);
}

static struct ccp_dma_desc *__ccp_next_dma_desc(struct ccp_dma_chan *chan,
						struct ccp_dma_desc *desc)
{
	/* Move current DMA descriptor to the complete list */
	if (desc)
		list_move(&desc->entry, &chan->complete);

	/* Get the next DMA descriptor on the active list */
	desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
					entry);

	return desc;
}

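/* Retire the just-finished command of the current descriptor; once a
 * descriptor's command list is drained (or it hit an error) complete it
 * (cookie, client callback, dependencies) and move on to the next active
 * descriptor. Returns the next descriptor that still has commands to issue,
 * or NULL when the active list is empty.
 */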
static struct ccp_dma_desc *ccp_handle_active_desc(struct ccp_dma_chan *chan,
						   struct ccp_dma_desc *desc)
{
	struct dma_async_tx_descriptor *tx_desc;
	unsigned long flags;

	/* Loop over descriptors until one is found with commands */
	do {
		if (desc) {
			/* Remove the DMA command from the list and free it */
			ccp_free_active_cmd(desc);

			if (!list_empty(&desc->pending)) {
				/* No errors, keep going */
				if (desc->status != DMA_ERROR)
					return desc;

				/* Error, free remaining commands and move on */
				ccp_free_cmd_resources(desc->ccp,
						       &desc->pending);
			}

			tx_desc = &desc->tx_desc;
		} else {
			tx_desc = NULL;
		}

		spin_lock_irqsave(&chan->lock, flags);

		if (desc) {
			if (desc->status != DMA_ERROR)
				desc->status = DMA_COMPLETE;

			dev_dbg(desc->ccp->dev,
				"%s - tx %d complete, status=%u\n", __func__,
				desc->tx_desc.cookie, desc->status);

			dma_cookie_complete(tx_desc);
		}

		desc = __ccp_next_dma_desc(chan, desc);

		spin_unlock_irqrestore(&chan->lock, flags);

		if (tx_desc) {
			if (tx_desc->callback &&
			    (tx_desc->flags & DMA_PREP_INTERRUPT))
				tx_desc->callback(tx_desc->callback_param);

			dma_run_dependencies(tx_desc);
		}
	} while (desc);

	return NULL;
}

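/* Splice the pending list onto the tail of the active list (caller holds
 * chan->lock). Returns the first pending descriptor if the active list was
 * empty, i.e. if processing needs to be kicked off, otherwise NULL.
 */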
static struct ccp_dma_desc *__ccp_pending_to_active(struct ccp_dma_chan *chan)
{
	struct ccp_dma_desc *desc;

	if (list_empty(&chan->pending))
		return NULL;

	desc = list_empty(&chan->active)
		? list_first_entry(&chan->pending, struct ccp_dma_desc, entry)
		: NULL;

	list_splice_tail_init(&chan->pending, &chan->active);

	return desc;
}

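/* Completion callback for a queued CCP passthrough command: record errors,
 * retire finished work via ccp_handle_active_desc() and, as long as the
 * channel is not paused, keep issuing the next command in line. Finishes by
 * scheduling the cleanup tasklet.
 */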
static void ccp_cmd_callback(void *data, int err)
{
	struct ccp_dma_desc *desc = data;
	struct ccp_dma_chan *chan;
	int ret;

	if (err == -EINPROGRESS)
		return;

	chan = container_of(desc->tx_desc.chan, struct ccp_dma_chan,
			    dma_chan);

	dev_dbg(chan->ccp->dev, "%s - tx %d callback, err=%d\n",
		__func__, desc->tx_desc.cookie, err);

	if (err)
		desc->status = DMA_ERROR;

	while (true) {
		/* Check for DMA descriptor completion */
		desc = ccp_handle_active_desc(chan, desc);

		/* Don't submit cmd if no descriptor or DMA is paused */
		if (!desc || (chan->status == DMA_PAUSED))
			break;

		ret = ccp_issue_next_cmd(desc);
		if (!ret)
			break;

		desc->status = DMA_ERROR;
	}

	tasklet_schedule(&chan->cleanup_tasklet);
}

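/* dmaengine tx_submit callback: assign a cookie and move the descriptor from
 * the channel's created list to its pending list.
 */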
static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc)
{
	struct ccp_dma_desc *desc = container_of(tx_desc, struct ccp_dma_desc,
						 tx_desc);
	struct ccp_dma_chan *chan;
	dma_cookie_t cookie;
	unsigned long flags;

	chan = container_of(tx_desc->chan, struct ccp_dma_chan, dma_chan);

	spin_lock_irqsave(&chan->lock, flags);

	cookie = dma_cookie_assign(tx_desc);
	list_del(&desc->entry);
	list_add_tail(&desc->entry, &chan->pending);

	spin_unlock_irqrestore(&chan->lock, flags);

	dev_dbg(chan->ccp->dev, "%s - added tx descriptor %d to pending list\n",
		__func__, cookie);

	return cookie;
}

static struct ccp_dma_cmd *ccp_alloc_dma_cmd(struct ccp_dma_chan *chan)
{
	struct ccp_dma_cmd *cmd;

	cmd = kmem_cache_alloc(chan->ccp->dma_cmd_cache, GFP_NOWAIT);
	if (cmd)
		memset(cmd, 0, sizeof(*cmd));

	return cmd;
}

static struct ccp_dma_desc *ccp_alloc_dma_desc(struct ccp_dma_chan *chan,
					       unsigned long flags)
{
	struct ccp_dma_desc *desc;

	desc = kmem_cache_zalloc(chan->ccp->dma_desc_cache, GFP_NOWAIT);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->tx_desc, &chan->dma_chan);
	desc->tx_desc.flags = flags;
	desc->tx_desc.tx_submit = ccp_tx_submit;
	desc->ccp = chan->ccp;
	INIT_LIST_HEAD(&desc->pending);
	INIT_LIST_HEAD(&desc->active);
	desc->status = DMA_IN_PROGRESS;

	return desc;
}

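/* Build a DMA descriptor for a src/dst scatterlist pair: walk both lists in
 * lockstep and queue one CCP passthrough (no-DMA-map) command per contiguous
 * chunk, then park the descriptor on the channel's created list until it is
 * submitted.
 */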
static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
					    struct scatterlist *dst_sg,
					    unsigned int dst_nents,
					    struct scatterlist *src_sg,
					    unsigned int src_nents,
					    unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_device *ccp = chan->ccp;
	struct ccp_dma_desc *desc;
	struct ccp_dma_cmd *cmd;
	struct ccp_cmd *ccp_cmd;
	struct ccp_passthru_nomap_engine *ccp_pt;
	unsigned int src_offset, src_len;
	unsigned int dst_offset, dst_len;
	unsigned int len;
	unsigned long sflags;
	size_t total_len;

	if (!dst_sg || !src_sg)
		return NULL;

	if (!dst_nents || !src_nents)
		return NULL;

	desc = ccp_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

	total_len = 0;

	src_len = sg_dma_len(src_sg);
	src_offset = 0;

	dst_len = sg_dma_len(dst_sg);
	dst_offset = 0;

	while (true) {
		if (!src_len) {
			src_nents--;
			if (!src_nents)
				break;

			src_sg = sg_next(src_sg);
			if (!src_sg)
				break;

			src_len = sg_dma_len(src_sg);
			src_offset = 0;
			continue;
		}

		if (!dst_len) {
			dst_nents--;
			if (!dst_nents)
				break;

			dst_sg = sg_next(dst_sg);
			if (!dst_sg)
				break;

			dst_len = sg_dma_len(dst_sg);
			dst_offset = 0;
			continue;
		}

		len = min(dst_len, src_len);

		cmd = ccp_alloc_dma_cmd(chan);
		if (!cmd)
			goto err;

		ccp_cmd = &cmd->ccp_cmd;
		ccp_cmd->ccp = chan->ccp;
		ccp_pt = &ccp_cmd->u.passthru_nomap;
		ccp_cmd->flags = CCP_CMD_MAY_BACKLOG;
		ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP;
		ccp_cmd->engine = CCP_ENGINE_PASSTHRU;
		ccp_pt->bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
		ccp_pt->byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
		ccp_pt->src_dma = sg_dma_address(src_sg) + src_offset;
		ccp_pt->dst_dma = sg_dma_address(dst_sg) + dst_offset;
		ccp_pt->src_len = len;
		ccp_pt->final = 1;
		ccp_cmd->callback = ccp_cmd_callback;
		ccp_cmd->data = desc;

		list_add_tail(&cmd->entry, &desc->pending);

		dev_dbg(ccp->dev,
			"%s - cmd=%p, src=%pad, dst=%pad, len=%llu\n", __func__,
			cmd, &ccp_pt->src_dma,
			&ccp_pt->dst_dma, ccp_pt->src_len);

		total_len += len;

		src_len -= len;
		src_offset += len;

		dst_len -= len;
		dst_offset += len;
	}

	desc->len = total_len;

	if (list_empty(&desc->pending))
		goto err;

	dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);

	spin_lock_irqsave(&chan->lock, sflags);

	list_add_tail(&desc->entry, &chan->created);

	spin_unlock_irqrestore(&chan->lock, sflags);

	return desc;

err:
	ccp_free_cmd_resources(ccp, &desc->pending);
	kmem_cache_free(ccp->dma_desc_cache, desc);

	return NULL;
}

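/* dmaengine device_prep_dma_memcpy callback: wrap the single src/dst pair in
 * one-entry scatterlists and reuse ccp_create_desc().
 */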
static struct dma_async_tx_descriptor *ccp_prep_dma_memcpy(
	struct dma_chan *dma_chan, dma_addr_t dst, dma_addr_t src, size_t len,
	unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	struct scatterlist dst_sg, src_sg;

	dev_dbg(chan->ccp->dev,
		"%s - src=%pad, dst=%pad, len=%zu, flags=%#lx\n",
		__func__, &src, &dst, len, flags);

	sg_init_table(&dst_sg, 1);
	sg_dma_address(&dst_sg) = dst;
	sg_dma_len(&dst_sg) = len;

	sg_init_table(&src_sg, 1);
	sg_dma_address(&src_sg) = src;
	sg_dma_len(&src_sg) = len;

	desc = ccp_create_desc(dma_chan, &dst_sg, 1, &src_sg, 1, flags);
	if (!desc)
		return NULL;

	return &desc->tx_desc;
}

static struct dma_async_tx_descriptor *ccp_prep_dma_sg(
	struct dma_chan *dma_chan, struct scatterlist *dst_sg,
	unsigned int dst_nents, struct scatterlist *src_sg,
	unsigned int src_nents, unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;

	dev_dbg(chan->ccp->dev,
		"%s - src=%p, src_nents=%u dst=%p, dst_nents=%u, flags=%#lx\n",
		__func__, src_sg, src_nents, dst_sg, dst_nents, flags);

	desc = ccp_create_desc(dma_chan, dst_sg, dst_nents, src_sg, src_nents,
			       flags);
	if (!desc)
		return NULL;

	return &desc->tx_desc;
}

static struct dma_async_tx_descriptor *ccp_prep_dma_interrupt(
	struct dma_chan *dma_chan, unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;

	desc = ccp_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

	return &desc->tx_desc;
}

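/* dmaengine device_issue_pending callback: promote pending descriptors to the
 * active list and, if the channel was idle, start issuing commands.
 */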
static void ccp_issue_pending(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s\n", __func__);

	spin_lock_irqsave(&chan->lock, flags);

	desc = __ccp_pending_to_active(chan);

	spin_unlock_irqrestore(&chan->lock, flags);

	/* If there was nothing active, start processing */
	if (desc)
		ccp_cmd_callback(desc, 0);
}

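/* dmaengine device_tx_status callback: report the cookie state, consulting
 * the complete list so a per-descriptor error status is not lost.
 */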
static enum dma_status ccp_tx_status(struct dma_chan *dma_chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *state)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	enum dma_status ret;
	unsigned long flags;

	if (chan->status == DMA_PAUSED) {
		ret = DMA_PAUSED;
		goto out;
	}

	ret = dma_cookie_status(dma_chan, cookie, state);
	if (ret == DMA_COMPLETE) {
		spin_lock_irqsave(&chan->lock, flags);

		/* Get status from complete chain, if still there */
		list_for_each_entry(desc, &chan->complete, entry) {
			if (desc->tx_desc.cookie != cookie)
				continue;

			ret = desc->status;
			break;
		}

		spin_unlock_irqrestore(&chan->lock, flags);
	}

out:
	dev_dbg(chan->ccp->dev, "%s - %u\n", __func__, ret);

	return ret;
}

static int ccp_pause(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);

	chan->status = DMA_PAUSED;

	/*TODO: Wait for active DMA to complete before returning? */

	return 0;
}

static int ccp_resume(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
					entry);

	spin_unlock_irqrestore(&chan->lock, flags);

	/* Indicate the channel is running again */
	chan->status = DMA_IN_PROGRESS;

	/* If there was something active, re-start */
	if (desc)
		ccp_cmd_callback(desc, 0);

	return 0;
}

static int ccp_terminate_all(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s\n", __func__);

	/*TODO: Wait for active DMA to complete before continuing */

	spin_lock_irqsave(&chan->lock, flags);

	/*TODO: Purge the complete list? */
	ccp_free_desc_resources(chan->ccp, &chan->active);
	ccp_free_desc_resources(chan->ccp, &chan->pending);
	ccp_free_desc_resources(chan->ccp, &chan->created);

	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

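/* Register the CCP as a dmaengine provider: allocate per-command-queue
 * channel state and the command/descriptor caches, describe the device's
 * capabilities (MEMCPY, SG, INTERRUPT), apply the channel visibility policy
 * and hook up the dma_device callbacks.
 */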
int ccp_dmaengine_register(struct ccp_device *ccp)
{
	struct ccp_dma_chan *chan;
	struct dma_device *dma_dev = &ccp->dma_dev;
	struct dma_chan *dma_chan;
	char *dma_cmd_cache_name;
	char *dma_desc_cache_name;
	unsigned int i;
	int ret;

	ccp->ccp_dma_chan = devm_kcalloc(ccp->dev, ccp->cmd_q_count,
					 sizeof(*(ccp->ccp_dma_chan)),
					 GFP_KERNEL);
	if (!ccp->ccp_dma_chan)
		return -ENOMEM;

	dma_cmd_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
					    "%s-dmaengine-cmd-cache",
					    ccp->name);
	if (!dma_cmd_cache_name)
		return -ENOMEM;

	ccp->dma_cmd_cache = kmem_cache_create(dma_cmd_cache_name,
					       sizeof(struct ccp_dma_cmd),
					       sizeof(void *),
					       SLAB_HWCACHE_ALIGN, NULL);
	if (!ccp->dma_cmd_cache)
		return -ENOMEM;

	dma_desc_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
					     "%s-dmaengine-desc-cache",
					     ccp->name);
	if (!dma_desc_cache_name) {
		ret = -ENOMEM;
		goto err_cache;
	}

	ccp->dma_desc_cache = kmem_cache_create(dma_desc_cache_name,
						sizeof(struct ccp_dma_desc),
						sizeof(void *),
						SLAB_HWCACHE_ALIGN, NULL);
	if (!ccp->dma_desc_cache) {
		ret = -ENOMEM;
		goto err_cache;
	}

	dma_dev->dev = ccp->dev;
	dma_dev->src_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
	dma_dev->dst_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
	dma_dev->directions = DMA_MEM_TO_MEM;
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_SG, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);

	/* The DMA channels for this device can be set to public or private,
	 * and overridden by the module parameter dma_chan_attr.
	 * Default: according to the value in vdata (dma_chan_attr=0)
	 * dma_chan_attr=0x1: all channels private (override vdata)
	 * dma_chan_attr=0x2: all channels public (override vdata)
	 */
	if (ccp_get_dma_chan_attr(ccp) == DMA_PRIVATE)
		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

	INIT_LIST_HEAD(&dma_dev->channels);
	for (i = 0; i < ccp->cmd_q_count; i++) {
		chan = ccp->ccp_dma_chan + i;
		dma_chan = &chan->dma_chan;

		chan->ccp = ccp;

		spin_lock_init(&chan->lock);
		INIT_LIST_HEAD(&chan->created);
		INIT_LIST_HEAD(&chan->pending);
		INIT_LIST_HEAD(&chan->active);
		INIT_LIST_HEAD(&chan->complete);

		tasklet_init(&chan->cleanup_tasklet, ccp_do_cleanup,
			     (unsigned long)chan);

		dma_chan->device = dma_dev;
		dma_cookie_init(dma_chan);

		list_add_tail(&dma_chan->device_node, &dma_dev->channels);
	}

	dma_dev->device_free_chan_resources = ccp_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = ccp_prep_dma_memcpy;
	dma_dev->device_prep_dma_sg = ccp_prep_dma_sg;
	dma_dev->device_prep_dma_interrupt = ccp_prep_dma_interrupt;
	dma_dev->device_issue_pending = ccp_issue_pending;
	dma_dev->device_tx_status = ccp_tx_status;
	dma_dev->device_pause = ccp_pause;
	dma_dev->device_resume = ccp_resume;
	dma_dev->device_terminate_all = ccp_terminate_all;

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_reg;

	return 0;

err_reg:
	kmem_cache_destroy(ccp->dma_desc_cache);

err_cache:
	kmem_cache_destroy(ccp->dma_cmd_cache);

	return ret;
}

void ccp_dmaengine_unregister(struct ccp_device *ccp)
{
	struct dma_device *dma_dev = &ccp->dma_dev;

	dma_async_device_unregister(dma_dev);

	kmem_cache_destroy(ccp->dma_desc_cache);
	kmem_cache_destroy(ccp->dma_cmd_cache);
}