// SPDX-License-Identifier: GPL-2.0
/*
 * Dmaengine driver base library for DMA controllers, found on SH-based SoCs
 *
 * extracted from shdma.c
 *
 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 */

#include <linux/delay.h>
#include <linux/shdma-base.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"

/* DMA descriptor control */
enum shdma_desc_status {
	DESC_IDLE,
	DESC_PREPARED,
	DESC_SUBMITTED,
	DESC_COMPLETED,	/* completed, have to call callback */
	DESC_WAITING,	/* callback called, waiting for ack / re-submit */
};

#define NR_DESCS_PER_CHANNEL 32

#define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan)
#define to_shdma_dev(d) container_of(d, struct shdma_dev, dma_dev)

/*
 * For slave DMA we assume that there is a finite number of DMA slaves in the
 * system, and that each such slave can only use a finite number of channels.
 * We use slave channel IDs to make sure that no such slave channel ID is
 * allocated more than once.
 */
static unsigned int slave_num = 256;
module_param(slave_num, uint, 0444);

/* A bitmask with slave_num bits */
static unsigned long *shdma_slave_used;

/* Called under spin_lock_irq(&schan->chan_lock) */
static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *sdesc;

	/* DMA work check */
	if (ops->channel_busy(schan))
		return;

	/* Find the first not transferred descriptor */
	list_for_each_entry(sdesc, &schan->ld_queue, node)
		if (sdesc->mark == DESC_SUBMITTED) {
			ops->start_xfer(schan, sdesc);
			break;
		}
}

static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct shdma_desc *chunk, *c, *desc =
		container_of(tx, struct shdma_desc, async_tx);
	struct shdma_chan *schan = to_shdma_chan(tx->chan);
	dma_async_tx_callback callback = tx->callback;
	dma_cookie_t cookie;
	bool power_up;

	spin_lock_irq(&schan->chan_lock);

	power_up = list_empty(&schan->ld_queue);

	cookie = dma_cookie_assign(tx);

	/* Mark all chunks of this descriptor as submitted, move to the queue */
	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
		/*
		 * All chunks are on the global ld_free, so, we have to find
		 * the end of the chain ourselves
		 */
		if (chunk != desc && (chunk->mark == DESC_IDLE ||
				      chunk->async_tx.cookie > 0 ||
				      chunk->async_tx.cookie == -EBUSY ||
				      &chunk->node == &schan->ld_free))
			break;
		chunk->mark = DESC_SUBMITTED;
		if (chunk->chunks == 1) {
			chunk->async_tx.callback = callback;
			chunk->async_tx.callback_param = tx->callback_param;
		} else {
			/* Callback goes to the last chunk */
			chunk->async_tx.callback = NULL;
		}
		chunk->cookie = cookie;
		list_move_tail(&chunk->node, &schan->ld_queue);

		dev_dbg(schan->dev, "submit #%d@%p on %d\n",
			tx->cookie, &chunk->async_tx, schan->id);
	}

	if (power_up) {
		int ret;
		schan->pm_state = SHDMA_PM_BUSY;

		ret = pm_runtime_get(schan->dev);

		spin_unlock_irq(&schan->chan_lock);
		if (ret < 0)
			dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);

		pm_runtime_barrier(schan->dev);

		spin_lock_irq(&schan->chan_lock);

		/* Have we been reset, while waiting? */
		if (schan->pm_state != SHDMA_PM_ESTABLISHED) {
			struct shdma_dev *sdev =
				to_shdma_dev(schan->dma_chan.device);
			const struct shdma_ops *ops = sdev->ops;
			dev_dbg(schan->dev, "Bring up channel %d\n",
				schan->id);
			/*
			 * TODO: .xfer_setup() might fail on some platforms.
			 * Make it int then, on error remove chunks from the
			 * queue again
			 */
			ops->setup_xfer(schan, schan->slave_id);

			if (schan->pm_state == SHDMA_PM_PENDING)
				shdma_chan_xfer_ld_queue(schan);
			schan->pm_state = SHDMA_PM_ESTABLISHED;
		}
	} else {
		/*
		 * Tell .device_issue_pending() not to run the queue, interrupts
		 * will do it anyway
		 */
		schan->pm_state = SHDMA_PM_PENDING;
	}

	spin_unlock_irq(&schan->chan_lock);

	return cookie;
}

/* Called with desc_lock held */
static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan)
{
	struct shdma_desc *sdesc;

	list_for_each_entry(sdesc, &schan->ld_free, node)
		if (sdesc->mark != DESC_PREPARED) {
			BUG_ON(sdesc->mark != DESC_IDLE);
			list_del(&sdesc->node);
			return sdesc;
		}

	return NULL;
}

static int shdma_setup_slave(struct shdma_chan *schan, dma_addr_t slave_addr)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	int ret, match;

	if (schan->dev->of_node) {
		match = schan->hw_req;
		ret = ops->set_slave(schan, match, slave_addr, true);
		if (ret < 0)
			return ret;
	} else {
		match = schan->real_slave_id;
	}

	if (schan->real_slave_id < 0 || schan->real_slave_id >= slave_num)
		return -EINVAL;

	if (test_and_set_bit(schan->real_slave_id, shdma_slave_used))
		return -EBUSY;

	ret = ops->set_slave(schan, match, slave_addr, false);
	if (ret < 0) {
		clear_bit(schan->real_slave_id, shdma_slave_used);
		return ret;
	}

	schan->slave_id = schan->real_slave_id;

	return 0;
}

static int shdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *desc;
	struct shdma_slave *slave = chan->private;
	int ret, i;

	/*
	 * This relies on the guarantee from dmaengine that alloc_chan_resources
	 * never runs concurrently with itself or free_chan_resources.
	 */
	if (slave) {
		/* Legacy mode: .private is set in filter */
		schan->real_slave_id = slave->slave_id;
		ret = shdma_setup_slave(schan, 0);
		if (ret < 0)
			goto esetslave;
	} else {
		/* Normal mode: real_slave_id was set by filter */
		schan->slave_id = -EINVAL;
	}

	schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
			      sdev->desc_size, GFP_KERNEL);
	if (!schan->desc) {
		ret = -ENOMEM;
		goto edescalloc;
	}
	schan->desc_num = NR_DESCS_PER_CHANNEL;

	for (i = 0; i < NR_DESCS_PER_CHANNEL; i++) {
		desc = ops->embedded_desc(schan->desc, i);
		dma_async_tx_descriptor_init(&desc->async_tx,
					     &schan->dma_chan);
		desc->async_tx.tx_submit = shdma_tx_submit;
		desc->mark = DESC_IDLE;

		list_add(&desc->node, &schan->ld_free);
	}

	return NR_DESCS_PER_CHANNEL;

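	/*
	 * Error unwind: jumping to edescalloc below releases the slave ID only
	 * if a slave was set up above, while esetslave is entered directly
	 * from the legacy-slave path, where "slave" is known to be non-NULL.
	 */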
edescalloc:
	if (slave)
esetslave:
		clear_bit(slave->slave_id, shdma_slave_used);
	chan->private = NULL;
	return ret;
}

/*
 * This is the standard shdma filter function to be used as a replacement to the
 * "old" method, using the .private pointer.
 * You always have to pass a valid slave id as the argument; old drivers that
 * pass ERR_PTR(-EINVAL) as a filter parameter and set it up in dma_slave_config
 * need to be updated so we can remove the slave_id field from dma_slave_config.
 * If this filter is used, the slave driver, after calling
 * dma_request_channel(), will also have to call dmaengine_slave_config() with
 * .direction, and either .src_addr or .dst_addr set.
 *
 * NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE
 * capability! If this becomes a requirement, hardware glue drivers, using these
 * services, would have to provide their own filters, which first would check
 * the device driver, similar to how other DMAC drivers, e.g., sa11x0-dma.c, do
 * this, and only then, in case of a match, call this common filter.
 *
 * NOTE 2: This filter function is also used in the DT case by shdma_of_xlate().
 * In that case the MID-RID value is used for slave channel filtering and is
 * passed to this function in the "arg" parameter.
 */
bool shdma_chan_filter(struct dma_chan *chan, void *arg)
{
	struct shdma_chan *schan;
	struct shdma_dev *sdev;
	int slave_id = (long)arg;
	int ret;

	/* Only support channels handled by this driver. */
	if (chan->device->device_alloc_chan_resources !=
	    shdma_alloc_chan_resources)
		return false;

	schan = to_shdma_chan(chan);
	sdev = to_shdma_dev(chan->device);

	/*
	 * For DT, the schan->slave_id field is generated by the
	 * set_slave function from the slave ID that is passed in
	 * from xlate. For the non-DT case, the slave ID is
	 * directly passed into the filter function by the driver
	 */
	if (schan->dev->of_node) {
		ret = sdev->ops->set_slave(schan, slave_id, 0, true);
		if (ret < 0)
			return false;

		schan->real_slave_id = schan->slave_id;

		return true;
	}

	if (slave_id < 0) {
		/* No slave requested - arbitrary channel */
		dev_warn(sdev->dma_dev.dev, "invalid slave ID passed to dma_request_slave\n");
		return true;
	}

	if (slave_id >= slave_num)
		return false;

	ret = sdev->ops->set_slave(schan, slave_id, 0, true);
	if (ret < 0)
		return false;

	schan->real_slave_id = slave_id;

	return true;
}
EXPORT_SYMBOL(shdma_chan_filter);

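/*
 * Illustrative only (not part of the driver): a minimal sketch of how a
 * non-DT client might use this filter with dma_request_channel(). The
 * slave_id value and the FIFO address used in the subsequent configuration
 * are assumptions made for the example; a real client takes them from its
 * platform data.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= tx_fifo_phys,
 *	};
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, shdma_chan_filter,
 *				   (void *)(long)slave_id);
 *	if (chan && dmaengine_slave_config(chan, &cfg) < 0) {
 *		dma_release_channel(chan);
 *		chan = NULL;
 *	}
 */
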
static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
{
	struct shdma_desc *desc, *_desc;
	/* Is the "exposed" head of a chain acked? */
	bool head_acked = false;
	dma_cookie_t cookie = 0;
	dma_async_tx_callback callback = NULL;
	struct dmaengine_desc_callback cb;
	unsigned long flags;
	LIST_HEAD(cyclic_list);

	memset(&cb, 0, sizeof(cb));
	spin_lock_irqsave(&schan->chan_lock, flags);
	list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) {
		struct dma_async_tx_descriptor *tx = &desc->async_tx;

		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
		BUG_ON(desc->mark != DESC_SUBMITTED &&
		       desc->mark != DESC_COMPLETED &&
		       desc->mark != DESC_WAITING);

		/*
		 * queue is ordered, and we use this loop to (1) clean up all
		 * completed descriptors, and to (2) update descriptor flags of
		 * any chunks in a (partially) completed chain
		 */
		if (!all && desc->mark == DESC_SUBMITTED &&
		    desc->cookie != cookie)
			break;

		if (tx->cookie > 0)
			cookie = tx->cookie;

		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
			if (schan->dma_chan.completed_cookie != desc->cookie - 1)
				dev_dbg(schan->dev,
					"Completing cookie %d, expected %d\n",
					desc->cookie,
					schan->dma_chan.completed_cookie + 1);
			schan->dma_chan.completed_cookie = desc->cookie;
		}

		/* Call callback on the last chunk */
		if (desc->mark == DESC_COMPLETED && tx->callback) {
			desc->mark = DESC_WAITING;
			dmaengine_desc_get_callback(tx, &cb);
			callback = tx->callback;
			dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n",
				tx->cookie, tx, schan->id);
			BUG_ON(desc->chunks != 1);
			break;
		}

		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
			if (desc->mark == DESC_COMPLETED) {
				BUG_ON(tx->cookie < 0);
				desc->mark = DESC_WAITING;
			}
			head_acked = async_tx_test_ack(tx);
		} else {
			switch (desc->mark) {
			case DESC_COMPLETED:
				desc->mark = DESC_WAITING;
				/* Fall through */
			case DESC_WAITING:
				if (head_acked)
					async_tx_ack(&desc->async_tx);
			}
		}

		dev_dbg(schan->dev, "descriptor %p #%d completed.\n",
			tx, tx->cookie);

		if (((desc->mark == DESC_COMPLETED ||
		      desc->mark == DESC_WAITING) &&
		     async_tx_test_ack(&desc->async_tx)) || all) {

			if (all || !desc->cyclic) {
				/* Remove from ld_queue list */
				desc->mark = DESC_IDLE;
				list_move(&desc->node, &schan->ld_free);
			} else {
				/* reuse as cyclic */
				desc->mark = DESC_SUBMITTED;
				list_move_tail(&desc->node, &cyclic_list);
			}

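			/*
			 * If the queue is now empty, the channel can be
			 * powered down. Otherwise, if a client submitted a
			 * new transfer from its callback while this descriptor
			 * was still on ld_queue, pm_state is SHDMA_PM_PENDING
			 * and the queue has to be kicked from here, because
			 * shdma_issue_pending() will not start it in that
			 * state.
			 */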
			if (list_empty(&schan->ld_queue)) {
				dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
				pm_runtime_put(schan->dev);
				schan->pm_state = SHDMA_PM_ESTABLISHED;
			} else if (schan->pm_state == SHDMA_PM_PENDING) {
				shdma_chan_xfer_ld_queue(schan);
			}
		}
	}

	if (all && !callback)
		/*
		 * Terminating and the loop completed normally: forgive
		 * uncompleted cookies
		 */
		schan->dma_chan.completed_cookie = schan->dma_chan.cookie;

	list_splice_tail(&cyclic_list, &schan->ld_queue);

	spin_unlock_irqrestore(&schan->chan_lock, flags);

	dmaengine_desc_callback_invoke(&cb, NULL);

	return callback;
}

/*
 * shdma_chan_ld_cleanup - Clean up link descriptors
 *
 * Clean up the ld_queue of DMA channel.
 */
static void shdma_chan_ld_cleanup(struct shdma_chan *schan, bool all)
{
	while (__ld_cleanup(schan, all))
		;
}

/*
 * shdma_free_chan_resources - Free all resources of the channel.
 */
static void shdma_free_chan_resources(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(chan->device);
	const struct shdma_ops *ops = sdev->ops;
	LIST_HEAD(list);

	/* Protect against ISR */
	spin_lock_irq(&schan->chan_lock);
	ops->halt_channel(schan);
	spin_unlock_irq(&schan->chan_lock);

	/* Now no new interrupts will occur */

	/* Prepared and not submitted descriptors can still be on the queue */
	if (!list_empty(&schan->ld_queue))
		shdma_chan_ld_cleanup(schan, true);

	if (schan->slave_id >= 0) {
		/* The caller is holding dma_list_mutex */
		clear_bit(schan->slave_id, shdma_slave_used);
		chan->private = NULL;
	}

	schan->real_slave_id = 0;

	spin_lock_irq(&schan->chan_lock);

	list_splice_init(&schan->ld_free, &list);
	schan->desc_num = 0;

	spin_unlock_irq(&schan->chan_lock);

	kfree(schan->desc);
}

/**
 * shdma_add_desc - get, set up and return one transfer descriptor
 * @schan:	DMA channel
 * @flags:	DMA transfer flags
 * @dst:	destination DMA address, incremented when direction equals
 *		DMA_DEV_TO_MEM or DMA_MEM_TO_MEM
 * @src:	source DMA address, incremented when direction equals
 *		DMA_MEM_TO_DEV or DMA_MEM_TO_MEM
 * @len:	DMA transfer length
 * @first:	if NULL, set to the current descriptor and cookie set to -EBUSY
 * @direction:	needed for slave DMA to decide which address to keep constant,
 *		equals DMA_MEM_TO_MEM for MEMCPY
 * Returns 0 or an error
 * Locks: called with desc_lock held
 */
static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
	unsigned long flags, dma_addr_t *dst, dma_addr_t *src, size_t *len,
	struct shdma_desc **first, enum dma_transfer_direction direction)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *new;
	size_t copy_size = *len;

	if (!copy_size)
		return NULL;

	/* Allocate the link descriptor from the free list */
	new = shdma_get_desc(schan);
	if (!new) {
		dev_err(schan->dev, "No free link descriptor available\n");
		return NULL;
	}

	ops->desc_setup(schan, new, *src, *dst, &copy_size);

	if (!*first) {
		/* First desc */
		new->async_tx.cookie = -EBUSY;
		*first = new;
	} else {
		/* Other desc - invisible to the user */
		new->async_tx.cookie = -EINVAL;
	}

	dev_dbg(schan->dev,
		"chaining (%zu/%zu)@%pad -> %pad with %p, cookie %d\n",
		copy_size, *len, src, dst, &new->async_tx,
		new->async_tx.cookie);

	new->mark = DESC_PREPARED;
	new->async_tx.flags = flags;
	new->direction = direction;
	new->partial = 0;

	*len -= copy_size;
	if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
		*src += copy_size;
	if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM)
		*dst += copy_size;

	return new;
}

/*
 * shdma_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and a correct
 * list manipulation. For slave DMA, direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains the slave
 * address, e.g., the FIFO I/O register. For MEMCPY, direction equals
 * DMA_MEM_TO_MEM and the SG list contains only one element and points at the
 * source buffer.
 */
static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
	enum dma_transfer_direction direction, unsigned long flags, bool cyclic)
{
	struct scatterlist *sg;
	struct shdma_desc *first = NULL, *new = NULL /* compiler... */;
	LIST_HEAD(tx_list);
	int chunks = 0;
	unsigned long irq_flags;
	int i;

	for_each_sg(sgl, sg, sg_len, i)
		chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len);

	/* Have to lock the whole loop to protect against concurrent release */
	spin_lock_irqsave(&schan->chan_lock, irq_flags);

	/*
	 * Chaining:
	 * first descriptor is what user is dealing with in all API calls, its
	 *	cookie is at first set to -EBUSY, at tx-submit to a positive
	 *	number
	 * if more than one chunk is needed further chunks have cookie = -EINVAL
	 * the last chunk, if not equal to the first, has cookie = -ENOSPC
	 * all chunks are linked onto the tx_list head with their .node heads
	 *	only during this function, then they are immediately spliced
	 *	back onto the free list in form of a chain
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t sg_addr = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		if (!len)
			goto err_get_desc;

		do {
			dev_dbg(schan->dev, "Add SG #%d@%p[%zu], dma %pad\n",
				i, sg, len, &sg_addr);

			if (direction == DMA_DEV_TO_MEM)
				new = shdma_add_desc(schan, flags,
						&sg_addr, addr, &len, &first,
						direction);
			else
				new = shdma_add_desc(schan, flags,
						addr, &sg_addr, &len, &first,
						direction);
			if (!new)
				goto err_get_desc;

			new->cyclic = cyclic;
			if (cyclic)
				new->chunks = 1;
			else
				new->chunks = chunks--;
			list_add_tail(&new->node, &tx_list);
		} while (len);
	}

	if (new != first)
		new->async_tx.cookie = -ENOSPC;

	/* Put them back on the free list, so, they don't get lost */
	list_splice_tail(&tx_list, &schan->ld_free);

	spin_unlock_irqrestore(&schan->chan_lock, irq_flags);

	return &first->async_tx;

err_get_desc:
	list_for_each_entry(new, &tx_list, node)
		new->mark = DESC_IDLE;
	list_splice(&tx_list, &schan->ld_free);

	spin_unlock_irqrestore(&schan->chan_lock, irq_flags);

	return NULL;
}

static struct dma_async_tx_descriptor *shdma_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct scatterlist sg;

	if (!chan || !len)
		return NULL;

	BUG_ON(!schan->desc_num);

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sg) = dma_src;
	sg_dma_len(&sg) = len;

	return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM,
			     flags, false);
}

static struct dma_async_tx_descriptor *shdma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags, void *context)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	int slave_id = schan->slave_id;
	dma_addr_t slave_addr;

	if (!chan)
		return NULL;

	BUG_ON(!schan->desc_num);

	/* Someone calling slave DMA on a generic channel? */
	if (slave_id < 0 || !sg_len) {
		dev_warn(schan->dev, "%s: bad parameter: len=%d, id=%d\n",
			 __func__, sg_len, slave_id);
		return NULL;
	}

	slave_addr = ops->slave_addr(schan);

	return shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
			     direction, flags, false);
}

#define SHDMA_MAX_SG_LEN 32

static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	struct dma_async_tx_descriptor *desc;
	const struct shdma_ops *ops = sdev->ops;
	unsigned int sg_len = buf_len / period_len;
	int slave_id = schan->slave_id;
	dma_addr_t slave_addr;
	struct scatterlist *sgl;
	int i;

	if (!chan)
		return NULL;

	BUG_ON(!schan->desc_num);

	if (sg_len > SHDMA_MAX_SG_LEN) {
		dev_err(schan->dev, "sg length %d exceeds limit %d",
			sg_len, SHDMA_MAX_SG_LEN);
		return NULL;
	}

	/* Someone calling slave DMA on a generic channel? */
	if (slave_id < 0 || (buf_len < period_len)) {
		dev_warn(schan->dev,
			"%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
			__func__, buf_len, period_len, slave_id);
		return NULL;
	}

	slave_addr = ops->slave_addr(schan);

	/*
	 * Allocate the sg list dynamically as it would consume too much stack
	 * space.
	 */
	sgl = kcalloc(sg_len, sizeof(*sgl), GFP_KERNEL);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, sg_len);

	for (i = 0; i < sg_len; i++) {
		dma_addr_t src = buf_addr + (period_len * i);

		sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
			    offset_in_page(src));
		sg_dma_address(&sgl[i]) = src;
		sg_dma_len(&sgl[i]) = period_len;
	}

	desc = shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
			     direction, flags, true);

	kfree(sgl);
	return desc;
}

static int shdma_terminate_all(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(chan->device);
	const struct shdma_ops *ops = sdev->ops;
	unsigned long flags;

	spin_lock_irqsave(&schan->chan_lock, flags);
	ops->halt_channel(schan);

	if (ops->get_partial && !list_empty(&schan->ld_queue)) {
		/* Record partial transfer */
		struct shdma_desc *desc = list_first_entry(&schan->ld_queue,
							   struct shdma_desc, node);
		desc->partial = ops->get_partial(schan, desc);
	}

	spin_unlock_irqrestore(&schan->chan_lock, flags);

	shdma_chan_ld_cleanup(schan, true);

	return 0;
}

static int shdma_config(struct dma_chan *chan,
			struct dma_slave_config *config)
{
	struct shdma_chan *schan = to_shdma_chan(chan);

	/*
	 * So far only .slave_id is used, but the slave drivers are
	 * encouraged to also set a transfer direction and an address.
	 */
	if (!config)
		return -EINVAL;

	/*
	 * overriding the slave_id through dma_slave_config is deprecated,
	 * but possibly some out-of-tree drivers still do it.
	 */
	if (WARN_ON_ONCE(config->slave_id &&
			 config->slave_id != schan->real_slave_id))
		schan->real_slave_id = config->slave_id;

	/*
	 * We could lock this, but you shouldn't be configuring the
	 * channel, while using it...
	 */
	return shdma_setup_slave(schan,
				 config->direction == DMA_DEV_TO_MEM ?
				 config->src_addr : config->dst_addr);
}

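/*
 * Illustrative only (not part of the driver): a minimal sketch of the
 * dma_slave_config a client might pass via dmaengine_slave_config() for a
 * device-to-memory channel; shdma_config() then forwards .src_addr (for
 * DMA_DEV_TO_MEM) or .dst_addr (for DMA_MEM_TO_DEV) to shdma_setup_slave().
 * The FIFO address below is an assumption made for the example.
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_DEV_TO_MEM,
 *		.src_addr	= rx_fifo_phys,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 */
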
static void shdma_issue_pending(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);

	spin_lock_irq(&schan->chan_lock);
	if (schan->pm_state == SHDMA_PM_ESTABLISHED)
		shdma_chan_xfer_ld_queue(schan);
	else
		schan->pm_state = SHDMA_PM_PENDING;
	spin_unlock_irq(&schan->chan_lock);
}

static enum dma_status shdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	enum dma_status status;
	unsigned long flags;

	shdma_chan_ld_cleanup(schan, false);

	spin_lock_irqsave(&schan->chan_lock, flags);

	status = dma_cookie_status(chan, cookie, txstate);

	/*
	 * If we don't find the cookie on the queue, it has been aborted and we
	 * have to report the error
	 */
	if (status != DMA_COMPLETE) {
		struct shdma_desc *sdesc;
		status = DMA_ERROR;
		list_for_each_entry(sdesc, &schan->ld_queue, node)
			if (sdesc->cookie == cookie) {
				status = DMA_IN_PROGRESS;
				break;
			}
	}

	spin_unlock_irqrestore(&schan->chan_lock, flags);

	return status;
}

/* Called from error IRQ or NMI */
bool shdma_reset(struct shdma_dev *sdev)
{
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_chan *schan;
	unsigned int handled = 0;
	int i;

	/* Reset all channels */
	shdma_for_each_chan(schan, sdev, i) {
		struct shdma_desc *sdesc;
		LIST_HEAD(dl);

		if (!schan)
			continue;

		spin_lock(&schan->chan_lock);

		/* Stop the channel */
		ops->halt_channel(schan);

		list_splice_init(&schan->ld_queue, &dl);

		if (!list_empty(&dl)) {
			dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
			pm_runtime_put(schan->dev);
		}
		schan->pm_state = SHDMA_PM_ESTABLISHED;

		spin_unlock(&schan->chan_lock);

		/* Complete all */
		list_for_each_entry(sdesc, &dl, node) {
			struct dma_async_tx_descriptor *tx = &sdesc->async_tx;

			sdesc->mark = DESC_IDLE;
			dmaengine_desc_get_callback_invoke(tx, NULL);
		}

		spin_lock(&schan->chan_lock);
		list_splice(&dl, &schan->ld_free);
		spin_unlock(&schan->chan_lock);

		handled++;
	}

	return !!handled;
}
EXPORT_SYMBOL(shdma_reset);

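/*
 * Illustrative only (not part of the driver): a glue driver typically calls
 * shdma_reset() from its error interrupt (or NMI) handler, roughly like the
 * hypothetical handler below, after quiescing the controller. All "my_"
 * names are assumptions made for the example.
 *
 *	static irqreturn_t my_dmac_err_irq(int irq, void *data)
 *	{
 *		struct my_dmac *dmac = data;
 *
 *		my_dmac_halt_all(dmac);
 *		return IRQ_RETVAL(shdma_reset(&dmac->shdma_dev));
 *	}
 */
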
static irqreturn_t chan_irq(int irq, void *dev)
{
	struct shdma_chan *schan = dev;
	const struct shdma_ops *ops =
		to_shdma_dev(schan->dma_chan.device)->ops;
	irqreturn_t ret;

	spin_lock(&schan->chan_lock);

	ret = ops->chan_irq(schan, irq) ? IRQ_WAKE_THREAD : IRQ_NONE;

	spin_unlock(&schan->chan_lock);

	return ret;
}

static irqreturn_t chan_irqt(int irq, void *dev)
{
	struct shdma_chan *schan = dev;
	const struct shdma_ops *ops =
		to_shdma_dev(schan->dma_chan.device)->ops;
	struct shdma_desc *sdesc;

	spin_lock_irq(&schan->chan_lock);
	list_for_each_entry(sdesc, &schan->ld_queue, node) {
		if (sdesc->mark == DESC_SUBMITTED &&
		    ops->desc_completed(schan, sdesc)) {
			dev_dbg(schan->dev, "done #%d@%p\n",
				sdesc->async_tx.cookie, &sdesc->async_tx);
			sdesc->mark = DESC_COMPLETED;
			break;
		}
	}
	/* Next desc */
	shdma_chan_xfer_ld_queue(schan);
	spin_unlock_irq(&schan->chan_lock);

	shdma_chan_ld_cleanup(schan, false);

	return IRQ_HANDLED;
}

int shdma_request_irq(struct shdma_chan *schan, int irq,
			   unsigned long flags, const char *name)
{
	int ret = devm_request_threaded_irq(schan->dev, irq, chan_irq,
					    chan_irqt, flags, name, schan);

	schan->irq = ret < 0 ? ret : irq;

	return ret;
}
EXPORT_SYMBOL(shdma_request_irq);

void shdma_chan_probe(struct shdma_dev *sdev,
		      struct shdma_chan *schan, int id)
{
	schan->pm_state = SHDMA_PM_ESTABLISHED;

	/* reference struct dma_device */
	schan->dma_chan.device = &sdev->dma_dev;
	dma_cookie_init(&schan->dma_chan);

	schan->dev = sdev->dma_dev.dev;
	schan->id = id;

	if (!schan->max_xfer_len)
		schan->max_xfer_len = PAGE_SIZE;

	spin_lock_init(&schan->chan_lock);

	/* Init descriptor management lists */
	INIT_LIST_HEAD(&schan->ld_queue);
	INIT_LIST_HEAD(&schan->ld_free);

	/* Add the channel to DMA device channel list */
	list_add_tail(&schan->dma_chan.device_node,
		      &sdev->dma_dev.channels);
	sdev->schan[id] = schan;
}
EXPORT_SYMBOL(shdma_chan_probe);

void shdma_chan_remove(struct shdma_chan *schan)
{
	list_del(&schan->dma_chan.device_node);
}
EXPORT_SYMBOL(shdma_chan_remove);

int shdma_init(struct device *dev, struct shdma_dev *sdev,
		    int chan_num)
{
	struct dma_device *dma_dev = &sdev->dma_dev;

	/*
	 * Require all call-backs for now, they can trivially be made optional
	 * later as required
	 */
	if (!sdev->ops ||
	    !sdev->desc_size ||
	    !sdev->ops->embedded_desc ||
	    !sdev->ops->start_xfer ||
	    !sdev->ops->setup_xfer ||
	    !sdev->ops->set_slave ||
	    !sdev->ops->desc_setup ||
	    !sdev->ops->slave_addr ||
	    !sdev->ops->channel_busy ||
	    !sdev->ops->halt_channel ||
	    !sdev->ops->desc_completed)
		return -EINVAL;

	sdev->schan = kcalloc(chan_num, sizeof(*sdev->schan), GFP_KERNEL);
	if (!sdev->schan)
		return -ENOMEM;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* Common and MEMCPY operations */
	dma_dev->device_alloc_chan_resources
		= shdma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = shdma_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = shdma_prep_memcpy;
	dma_dev->device_tx_status = shdma_tx_status;
	dma_dev->device_issue_pending = shdma_issue_pending;

	/* Compulsory for DMA_SLAVE fields */
	dma_dev->device_prep_slave_sg = shdma_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = shdma_prep_dma_cyclic;
	dma_dev->device_config = shdma_config;
	dma_dev->device_terminate_all = shdma_terminate_all;

	dma_dev->dev = dev;

	return 0;
}
EXPORT_SYMBOL(shdma_init);

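/*
 * Illustrative only (not part of the driver): the rough probe sequence of a
 * hardware glue driver built on this library, after it has filled in
 * my_dev->shdma_dev.ops and .desc_size. All "my_"-prefixed names and
 * MY_CHAN_NUM are assumptions made for the example.
 *
 *	err = shdma_init(&pdev->dev, &my_dev->shdma_dev, MY_CHAN_NUM);
 *	if (err < 0)
 *		return err;
 *
 *	for (i = 0; i < MY_CHAN_NUM; i++) {
 *		shdma_chan_probe(&my_dev->shdma_dev,
 *				 &my_dev->chan[i].shdma_chan, i);
 *		err = shdma_request_irq(&my_dev->chan[i].shdma_chan,
 *					chan_irq[i], IRQF_SHARED, "my-dmac");
 *		if (err < 0)
 *			goto error;
 *	}
 *
 *	err = dma_async_device_register(&my_dev->shdma_dev.dma_dev);
 */
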
void shdma_cleanup(struct shdma_dev *sdev)
{
	kfree(sdev->schan);
}
EXPORT_SYMBOL(shdma_cleanup);

static int __init shdma_enter(void)
{
	shdma_slave_used = kcalloc(DIV_ROUND_UP(slave_num, BITS_PER_LONG),
				   sizeof(long),
				   GFP_KERNEL);
	if (!shdma_slave_used)
		return -ENOMEM;
	return 0;
}
module_init(shdma_enter);

static void __exit shdma_exit(void)
{
	kfree(shdma_slave_used);
}
module_exit(shdma_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SH-DMA driver base library");
MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");