/*
 *  intel_mid_dma.c - Intel Langwell DMA Drivers
 *
 *  Copyright (C) 2008-10 Intel Corp
 *  Author: Vinod Koul <vinod.koul@intel.com>
 *  The driver design is based on dw_dmac driver
 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *
 */
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/intel_mid_dma.h>
#include <linux/module.h>

#include "dmaengine.h"

#define MAX_CHAN	4 /*max ch across controllers*/
#include "intel_mid_dma_regs.h"

#define INTEL_MID_DMAC1_ID		0x0814
#define INTEL_MID_DMAC2_ID		0x0813
#define INTEL_MID_GP_DMAC2_ID		0x0827
#define INTEL_MFLD_DMAC1_ID		0x0830

#define LNW_PERIPHRAL_MASK_BASE		0xFFAE8008
#define LNW_PERIPHRAL_MASK_SIZE		0x10
#define LNW_PERIPHRAL_STATUS		0x0
#define LNW_PERIPHRAL_MASK		0x8
struct intel_mid_dma_probe_info {
	u8 max_chan;
	u8 ch_base;
	u16 block_size;
	u32 pimr_mask;
};

#define INFO(_max_chan, _ch_base, _block_size, _pimr_mask) \
	((kernel_ulong_t)&(struct intel_mid_dma_probe_info) {	\
		.max_chan = (_max_chan),			\
		.ch_base = (_ch_base),				\
		.block_size = (_block_size),			\
		.pimr_mask = (_pimr_mask),			\
	})
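
/*
 * For reference, the PCI ID table at the bottom of this file instantiates
 * these via INFO(), e.g. INFO(2, 6, 4095, 0x200020) for INTEL_MID_DMAC1_ID:
 * two channels starting at channel base 6, a 4095-item maximum block size,
 * and a non-zero PIMR mask (so the peripheral interrupt mask registers at
 * LNW_PERIPHRAL_MASK_BASE are used).
 */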

/*****************************************************************************
Utility Functions */
/**
 * get_ch_index - convert status to channel
 * @status: status mask
 * @base: dma ch base value
 *
 * Modify the status mask and return the channel index needing
 * attention (or -1 if none)
 */
static int get_ch_index(int *status, unsigned int base)
{
	int i;

	for (i = 0; i < MAX_CHAN; i++) {
		if (*status & (1 << (i + base))) {
			*status = *status & ~(1 << (i + base));
			pr_debug("MDMA: index %d New status %x\n", i, *status);
			return i;
		}
	}
	return -1;
}
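
/*
 * Worked example (for instance on the DMAC1 configuration above with
 * channel base 6): if *status is 0x40, only bit 6 is set, so the loop
 * matches at i = 0, clears bit 6 in *status and returns channel index 0.
 */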

/**
 * get_block_ts - calculates dma transaction length
 * @len: dma transfer length
 * @tx_width: dma transfer src width
 * @block_size: dma controller max block size
 *
 * Based on src width calculate the DMA transaction length in data items
 * return data items or FFFF if exceeds max length for block
 */
static int get_block_ts(int len, int tx_width, int block_size)
{
	int byte_width = 0, block_ts = 0;

	switch (tx_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		byte_width = 1;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		byte_width = 2;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	default:
		byte_width = 4;
		break;
	}

	block_ts = len/byte_width;
	if (block_ts > block_size)
		block_ts = 0xFFFF;
	return block_ts;
}
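
/*
 * Example of the arithmetic above: a 1024-byte transfer at
 * DMA_SLAVE_BUSWIDTH_2_BYTES is 512 data items; a length that works out
 * to more items than the controller's block_size is reported as 0xFFFF.
 */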

/*****************************************************************************
DMAC1 interrupt Functions */

/**
 * dmac1_mask_periphral_intr - mask the peripheral interrupt
 * @mid: dma device for which masking is required
 *
 * Masks the DMA peripheral interrupt
 * this is valid for DMAC1 family controllers only
 * This controller should have peripheral mask registers already mapped
 */
static void dmac1_mask_periphral_intr(struct middma_device *mid)
{
	u32 pimr;

	if (mid->pimr_mask) {
		pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
		pimr |= mid->pimr_mask;
		writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK);
	}
	return;
}

/**
 * dmac1_unmask_periphral_intr - unmask the peripheral interrupt
 * @midc: dma channel for which masking is required
 *
 * UnMasks the DMA peripheral interrupt,
 * this is valid for DMAC1 family controllers only
 * This controller should have peripheral mask registers already mapped
 */
static void dmac1_unmask_periphral_intr(struct intel_mid_dma_chan *midc)
{
	u32 pimr;
	struct middma_device *mid = to_middma_device(midc->chan.device);

	if (mid->pimr_mask) {
		pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
		pimr &= ~mid->pimr_mask;
		writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK);
	}
	return;
}

/**
 * enable_dma_interrupt - enable the peripheral interrupt
 * @midc: dma channel for which enable interrupt is required
 *
 * Enable the DMA peripheral interrupt,
 * this is valid for DMAC1 family controllers only
 * This controller should have peripheral mask registers already mapped
 */
static void enable_dma_interrupt(struct intel_mid_dma_chan *midc)
{
	dmac1_unmask_periphral_intr(midc);

	/*en ch interrupts*/
	iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
	iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
	return;
}

/**
 * disable_dma_interrupt - disable the peripheral interrupt
 * @midc: dma channel for which disable interrupt is required
 *
 * Disable the DMA peripheral interrupt,
 * this is valid for DMAC1 family controllers only
 * This controller should have peripheral mask registers already mapped
 */
static void disable_dma_interrupt(struct intel_mid_dma_chan *midc)
{
	/*Check LPE PISR, make sure fwd is disabled*/
	iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK);
	iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
	iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
	return;
}

/*****************************************************************************
DMA channel helper Functions */

/**
 * mid_desc_get - get a descriptor
 * @midc: dma channel for which descriptor is required
 *
 * Obtain a descriptor for the channel. Returns NULL if none are free.
 * Once the descriptor is returned it is private until put on another
 * list or freed
 */
static struct intel_mid_dma_desc *midc_desc_get(struct intel_mid_dma_chan *midc)
{
	struct intel_mid_dma_desc *desc, *_desc;
	struct intel_mid_dma_desc *ret = NULL;

	spin_lock_bh(&midc->lock);
	list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
	}
	spin_unlock_bh(&midc->lock);
	return ret;
}

/**
 * mid_desc_put - put a descriptor
 * @midc: dma channel for which descriptor is required
 * @desc: descriptor to put
 *
 * Return a descriptor from midc_desc_get back to the free pool
 */
static void midc_desc_put(struct intel_mid_dma_chan *midc,
			struct intel_mid_dma_desc *desc)
{
	if (desc) {
		spin_lock_bh(&midc->lock);
		list_add_tail(&desc->desc_node, &midc->free_list);
		spin_unlock_bh(&midc->lock);
	}
}

/**
 * midc_dostart - begin a DMA transaction
 * @midc: channel for which txn is to be started
 * @first: first descriptor of series
 *
 * Load a transaction into the engine. This must be called with midc->lock
 * held and bh disabled.
 */
static void midc_dostart(struct intel_mid_dma_chan *midc,
			struct intel_mid_dma_desc *first)
{
	struct middma_device *mid = to_middma_device(midc->chan.device);

	/* channel is idle */
	if (midc->busy && test_ch_en(midc->dma_base, midc->ch_id)) {
		/*error*/
		pr_err("ERR_MDMA: channel is busy in start\n");
		/* The tasklet will hopefully advance the queue... */
		return;
	}
	midc->busy = true;
	/*write registers and en*/
	iowrite32(first->sar, midc->ch_regs + SAR);
	iowrite32(first->dar, midc->ch_regs + DAR);
	iowrite32(first->lli_phys, midc->ch_regs + LLP);
	iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH);
	iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW);
	iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW);
	iowrite32(first->ctl_hi, midc->ch_regs + CTL_HIGH);
	pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n",
		(int)first->sar, (int)first->dar, first->cfg_hi,
		first->cfg_lo, first->ctl_hi, first->ctl_lo);
	first->status = DMA_IN_PROGRESS;

	iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
}

/**
 * midc_descriptor_complete - process completed descriptor
 * @midc: channel owning the descriptor
 * @desc: the descriptor itself
 *
 * Process a completed descriptor and perform any callbacks upon
 * the completion. The completion handling drops the lock during the
 * callbacks but must be called with the lock held.
 */
static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
		struct intel_mid_dma_desc *desc)
		__releases(&midc->lock) __acquires(&midc->lock)
{
	struct dma_async_tx_descriptor	*txd = &desc->txd;
	dma_async_tx_callback callback_txd = NULL;
	struct intel_mid_dma_lli	*llitem;
	void *param_txd = NULL;

	dma_cookie_complete(txd);
	callback_txd = txd->callback;
	param_txd = txd->callback_param;

	if (desc->lli != NULL) {
		/*clear the DONE bit of completed LLI in memory*/
		llitem = desc->lli + desc->current_lli;
		llitem->ctl_hi &= CLEAR_DONE;
		if (desc->current_lli < desc->lli_length-1)
			(desc->current_lli)++;
		else
			desc->current_lli = 0;
	}
	spin_unlock_bh(&midc->lock);
	if (callback_txd) {
		pr_debug("MDMA: TXD callback set ... calling\n");
		callback_txd(param_txd);
	}
	if (midc->raw_tfr) {
		desc->status = DMA_SUCCESS;
		if (desc->lli != NULL) {
			pci_pool_free(desc->lli_pool, desc->lli,
						desc->lli_phys);
			pci_pool_destroy(desc->lli_pool);
			desc->lli = NULL;
		}
		list_move(&desc->desc_node, &midc->free_list);
		midc->busy = false;
	}
	spin_lock_bh(&midc->lock);
}

/**
 * midc_scan_descriptors - check the descriptors in channel
 *			   mark completed when tx is complete
 * @mid: device
 * @midc: channel to scan
 *
 * Walk the descriptor chain for the device and process any entries
 * that are complete.
 */
static void midc_scan_descriptors(struct middma_device *mid,
				struct intel_mid_dma_chan *midc)
{
	struct intel_mid_dma_desc *desc = NULL, *_desc = NULL;

	/*tx is complete*/
	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
		if (desc->status == DMA_IN_PROGRESS)
			midc_descriptor_complete(midc, desc);
	}
	return;
}

/**
 * midc_lli_fill_sg - Helper function to convert
 *		      SG list to Linked List Items.
 * @midc: Channel
 * @desc: DMA descriptor
 * @sglist: Pointer to SG list
 * @sglen: SG list length
 * @flags: DMA transaction flags
 *
 * Walk through the SG list and convert the SG list into Linked
 * List Items (LLI).
 */
static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
				struct intel_mid_dma_desc *desc,
				struct scatterlist *sglist,
				unsigned int sglen,
				unsigned int flags)
{
	struct intel_mid_dma_slave *mids;
	struct scatterlist  *sg;
	dma_addr_t lli_next, sg_phy_addr;
	struct intel_mid_dma_lli *lli_bloc_desc;
	union intel_mid_dma_ctl_lo ctl_lo;
	union intel_mid_dma_ctl_hi ctl_hi;
	int i;

	pr_debug("MDMA: Entered midc_lli_fill_sg\n");
	mids = midc->mid_slave;

	lli_bloc_desc = desc->lli;
	lli_next = desc->lli_phys;

	ctl_lo.ctl_lo = desc->ctl_lo;
	ctl_hi.ctl_hi = desc->ctl_hi;
	for_each_sg(sglist, sg, sglen, i) {
		/*Populate CTL_LOW and LLI values*/
		if (i != sglen - 1) {
			lli_next = lli_next +
				sizeof(struct intel_mid_dma_lli);
		} else {
		/*Check for circular list, otherwise terminate LLI to ZERO*/
			if (flags & DMA_PREP_CIRCULAR_LIST) {
				pr_debug("MDMA: LLI is configured in circular mode\n");
				lli_next = desc->lli_phys;
			} else {
				lli_next = 0;
				ctl_lo.ctlx.llp_dst_en = 0;
				ctl_lo.ctlx.llp_src_en = 0;
			}
		}
		/*Populate CTL_HI values*/
		ctl_hi.ctlx.block_ts = get_block_ts(sg_dma_len(sg),
							desc->width,
							midc->dma->block_size);
		/*Populate SAR and DAR values*/
		sg_phy_addr = sg_dma_address(sg);
		if (desc->dirn == DMA_MEM_TO_DEV) {
			lli_bloc_desc->sar  = sg_phy_addr;
			lli_bloc_desc->dar  = mids->dma_slave.dst_addr;
		} else if (desc->dirn == DMA_DEV_TO_MEM) {
			lli_bloc_desc->sar  = mids->dma_slave.src_addr;
			lli_bloc_desc->dar  = sg_phy_addr;
		}
		/*Copy values into block descriptor in system memory*/
		lli_bloc_desc->llp = lli_next;
		lli_bloc_desc->ctl_lo = ctl_lo.ctl_lo;
		lli_bloc_desc->ctl_hi = ctl_hi.ctl_hi;

		lli_bloc_desc++;
	}
	/*Copy very first LLI values to descriptor*/
	desc->ctl_lo = desc->lli->ctl_lo;
	desc->ctl_hi = desc->lli->ctl_hi;
	desc->sar = desc->lli->sar;
	desc->dar = desc->lli->dar;

	return 0;
}

/*****************************************************************************
DMA engine callback Functions */
/**
 * intel_mid_dma_tx_submit - callback to submit DMA transaction
 * @tx: dma engine descriptor
 *
 * Submit the DMA transaction for this descriptor, start if ch idle
 */
static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct intel_mid_dma_desc	*desc = to_intel_mid_dma_desc(tx);
	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(tx->chan);
	dma_cookie_t		cookie;

	spin_lock_bh(&midc->lock);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&midc->active_list))
		list_add_tail(&desc->desc_node, &midc->active_list);
	else
		list_add_tail(&desc->desc_node, &midc->queue);

	midc_dostart(midc, desc);
	spin_unlock_bh(&midc->lock);

	return cookie;
}

/**
 * intel_mid_dma_issue_pending - callback to issue pending txn
 * @chan: chan where pending transaction needs to be checked and submitted
 *
 * Call for scan to issue pending descriptors
 */
static void intel_mid_dma_issue_pending(struct dma_chan *chan)
{
	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);

	spin_lock_bh(&midc->lock);
	if (!list_empty(&midc->queue))
		midc_scan_descriptors(to_middma_device(chan->device), midc);
	spin_unlock_bh(&midc->lock);
}

/**
 * intel_mid_dma_tx_status - Return status of txn
 * @chan: chan for where status needs to be checked
 * @cookie: cookie for txn
 * @txstate: DMA txn state
 *
 * Return status of DMA txn
 */
static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
						dma_cookie_t cookie,
						struct dma_tx_state *txstate)
{
	struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_SUCCESS) {
		spin_lock_bh(&midc->lock);
		midc_scan_descriptors(to_middma_device(chan->device), midc);
		spin_unlock_bh(&midc->lock);

		ret = dma_cookie_status(chan, cookie, txstate);
	}

	return ret;
}

static int dma_slave_control(struct dma_chan *chan, unsigned long arg)
{
	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
	struct dma_slave_config  *slave = (struct dma_slave_config *)arg;
	struct intel_mid_dma_slave *mid_slave;

	BUG_ON(!midc);
	BUG_ON(!slave);
	pr_debug("MDMA: slave control called\n");

	mid_slave = to_intel_mid_dma_slave(slave);

	BUG_ON(!mid_slave);

	midc->mid_slave = mid_slave;
	return 0;
}
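
/*
 * Note: to_intel_mid_dma_slave() above presumably recovers the enclosing
 * struct intel_mid_dma_slave from its embedded dma_slave_config, so clients
 * issuing DMA_SLAVE_CONFIG are expected to pass the dma_slave member of a
 * struct intel_mid_dma_slave rather than a standalone dma_slave_config.
 */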

/**
 * intel_mid_dma_device_control - DMA device control
 * @chan: chan for DMA control
 * @cmd: control cmd
 * @arg: cmd arg value
 *
 * Perform DMA control command
 */
static int intel_mid_dma_device_control(struct dma_chan *chan,
	enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
	struct middma_device	*mid = to_middma_device(chan->device);
	struct intel_mid_dma_desc	*desc, *_desc;
	union intel_mid_dma_cfg_lo cfg_lo;

	if (cmd == DMA_SLAVE_CONFIG)
		return dma_slave_control(chan, arg);

	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	spin_lock_bh(&midc->lock);
	if (midc->busy == false) {
		spin_unlock_bh(&midc->lock);
		return 0;
	}
	/*Suspend and disable the channel*/
	cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW);
	cfg_lo.cfgx.ch_susp = 1;
	iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW);
	iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
	midc->busy = false;
	/* Disable interrupts */
	disable_dma_interrupt(midc);
	midc->descs_allocated = 0;

	spin_unlock_bh(&midc->lock);
	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
		if (desc->lli != NULL) {
			pci_pool_free(desc->lli_pool, desc->lli,
						desc->lli_phys);
			pci_pool_destroy(desc->lli_pool);
			desc->lli = NULL;
		}
		list_move(&desc->desc_node, &midc->free_list);
	}
	return 0;
}

/**
 * intel_mid_dma_prep_memcpy - Prep memcpy txn
 * @chan: chan for DMA transfer
 * @dest: destn address
 * @src: src address
 * @len: DMA transfer len
 * @flags: DMA flags
 *
 * Perform a DMA memcpy. Note we support slave peripheral DMA transfers only
 * The peripheral txn details should be filled in slave structure properly
 * Returns the descriptor for this txn
 */
static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
			struct dma_chan *chan, dma_addr_t dest,
			dma_addr_t src, size_t len, unsigned long flags)
{
	struct intel_mid_dma_chan *midc;
	struct intel_mid_dma_desc *desc = NULL;
	struct intel_mid_dma_slave *mids;
	union intel_mid_dma_ctl_lo ctl_lo;
	union intel_mid_dma_ctl_hi ctl_hi;
	union intel_mid_dma_cfg_lo cfg_lo;
	union intel_mid_dma_cfg_hi cfg_hi;
	enum dma_slave_buswidth width;

	pr_debug("MDMA: Prep for memcpy\n");
	BUG_ON(!chan);
	if (!len)
		return NULL;

	midc = to_intel_mid_dma_chan(chan);
	BUG_ON(!midc);

	mids = midc->mid_slave;
	BUG_ON(!mids);

	pr_debug("MDMA:called for DMA %x CH %d Length %zu\n",
				midc->dma->pci_id, midc->ch_id, len);
	pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n",
			mids->cfg_mode, mids->dma_slave.direction,
			mids->hs_mode, mids->dma_slave.src_addr_width);

	/*calculate CFG_LO*/
	if (mids->hs_mode == LNW_DMA_SW_HS) {
		cfg_lo.cfg_lo = 0;
		cfg_lo.cfgx.hs_sel_dst = 1;
		cfg_lo.cfgx.hs_sel_src = 1;
	} else if (mids->hs_mode == LNW_DMA_HW_HS)
		cfg_lo.cfg_lo = 0x00000;

	/*calculate CFG_HI*/
	if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
		/*SW HS only*/
		cfg_hi.cfg_hi = 0;
	} else {
		cfg_hi.cfg_hi = 0;
		if (midc->dma->pimr_mask) {
			cfg_hi.cfgx.protctl = 0x0; /*default value*/
			cfg_hi.cfgx.fifo_mode = 1;
			if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
				cfg_hi.cfgx.src_per = 0;
				if (mids->device_instance == 0)
					cfg_hi.cfgx.dst_per = 3;
				if (mids->device_instance == 1)
					cfg_hi.cfgx.dst_per = 1;
			} else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) {
				if (mids->device_instance == 0)
					cfg_hi.cfgx.src_per = 2;
				if (mids->device_instance == 1)
					cfg_hi.cfgx.src_per = 0;
				cfg_hi.cfgx.dst_per = 0;
			}
		} else {
			cfg_hi.cfgx.protctl = 0x1; /*default value*/
			cfg_hi.cfgx.src_per = cfg_hi.cfgx.dst_per =
					midc->ch_id - midc->dma->chan_base;
		}
	}

	/*calculate CTL_HI*/
	ctl_hi.ctlx.reser = 0;
	ctl_hi.ctlx.done  = 0;
	width = mids->dma_slave.src_addr_width;

	ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size);
	pr_debug("MDMA:calc len %d for block size %d\n",
				ctl_hi.ctlx.block_ts, midc->dma->block_size);
	/*calculate CTL_LO*/
	ctl_lo.ctl_lo = 0;
	ctl_lo.ctlx.int_en = 1;
	ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst;
	ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst;

	/*
	 * Here we need some translation from "enum dma_slave_buswidth"
	 * to the format for our dma controller
	 *		standard	intel_mid_dmac's format
	 *		 1 Byte			0b000
	 *		 2 Bytes		0b001
	 *		 4 Bytes		0b010
	 */
	ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width / 2;
	ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width / 2;
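	/*
	 * Worked example of the mapping above (an illustration, not new
	 * behaviour): DMA_SLAVE_BUSWIDTH_4_BYTES is 4, and 4 / 2 = 2 = 0b010,
	 * which matches the controller encoding listed in the comment;
	 * likewise 1 / 2 = 0b000 and 2 / 2 = 0b001.
	 */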

	if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
		ctl_lo.ctlx.tt_fc = 0;
		ctl_lo.ctlx.sinc = 0;
		ctl_lo.ctlx.dinc = 0;
	} else {
		if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
			ctl_lo.ctlx.sinc = 0;
			ctl_lo.ctlx.dinc = 2;
			ctl_lo.ctlx.tt_fc = 1;
		} else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) {
			ctl_lo.ctlx.sinc = 2;
			ctl_lo.ctlx.dinc = 0;
			ctl_lo.ctlx.tt_fc = 2;
		}
	}

	pr_debug("MDMA:Calc CTL LO %x, CTL HI %x, CFG LO %x, CFG HI %x\n",
		ctl_lo.ctl_lo, ctl_hi.ctl_hi, cfg_lo.cfg_lo, cfg_hi.cfg_hi);

	enable_dma_interrupt(midc);

	desc = midc_desc_get(midc);
	if (desc == NULL)
		goto err_desc_get;
	desc->sar = src;
	desc->dar = dest;
	desc->len = len;
	desc->cfg_hi = cfg_hi.cfg_hi;
	desc->cfg_lo = cfg_lo.cfg_lo;
	desc->ctl_lo = ctl_lo.ctl_lo;
	desc->ctl_hi = ctl_hi.ctl_hi;
	desc->width = width;
	desc->dirn = mids->dma_slave.direction;
	desc->lli_phys = 0;
	desc->lli = NULL;
	desc->lli_pool = NULL;
	return &desc->txd;

err_desc_get:
	pr_err("ERR_MDMA: Failed to get desc\n");
	midc_desc_put(midc, desc);
	return NULL;
}

/**
 * intel_mid_dma_prep_slave_sg - Prep slave sg txn
 * @chan: chan for DMA transfer
 * @sgl: scatter gather list
 * @sg_len: length of sg txn
 * @direction: DMA transfer direction
 * @flags: DMA flags
 * @context: transfer context (ignored)
 *
 * Prepares LLI based peripheral transfer
 */
static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
			struct dma_chan *chan, struct scatterlist *sgl,
			unsigned int sg_len, enum dma_transfer_direction direction,
			unsigned long flags, void *context)
{
	struct intel_mid_dma_chan *midc = NULL;
	struct intel_mid_dma_slave *mids = NULL;
	struct intel_mid_dma_desc *desc = NULL;
	struct dma_async_tx_descriptor *txd = NULL;
	union intel_mid_dma_ctl_lo ctl_lo;

	pr_debug("MDMA: Prep for slave SG\n");

	if (!sg_len) {
		pr_err("MDMA: Invalid SG length\n");
		return NULL;
	}
	midc = to_intel_mid_dma_chan(chan);
	BUG_ON(!midc);

	mids = midc->mid_slave;
	BUG_ON(!mids);

	if (!midc->dma->pimr_mask) {
		/* We can still handle sg list with only one item */
		if (sg_len == 1) {
			txd = intel_mid_dma_prep_memcpy(chan,
						mids->dma_slave.dst_addr,
						mids->dma_slave.src_addr,
						sg_dma_len(sgl),
						flags);
			return txd;
		} else {
			pr_warn("MDMA: SG list is not supported by this controller\n");
			return NULL;
		}
	}

	pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
			sg_len, direction, flags);

	txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sg_dma_len(sgl), flags);
	if (NULL == txd) {
		pr_err("MDMA: Prep memcpy failed\n");
		return NULL;
	}

	desc = to_intel_mid_dma_desc(txd);
	desc->dirn = direction;
	ctl_lo.ctl_lo = desc->ctl_lo;
	ctl_lo.ctlx.llp_dst_en = 1;
	ctl_lo.ctlx.llp_src_en = 1;
	desc->ctl_lo = ctl_lo.ctl_lo;
	desc->lli_length = sg_len;
	desc->current_lli = 0;
	/* DMA coherent memory pool for LLI descriptors*/
	desc->lli_pool = pci_pool_create("intel_mid_dma_lli_pool",
					midc->dma->pdev,
					(sizeof(struct intel_mid_dma_lli)*sg_len),
					32, 0);
	if (NULL == desc->lli_pool) {
		pr_err("MID_DMA:LLI pool create failed\n");
		return NULL;
	}

	desc->lli = pci_pool_alloc(desc->lli_pool, GFP_KERNEL, &desc->lli_phys);
	if (!desc->lli) {
		pr_err("MID_DMA: LLI alloc failed\n");
		pci_pool_destroy(desc->lli_pool);
		return NULL;
	}

	midc_lli_fill_sg(midc, desc, sgl, sg_len, flags);
	if (flags & DMA_PREP_INTERRUPT) {
		iowrite32(UNMASK_INTR_REG(midc->ch_id),
				midc->dma_base + MASK_BLOCK);
		pr_debug("MDMA:Enabled Block interrupt\n");
	}
	return &desc->txd;
}

/**
 * intel_mid_dma_free_chan_resources - Frees dma resources
 * @chan: chan requiring attention
 *
 * Frees the allocated resources on this DMA chan
 */
static void intel_mid_dma_free_chan_resources(struct dma_chan *chan)
{
	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
	struct middma_device	*mid = to_middma_device(chan->device);
	struct intel_mid_dma_desc	*desc, *_desc;

	if (true == midc->busy) {
		/*trying to free ch in use!!!!!*/
		pr_err("ERR_MDMA: trying to free ch in use\n");
	}
	spin_lock_bh(&midc->lock);
	midc->descs_allocated = 0;
	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
		list_del(&desc->desc_node);
		pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
	}
	list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
		list_del(&desc->desc_node);
		pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
	}
	list_for_each_entry_safe(desc, _desc, &midc->queue, desc_node) {
		list_del(&desc->desc_node);
		pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
	}
	spin_unlock_bh(&midc->lock);
	midc->in_use = false;
	midc->busy = false;
	/* Disable CH interrupts */
	iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK);
	iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR);
	pm_runtime_put(&mid->pdev->dev);
}

/**
 * intel_mid_dma_alloc_chan_resources - Allocate dma resources
 * @chan: chan requiring attention
 *
 * Allocates DMA resources on this chan
 * Return the descriptors allocated
 */
static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
	struct middma_device	*mid = to_middma_device(chan->device);
	struct intel_mid_dma_desc	*desc;
	dma_addr_t		phys;
	int	i = 0;

	pm_runtime_get_sync(&mid->pdev->dev);

	if (mid->state == SUSPENDED) {
		if (dma_resume(&mid->pdev->dev)) {
			pr_err("ERR_MDMA: resume failed");
			return -EFAULT;
		}
	}

	/* ASSERT:  channel is idle */
	if (test_ch_en(mid->dma_base, midc->ch_id)) {
		/*ch is not idle*/
		pr_err("ERR_MDMA: ch not idle\n");
		pm_runtime_put(&mid->pdev->dev);
		return -EIO;
	}
	dma_cookie_init(chan);

	spin_lock_bh(&midc->lock);
	while (midc->descs_allocated < DESCS_PER_CHANNEL) {
		spin_unlock_bh(&midc->lock);
		desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys);
		if (!desc) {
			pr_err("ERR_MDMA: desc failed\n");
			pm_runtime_put(&mid->pdev->dev);
			return -ENOMEM;
			/*check*/
		}
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = intel_mid_dma_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = phys;
		spin_lock_bh(&midc->lock);
		i = ++midc->descs_allocated;
		list_add_tail(&desc->desc_node, &midc->free_list);
	}
	spin_unlock_bh(&midc->lock);
	midc->in_use = true;
	midc->busy = false;
	pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i);
	return i;
}

/**
 * midc_handle_error - Handle DMA txn error
 * @mid: controller where error occurred
 * @midc: chan where error occurred
 *
 * Scan the descriptor for error
 */
static void midc_handle_error(struct middma_device *mid,
		struct intel_mid_dma_chan *midc)
{
	midc_scan_descriptors(mid, midc);
}

/**
 * dma_tasklet - DMA interrupt tasklet
 * @data: tasklet arg (the controller structure)
 *
 * Scan the controller for interrupts for completion/error
 * Clear the interrupt and call for handling completion/error
 */
static void dma_tasklet(unsigned long data)
{
	struct middma_device	*mid = NULL;
	struct intel_mid_dma_chan *midc = NULL;
	u32 status, raw_tfr, raw_block;
	int i;

	mid = (struct middma_device *)data;
	if (mid == NULL) {
		pr_err("ERR_MDMA: tasklet Null param\n");
		return;
	}
	pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id);
	raw_tfr = ioread32(mid->dma_base + RAW_TFR);
	raw_block = ioread32(mid->dma_base + RAW_BLOCK);
	status = raw_tfr | raw_block;
	status &= mid->intr_mask;
	while (status) {
		/*txn interrupt*/
		i = get_ch_index(&status, mid->chan_base);
		if (i < 0) {
			pr_err("ERR_MDMA:Invalid ch index %x\n", i);
			return;
		}
		midc = &mid->ch[i];
		if (midc == NULL) {
			pr_err("ERR_MDMA:Null param midc\n");
			return;
		}
		pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n",
				status, midc->ch_id, i);
		midc->raw_tfr = raw_tfr;
		midc->raw_block = raw_block;
		spin_lock_bh(&midc->lock);
		/*clearing this interrupts first*/
		iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR);
		if (raw_block) {
			iowrite32((1 << midc->ch_id),
				mid->dma_base + CLEAR_BLOCK);
		}
		midc_scan_descriptors(mid, midc);
		pr_debug("MDMA:Scan of desc... complete, unmasking\n");
		iowrite32(UNMASK_INTR_REG(midc->ch_id),
				mid->dma_base + MASK_TFR);
		if (raw_block) {
			iowrite32(UNMASK_INTR_REG(midc->ch_id),
					mid->dma_base + MASK_BLOCK);
		}
		spin_unlock_bh(&midc->lock);
	}

	status = ioread32(mid->dma_base + RAW_ERR);
	status &= mid->intr_mask;
	while (status) {
		/*err interrupt*/
		i = get_ch_index(&status, mid->chan_base);
		if (i < 0) {
			pr_err("ERR_MDMA:Invalid ch index %x\n", i);
			return;
		}
		midc = &mid->ch[i];
		if (midc == NULL) {
			pr_err("ERR_MDMA:Null param midc\n");
			return;
		}
		pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n",
				status, midc->ch_id, i);

		iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_ERR);
		spin_lock_bh(&midc->lock);
		midc_handle_error(mid, midc);
		iowrite32(UNMASK_INTR_REG(midc->ch_id),
				mid->dma_base + MASK_ERR);
		spin_unlock_bh(&midc->lock);
	}
	pr_debug("MDMA:Exiting tasklet...\n");
	return;
}

static void dma_tasklet1(unsigned long data)
{
	pr_debug("MDMA:in tasklet1...\n");
	return dma_tasklet(data);
}

static void dma_tasklet2(unsigned long data)
{
	pr_debug("MDMA:in tasklet2...\n");
	return dma_tasklet(data);
}

/**
 * intel_mid_dma_interrupt - DMA ISR
 * @irq: IRQ where interrupt occurred
 * @data: ISR callback data (the controller structure)
 *
 * See if this is our interrupt if so then schedule the tasklet
 * otherwise ignore
 */
static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
{
	struct middma_device *mid = data;
	u32 tfr_status, err_status;
	int call_tasklet = 0;

	tfr_status = ioread32(mid->dma_base + RAW_TFR);
	err_status = ioread32(mid->dma_base + RAW_ERR);
	if (!tfr_status && !err_status)
		return IRQ_NONE;

	/*DMA Interrupt*/
	pr_debug("MDMA:Got an interrupt on irq %d\n", irq);
	pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask);
	tfr_status &= mid->intr_mask;
	if (tfr_status) {
		/*need to disable intr*/
		iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_TFR);
		iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_BLOCK);
		pr_debug("MDMA: Calling tasklet %x\n", tfr_status);
		call_tasklet = 1;
	}
	err_status &= mid->intr_mask;
	if (err_status) {
		iowrite32((err_status << INT_MASK_WE),
			  mid->dma_base + MASK_ERR);
		call_tasklet = 1;
	}
	if (call_tasklet)
		tasklet_schedule(&mid->tasklet);

	return IRQ_HANDLED;
}

static irqreturn_t intel_mid_dma_interrupt1(int irq, void *data)
{
	return intel_mid_dma_interrupt(irq, data);
}

static irqreturn_t intel_mid_dma_interrupt2(int irq, void *data)
{
	return intel_mid_dma_interrupt(irq, data);
}

/**
 * mid_setup_dma - Setup the DMA controller
 * @pdev: Controller PCI device structure
 *
 * Initialize the DMA controller, channels, registers with DMA engine,
 * ISR. Initialize DMA controller channels.
 */
static int mid_setup_dma(struct pci_dev *pdev)
{
	struct middma_device	*dma = pci_get_drvdata(pdev);
	int err, i;

	/* DMA coherent memory pool for DMA descriptor allocations */
	dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev,
					sizeof(struct intel_mid_dma_desc),
					32, 0);
	if (NULL == dma->dma_pool) {
		pr_err("ERR_MDMA:pci_pool_create failed\n");
		err = -ENOMEM;
		goto err_dma_pool;
	}

	INIT_LIST_HEAD(&dma->common.channels);
	dma->pci_id = pdev->device;
	if (dma->pimr_mask) {
		dma->mask_reg = ioremap(LNW_PERIPHRAL_MASK_BASE,
					LNW_PERIPHRAL_MASK_SIZE);
		if (dma->mask_reg == NULL) {
			pr_err("ERR_MDMA:Can't map peripheral intr space !!\n");
			err = -ENOMEM;
			goto err_ioremap;
		}
	} else
		dma->mask_reg = NULL;

	pr_debug("MDMA:Adding %d channel for this controller\n", dma->max_chan);
	/*init CH structures*/
	dma->intr_mask = 0;
	dma->state = RUNNING;
	for (i = 0; i < dma->max_chan; i++) {
		struct intel_mid_dma_chan *midch = &dma->ch[i];

		midch->chan.device = &dma->common;
		dma_cookie_init(&midch->chan);
		midch->ch_id = dma->chan_base + i;
		pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id);

		midch->dma_base = dma->dma_base;
		midch->ch_regs = dma->dma_base + DMA_CH_SIZE * midch->ch_id;
		midch->dma = dma;
		dma->intr_mask |= 1 << (dma->chan_base + i);
		spin_lock_init(&midch->lock);

		INIT_LIST_HEAD(&midch->active_list);
		INIT_LIST_HEAD(&midch->queue);
		INIT_LIST_HEAD(&midch->free_list);
		/*mask interrupts*/
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_BLOCK);
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_SRC_TRAN);
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_DST_TRAN);
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_ERR);
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_TFR);

		disable_dma_interrupt(midch);
		list_add_tail(&midch->chan.device_node, &dma->common.channels);
	}
	pr_debug("MDMA: Calc Mask as %x for this controller\n", dma->intr_mask);

	/*init dma structure*/
	dma_cap_zero(dma->common.cap_mask);
	dma_cap_set(DMA_MEMCPY, dma->common.cap_mask);
	dma_cap_set(DMA_SLAVE, dma->common.cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->common.cap_mask);
	dma->common.dev = &pdev->dev;

	dma->common.device_alloc_chan_resources =
					intel_mid_dma_alloc_chan_resources;
	dma->common.device_free_chan_resources =
					intel_mid_dma_free_chan_resources;

	dma->common.device_tx_status = intel_mid_dma_tx_status;
	dma->common.device_prep_dma_memcpy = intel_mid_dma_prep_memcpy;
	dma->common.device_issue_pending = intel_mid_dma_issue_pending;
	dma->common.device_prep_slave_sg = intel_mid_dma_prep_slave_sg;
	dma->common.device_control = intel_mid_dma_device_control;

	/*enable dma cntrl*/
	iowrite32(REG_BIT0, dma->dma_base + DMA_CFG);

	/*register irq */
	if (dma->pimr_mask) {
		pr_debug("MDMA:Requesting irq shared for DMAC1\n");
		err = request_irq(pdev->irq, intel_mid_dma_interrupt1,
			IRQF_SHARED, "INTEL_MID_DMAC1", dma);
		if (0 != err)
			goto err_irq;
	} else {
		dma->intr_mask = 0x03;
		pr_debug("MDMA:Requesting irq for DMAC2\n");
		err = request_irq(pdev->irq, intel_mid_dma_interrupt2,
			IRQF_SHARED, "INTEL_MID_DMAC2", dma);
		if (0 != err)
			goto err_irq;
	}
	/*register device w/ engine*/
	err = dma_async_device_register(&dma->common);
	if (0 != err) {
		pr_err("ERR_MDMA:device_register failed: %d\n", err);
		goto err_engine;
	}
	if (dma->pimr_mask) {
		pr_debug("setting up tasklet1 for DMAC1\n");
		tasklet_init(&dma->tasklet, dma_tasklet1, (unsigned long)dma);
	} else {
		pr_debug("setting up tasklet2 for DMAC2\n");
		tasklet_init(&dma->tasklet, dma_tasklet2, (unsigned long)dma);
	}
	return 0;

err_engine:
	free_irq(pdev->irq, dma);
err_irq:
	if (dma->mask_reg)
		iounmap(dma->mask_reg);
err_ioremap:
	pci_pool_destroy(dma->dma_pool);
err_dma_pool:
	pr_err("ERR_MDMA:setup_dma failed: %d\n", err);
	return err;
}

/**
 * middma_shutdown - Shutdown the DMA controller
 * @pdev: Controller PCI device structure
 *
 * Called by remove
 * Unregister DMA controller, clear all structures and free interrupt
 */
static void middma_shutdown(struct pci_dev *pdev)
{
	struct middma_device *device = pci_get_drvdata(pdev);

	dma_async_device_unregister(&device->common);
	pci_pool_destroy(device->dma_pool);
	if (device->mask_reg)
		iounmap(device->mask_reg);
	if (device->dma_base)
		iounmap(device->dma_base);
	free_irq(pdev->irq, device);
	return;
}

/**
 * intel_mid_dma_probe - PCI Probe
 * @pdev: Controller PCI device structure
 * @id: pci device id structure
 *
 * Initialize the PCI device, map BARs, query driver data.
 * Call setup_dma to complete controller and chan initialization
 */
static int intel_mid_dma_probe(struct pci_dev *pdev,
					const struct pci_device_id *id)
{
	struct middma_device *device;
	u32 base_addr, bar_size;
	struct intel_mid_dma_probe_info *info;
	int err;

	pr_debug("MDMA: probe for %x\n", pdev->device);
	info = (void *)id->driver_data;
	pr_debug("MDMA: CH %d, base %d, block len %d, Peripheral mask %x\n",
				info->max_chan, info->ch_base,
				info->block_size, info->pimr_mask);

	err = pci_enable_device(pdev);
	if (err)
		goto err_enable_device;

	err = pci_request_regions(pdev, "intel_mid_dmac");
	if (err)
		goto err_request_regions;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		goto err_set_dma_mask;

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		goto err_set_dma_mask;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		pr_err("ERR_MDMA:kzalloc failed probe\n");
		err = -ENOMEM;
		goto err_kzalloc;
	}
	device->pdev = pci_dev_get(pdev);

	base_addr = pci_resource_start(pdev, 0);
	bar_size  = pci_resource_len(pdev, 0);
	device->dma_base = ioremap_nocache(base_addr, DMA_REG_SIZE);
	if (!device->dma_base) {
		pr_err("ERR_MDMA:ioremap failed\n");
		err = -ENOMEM;
		goto err_ioremap;
	}
	pci_set_drvdata(pdev, device);
	pci_set_master(pdev);
	device->max_chan = info->max_chan;
	device->chan_base = info->ch_base;
	device->block_size = info->block_size;
	device->pimr_mask = info->pimr_mask;

	err = mid_setup_dma(pdev);
	if (err)
		goto err_dma;

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_allow(&pdev->dev);
	return 0;

err_dma:
	iounmap(device->dma_base);
err_ioremap:
	pci_dev_put(pdev);
	kfree(device);
err_kzalloc:
err_set_dma_mask:
	pci_release_regions(pdev);
	pci_disable_device(pdev);
err_request_regions:
err_enable_device:
	pr_err("ERR_MDMA:Probe failed %d\n", err);
	return err;
}

/**
 * intel_mid_dma_remove - PCI remove
 * @pdev: Controller PCI device structure
 *
 * Free up all resources and data
 * Call shutdown_dma to complete controller and chan cleanup
 */
static void intel_mid_dma_remove(struct pci_dev *pdev)
{
	struct middma_device *device = pci_get_drvdata(pdev);

	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_forbid(&pdev->dev);
	middma_shutdown(pdev);
	pci_dev_put(pdev);
	kfree(device);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

/* Power Management */
/*
 * dma_suspend - PCI suspend function
 *
 * @dev: device structure of the DMA controller
 *
 * This function is called by OS when a power event occurs
 */
static int dma_suspend(struct device *dev)
{
	struct pci_dev *pci = to_pci_dev(dev);
	int i;
	struct middma_device *device = pci_get_drvdata(pci);

	pr_debug("MDMA: dma_suspend called\n");

	for (i = 0; i < device->max_chan; i++) {
		if (device->ch[i].in_use)
			return -EAGAIN;
	}
	dmac1_mask_periphral_intr(device);
	device->state = SUSPENDED;
	pci_save_state(pci);
	pci_disable_device(pci);
	pci_set_power_state(pci, PCI_D3hot);
	return 0;
}

/**
 * dma_resume - PCI resume function
 *
 * @dev: device structure of the DMA controller
 *
 * This function is called by OS when a power event occurs
 */
int dma_resume(struct device *dev)
{
	struct pci_dev *pci = to_pci_dev(dev);
	int ret;
	struct middma_device *device = pci_get_drvdata(pci);

	pr_debug("MDMA: dma_resume called\n");
	pci_set_power_state(pci, PCI_D0);
	pci_restore_state(pci);
	ret = pci_enable_device(pci);
	if (ret) {
		pr_err("MDMA: device can't be enabled for %x\n", pci->device);
		return ret;
	}
	device->state = RUNNING;
	iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
	return 0;
}

static int dma_runtime_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct middma_device *device = pci_get_drvdata(pci_dev);

	device->state = SUSPENDED;
	return 0;
}

static int dma_runtime_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct middma_device *device = pci_get_drvdata(pci_dev);

	device->state = RUNNING;
	iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
	return 0;
}

static int dma_runtime_idle(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct middma_device *device = pci_get_drvdata(pdev);
	int i;

	for (i = 0; i < device->max_chan; i++) {
		if (device->ch[i].in_use)
			return -EAGAIN;
	}

	return 0;
}

/******************************************************************************
* PCI stuff
*/
static struct pci_device_id intel_mid_dma_ids[] = {
	{ PCI_VDEVICE(INTEL, INTEL_MID_DMAC1_ID),	INFO(2, 6, 4095, 0x200020)},
	{ PCI_VDEVICE(INTEL, INTEL_MID_DMAC2_ID),	INFO(2, 0, 2047, 0)},
	{ PCI_VDEVICE(INTEL, INTEL_MID_GP_DMAC2_ID),	INFO(2, 0, 2047, 0)},
	{ PCI_VDEVICE(INTEL, INTEL_MFLD_DMAC1_ID),	INFO(4, 0, 4095, 0x400040)},
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids);

static const struct dev_pm_ops intel_mid_dma_pm = {
	.runtime_suspend = dma_runtime_suspend,
	.runtime_resume = dma_runtime_resume,
	.runtime_idle = dma_runtime_idle,
	.suspend = dma_suspend,
	.resume = dma_resume,
};

static struct pci_driver intel_mid_dma_pci_driver = {
	.name		=	"Intel MID DMA",
	.id_table	=	intel_mid_dma_ids,
	.probe		=	intel_mid_dma_probe,
	.remove		=	intel_mid_dma_remove,
#ifdef CONFIG_PM
	.driver = {
		.pm = &intel_mid_dma_pm,
	},
#endif
};

static int __init intel_mid_dma_init(void)
{
	pr_debug("INFO_MDMA: LNW DMA Driver Version %s\n",
			INTEL_MID_DMA_DRIVER_VERSION);
	return pci_register_driver(&intel_mid_dma_pci_driver);
}
fs_initcall(intel_mid_dma_init);

static void __exit intel_mid_dma_exit(void)
{
	pci_unregister_driver(&intel_mid_dma_pci_driver);
}
module_exit(intel_mid_dma_exit);

MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
MODULE_DESCRIPTION("Intel (R) MID DMAC Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(INTEL_MID_DMA_DRIVER_VERSION);