/*
 * at91_can.c - CAN network driver for AT91 SoC CAN controller
 *
 * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
 * (C) 2008, 2009, 2010, 2011 by Marc Kleine-Budde <kernel@pengutronix.de>
 *
 * This software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2 as distributed in the 'COPYING'
 * file from the main directory of the linux kernel source.
 *
 *
 * Your platform definition file should specify something like:
 *
 * static struct at91_can_data ek_can_data = {
 *	transceiver_switch = sam9263ek_transceiver_switch,
 * };
 *
 * at91_add_device_can(&ek_can_data);
 *
 */

#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/platform_data/atmel.h>

#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/can/led.h>

#define AT91_MB_MASK(i)		((1 << (i)) - 1)
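
/*
 * For illustration only: AT91_MB_MASK(4) evaluates to 0x0f, i.e. a bit
 * mask covering mailboxes 0..3. The helpers further down combine two
 * such masks to describe mailbox ranges.
 */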

/* Common registers */
enum at91_reg {
	AT91_MR		= 0x000,
	AT91_IER	= 0x004,
	AT91_IDR	= 0x008,
	AT91_IMR	= 0x00C,
	AT91_SR		= 0x010,
	AT91_BR		= 0x014,
	AT91_TIM	= 0x018,
	AT91_TIMESTP	= 0x01C,
	AT91_ECR	= 0x020,
	AT91_TCR	= 0x024,
	AT91_ACR	= 0x028,
};

/* Mailbox registers (0 <= i <= 15) */
#define AT91_MMR(i)		(enum at91_reg)(0x200 + ((i) * 0x20))
#define AT91_MAM(i)		(enum at91_reg)(0x204 + ((i) * 0x20))
#define AT91_MID(i)		(enum at91_reg)(0x208 + ((i) * 0x20))
#define AT91_MFID(i)		(enum at91_reg)(0x20C + ((i) * 0x20))
#define AT91_MSR(i)		(enum at91_reg)(0x210 + ((i) * 0x20))
#define AT91_MDL(i)		(enum at91_reg)(0x214 + ((i) * 0x20))
#define AT91_MDH(i)		(enum at91_reg)(0x218 + ((i) * 0x20))
#define AT91_MCR(i)		(enum at91_reg)(0x21C + ((i) * 0x20))
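
/*
 * Worked example (informational): each mailbox occupies a 0x20 byte
 * register window, so e.g. AT91_MMR(1) = 0x220 and AT91_MCR(1) = 0x23C.
 */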

/* Register bits */
#define AT91_MR_CANEN		BIT(0)
#define AT91_MR_LPM		BIT(1)
#define AT91_MR_ABM		BIT(2)
#define AT91_MR_OVL		BIT(3)
#define AT91_MR_TEOF		BIT(4)
#define AT91_MR_TTM		BIT(5)
#define AT91_MR_TIMFRZ		BIT(6)
#define AT91_MR_DRPT		BIT(7)

#define AT91_SR_RBSY		BIT(29)

#define AT91_MMR_PRIO_SHIFT	(16)

#define AT91_MID_MIDE		BIT(29)

#define AT91_MSR_MRTR		BIT(20)
#define AT91_MSR_MABT		BIT(22)
#define AT91_MSR_MRDY		BIT(23)
#define AT91_MSR_MMI		BIT(24)

#define AT91_MCR_MRTR		BIT(20)
#define AT91_MCR_MTCR		BIT(23)

/* Mailbox Modes */
enum at91_mb_mode {
	AT91_MB_MODE_DISABLED	= 0,
	AT91_MB_MODE_RX		= 1,
	AT91_MB_MODE_RX_OVRWR	= 2,
	AT91_MB_MODE_TX		= 3,
	AT91_MB_MODE_CONSUMER	= 4,
	AT91_MB_MODE_PRODUCER	= 5,
};

/* Interrupt mask bits */
#define AT91_IRQ_ERRA		(1 << 16)
#define AT91_IRQ_WARN		(1 << 17)
#define AT91_IRQ_ERRP		(1 << 18)
#define AT91_IRQ_BOFF		(1 << 19)
#define AT91_IRQ_SLEEP		(1 << 20)
#define AT91_IRQ_WAKEUP		(1 << 21)
#define AT91_IRQ_TOVF		(1 << 22)
#define AT91_IRQ_TSTP		(1 << 23)
#define AT91_IRQ_CERR		(1 << 24)
#define AT91_IRQ_SERR		(1 << 25)
#define AT91_IRQ_AERR		(1 << 26)
#define AT91_IRQ_FERR		(1 << 27)
#define AT91_IRQ_BERR		(1 << 28)

#define AT91_IRQ_ERR_ALL	(0x1fff0000)
#define AT91_IRQ_ERR_FRAME	(AT91_IRQ_CERR | AT91_IRQ_SERR | \
				 AT91_IRQ_AERR | AT91_IRQ_FERR | AT91_IRQ_BERR)
#define AT91_IRQ_ERR_LINE	(AT91_IRQ_ERRA | AT91_IRQ_WARN | \
				 AT91_IRQ_ERRP | AT91_IRQ_BOFF)

#define AT91_IRQ_ALL		(0x1fffffff)

enum at91_devtype {
	AT91_DEVTYPE_SAM9263,
	AT91_DEVTYPE_SAM9X5,
};

struct at91_devtype_data {
	unsigned int rx_first;
	unsigned int rx_split;
	unsigned int rx_last;
	unsigned int tx_shift;
	enum at91_devtype type;
};

struct at91_priv {
	struct can_priv can;		/* must be the first member! */
	struct napi_struct napi;

	void __iomem *reg_base;

	u32 reg_sr;
	unsigned int tx_next;
	unsigned int tx_echo;
	unsigned int rx_next;

	struct at91_devtype_data devtype_data;

	struct clk *clk;
	struct at91_can_data *pdata;

	canid_t mb0_id;
};

static const struct at91_devtype_data at91_at91sam9263_data = {
	.rx_first = 1,
	.rx_split = 8,
	.rx_last = 11,
	.tx_shift = 2,
	.type = AT91_DEVTYPE_SAM9263,
};

static const struct at91_devtype_data at91_at91sam9x5_data = {
	.rx_first = 0,
	.rx_split = 4,
	.rx_last = 5,
	.tx_shift = 1,
	.type = AT91_DEVTYPE_SAM9X5,
};
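
/*
 * Resulting mailbox layout, derived from the tables above (for
 * orientation only): on at91sam9263, mailbox 0 is kept disabled (chip
 * errata), mailboxes 1-11 form the RX FIFO (lower group 1-7, upper
 * group 8-11) and mailboxes 12-15 are used for TX. On at91sam9x5,
 * mailboxes 0-5 are RX (lower group 0-3, upper group 4-5) and 6-7 TX.
 */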

static const struct can_bittiming_const at91_bittiming_const = {
	.name		= KBUILD_MODNAME,
	.tseg1_min	= 4,
	.tseg1_max	= 16,
	.tseg2_min	= 2,
	.tseg2_max	= 8,
	.sjw_max	= 4,
	.brp_min	= 2,
	.brp_max	= 128,
	.brp_inc	= 1,
};

#define AT91_IS(_model) \
static inline int at91_is_sam##_model(const struct at91_priv *priv) \
{ \
	return priv->devtype_data.type == AT91_DEVTYPE_SAM##_model; \
}

AT91_IS(9263);
AT91_IS(9X5);

static inline unsigned int get_mb_rx_first(const struct at91_priv *priv)
{
	return priv->devtype_data.rx_first;
}

static inline unsigned int get_mb_rx_last(const struct at91_priv *priv)
{
	return priv->devtype_data.rx_last;
}

static inline unsigned int get_mb_rx_split(const struct at91_priv *priv)
{
	return priv->devtype_data.rx_split;
}

static inline unsigned int get_mb_rx_num(const struct at91_priv *priv)
{
	return get_mb_rx_last(priv) - get_mb_rx_first(priv) + 1;
}

static inline unsigned int get_mb_rx_low_last(const struct at91_priv *priv)
{
	return get_mb_rx_split(priv) - 1;
}

static inline unsigned int get_mb_rx_low_mask(const struct at91_priv *priv)
{
	return AT91_MB_MASK(get_mb_rx_split(priv)) &
		~AT91_MB_MASK(get_mb_rx_first(priv));
}

static inline unsigned int get_mb_tx_shift(const struct at91_priv *priv)
{
	return priv->devtype_data.tx_shift;
}

static inline unsigned int get_mb_tx_num(const struct at91_priv *priv)
{
	return 1 << get_mb_tx_shift(priv);
}

static inline unsigned int get_mb_tx_first(const struct at91_priv *priv)
{
	return get_mb_rx_last(priv) + 1;
}

static inline unsigned int get_mb_tx_last(const struct at91_priv *priv)
{
	return get_mb_tx_first(priv) + get_mb_tx_num(priv) - 1;
}

static inline unsigned int get_next_prio_shift(const struct at91_priv *priv)
{
	return get_mb_tx_shift(priv);
}

static inline unsigned int get_next_prio_mask(const struct at91_priv *priv)
{
	return 0xf << get_mb_tx_shift(priv);
}

static inline unsigned int get_next_mb_mask(const struct at91_priv *priv)
{
	return AT91_MB_MASK(get_mb_tx_shift(priv));
}

static inline unsigned int get_next_mask(const struct at91_priv *priv)
{
	return get_next_mb_mask(priv) | get_next_prio_mask(priv);
}

static inline unsigned int get_irq_mb_rx(const struct at91_priv *priv)
{
	return AT91_MB_MASK(get_mb_rx_last(priv) + 1) &
		~AT91_MB_MASK(get_mb_rx_first(priv));
}

static inline unsigned int get_irq_mb_tx(const struct at91_priv *priv)
{
	return AT91_MB_MASK(get_mb_tx_last(priv) + 1) &
		~AT91_MB_MASK(get_mb_tx_first(priv));
}

static inline unsigned int get_tx_next_mb(const struct at91_priv *priv)
{
	return (priv->tx_next & get_next_mb_mask(priv)) + get_mb_tx_first(priv);
}

static inline unsigned int get_tx_next_prio(const struct at91_priv *priv)
{
	return (priv->tx_next >> get_next_prio_shift(priv)) & 0xf;
}

static inline unsigned int get_tx_echo_mb(const struct at91_priv *priv)
{
	return (priv->tx_echo & get_next_mb_mask(priv)) + get_mb_tx_first(priv);
}

static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg)
{
	return readl_relaxed(priv->reg_base + reg);
}

static inline void at91_write(const struct at91_priv *priv, enum at91_reg reg,
		u32 value)
{
	writel_relaxed(value, priv->reg_base + reg);
}

static inline void set_mb_mode_prio(const struct at91_priv *priv,
		unsigned int mb, enum at91_mb_mode mode, int prio)
{
	at91_write(priv, AT91_MMR(mb), (mode << 24) | (prio << 16));
}

static inline void set_mb_mode(const struct at91_priv *priv, unsigned int mb,
		enum at91_mb_mode mode)
{
	set_mb_mode_prio(priv, mb, mode, 0);
}

static inline u32 at91_can_id_to_reg_mid(canid_t can_id)
{
	u32 reg_mid;

	if (can_id & CAN_EFF_FLAG)
		reg_mid = (can_id & CAN_EFF_MASK) | AT91_MID_MIDE;
	else
		reg_mid = (can_id & CAN_SFF_MASK) << 18;

	return reg_mid;
}
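
/*
 * Encoding example (illustrative values): a standard frame ID of 0x123
 * is written as 0x123 << 18 = 0x048c0000, while an extended frame ID of
 * 0x12345678 becomes (0x12345678 & CAN_EFF_MASK) | AT91_MID_MIDE, with
 * the MIDE bit marking it as a 29 bit identifier.
 */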

/*
 * Switch transceiver on or off
 */
static void at91_transceiver_switch(const struct at91_priv *priv, int on)
{
	if (priv->pdata && priv->pdata->transceiver_switch)
		priv->pdata->transceiver_switch(on);
}

static void at91_setup_mailboxes(struct net_device *dev)
{
	struct at91_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 reg_mid;

	/*
	 * Due to a chip bug (errata 50.2.6.3 & 50.3.5.3) the first
	 * mailbox is disabled. The next 11 mailboxes are used as a
	 * reception FIFO. The last mailbox is configured with
	 * overwrite option. The overwrite flag indicates a FIFO
	 * overflow.
	 */
	reg_mid = at91_can_id_to_reg_mid(priv->mb0_id);
	for (i = 0; i < get_mb_rx_first(priv); i++) {
		set_mb_mode(priv, i, AT91_MB_MODE_DISABLED);
		at91_write(priv, AT91_MID(i), reg_mid);
		at91_write(priv, AT91_MCR(i), 0x0);	/* clear dlc */
	}

	for (i = get_mb_rx_first(priv); i < get_mb_rx_last(priv); i++)
		set_mb_mode(priv, i, AT91_MB_MODE_RX);
	set_mb_mode(priv, get_mb_rx_last(priv), AT91_MB_MODE_RX_OVRWR);

	/* reset acceptance mask and id register */
	for (i = get_mb_rx_first(priv); i <= get_mb_rx_last(priv); i++) {
		at91_write(priv, AT91_MAM(i), 0x0);
		at91_write(priv, AT91_MID(i), AT91_MID_MIDE);
	}

	/* The last 4 mailboxes are used for transmitting. */
	for (i = get_mb_tx_first(priv); i <= get_mb_tx_last(priv); i++)
		set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0);

	/* Reset tx and rx helper pointers */
	priv->tx_next = priv->tx_echo = 0;
	priv->rx_next = get_mb_rx_first(priv);
}

static int at91_set_bittiming(struct net_device *dev)
{
	const struct at91_priv *priv = netdev_priv(dev);
	const struct can_bittiming *bt = &priv->can.bittiming;
	u32 reg_br;

	reg_br = ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 << 24 : 0) |
		((bt->brp - 1) << 16) | ((bt->sjw - 1) << 12) |
		((bt->prop_seg - 1) << 8) | ((bt->phase_seg1 - 1) << 4) |
		((bt->phase_seg2 - 1) << 0);

	netdev_info(dev, "writing AT91_BR: 0x%08x\n", reg_br);

	at91_write(priv, AT91_BR, reg_br);

	return 0;
}
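
/*
 * Worked example with made-up bit-timing values (purely illustrative):
 * brp = 4, sjw = 1, prop_seg = 3, phase_seg1 = 8, phase_seg2 = 4 in
 * one-sample mode yields
 * reg_br = (3 << 16) | (0 << 12) | (2 << 8) | (7 << 4) | 3 = 0x00030273.
 */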

static int at91_get_berr_counter(const struct net_device *dev,
		struct can_berr_counter *bec)
{
	const struct at91_priv *priv = netdev_priv(dev);
	u32 reg_ecr = at91_read(priv, AT91_ECR);

	bec->rxerr = reg_ecr & 0xff;
	bec->txerr = reg_ecr >> 16;

	return 0;
}

static void at91_chip_start(struct net_device *dev)
{
	struct at91_priv *priv = netdev_priv(dev);
	u32 reg_mr, reg_ier;

	/* disable interrupts */
	at91_write(priv, AT91_IDR, AT91_IRQ_ALL);

	/* disable chip */
	reg_mr = at91_read(priv, AT91_MR);
	at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN);

	at91_set_bittiming(dev);
	at91_setup_mailboxes(dev);
	at91_transceiver_switch(priv, 1);

	/* enable chip */
	if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
		reg_mr = AT91_MR_CANEN | AT91_MR_ABM;
	else
		reg_mr = AT91_MR_CANEN;
	at91_write(priv, AT91_MR, reg_mr);

	priv->can.state = CAN_STATE_ERROR_ACTIVE;

	/* Enable interrupts */
	reg_ier = get_irq_mb_rx(priv) | AT91_IRQ_ERRP | AT91_IRQ_ERR_FRAME;
	at91_write(priv, AT91_IDR, AT91_IRQ_ALL);
	at91_write(priv, AT91_IER, reg_ier);
}

static void at91_chip_stop(struct net_device *dev, enum can_state state)
{
	struct at91_priv *priv = netdev_priv(dev);
	u32 reg_mr;

	/* disable interrupts */
	at91_write(priv, AT91_IDR, AT91_IRQ_ALL);

	reg_mr = at91_read(priv, AT91_MR);
	at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN);

	at91_transceiver_switch(priv, 0);
	priv->can.state = state;
}

/*
 * theory of operation:
 *
 * According to the datasheet priority 0 is the highest priority, 15
 * is the lowest. If two mailboxes have the same priority level the
 * message of the mailbox with the lowest number is sent first.
 *
 * We use the first TX mailbox (get_mb_tx_first()) with prio 0, then
 * the next mailbox with prio 0, and so on, until all mailboxes are
 * used. Then we start from the beginning with mailbox
 * get_mb_tx_first(), but with prio 1, mailbox get_mb_tx_first() + 1
 * with prio 1, and so on. When we reach the last mailbox with prio 15,
 * we have to stop sending, wait for all messages to be delivered,
 * then start again with mailbox get_mb_tx_first() and prio 0.
 *
 * We use priv->tx_next as counter for the next transmission
 * mailbox, but without the offset get_mb_tx_first(). The lower bits
 * encode the mailbox number, the upper 4 bits the mailbox priority:
 *
 * priv->tx_next = (prio << get_next_prio_shift(priv)) |
 *                 (mb - get_mb_tx_first(priv));
 *
 */
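/*
 * Worked example (illustrative, at91sam9263 numbers): tx_shift = 2, so
 * the low 2 bits of tx_next select the mailbox and bits 2..5 hold the
 * priority. tx_next = 5 (0b0101) therefore maps to mailbox
 * 12 + (5 & 0x3) = 13 with priority (5 >> 2) & 0xf = 1.
 */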
static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct at91_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf = (struct can_frame *)skb->data;
	unsigned int mb, prio;
	u32 reg_mid, reg_mcr;

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	mb = get_tx_next_mb(priv);
	prio = get_tx_next_prio(priv);

	if (unlikely(!(at91_read(priv, AT91_MSR(mb)) & AT91_MSR_MRDY))) {
		netif_stop_queue(dev);

		netdev_err(dev, "BUG! TX buffer full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}
	reg_mid = at91_can_id_to_reg_mid(cf->can_id);
	reg_mcr = ((cf->can_id & CAN_RTR_FLAG) ? AT91_MCR_MRTR : 0) |
		(cf->can_dlc << 16) | AT91_MCR_MTCR;

	/* disable MB while writing ID (see datasheet) */
	set_mb_mode(priv, mb, AT91_MB_MODE_DISABLED);
	at91_write(priv, AT91_MID(mb), reg_mid);
	set_mb_mode_prio(priv, mb, AT91_MB_MODE_TX, prio);

	at91_write(priv, AT91_MDL(mb), *(u32 *)(cf->data + 0));
	at91_write(priv, AT91_MDH(mb), *(u32 *)(cf->data + 4));

	/* This triggers transmission */
	at91_write(priv, AT91_MCR(mb), reg_mcr);

	stats->tx_bytes += cf->can_dlc;

	/* _NOTE_: subtract the get_mb_tx_first() offset from mb! */
	can_put_echo_skb(skb, dev, mb - get_mb_tx_first(priv));

	/*
	 * we have to stop the queue and deliver all messages in case
	 * of a prio+mb counter wrap around. This is the case if
	 * tx_next buffer prio and mailbox equals 0.
	 *
	 * also stop the queue if next buffer is still in use
	 * (== not ready)
	 */
	priv->tx_next++;
	if (!(at91_read(priv, AT91_MSR(get_tx_next_mb(priv))) &
	      AT91_MSR_MRDY) ||
	    (priv->tx_next & get_next_mask(priv)) == 0)
		netif_stop_queue(dev);

	/* Enable interrupt for this mailbox */
	at91_write(priv, AT91_IER, 1 << mb);

	return NETDEV_TX_OK;
}

/**
 * at91_activate_rx_low - activate lower rx mailboxes
 * @priv: at91 context
 *
 * Reenables the lower mailboxes for reception of new CAN messages
 */
static inline void at91_activate_rx_low(const struct at91_priv *priv)
{
	u32 mask = get_mb_rx_low_mask(priv);

	at91_write(priv, AT91_TCR, mask);
}

/**
 * at91_activate_rx_mb - reactivate single rx mailbox
 * @priv: at91 context
 * @mb: mailbox to reactivate
 *
 * Reenables given mailbox for reception of new CAN messages
 */
static inline void at91_activate_rx_mb(const struct at91_priv *priv,
		unsigned int mb)
{
	u32 mask = 1 << mb;

	at91_write(priv, AT91_TCR, mask);
}

/**
 * at91_rx_overflow_err - send error frame due to rx overflow
 * @dev: net device
 */
static void at91_rx_overflow_err(struct net_device *dev)
{
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	struct can_frame *cf;

	netdev_dbg(dev, "RX buffer overflow\n");
	stats->rx_over_errors++;
	stats->rx_errors++;

	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return;

	cf->can_id |= CAN_ERR_CRTL;
	cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
	netif_receive_skb(skb);

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
}

/**
 * at91_read_mb - read CAN msg from mailbox (lowlevel impl)
 * @dev: net device
 * @mb: mailbox number to read from
 * @cf: can frame where to store message
 *
 * Reads a CAN message from the given mailbox and stores data into
 * given can frame. "mb" and "cf" must be valid.
 */
static void at91_read_mb(struct net_device *dev, unsigned int mb,
		struct can_frame *cf)
{
	const struct at91_priv *priv = netdev_priv(dev);
	u32 reg_msr, reg_mid;

	reg_mid = at91_read(priv, AT91_MID(mb));
	if (reg_mid & AT91_MID_MIDE)
		cf->can_id = ((reg_mid >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG;
	else
		cf->can_id = (reg_mid >> 18) & CAN_SFF_MASK;

	reg_msr = at91_read(priv, AT91_MSR(mb));
	cf->can_dlc = get_can_dlc((reg_msr >> 16) & 0xf);

	if (reg_msr & AT91_MSR_MRTR)
		cf->can_id |= CAN_RTR_FLAG;
	else {
		*(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb));
		*(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb));
	}

	/* allow RX of extended frames */
	at91_write(priv, AT91_MID(mb), AT91_MID_MIDE);

	if (unlikely(mb == get_mb_rx_last(priv) && reg_msr & AT91_MSR_MMI))
		at91_rx_overflow_err(dev);
}

/**
 * at91_read_msg - read CAN message from mailbox
 * @dev: net device
 * @mb: mailbox to read from
 *
 * Reads a CAN message from the given mailbox, puts it into the linux
 * network RX queue and does all housekeeping chores (stats, ...)
 */
static void at91_read_msg(struct net_device *dev, unsigned int mb)
{
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;

	skb = alloc_can_skb(dev, &cf);
	if (unlikely(!skb)) {
		stats->rx_dropped++;
		return;
	}

	at91_read_mb(dev, mb, cf);
	netif_receive_skb(skb);

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;

	can_led_event(dev, CAN_LED_EVENT_RX);
}

/**
 * at91_poll_rx - read multiple CAN messages from mailboxes
 * @dev: net device
 * @quota: max number of packets we're allowed to receive
 *
 * Theory of Operation:
 *
 * About 3/4 of the mailboxes (get_mb_rx_first()...get_mb_rx_last())
 * on the chip are reserved for RX. We split them into 2 groups. The
 * lower group ranges from get_mb_rx_first() to get_mb_rx_low_last().
 *
 * Like it or not, the chip always saves a received CAN message
 * into the first free mailbox it finds (starting with the
 * lowest). This makes it very difficult to read the messages in the
 * right order from the chip. This is how we work around that problem:
 *
 * The first message goes into mb nr. 1 and issues an interrupt. All
 * rx ints are disabled in the interrupt handler and a napi poll is
 * scheduled. We read the mailbox, but do _not_ reenable the mb (to
 * receive another message).
 *
 *    lower mbxs      upper
 *     ____^______    __^__
 *    /           \  /     \
 * +-+-+-+-+-+-+-+-++-+-+-+-+
 * | |x|x|x|x|x|x|x|| | | | |
 * +-+-+-+-+-+-+-+-++-+-+-+-+
 *  0 0 0 0 0 0 0 0 0 0 1 1  \ mail
 *  0 1 2 3 4 5 6 7 8 9 0 1  / box
 *  ^
 *  |
 *   \
 *     unused, due to chip bug
 *
 * The variable priv->rx_next points to the next mailbox to read a
 * message from. As long as we're in the lower mailboxes we just read
 * the mailbox but do not reenable it.
 *
 * With completion of the last of the lower mailboxes, we reenable the
 * whole first group, but continue to look for filled mailboxes in the
 * upper mailboxes. Imagine the second group like overflow mailboxes,
 * which take CAN messages if the lower group is full. While in the
 * upper group we reenable the mailbox right after reading it, giving
 * the chip more room to store messages.
 *
 * After finishing we look again in the lower group if we still have
 * quota.
 *
 */
static int at91_poll_rx(struct net_device *dev, int quota)
{
	struct at91_priv *priv = netdev_priv(dev);
	u32 reg_sr = at91_read(priv, AT91_SR);
	const unsigned long *addr = (unsigned long *)&reg_sr;
	unsigned int mb;
	int received = 0;

	if (priv->rx_next > get_mb_rx_low_last(priv) &&
	    reg_sr & get_mb_rx_low_mask(priv))
		netdev_info(dev,
			"order of incoming frames cannot be guaranteed\n");

 again:
	for (mb = find_next_bit(addr, get_mb_tx_first(priv), priv->rx_next);
	     mb < get_mb_tx_first(priv) && quota > 0;
	     reg_sr = at91_read(priv, AT91_SR),
	     mb = find_next_bit(addr, get_mb_tx_first(priv), ++priv->rx_next)) {
		at91_read_msg(dev, mb);

		/* reactivate mailboxes */
		if (mb == get_mb_rx_low_last(priv))
			/* all lower mailboxes, if we just finished it */
			at91_activate_rx_low(priv);
		else if (mb > get_mb_rx_low_last(priv))
			/* only the mailbox we read */
			at91_activate_rx_mb(priv, mb);

		received++;
		quota--;
	}

	/* upper group completed, look again in lower */
	if (priv->rx_next > get_mb_rx_low_last(priv) &&
	    quota > 0 && mb > get_mb_rx_last(priv)) {
		priv->rx_next = get_mb_rx_first(priv);
		goto again;
	}

	return received;
}

static void at91_poll_err_frame(struct net_device *dev,
		struct can_frame *cf, u32 reg_sr)
{
	struct at91_priv *priv = netdev_priv(dev);

	/* CRC error */
	if (reg_sr & AT91_IRQ_CERR) {
		netdev_dbg(dev, "CERR irq\n");
		dev->stats.rx_errors++;
		priv->can.can_stats.bus_error++;
		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
	}

	/* Stuffing Error */
	if (reg_sr & AT91_IRQ_SERR) {
		netdev_dbg(dev, "SERR irq\n");
		dev->stats.rx_errors++;
		priv->can.can_stats.bus_error++;
		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
		cf->data[2] |= CAN_ERR_PROT_STUFF;
	}

	/* Acknowledgement Error */
	if (reg_sr & AT91_IRQ_AERR) {
		netdev_dbg(dev, "AERR irq\n");
		dev->stats.tx_errors++;
		cf->can_id |= CAN_ERR_ACK;
	}

	/* Form error */
	if (reg_sr & AT91_IRQ_FERR) {
		netdev_dbg(dev, "FERR irq\n");
		dev->stats.rx_errors++;
		priv->can.can_stats.bus_error++;
		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
		cf->data[2] |= CAN_ERR_PROT_FORM;
	}

	/* Bit Error */
	if (reg_sr & AT91_IRQ_BERR) {
		netdev_dbg(dev, "BERR irq\n");
		dev->stats.tx_errors++;
		priv->can.can_stats.bus_error++;
		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
		cf->data[2] |= CAN_ERR_PROT_BIT;
	}
}

static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr)
{
	struct sk_buff *skb;
	struct can_frame *cf;

	if (quota == 0)
		return 0;

	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return 0;

	at91_poll_err_frame(dev, cf, reg_sr);
	netif_receive_skb(skb);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += cf->can_dlc;

	return 1;
}

static int at91_poll(struct napi_struct *napi, int quota)
{
	struct net_device *dev = napi->dev;
	const struct at91_priv *priv = netdev_priv(dev);
	u32 reg_sr = at91_read(priv, AT91_SR);
	int work_done = 0;

	if (reg_sr & get_irq_mb_rx(priv))
		work_done += at91_poll_rx(dev, quota - work_done);

	/*
	 * The error bits are clear on read,
	 * so use saved value from irq handler.
	 */
	reg_sr |= priv->reg_sr;
	if (reg_sr & AT91_IRQ_ERR_FRAME)
		work_done += at91_poll_err(dev, quota - work_done, reg_sr);

	if (work_done < quota) {
		/* enable IRQs for frame errors and all mailboxes >= rx_next */
		u32 reg_ier = AT91_IRQ_ERR_FRAME;
		reg_ier |= get_irq_mb_rx(priv) & ~AT91_MB_MASK(priv->rx_next);

		napi_complete(napi);
		at91_write(priv, AT91_IER, reg_ier);
	}

	return work_done;
}

/*
 * theory of operation:
 *
 * priv->tx_echo holds the number of the oldest can_frame put for
 * transmission into the hardware, but not yet ACKed by the CAN tx
 * complete IRQ.
 *
 * We iterate from priv->tx_echo to priv->tx_next and check if the
 * packet has been transmitted, echo it back to the CAN framework. If
 * we discover a not yet transmitted packet, stop looking for more.
 *
 */
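/*
 * Example (illustrative, at91sam9263 numbers): with tx_echo = 2 and
 * tx_next = 5, the loop below visits tx_echo values 2, 3 and 4, which
 * map to mailboxes 14, 15 and 12; the mailbox index wraps while the
 * counters keep incrementing.
 */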
static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
{
	struct at91_priv *priv = netdev_priv(dev);
	u32 reg_msr;
	unsigned int mb;

	/* masking of reg_sr not needed, already done by at91_irq */

	for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
		mb = get_tx_echo_mb(priv);

		/* no event in mailbox? */
		if (!(reg_sr & (1 << mb)))
			break;

		/* Disable irq for this TX mailbox */
		at91_write(priv, AT91_IDR, 1 << mb);

		/*
		 * only echo if mailbox signals us a transfer
		 * complete (MSR_MRDY). Otherwise it's a transfer
		 * abort. "can_bus_off()" takes care of the skbs
		 * parked in the echo queue.
		 */
		reg_msr = at91_read(priv, AT91_MSR(mb));
		if (likely(reg_msr & AT91_MSR_MRDY &&
			   ~reg_msr & AT91_MSR_MABT)) {
			/* _NOTE_: subtract the get_mb_tx_first() offset from mb! */
			can_get_echo_skb(dev, mb - get_mb_tx_first(priv));
			dev->stats.tx_packets++;
			can_led_event(dev, CAN_LED_EVENT_TX);
		}
	}

	/*
	 * restart queue if we don't have a wrap around but restart if
	 * we get a TX int for the last can frame directly before a
	 * wrap around.
	 */
	if ((priv->tx_next & get_next_mask(priv)) != 0 ||
	    (priv->tx_echo & get_next_mask(priv)) == 0)
		netif_wake_queue(dev);
}

static void at91_irq_err_state(struct net_device *dev,
		struct can_frame *cf, enum can_state new_state)
{
	struct at91_priv *priv = netdev_priv(dev);
	u32 reg_idr = 0, reg_ier = 0;
	struct can_berr_counter bec;

	at91_get_berr_counter(dev, &bec);

	switch (priv->can.state) {
	case CAN_STATE_ERROR_ACTIVE:
		/*
		 * from: ERROR_ACTIVE
		 * to  : ERROR_WARNING, ERROR_PASSIVE, BUS_OFF
		 * =>  : there was a warning int
		 */
		if (new_state >= CAN_STATE_ERROR_WARNING &&
		    new_state <= CAN_STATE_BUS_OFF) {
			netdev_dbg(dev, "Error Warning IRQ\n");
			priv->can.can_stats.error_warning++;

			cf->can_id |= CAN_ERR_CRTL;
			cf->data[1] = (bec.txerr > bec.rxerr) ?
				CAN_ERR_CRTL_TX_WARNING :
				CAN_ERR_CRTL_RX_WARNING;
		}
	case CAN_STATE_ERROR_WARNING:	/* fallthrough */
		/*
		 * from: ERROR_ACTIVE, ERROR_WARNING
		 * to  : ERROR_PASSIVE, BUS_OFF
		 * =>  : error passive int
		 */
		if (new_state >= CAN_STATE_ERROR_PASSIVE &&
		    new_state <= CAN_STATE_BUS_OFF) {
			netdev_dbg(dev, "Error Passive IRQ\n");
			priv->can.can_stats.error_passive++;

			cf->can_id |= CAN_ERR_CRTL;
			cf->data[1] = (bec.txerr > bec.rxerr) ?
				CAN_ERR_CRTL_TX_PASSIVE :
				CAN_ERR_CRTL_RX_PASSIVE;
		}
		break;
	case CAN_STATE_BUS_OFF:
		/*
		 * from: BUS_OFF
		 * to  : ERROR_ACTIVE, ERROR_WARNING, ERROR_PASSIVE
		 */
		if (new_state <= CAN_STATE_ERROR_PASSIVE) {
			cf->can_id |= CAN_ERR_RESTARTED;

			netdev_dbg(dev, "restarted\n");
			priv->can.can_stats.restarts++;

			netif_carrier_on(dev);
			netif_wake_queue(dev);
		}
		break;
	default:
		break;
	}

	/* process state changes depending on the new state */
	switch (new_state) {
	case CAN_STATE_ERROR_ACTIVE:
		/*
		 * actually we want to enable AT91_IRQ_WARN here, but
		 * it screws up the system under certain
		 * circumstances. so just enable AT91_IRQ_ERRP, thus
		 * the "fallthrough"
		 */
		netdev_dbg(dev, "Error Active\n");
		cf->can_id |= CAN_ERR_PROT;
		cf->data[2] = CAN_ERR_PROT_ACTIVE;
	case CAN_STATE_ERROR_WARNING:	/* fallthrough */
		reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_BOFF;
		reg_ier = AT91_IRQ_ERRP;
		break;
	case CAN_STATE_ERROR_PASSIVE:
		reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_ERRP;
		reg_ier = AT91_IRQ_BOFF;
		break;
	case CAN_STATE_BUS_OFF:
		reg_idr = AT91_IRQ_ERRA | AT91_IRQ_ERRP |
			AT91_IRQ_WARN | AT91_IRQ_BOFF;
		reg_ier = 0;

		cf->can_id |= CAN_ERR_BUSOFF;

		netdev_dbg(dev, "bus-off\n");
		netif_carrier_off(dev);
		priv->can.can_stats.bus_off++;

		/* turn off chip, if restart is disabled */
		if (!priv->can.restart_ms) {
			at91_chip_stop(dev, CAN_STATE_BUS_OFF);
			return;
		}
		break;
	default:
		break;
	}

	at91_write(priv, AT91_IDR, reg_idr);
	at91_write(priv, AT91_IER, reg_ier);
}

static int at91_get_state_by_bec(const struct net_device *dev,
				 enum can_state *state)
{
	struct can_berr_counter bec;
	int err;

	err = at91_get_berr_counter(dev, &bec);
	if (err)
		return err;
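
	/*
	 * Derive the state from the error counters, using the usual CAN
	 * error state thresholds: warning at 96, passive at 128 and
	 * bus-off at 256.
	 */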
	if (bec.txerr < 96 && bec.rxerr < 96)
		*state = CAN_STATE_ERROR_ACTIVE;
	else if (bec.txerr < 128 && bec.rxerr < 128)
		*state = CAN_STATE_ERROR_WARNING;
	else if (bec.txerr < 256 && bec.rxerr < 256)
		*state = CAN_STATE_ERROR_PASSIVE;
	else
		*state = CAN_STATE_BUS_OFF;

	return 0;
}

static void at91_irq_err(struct net_device *dev)
{
	struct at91_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	struct can_frame *cf;
	enum can_state new_state;
	u32 reg_sr;
	int err;

	if (at91_is_sam9263(priv)) {
		reg_sr = at91_read(priv, AT91_SR);

		/* we need to look at the unmasked reg_sr */
		if (unlikely(reg_sr & AT91_IRQ_BOFF))
			new_state = CAN_STATE_BUS_OFF;
		else if (unlikely(reg_sr & AT91_IRQ_ERRP))
			new_state = CAN_STATE_ERROR_PASSIVE;
		else if (unlikely(reg_sr & AT91_IRQ_WARN))
			new_state = CAN_STATE_ERROR_WARNING;
		else if (likely(reg_sr & AT91_IRQ_ERRA))
			new_state = CAN_STATE_ERROR_ACTIVE;
		else {
			netdev_err(dev, "BUG! hardware in undefined state\n");
			return;
		}
	} else {
		err = at91_get_state_by_bec(dev, &new_state);
		if (err)
			return;
	}

	/* state hasn't changed */
	if (likely(new_state == priv->can.state))
		return;

	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return;

	at91_irq_err_state(dev, cf, new_state);
	netif_rx(skb);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += cf->can_dlc;

	priv->can.state = new_state;
}

/*
 * interrupt handler
 */
static irqreturn_t at91_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct at91_priv *priv = netdev_priv(dev);
	irqreturn_t handled = IRQ_NONE;
	u32 reg_sr, reg_imr;

	reg_sr = at91_read(priv, AT91_SR);
	reg_imr = at91_read(priv, AT91_IMR);

	/* Ignore masked interrupts */
	reg_sr &= reg_imr;
	if (!reg_sr)
		goto exit;

	handled = IRQ_HANDLED;

	/* Receive or error interrupt? -> napi */
	if (reg_sr & (get_irq_mb_rx(priv) | AT91_IRQ_ERR_FRAME)) {
		/*
		 * The error bits are clear on read,
		 * save for later use.
		 */
		priv->reg_sr = reg_sr;
		at91_write(priv, AT91_IDR,
			   get_irq_mb_rx(priv) | AT91_IRQ_ERR_FRAME);
		napi_schedule(&priv->napi);
	}

	/* Transmission complete interrupt */
	if (reg_sr & get_irq_mb_tx(priv))
		at91_irq_tx(dev, reg_sr);

	at91_irq_err(dev);

 exit:
	return handled;
}

static int at91_open(struct net_device *dev)
{
	struct at91_priv *priv = netdev_priv(dev);
	int err;

	err = clk_prepare_enable(priv->clk);
	if (err)
		return err;

	/* check or determine and set bittime */
	err = open_candev(dev);
	if (err)
		goto out;

	/* register interrupt handler */
	if (request_irq(dev->irq, at91_irq, IRQF_SHARED,
			dev->name, dev)) {
		err = -EAGAIN;
		goto out_close;
	}

	can_led_event(dev, CAN_LED_EVENT_OPEN);

	/* start chip and queuing */
	at91_chip_start(dev);
	napi_enable(&priv->napi);
	netif_start_queue(dev);

	return 0;

 out_close:
	close_candev(dev);
 out:
	clk_disable_unprepare(priv->clk);

	return err;
}

/*
 * stop CAN bus activity
 */
static int at91_close(struct net_device *dev)
{
	struct at91_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	at91_chip_stop(dev, CAN_STATE_STOPPED);

	free_irq(dev->irq, dev);
	clk_disable_unprepare(priv->clk);

	close_candev(dev);

	can_led_event(dev, CAN_LED_EVENT_STOP);

	return 0;
}

static int at91_set_mode(struct net_device *dev, enum can_mode mode)
{
	switch (mode) {
	case CAN_MODE_START:
		at91_chip_start(dev);
		netif_wake_queue(dev);
		break;

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static const struct net_device_ops at91_netdev_ops = {
	.ndo_open	= at91_open,
	.ndo_stop	= at91_close,
	.ndo_start_xmit	= at91_start_xmit,
	.ndo_change_mtu	= can_change_mtu,
};

static ssize_t at91_sysfs_show_mb0_id(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct at91_priv *priv = netdev_priv(to_net_dev(dev));

	if (priv->mb0_id & CAN_EFF_FLAG)
		return snprintf(buf, PAGE_SIZE, "0x%08x\n", priv->mb0_id);
	else
		return snprintf(buf, PAGE_SIZE, "0x%03x\n", priv->mb0_id);
}

static ssize_t at91_sysfs_set_mb0_id(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct net_device *ndev = to_net_dev(dev);
	struct at91_priv *priv = netdev_priv(ndev);
	unsigned long can_id;
	ssize_t ret;
	int err;

	rtnl_lock();

	if (ndev->flags & IFF_UP) {
		ret = -EBUSY;
		goto out;
	}

	err = kstrtoul(buf, 0, &can_id);
	if (err) {
		ret = err;
		goto out;
	}

	if (can_id & CAN_EFF_FLAG)
		can_id &= CAN_EFF_MASK | CAN_EFF_FLAG;
	else
		can_id &= CAN_SFF_MASK;

	priv->mb0_id = can_id;
	ret = count;

 out:
	rtnl_unlock();
	return ret;
}

static DEVICE_ATTR(mb0_id, S_IWUSR | S_IRUGO,
	at91_sysfs_show_mb0_id, at91_sysfs_set_mb0_id);

static struct attribute *at91_sysfs_attrs[] = {
	&dev_attr_mb0_id.attr,
	NULL,
};

static struct attribute_group at91_sysfs_attr_group = {
	.attrs = at91_sysfs_attrs,
};

#if defined(CONFIG_OF)
static const struct of_device_id at91_can_dt_ids[] = {
	{
		.compatible = "atmel,at91sam9x5-can",
		.data = &at91_at91sam9x5_data,
	}, {
		.compatible = "atmel,at91sam9263-can",
		.data = &at91_at91sam9263_data,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, at91_can_dt_ids);
#endif

static const struct at91_devtype_data *at91_can_get_driver_data(struct platform_device *pdev)
{
	if (pdev->dev.of_node) {
		const struct of_device_id *match;

		match = of_match_node(at91_can_dt_ids, pdev->dev.of_node);
		if (!match) {
			dev_err(&pdev->dev, "no matching node found in dtb\n");
			return NULL;
		}
		return (const struct at91_devtype_data *)match->data;
	}
	return (const struct at91_devtype_data *)
		platform_get_device_id(pdev)->driver_data;
}

static int at91_can_probe(struct platform_device *pdev)
{
	const struct at91_devtype_data *devtype_data;
	struct net_device *dev;
	struct at91_priv *priv;
	struct resource *res;
	struct clk *clk;
	void __iomem *addr;
	int err, irq;

	devtype_data = at91_can_get_driver_data(pdev);
	if (!devtype_data) {
		dev_err(&pdev->dev, "no driver data\n");
		err = -ENODEV;
		goto exit;
	}

	clk = clk_get(&pdev->dev, "can_clk");
	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "no clock defined\n");
		err = -ENODEV;
		goto exit;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!res || irq <= 0) {
		err = -ENODEV;
		goto exit_put;
	}

	if (!request_mem_region(res->start,
				resource_size(res),
				pdev->name)) {
		err = -EBUSY;
		goto exit_put;
	}

	addr = ioremap_nocache(res->start, resource_size(res));
	if (!addr) {
		err = -ENOMEM;
		goto exit_release;
	}

	dev = alloc_candev(sizeof(struct at91_priv),
			   1 << devtype_data->tx_shift);
	if (!dev) {
		err = -ENOMEM;
		goto exit_iounmap;
	}

	dev->netdev_ops	= &at91_netdev_ops;
	dev->irq = irq;
	dev->flags |= IFF_ECHO;

	priv = netdev_priv(dev);
	priv->can.clock.freq = clk_get_rate(clk);
	priv->can.bittiming_const = &at91_bittiming_const;
	priv->can.do_set_mode = at91_set_mode;
	priv->can.do_get_berr_counter = at91_get_berr_counter;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
		CAN_CTRLMODE_LISTENONLY;
	priv->reg_base = addr;
	priv->devtype_data = *devtype_data;
	priv->clk = clk;
	priv->pdata = dev_get_platdata(&pdev->dev);
	priv->mb0_id = 0x7ff;

	netif_napi_add(dev, &priv->napi, at91_poll, get_mb_rx_num(priv));

	if (at91_is_sam9263(priv))
		dev->sysfs_groups[0] = &at91_sysfs_attr_group;

	platform_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	err = register_candev(dev);
	if (err) {
		dev_err(&pdev->dev, "registering netdev failed\n");
		goto exit_free;
	}

	devm_can_led_init(dev);

	dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n",
		 priv->reg_base, dev->irq);

	return 0;

 exit_free:
	free_candev(dev);
 exit_iounmap:
	iounmap(addr);
 exit_release:
	release_mem_region(res->start, resource_size(res));
 exit_put:
	clk_put(clk);
 exit:
	return err;
}

static int at91_can_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct at91_priv *priv = netdev_priv(dev);
	struct resource *res;

	unregister_netdev(dev);

	iounmap(priv->reg_base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));

	clk_put(priv->clk);

	free_candev(dev);

	return 0;
}

static const struct platform_device_id at91_can_id_table[] = {
	{
		.name = "at91sam9x5_can",
		.driver_data = (kernel_ulong_t)&at91_at91sam9x5_data,
	}, {
		.name = "at91_can",
		.driver_data = (kernel_ulong_t)&at91_at91sam9263_data,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, at91_can_id_table);

static struct platform_driver at91_can_driver = {
	.probe = at91_can_probe,
	.remove = at91_can_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = of_match_ptr(at91_can_dt_ids),
	},
	.id_table = at91_can_id_table,
};

module_platform_driver(at91_can_driver);

MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION(KBUILD_MODNAME " CAN netdevice driver");