/*
 * Copyright 2017 Broadcom. All Rights Reserved.
 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@broadcom.com
 *
 */
#ifndef BEISCSI_H
#define BEISCSI_H

#include <linux/pci.h>
#include <linux/if_vlan.h>
#include <linux/irq_poll.h>

#define FW_VER_LEN	32	/* length of the firmware version string */
#define MCC_Q_LEN	128	/* MCC WRB (work request) queue depth */
#define MCC_CQ_LEN	256	/* MCC completion queue depth */

#define MAX_MCC_CMD	16	/* max outstanding MCC commands (tags) */

/* BladeEngine Generation numbers */
#define BE_GEN2	2
#define BE_GEN3	3
#define BE_GEN4	4
2009-09-05 06:06:35 +04:00
/* A DMA-coherent memory region: CPU virtual address, bus address, size. */
struct be_dma_mem {
	void *va;	/* CPU (virtual) address */
	dma_addr_t dma;	/* device (bus) address */
	u32 size;	/* region size in bytes */
};
/* Generic BE ring (queue) descriptor backed by one DMA region. */
struct be_queue_info {
	struct be_dma_mem dma_mem;	/* backing DMA memory for the ring */
	u16 len;		/* number of entries; power of two (see MODULO) */
	u16 entry_size;		/* Size of an element in the queue */
	u16 id;			/* queue id */
	u16 tail, head;		/* consumer / producer indices */
	bool created;		/* NOTE(review): presumably set once the queue
				 * is created in firmware — confirm at callers */
	u16 used;		/* Number of valid elements in the queue */
};
/* Wrap @val into [0, @limit); @limit must be a power of two. */
static inline u32 MODULO(u16 val, u16 limit)
{
	u16 mask = limit - 1;

	/* power-of-two check: limit & (limit - 1) must be zero */
	WARN_ON(limit & mask);
	return val & mask;
}
/* Advance a ring index by one, wrapping at @limit (a power of two). */
static inline void index_inc(u16 *index, u16 limit)
{
	u16 next = *index + 1;

	*index = MODULO(next, limit);
}
static inline void * queue_head_node ( struct be_queue_info * q )
{
return q - > dma_mem . va + q - > head * q - > entry_size ;
}
2010-01-05 02:40:46 +03:00
static inline void * queue_get_wrb ( struct be_queue_info * q , unsigned int wrb_num )
{
return q - > dma_mem . va + wrb_num * q - > entry_size ;
}
2009-09-05 06:06:35 +04:00
static inline void * queue_tail_node ( struct be_queue_info * q )
{
return q - > dma_mem . va + q - > tail * q - > entry_size ;
}
/* Advance the head (producer) index of @q by one, wrapping at q->len. */
static inline void queue_head_inc(struct be_queue_info *q)
{
	index_inc(&q->head, q->len);
}
/* Advance the tail (consumer) index of @q by one, wrapping at q->len. */
static inline void queue_tail_inc(struct be_queue_info *q)
{
	index_inc(&q->tail, q->len);
}
/* iSCSI */

struct be_aic_obj {		/* Adaptive interrupt coalescing (AIC) info */
	unsigned long jiffies;	/* NOTE(review): presumably time of the last
				 * AIC sample — confirm at call sites */
	u32 eq_prev;		/* Used to calculate eqe */
	u32 prev_eqd;		/* previously programmed EQ delay */
#define BEISCSI_EQ_DELAY_MIN	0
#define BEISCSI_EQ_DELAY_DEF	32
#define BEISCSI_EQ_DELAY_MAX	128
};
2009-09-05 06:06:35 +04:00
/* Event queue object: one EQ plus its associated CQ and poll context. */
struct be_eq_obj {
	u32 cq_count;			/* NOTE(review): likely CQE accounting
					 * for AIC — confirm at call sites */
	struct be_queue_info q;		/* the event queue ring itself */
	struct beiscsi_hba *phba;	/* owning adapter */
	struct be_queue_info *cq;	/* completion queue tied to this EQ */
	struct work_struct mcc_work;	/* Work Item */
	struct irq_poll iopoll;		/* irq_poll context for this EQ */
};
/* MCC (management command channel) ring pair. */
struct be_mcc_obj {
	struct be_queue_info q;		/* MCC WRB queue */
	struct be_queue_info cq;	/* MCC completion queue */
};
2014-01-29 11:16:39 +04:00
/* Per-tag state for an outstanding MCC command. */
struct beiscsi_mcc_tag_state {
	unsigned long tag_state;	/* bit flags, see MCC_TAG_STATE_* */
#define MCC_TAG_STATE_RUNNING	0
#define MCC_TAG_STATE_TIMEOUT	1
#define MCC_TAG_STATE_ASYNC	2
#define MCC_TAG_STATE_IGNORE	3
	/* completion callback invoked with (phba, tag) */
	void (*cbfn)(struct beiscsi_hba *, unsigned int);
	struct be_dma_mem tag_mem_state;	/* DMA memory tied to this tag */
};
2009-09-05 06:06:35 +04:00
/* Controller context: BAR mappings, mailbox, and MCC tag bookkeeping. */
struct be_ctrl_info {
	u8 __iomem *csr;	/* control/status register BAR */
	u8 __iomem *db;		/* Door Bell */
	u8 __iomem *pcicfg;	/* PCI config space */
	struct pci_dev *pdev;

	/* Mbox used for cmd request/response */
	struct mutex mbox_lock;	/* For serializing mbox cmds to BE card */
	struct be_dma_mem mbox_mem;
	/* Mbox mem is adjusted to align to 16 bytes. The allocated addr
	 * is stored for freeing purpose */
	struct be_dma_mem mbox_mem_alloced;

	/* MCC Rings */
	struct be_mcc_obj mcc_obj;
	spinlock_t mcc_lock;	/* For serializing mcc cmds to BE card */

	/* Per-tag wait queues; tags run 1..MAX_MCC_CMD, hence the +1 */
	wait_queue_head_t mcc_wait[MAX_MCC_CMD + 1];
	unsigned int mcc_tag[MAX_MCC_CMD];		/* tag pool */
	unsigned int mcc_tag_status[MAX_MCC_CMD + 1];	/* status per tag */
	unsigned short mcc_alloc_index;	/* next slot to allocate a tag from */
	unsigned short mcc_free_index;	/* next slot to return a tag to */
	unsigned int mcc_tag_available;	/* count of free tags */
	struct beiscsi_mcc_tag_state ptag_state[MAX_MCC_CMD + 1];
};
#include "be_cmds.h"

/* WRB index mask for MCC_Q_LEN queue entries */
#define MCC_Q_WRB_IDX_MASK	CQE_STATUS_WRB_MASK
#define MCC_Q_WRB_IDX_SHIFT	CQE_STATUS_WRB_SHIFT
/* TAG is from 1...MAX_MCC_CMD, MASK includes MAX_MCC_CMD */
#define MCC_Q_CMD_TAG_MASK	((MAX_MCC_CMD << 1) - 1)

/* Hardware page size is fixed at 4K regardless of the host PAGE_SIZE. */
#define PAGE_SHIFT_4K	12
#define PAGE_SIZE_4K	(1 << PAGE_SHIFT_4K)
/* Returns number of pages spanned by the data starting at the given addr */
#define PAGES_4K_SPANNED(_address, size)				\
		((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) +	\
			(size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))

/* Returns bit offset within a DWORD of a bitfield */
#define AMAP_BIT_OFFSET(_struct, field)					\
		(((size_t)&(((_struct *)0)->field)) % 32)
/* Returns the bit mask of the field that is NOT shifted into location. */
static inline u32 amap_mask(u32 bitsize)
{
	/* (1 << 32) is undefined, so a full-width field is special-cased */
	if (bitsize == 32)
		return 0xFFFFFFFF;
	return (1 << bitsize) - 1;
}
/* Read-modify-write @value into the bitfield at (dw_offset, offset, mask). */
static inline void amap_set(void *ptr, u32 dw_offset, u32 mask,
			    u32 offset, u32 value)
{
	u32 *dw = (u32 *)ptr + dw_offset;

	/* clear the field, then merge in the masked new value */
	*dw = (*dw & ~(mask << offset)) | ((value & mask) << offset);
}

/* Set a named bitfield of an AMAP-described structure. */
#define AMAP_SET_BITS(_struct, field, ptr, val)				\
		amap_set(ptr,						\
			offsetof(_struct, field)/32,			\
			amap_mask(sizeof(((_struct *)0)->field)),	\
			AMAP_BIT_OFFSET(_struct, field),		\
			val)
/* Extract the bitfield at (dw_offset, offset, mask) from @ptr. */
static inline u32 amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset)
{
	u32 word = ((u32 *)ptr)[dw_offset];

	return (word >> offset) & mask;
}

/* Get a named bitfield of an AMAP-described structure. */
#define AMAP_GET_BITS(_struct, field, ptr)				\
		amap_get(ptr,						\
			offsetof(_struct, field)/32,			\
			amap_mask(sizeof(((_struct *)0)->field)),	\
			AMAP_BIT_OFFSET(_struct, field))
#define be_dws_cpu_to_le(wrb, len)	swap_dws(wrb, len)
#define be_dws_le_to_cpu(wrb, len)	swap_dws(wrb, len)

/*
 * Byte-swap an array of 32-bit words in place on big-endian hosts;
 * a no-op on little-endian builds.  @len is in bytes and must be a
 * non-negative multiple of 4.
 */
static inline void swap_dws(void *wrb, int len)
{
#ifdef __BIG_ENDIAN
	u32 *dw = wrb;

	WARN_ON(len % 4);
	/* while (not do/while) so len == 0 cannot underflow len and
	 * walk the pointer off the end of the buffer */
	while (len > 0) {
		*dw = cpu_to_le32(*dw);
		dw++;
		len -= 4;
	}
#endif /* __BIG_ENDIAN */
}
# endif /* BEISCSI_H */