sfc: Copy shared files needed for Siena (part 2)
These are the files starting with m through w. No changes are made to them
here; those will be done in subsequent commits.

Signed-off-by: Martin Habets <habetsm.xilinx@gmail.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 6e173d3b4a
commit d48523cb88
2375	drivers/net/ethernet/sfc/siena/mcdi.c (new file)
File diff suppressed because it is too large
388	drivers/net/ethernet/sfc/siena/mcdi.h (new file)
@@ -0,0 +1,388 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2008-2013 Solarflare Communications Inc.
 */

#ifndef EFX_MCDI_H
#define EFX_MCDI_H

/**
 * enum efx_mcdi_state - MCDI request handling state
 * @MCDI_STATE_QUIESCENT: No pending MCDI requests. If the caller holds the
 *	mcdi @iface_lock then they are able to move to %MCDI_STATE_RUNNING
 * @MCDI_STATE_RUNNING_SYNC: There is a synchronous MCDI request pending.
 *	Only the thread that moved into this state is allowed to move out of it.
 * @MCDI_STATE_RUNNING_ASYNC: There is an asynchronous MCDI request pending.
 * @MCDI_STATE_PROXY_WAIT: An MCDI request has completed with a response that
 *	indicates we must wait for a proxy try again message.
 * @MCDI_STATE_COMPLETED: An MCDI request has completed, but the owning thread
 *	has not yet consumed the result. For all other threads, equivalent to
 *	%MCDI_STATE_RUNNING.
 */
enum efx_mcdi_state {
	MCDI_STATE_QUIESCENT,
	MCDI_STATE_RUNNING_SYNC,
	MCDI_STATE_RUNNING_ASYNC,
	MCDI_STATE_PROXY_WAIT,
	MCDI_STATE_COMPLETED,
};

/**
 * enum efx_mcdi_mode - MCDI transaction mode
 * @MCDI_MODE_POLL: poll for MCDI completion, until timeout
 * @MCDI_MODE_EVENTS: wait for an mcdi_event. On timeout, poll once
 * @MCDI_MODE_FAIL: we think MCDI is dead, so fail-fast all calls
 */
enum efx_mcdi_mode {
	MCDI_MODE_POLL,
	MCDI_MODE_EVENTS,
	MCDI_MODE_FAIL,
};

/**
 * struct efx_mcdi_iface - MCDI protocol context
 * @efx: The associated NIC.
 * @state: Request handling state. Waited for by @wq.
 * @mode: Poll for mcdi completion, or wait for an mcdi_event.
 * @wq: Wait queue for threads waiting for @state != %MCDI_STATE_RUNNING
 * @new_epoch: Indicates start of day or start of MC reboot recovery
 * @iface_lock: Serialises access to @seqno, @credits and response metadata
 * @seqno: The next sequence number to use for mcdi requests.
 * @credits: Number of spurious MCDI completion events allowed before we
 *	trigger a fatal error
 * @resprc: Response error/success code (Linux numbering)
 * @resp_hdr_len: Response header length
 * @resp_data_len: Response data (SDU or error) length
 * @async_lock: Serialises access to @async_list while event processing is
 *	enabled
 * @async_list: Queue of asynchronous requests
 * @async_timer: Timer for asynchronous request timeout
 * @logging_buffer: buffer that may be used to build MCDI tracing messages
 * @logging_enabled: whether to trace MCDI
 * @proxy_rx_handle: Most recently received proxy authorisation handle
 * @proxy_rx_status: Status of most recent proxy authorisation
 * @proxy_rx_wq: Wait queue for updates to proxy_rx_handle
 */
struct efx_mcdi_iface {
	struct efx_nic *efx;
	enum efx_mcdi_state state;
	enum efx_mcdi_mode mode;
	wait_queue_head_t wq;
	spinlock_t iface_lock;
	bool new_epoch;
	unsigned int credits;
	unsigned int seqno;
	int resprc;
	int resprc_raw;
	size_t resp_hdr_len;
	size_t resp_data_len;
	spinlock_t async_lock;
	struct list_head async_list;
	struct timer_list async_timer;
#ifdef CONFIG_SFC_MCDI_LOGGING
	char *logging_buffer;
	bool logging_enabled;
#endif
	unsigned int proxy_rx_handle;
	int proxy_rx_status;
	wait_queue_head_t proxy_rx_wq;
};

struct efx_mcdi_mon {
	struct efx_buffer dma_buf;
	struct mutex update_lock;
	unsigned long last_update;
	struct device *device;
	struct efx_mcdi_mon_attribute *attrs;
	struct attribute_group group;
	const struct attribute_group *groups[2];
	unsigned int n_attrs;
};

struct efx_mcdi_mtd_partition {
	struct efx_mtd_partition common;
	bool updating;
	u16 nvram_type;
	u16 fw_subtype;
};

#define to_efx_mcdi_mtd_partition(mtd)				\
	container_of(mtd, struct efx_mcdi_mtd_partition, common.mtd)

/**
 * struct efx_mcdi_data - extra state for NICs that implement MCDI
 * @iface: Interface/protocol state
 * @hwmon: Hardware monitor state
 * @fn_flags: Flags for this function, as returned by %MC_CMD_DRV_ATTACH.
 */
struct efx_mcdi_data {
	struct efx_mcdi_iface iface;
#ifdef CONFIG_SFC_MCDI_MON
	struct efx_mcdi_mon hwmon;
#endif
	u32 fn_flags;
};

static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
{
	EFX_WARN_ON_PARANOID(!efx->mcdi);
	return &efx->mcdi->iface;
}

#ifdef CONFIG_SFC_MCDI_MON
static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
{
	EFX_WARN_ON_PARANOID(!efx->mcdi);
	return &efx->mcdi->hwmon;
}
#endif

int efx_mcdi_init(struct efx_nic *efx);
void efx_mcdi_detach(struct efx_nic *efx);
void efx_mcdi_fini(struct efx_nic *efx);

int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf,
		 size_t inlen, efx_dword_t *outbuf, size_t outlen,
		 size_t *outlen_actual);
int efx_mcdi_rpc_quiet(struct efx_nic *efx, unsigned cmd,
		       const efx_dword_t *inbuf, size_t inlen,
		       efx_dword_t *outbuf, size_t outlen,
		       size_t *outlen_actual);

int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
		       const efx_dword_t *inbuf, size_t inlen);
int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
			efx_dword_t *outbuf, size_t outlen,
			size_t *outlen_actual);
int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd,
			      size_t inlen, efx_dword_t *outbuf,
			      size_t outlen, size_t *outlen_actual);

typedef void efx_mcdi_async_completer(struct efx_nic *efx,
				      unsigned long cookie, int rc,
				      efx_dword_t *outbuf,
				      size_t outlen_actual);
int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
		       const efx_dword_t *inbuf, size_t inlen, size_t outlen,
		       efx_mcdi_async_completer *complete,
		       unsigned long cookie);
int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd,
			     const efx_dword_t *inbuf, size_t inlen,
			     size_t outlen,
			     efx_mcdi_async_completer *complete,
			     unsigned long cookie);

void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd,
			    size_t inlen, efx_dword_t *outbuf,
			    size_t outlen, int rc);

int efx_mcdi_poll_reboot(struct efx_nic *efx);
void efx_mcdi_mode_poll(struct efx_nic *efx);
void efx_mcdi_mode_event(struct efx_nic *efx);
void efx_mcdi_flush_async(struct efx_nic *efx);

void efx_mcdi_process_event(struct efx_channel *channel, efx_qword_t *event);
void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);

/* We expect that 16- and 32-bit fields in MCDI requests and responses
 * are appropriately aligned, but 64-bit fields are only
 * 32-bit-aligned.  Also, on Siena we must copy to the MC shared
 * memory strictly 32 bits at a time, so add any necessary padding.
 */
#define MCDI_TX_BUF_LEN(_len) DIV_ROUND_UP((_len), 4)
#define _MCDI_DECLARE_BUF(_name, _len)					\
	efx_dword_t _name[DIV_ROUND_UP(_len, 4)]
#define MCDI_DECLARE_BUF(_name, _len)					\
	_MCDI_DECLARE_BUF(_name, _len) = {{{0}}}
#define MCDI_DECLARE_BUF_ERR(_name)					\
	MCDI_DECLARE_BUF(_name, 8)
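/* Usage sketch (illustrative editorial comment, not part of the original
 * file): a caller declares a zero-initialised, dword-padded request buffer
 * and fills fields by name before issuing the request, e.g.
 *
 *	MCDI_DECLARE_BUF(inbuf, MC_CMD_LOG_CTRL_IN_LEN);
 *
 *	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST,
 *		       MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ);
 *	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq);
 *	rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf),
 *			  NULL, 0, NULL);
 *
 * This mirrors efx_mcdi_log_ctrl() in mcdi.c; the MC_CMD_LOG_CTRL field
 * names come from mcdi_pcol.h.
 */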
#define _MCDI_PTR(_buf, _offset)					\
	((u8 *)(_buf) + (_offset))
#define MCDI_PTR(_buf, _field)						\
	_MCDI_PTR(_buf, MC_CMD_ ## _field ## _OFST)
#define _MCDI_CHECK_ALIGN(_ofst, _align)				\
	((_ofst) + BUILD_BUG_ON_ZERO((_ofst) & (_align - 1)))
#define _MCDI_DWORD(_buf, _field)					\
	((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2))

#define MCDI_BYTE(_buf, _field)						\
	((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1),	\
	 *MCDI_PTR(_buf, _field))
#define MCDI_WORD(_buf, _field)						\
	((u16)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) +	\
	 le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
#define MCDI_SET_DWORD(_buf, _field, _value)				\
	EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0, _value)
#define MCDI_DWORD(_buf, _field)					\
	EFX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0)
#define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1)		\
	EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field),		\
			     MC_CMD_ ## _name1, _value1)
#define MCDI_POPULATE_DWORD_2(_buf, _field, _name1, _value1,		\
			      _name2, _value2)				\
	EFX_POPULATE_DWORD_2(*_MCDI_DWORD(_buf, _field),		\
			     MC_CMD_ ## _name1, _value1,		\
			     MC_CMD_ ## _name2, _value2)
#define MCDI_POPULATE_DWORD_3(_buf, _field, _name1, _value1,		\
			      _name2, _value2, _name3, _value3)		\
	EFX_POPULATE_DWORD_3(*_MCDI_DWORD(_buf, _field),		\
			     MC_CMD_ ## _name1, _value1,		\
			     MC_CMD_ ## _name2, _value2,		\
			     MC_CMD_ ## _name3, _value3)
#define MCDI_POPULATE_DWORD_4(_buf, _field, _name1, _value1,		\
			      _name2, _value2, _name3, _value3,		\
			      _name4, _value4)				\
	EFX_POPULATE_DWORD_4(*_MCDI_DWORD(_buf, _field),		\
			     MC_CMD_ ## _name1, _value1,		\
			     MC_CMD_ ## _name2, _value2,		\
			     MC_CMD_ ## _name3, _value3,		\
			     MC_CMD_ ## _name4, _value4)
#define MCDI_POPULATE_DWORD_5(_buf, _field, _name1, _value1,		\
			      _name2, _value2, _name3, _value3,		\
			      _name4, _value4, _name5, _value5)		\
	EFX_POPULATE_DWORD_5(*_MCDI_DWORD(_buf, _field),		\
			     MC_CMD_ ## _name1, _value1,		\
			     MC_CMD_ ## _name2, _value2,		\
			     MC_CMD_ ## _name3, _value3,		\
			     MC_CMD_ ## _name4, _value4,		\
			     MC_CMD_ ## _name5, _value5)
#define MCDI_POPULATE_DWORD_6(_buf, _field, _name1, _value1,		\
			      _name2, _value2, _name3, _value3,		\
			      _name4, _value4, _name5, _value5,		\
			      _name6, _value6)				\
	EFX_POPULATE_DWORD_6(*_MCDI_DWORD(_buf, _field),		\
			     MC_CMD_ ## _name1, _value1,		\
			     MC_CMD_ ## _name2, _value2,		\
			     MC_CMD_ ## _name3, _value3,		\
			     MC_CMD_ ## _name4, _value4,		\
			     MC_CMD_ ## _name5, _value5,		\
			     MC_CMD_ ## _name6, _value6)
#define MCDI_POPULATE_DWORD_7(_buf, _field, _name1, _value1,		\
			      _name2, _value2, _name3, _value3,		\
			      _name4, _value4, _name5, _value5,		\
			      _name6, _value6, _name7, _value7)		\
	EFX_POPULATE_DWORD_7(*_MCDI_DWORD(_buf, _field),		\
			     MC_CMD_ ## _name1, _value1,		\
			     MC_CMD_ ## _name2, _value2,		\
			     MC_CMD_ ## _name3, _value3,		\
			     MC_CMD_ ## _name4, _value4,		\
			     MC_CMD_ ## _name5, _value5,		\
			     MC_CMD_ ## _name6, _value6,		\
			     MC_CMD_ ## _name7, _value7)
#define MCDI_SET_QWORD(_buf, _field, _value)				\
	do {								\
		EFX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[0],	\
				     EFX_DWORD_0, (u32)(_value));	\
		EFX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[1],	\
				     EFX_DWORD_0, (u64)(_value) >> 32);	\
	} while (0)
#define MCDI_QWORD(_buf, _field)					\
	(EFX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[0], EFX_DWORD_0) |	\
	(u64)EFX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[1], EFX_DWORD_0) << 32)
#define MCDI_FIELD(_ptr, _type, _field)					\
	EFX_EXTRACT_DWORD(						\
		*(efx_dword_t *)					\
		_MCDI_PTR(_ptr, MC_CMD_ ## _type ## _ ## _field ## _OFST & ~3),\
		MC_CMD_ ## _type ## _ ## _field ## _LBN & 0x1f,		\
		(MC_CMD_ ## _type ## _ ## _field ## _LBN & 0x1f) +	\
		MC_CMD_ ## _type ## _ ## _field ## _WIDTH - 1)

#define _MCDI_ARRAY_PTR(_buf, _field, _index, _align)			\
	(_MCDI_PTR(_buf, _MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, _align))\
	 + (_index) * _MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _LEN, _align))
#define MCDI_DECLARE_STRUCT_PTR(_name)					\
	efx_dword_t *_name
#define MCDI_ARRAY_STRUCT_PTR(_buf, _field, _index)			\
	((efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4))
#define MCDI_VAR_ARRAY_LEN(_len, _field)				\
	min_t(size_t, MC_CMD_ ## _field ## _MAXNUM,			\
	      ((_len) - MC_CMD_ ## _field ## _OFST) / MC_CMD_ ## _field ## _LEN)
#define MCDI_ARRAY_WORD(_buf, _field, _index)				\
	(BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) +		\
	 le16_to_cpu(*(__force const __le16 *)				\
		     _MCDI_ARRAY_PTR(_buf, _field, _index, 2)))
#define _MCDI_ARRAY_DWORD(_buf, _field, _index)				\
	(BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 4) +		\
	 (efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4))
#define MCDI_SET_ARRAY_DWORD(_buf, _field, _index, _value)		\
	EFX_SET_DWORD_FIELD(*_MCDI_ARRAY_DWORD(_buf, _field, _index),	\
			    EFX_DWORD_0, _value)
#define MCDI_ARRAY_DWORD(_buf, _field, _index)				\
	EFX_DWORD_FIELD(*_MCDI_ARRAY_DWORD(_buf, _field, _index), EFX_DWORD_0)
#define _MCDI_ARRAY_QWORD(_buf, _field, _index)				\
	(BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 8) +		\
	 (efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4))
#define MCDI_SET_ARRAY_QWORD(_buf, _field, _index, _value)		\
	do {								\
		EFX_SET_DWORD_FIELD(_MCDI_ARRAY_QWORD(_buf, _field, _index)[0],\
				    EFX_DWORD_0, (u32)(_value));	\
		EFX_SET_DWORD_FIELD(_MCDI_ARRAY_QWORD(_buf, _field, _index)[1],\
				    EFX_DWORD_0, (u64)(_value) >> 32);	\
	} while (0)
#define MCDI_ARRAY_FIELD(_buf, _field1, _type, _index, _field2)		\
	MCDI_FIELD(MCDI_ARRAY_STRUCT_PTR(_buf, _field1, _index),	\
		   _type ## _TYPEDEF, _field2)

#define MCDI_EVENT_FIELD(_ev, _field)			\
	EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)

#define MCDI_CAPABILITY(field)						\
	MC_CMD_GET_CAPABILITIES_V8_OUT_ ## field ## _LBN

#define MCDI_CAPABILITY_OFST(field)					\
	MC_CMD_GET_CAPABILITIES_V8_OUT_ ## field ## _OFST

#define efx_has_cap(efx, field)						\
	efx->type->check_caps(efx,					\
			      MCDI_CAPABILITY(field),			\
			      MCDI_CAPABILITY_OFST(field))
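/* Usage sketch (illustrative editorial comment, not part of the original
 * file): callers test a datapath capability flag by its short name, e.g.
 *
 *	if (efx_has_cap(efx, TX_TSO_V2_ENCAP))
 *		...
 *
 * which expands into a check_caps() call with the corresponding
 * MC_CMD_GET_CAPABILITIES_V8_OUT_* _LBN/_OFST values; the V8 response
 * layout is a superset of the earlier GET_CAPABILITIES versions.
 */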

void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
			   u16 *fw_subtype_list, u32 *capabilities);
int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq);
int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
			size_t *size_out, size_t *erase_size_out,
			bool *protected_out);
int efx_new_mcdi_nvram_test_all(struct efx_nic *efx);
int efx_mcdi_nvram_test_all(struct efx_nic *efx);
int efx_mcdi_handle_assertion(struct efx_nic *efx);
int efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac,
				  int *id_out);
int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
int efx_mcdi_flush_rxqs(struct efx_nic *efx);
void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
void efx_mcdi_mac_start_stats(struct efx_nic *efx);
void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
void efx_mcdi_mac_pull_stats(struct efx_nic *efx);
enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled,
			    unsigned int *flags);
int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
			     unsigned int *enabled_out);

#ifdef CONFIG_SFC_MCDI_MON
int efx_mcdi_mon_probe(struct efx_nic *efx);
void efx_mcdi_mon_remove(struct efx_nic *efx);
#else
static inline int efx_mcdi_mon_probe(struct efx_nic *efx) { return 0; }
static inline void efx_mcdi_mon_remove(struct efx_nic *efx) {}
#endif

#ifdef CONFIG_SFC_MTD
int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, size_t len,
		      size_t *retlen, u8 *buffer);
int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len);
int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start, size_t len,
		       size_t *retlen, const u8 *buffer);
int efx_mcdi_mtd_sync(struct mtd_info *mtd);
void efx_mcdi_mtd_rename(struct efx_mtd_partition *part);
#endif

#endif /* EFX_MCDI_H */
531	drivers/net/ethernet/sfc/siena/mcdi_mon.c (new file)
@@ -0,0 +1,531 @@
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2011-2013 Solarflare Communications Inc.
 */

#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/hwmon.h>
#include <linux/stat.h>

#include "net_driver.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "nic.h"

enum efx_hwmon_type {
	EFX_HWMON_UNKNOWN,
	EFX_HWMON_TEMP,		/* temperature */
	EFX_HWMON_COOL,		/* cooling device, probably a heatsink */
	EFX_HWMON_IN,		/* voltage */
	EFX_HWMON_CURR,		/* current */
	EFX_HWMON_POWER,	/* power */
	EFX_HWMON_TYPES_COUNT
};

static const char *const efx_hwmon_unit[EFX_HWMON_TYPES_COUNT] = {
	[EFX_HWMON_TEMP]  = " degC",
	[EFX_HWMON_COOL]  = " rpm", /* though nonsense for a heatsink */
	[EFX_HWMON_IN]	  = " mV",
	[EFX_HWMON_CURR]  = " mA",
	[EFX_HWMON_POWER] = " W",
};

static const struct {
	const char *label;
	enum efx_hwmon_type hwmon_type;
	int port;
} efx_mcdi_sensor_type[] = {
#define SENSOR(name, label, hwmon_type, port)				\
	[MC_CMD_SENSOR_##name] = { label, EFX_HWMON_ ## hwmon_type, port }
	SENSOR(CONTROLLER_TEMP,		"Controller board temp.",   TEMP, -1),
	SENSOR(PHY_COMMON_TEMP,		"PHY temp.",		    TEMP, -1),
	SENSOR(CONTROLLER_COOLING,	"Controller heat sink",	    COOL, -1),
	SENSOR(PHY0_TEMP,		"PHY temp.",		    TEMP,  0),
	SENSOR(PHY0_COOLING,		"PHY heat sink",	    COOL,  0),
	SENSOR(PHY1_TEMP,		"PHY temp.",		    TEMP,  1),
	SENSOR(PHY1_COOLING,		"PHY heat sink",	    COOL,  1),
	SENSOR(IN_1V0,			"1.0V supply",		    IN,	  -1),
	SENSOR(IN_1V2,			"1.2V supply",		    IN,	  -1),
	SENSOR(IN_1V8,			"1.8V supply",		    IN,	  -1),
	SENSOR(IN_2V5,			"2.5V supply",		    IN,	  -1),
	SENSOR(IN_3V3,			"3.3V supply",		    IN,	  -1),
	SENSOR(IN_12V0,			"12.0V supply",		    IN,	  -1),
	SENSOR(IN_1V2A,			"1.2V analogue supply",	    IN,	  -1),
	SENSOR(IN_VREF,			"Ref. voltage",		    IN,	  -1),
	SENSOR(OUT_VAOE,		"AOE FPGA supply",	    IN,	  -1),
	SENSOR(AOE_TEMP,		"AOE FPGA temp.",	    TEMP, -1),
	SENSOR(PSU_AOE_TEMP,		"AOE regulator temp.",	    TEMP, -1),
	SENSOR(PSU_TEMP,		"Controller regulator temp.",
								    TEMP, -1),
	SENSOR(FAN_0,			"Fan 0",		    COOL, -1),
	SENSOR(FAN_1,			"Fan 1",		    COOL, -1),
	SENSOR(FAN_2,			"Fan 2",		    COOL, -1),
	SENSOR(FAN_3,			"Fan 3",		    COOL, -1),
	SENSOR(FAN_4,			"Fan 4",		    COOL, -1),
	SENSOR(IN_VAOE,			"AOE input supply",	    IN,	  -1),
	SENSOR(OUT_IAOE,		"AOE output current",	    CURR, -1),
	SENSOR(IN_IAOE,			"AOE input current",	    CURR, -1),
	SENSOR(NIC_POWER,		"Board power use",	    POWER, -1),
	SENSOR(IN_0V9,			"0.9V supply",		    IN,	  -1),
	SENSOR(IN_I0V9,			"0.9V supply current",	    CURR, -1),
	SENSOR(IN_I1V2,			"1.2V supply current",	    CURR, -1),
	SENSOR(IN_0V9_ADC,		"0.9V supply (ext. ADC)",   IN,	  -1),
	SENSOR(CONTROLLER_2_TEMP,	"Controller board temp. 2", TEMP, -1),
	SENSOR(VREG_INTERNAL_TEMP,	"Regulator die temp.",	    TEMP, -1),
	SENSOR(VREG_0V9_TEMP,		"0.9V regulator temp.",	    TEMP, -1),
	SENSOR(VREG_1V2_TEMP,		"1.2V regulator temp.",	    TEMP, -1),
	SENSOR(CONTROLLER_VPTAT,
				"Controller PTAT voltage (int. ADC)", IN, -1),
	SENSOR(CONTROLLER_INTERNAL_TEMP,
				"Controller die temp. (int. ADC)", TEMP, -1),
	SENSOR(CONTROLLER_VPTAT_EXTADC,
				"Controller PTAT voltage (ext. ADC)", IN, -1),
	SENSOR(CONTROLLER_INTERNAL_TEMP_EXTADC,
				"Controller die temp. (ext. ADC)", TEMP, -1),
	SENSOR(AMBIENT_TEMP,		"Ambient temp.",	    TEMP, -1),
	SENSOR(AIRFLOW,			"Air flow raw",		    IN,	  -1),
	SENSOR(VDD08D_VSS08D_CSR,	"0.9V die (int. ADC)",	    IN,	  -1),
	SENSOR(VDD08D_VSS08D_CSR_EXTADC, "0.9V die (ext. ADC)",	    IN,	  -1),
	SENSOR(HOTPOINT_TEMP,	"Controller board temp. (hotpoint)", TEMP, -1),
#undef SENSOR
};

static const char *const sensor_status_names[] = {
	[MC_CMD_SENSOR_STATE_OK] = "OK",
	[MC_CMD_SENSOR_STATE_WARNING] = "Warning",
	[MC_CMD_SENSOR_STATE_FATAL] = "Fatal",
	[MC_CMD_SENSOR_STATE_BROKEN] = "Device failure",
	[MC_CMD_SENSOR_STATE_NO_READING] = "No reading",
};

void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
{
	unsigned int type, state, value;
	enum efx_hwmon_type hwmon_type = EFX_HWMON_UNKNOWN;
	const char *name = NULL, *state_txt, *unit;

	type = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_MONITOR);
	state = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_STATE);
	value = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_VALUE);

	/* Deal gracefully with the board having more drivers than we
	 * know about, but do not expect new sensor states. */
	if (type < ARRAY_SIZE(efx_mcdi_sensor_type)) {
		name = efx_mcdi_sensor_type[type].label;
		hwmon_type = efx_mcdi_sensor_type[type].hwmon_type;
	}
	if (!name)
		name = "No sensor name available";
	EFX_WARN_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names));
	state_txt = sensor_status_names[state];
	EFX_WARN_ON_PARANOID(hwmon_type >= EFX_HWMON_TYPES_COUNT);
	unit = efx_hwmon_unit[hwmon_type];
	if (!unit)
		unit = "";

	netif_err(efx, hw, efx->net_dev,
		  "Sensor %d (%s) reports condition '%s' for value %d%s\n",
		  type, name, state_txt, value, unit);
}

#ifdef CONFIG_SFC_MCDI_MON

struct efx_mcdi_mon_attribute {
	struct device_attribute dev_attr;
	unsigned int index;
	unsigned int type;
	enum efx_hwmon_type hwmon_type;
	unsigned int limit_value;
	char name[12];
};

static int efx_mcdi_mon_update(struct efx_nic *efx)
{
	struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
	MCDI_DECLARE_BUF(inbuf, MC_CMD_READ_SENSORS_EXT_IN_LEN);
	int rc;

	MCDI_SET_QWORD(inbuf, READ_SENSORS_EXT_IN_DMA_ADDR,
		       hwmon->dma_buf.dma_addr);
	MCDI_SET_DWORD(inbuf, READ_SENSORS_EXT_IN_LENGTH, hwmon->dma_buf.len);

	rc = efx_mcdi_rpc(efx, MC_CMD_READ_SENSORS,
			  inbuf, sizeof(inbuf), NULL, 0, NULL);
	if (rc == 0)
		hwmon->last_update = jiffies;
	return rc;
}

static int efx_mcdi_mon_get_entry(struct device *dev, unsigned int index,
				  efx_dword_t *entry)
{
	struct efx_nic *efx = dev_get_drvdata(dev->parent);
	struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
	int rc;

	BUILD_BUG_ON(MC_CMD_READ_SENSORS_OUT_LEN != 0);

	mutex_lock(&hwmon->update_lock);

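	/* (Explanatory editorial note, not in the original file: the
	 * jiffies comparison below uses time_before(), which remains
	 * correct across jiffies wraparound.)
	 */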
	/* Use cached value if last update was < 1 s ago */
	if (time_before(jiffies, hwmon->last_update + HZ))
		rc = 0;
	else
		rc = efx_mcdi_mon_update(efx);

	/* Copy out the requested entry */
	*entry = ((efx_dword_t *)hwmon->dma_buf.addr)[index];

	mutex_unlock(&hwmon->update_lock);

	return rc;
}

static ssize_t efx_mcdi_mon_show_value(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct efx_mcdi_mon_attribute *mon_attr =
		container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
	efx_dword_t entry;
	unsigned int value, state;
	int rc;

	rc = efx_mcdi_mon_get_entry(dev, mon_attr->index, &entry);
	if (rc)
		return rc;

	state = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE);
	if (state == MC_CMD_SENSOR_STATE_NO_READING)
		return -EBUSY;

	value = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE);

	switch (mon_attr->hwmon_type) {
	case EFX_HWMON_TEMP:
		/* Convert temperature from degrees to milli-degrees Celsius */
		value *= 1000;
		break;
	case EFX_HWMON_POWER:
		/* Convert power from watts to microwatts */
		value *= 1000000;
		break;
	default:
		/* No conversion needed */
		break;
	}

	return sprintf(buf, "%u\n", value);
}

static ssize_t efx_mcdi_mon_show_limit(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct efx_mcdi_mon_attribute *mon_attr =
		container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
	unsigned int value;

	value = mon_attr->limit_value;

	switch (mon_attr->hwmon_type) {
	case EFX_HWMON_TEMP:
		/* Convert temperature from degrees to milli-degrees Celsius */
		value *= 1000;
		break;
	case EFX_HWMON_POWER:
		/* Convert power from watts to microwatts */
		value *= 1000000;
		break;
	default:
		/* No conversion needed */
		break;
	}

	return sprintf(buf, "%u\n", value);
}

static ssize_t efx_mcdi_mon_show_alarm(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct efx_mcdi_mon_attribute *mon_attr =
		container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
	efx_dword_t entry;
	int state;
	int rc;

	rc = efx_mcdi_mon_get_entry(dev, mon_attr->index, &entry);
	if (rc)
		return rc;

	state = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE);
	return sprintf(buf, "%d\n", state != MC_CMD_SENSOR_STATE_OK);
}

static ssize_t efx_mcdi_mon_show_label(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct efx_mcdi_mon_attribute *mon_attr =
		container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
	return sprintf(buf, "%s\n",
		       efx_mcdi_sensor_type[mon_attr->type].label);
}

static void
efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
		      ssize_t (*reader)(struct device *,
					struct device_attribute *, char *),
		      unsigned int index, unsigned int type,
		      unsigned int limit_value)
{
	struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
	struct efx_mcdi_mon_attribute *attr = &hwmon->attrs[hwmon->n_attrs];

	strlcpy(attr->name, name, sizeof(attr->name));
	attr->index = index;
	attr->type = type;
	if (type < ARRAY_SIZE(efx_mcdi_sensor_type))
		attr->hwmon_type = efx_mcdi_sensor_type[type].hwmon_type;
	else
		attr->hwmon_type = EFX_HWMON_UNKNOWN;
	attr->limit_value = limit_value;
	sysfs_attr_init(&attr->dev_attr.attr);
	attr->dev_attr.attr.name = attr->name;
	attr->dev_attr.attr.mode = 0444;
	attr->dev_attr.show = reader;
	hwmon->group.attrs[hwmon->n_attrs++] = &attr->dev_attr.attr;
}

int efx_mcdi_mon_probe(struct efx_nic *efx)
{
	unsigned int n_temp = 0, n_cool = 0, n_in = 0, n_curr = 0, n_power = 0;
	struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
	MCDI_DECLARE_BUF(inbuf, MC_CMD_SENSOR_INFO_EXT_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_SENSOR_INFO_OUT_LENMAX);
	unsigned int n_pages, n_sensors, n_attrs, page;
	size_t outlen;
	char name[12];
	u32 mask;
	int rc, i, j, type;

	/* Find out how many sensors are present */
	n_sensors = 0;
	page = 0;
	do {
		MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE, page);

		rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO, inbuf, sizeof(inbuf),
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			return rc;
		if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN)
			return -EIO;

		mask = MCDI_DWORD(outbuf, SENSOR_INFO_OUT_MASK);
		n_sensors += hweight32(mask & ~(1 << MC_CMD_SENSOR_PAGE0_NEXT));
		++page;
	} while (mask & (1 << MC_CMD_SENSOR_PAGE0_NEXT));
	n_pages = page;

	/* Don't create a device if there are none */
	if (n_sensors == 0)
		return 0;

	rc = efx_nic_alloc_buffer(
		efx, &hwmon->dma_buf,
		n_sensors * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN,
		GFP_KERNEL);
	if (rc)
		return rc;

	mutex_init(&hwmon->update_lock);
	efx_mcdi_mon_update(efx);

	/* Allocate space for the maximum possible number of
	 * attributes for this set of sensors:
	 * value, min, max, crit, alarm and label for each sensor.
	 */
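	/* (Illustrative editorial note, not in the original file: for the
	 * first temperature sensor these six attributes are temp1_input,
	 * temp1_min, temp1_max, temp1_crit, temp1_alarm and temp1_label,
	 * matching the snprintf() calls further down.)
	 */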
	n_attrs = 6 * n_sensors;
	hwmon->attrs = kcalloc(n_attrs, sizeof(*hwmon->attrs), GFP_KERNEL);
	if (!hwmon->attrs) {
		rc = -ENOMEM;
		goto fail;
	}
	hwmon->group.attrs = kcalloc(n_attrs + 1, sizeof(struct attribute *),
				     GFP_KERNEL);
	if (!hwmon->group.attrs) {
		rc = -ENOMEM;
		goto fail;
	}

	for (i = 0, j = -1, type = -1; ; i++) {
		enum efx_hwmon_type hwmon_type;
		const char *hwmon_prefix;
		unsigned hwmon_index;
		u16 min1, max1, min2, max2;

		/* Find next sensor type or exit if there is none */
		do {
			type++;

			if ((type % 32) == 0) {
				page = type / 32;
				j = -1;
				if (page == n_pages)
					goto hwmon_register;

				MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE,
					       page);
				rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO,
						  inbuf, sizeof(inbuf),
						  outbuf, sizeof(outbuf),
						  &outlen);
				if (rc)
					goto fail;
				if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN) {
					rc = -EIO;
					goto fail;
				}

				mask = (MCDI_DWORD(outbuf,
						   SENSOR_INFO_OUT_MASK) &
					~(1 << MC_CMD_SENSOR_PAGE0_NEXT));

				/* Check again for short response */
				if (outlen <
				    MC_CMD_SENSOR_INFO_OUT_LEN(hweight32(mask))) {
					rc = -EIO;
					goto fail;
				}
			}
		} while (!(mask & (1 << type % 32)));
		j++;

		if (type < ARRAY_SIZE(efx_mcdi_sensor_type)) {
			hwmon_type = efx_mcdi_sensor_type[type].hwmon_type;

			/* Skip sensors specific to a different port */
			if (hwmon_type != EFX_HWMON_UNKNOWN &&
			    efx_mcdi_sensor_type[type].port >= 0 &&
			    efx_mcdi_sensor_type[type].port !=
			    efx_port_num(efx))
				continue;
		} else {
			hwmon_type = EFX_HWMON_UNKNOWN;
		}

		switch (hwmon_type) {
		case EFX_HWMON_TEMP:
			hwmon_prefix = "temp";
			hwmon_index = ++n_temp; /* 1-based */
			break;
		case EFX_HWMON_COOL:
			/* This is likely to be a heatsink, but there
			 * is no convention for representing cooling
			 * devices other than fans.
			 */
			hwmon_prefix = "fan";
			hwmon_index = ++n_cool; /* 1-based */
			break;
		default:
			hwmon_prefix = "in";
			hwmon_index = n_in++; /* 0-based */
			break;
		case EFX_HWMON_CURR:
			hwmon_prefix = "curr";
			hwmon_index = ++n_curr; /* 1-based */
			break;
		case EFX_HWMON_POWER:
			hwmon_prefix = "power";
			hwmon_index = ++n_power; /* 1-based */
			break;
		}

		min1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
					SENSOR_INFO_ENTRY, j, MIN1);
		max1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
					SENSOR_INFO_ENTRY, j, MAX1);
		min2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
					SENSOR_INFO_ENTRY, j, MIN2);
		max2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
					SENSOR_INFO_ENTRY, j, MAX2);

		if (min1 != max1) {
			snprintf(name, sizeof(name), "%s%u_input",
				 hwmon_prefix, hwmon_index);
			efx_mcdi_mon_add_attr(
				efx, name, efx_mcdi_mon_show_value, i, type, 0);

			if (hwmon_type != EFX_HWMON_POWER) {
				snprintf(name, sizeof(name), "%s%u_min",
					 hwmon_prefix, hwmon_index);
				efx_mcdi_mon_add_attr(
					efx, name, efx_mcdi_mon_show_limit,
					i, type, min1);
			}

			snprintf(name, sizeof(name), "%s%u_max",
				 hwmon_prefix, hwmon_index);
			efx_mcdi_mon_add_attr(
				efx, name, efx_mcdi_mon_show_limit,
				i, type, max1);

			if (min2 != max2) {
				/* Assume max2 is critical value.
				 * But we have no good way to expose min2.
				 */
				snprintf(name, sizeof(name), "%s%u_crit",
					 hwmon_prefix, hwmon_index);
				efx_mcdi_mon_add_attr(
					efx, name, efx_mcdi_mon_show_limit,
					i, type, max2);
			}
		}

		snprintf(name, sizeof(name), "%s%u_alarm",
			 hwmon_prefix, hwmon_index);
		efx_mcdi_mon_add_attr(
			efx, name, efx_mcdi_mon_show_alarm, i, type, 0);

		if (type < ARRAY_SIZE(efx_mcdi_sensor_type) &&
		    efx_mcdi_sensor_type[type].label) {
			snprintf(name, sizeof(name), "%s%u_label",
				 hwmon_prefix, hwmon_index);
			efx_mcdi_mon_add_attr(
				efx, name, efx_mcdi_mon_show_label, i, type, 0);
		}
	}

hwmon_register:
	hwmon->groups[0] = &hwmon->group;
	hwmon->device = hwmon_device_register_with_groups(&efx->pci_dev->dev,
							  KBUILD_MODNAME, NULL,
							  hwmon->groups);
	if (IS_ERR(hwmon->device)) {
		rc = PTR_ERR(hwmon->device);
		goto fail;
	}

	return 0;

fail:
	efx_mcdi_mon_remove(efx);
	return rc;
}

void efx_mcdi_mon_remove(struct efx_nic *efx)
{
	struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);

	if (hwmon->device)
		hwmon_device_unregister(hwmon->device);
	kfree(hwmon->attrs);
	kfree(hwmon->group.attrs);
	efx_nic_free_buffer(efx, &hwmon->dma_buf);
}

#endif /* CONFIG_SFC_MCDI_MON */
117	drivers/net/ethernet/sfc/siena/mcdi_port.c (new file)
@@ -0,0 +1,117 @@
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2009-2013 Solarflare Communications Inc.
 */

/*
 * Driver for PHY related operations via MCDI.
 */

#include <linux/slab.h>
#include "efx.h"
#include "mcdi_port.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "nic.h"
#include "selftest.h"
#include "mcdi_port_common.h"

static int efx_mcdi_mdio_read(struct net_device *net_dev,
			      int prtad, int devad, u16 addr)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_READ_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_READ_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, efx->mdio_bus);
	MCDI_SET_DWORD(inbuf, MDIO_READ_IN_PRTAD, prtad);
	MCDI_SET_DWORD(inbuf, MDIO_READ_IN_DEVAD, devad);
	MCDI_SET_DWORD(inbuf, MDIO_READ_IN_ADDR, addr);

	rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_READ, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;

	if (MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS) !=
	    MC_CMD_MDIO_STATUS_GOOD)
		return -EIO;

	return (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE);
}

static int efx_mcdi_mdio_write(struct net_device *net_dev,
			       int prtad, int devad, u16 addr, u16 value)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_WRITE_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_WRITE_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_BUS, efx->mdio_bus);
	MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_PRTAD, prtad);
	MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_DEVAD, devad);
	MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_ADDR, addr);
	MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_VALUE, value);

	rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_WRITE, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;

	if (MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS) !=
	    MC_CMD_MDIO_STATUS_GOOD)
		return -EIO;

	return 0;
}
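/* Illustrative editorial note (not in the original file):
 * efx_mcdi_port_probe() below installs these two functions as
 * efx->mdio.mdio_read/mdio_write, so generic MDIO45 helpers such as
 * mdio45_links_ok() from <linux/mdio.h> end up issuing
 * MC_CMD_MDIO_READ/WRITE transactions through the MC rather than
 * touching MDIO lines directly.
 */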

u32 efx_mcdi_phy_get_caps(struct efx_nic *efx)
{
	struct efx_mcdi_phy_data *phy_data = efx->phy_data;

	return phy_data->supported_cap;
}

bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
	size_t outlength;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
			  outbuf, sizeof(outbuf), &outlength);
	if (rc)
		return true;

	return MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT) != 0;
}

int efx_mcdi_port_probe(struct efx_nic *efx)
{
	int rc;

	/* Set up MDIO structure for PHY */
	efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	efx->mdio.mdio_read = efx_mcdi_mdio_read;
	efx->mdio.mdio_write = efx_mcdi_mdio_write;

	/* Fill out MDIO structure, loopback modes, and initial link state */
	rc = efx_mcdi_phy_probe(efx);
	if (rc != 0)
		return rc;

	return efx_mcdi_mac_init_stats(efx);
}

void efx_mcdi_port_remove(struct efx_nic *efx)
{
	efx_mcdi_phy_remove(efx);
	efx_mcdi_mac_fini_stats(efx);
}
18	drivers/net/ethernet/sfc/siena/mcdi_port.h (new file)
@@ -0,0 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2008-2013 Solarflare Communications Inc.
 * Copyright 2019-2020 Xilinx Inc.
 */

#ifndef EFX_MCDI_PORT_H
#define EFX_MCDI_PORT_H

#include "net_driver.h"

u32 efx_mcdi_phy_get_caps(struct efx_nic *efx);
bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
int efx_mcdi_port_probe(struct efx_nic *efx);
void efx_mcdi_port_remove(struct efx_nic *efx);

#endif /* EFX_MCDI_PORT_H */
1301	drivers/net/ethernet/sfc/siena/mcdi_port_common.c (new file)
File diff suppressed because it is too large
67	drivers/net/ethernet/sfc/siena/mcdi_port_common.h (new file)
@@ -0,0 +1,67 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */
#ifndef EFX_MCDI_PORT_COMMON_H
#define EFX_MCDI_PORT_COMMON_H

#include "net_driver.h"
#include "mcdi.h"
#include "mcdi_pcol.h"

struct efx_mcdi_phy_data {
	u32 flags;
	u32 type;
	u32 supported_cap;
	u32 channel;
	u32 port;
	u32 stats_mask;
	u8 name[20];
	u32 media;
	u32 mmd_mask;
	u8 revision[20];
	u32 forced_cap;
};

int efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg);
void efx_link_set_advertising(struct efx_nic *efx,
			      const unsigned long *advertising);
int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
		      u32 flags, u32 loopback_mode, u32 loopback_speed);
int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes);
void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset);
u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset);
u32 efx_get_mcdi_phy_flags(struct efx_nic *efx);
u8 mcdi_to_ethtool_media(u32 media);
void efx_mcdi_phy_decode_link(struct efx_nic *efx,
			      struct efx_link_state *link_state,
			      u32 speed, u32 flags, u32 fcntl);
u32 ethtool_fec_caps_to_mcdi(u32 supported_cap, u32 ethtool_cap);
u32 mcdi_fec_caps_to_ethtool(u32 caps, bool is_25g);
void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa);
bool efx_mcdi_phy_poll(struct efx_nic *efx);
int efx_mcdi_phy_probe(struct efx_nic *efx);
void efx_mcdi_phy_remove(struct efx_nic *efx);
void efx_mcdi_phy_get_link_ksettings(struct efx_nic *efx, struct ethtool_link_ksettings *cmd);
int efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx, const struct ethtool_link_ksettings *cmd);
int efx_mcdi_phy_get_fecparam(struct efx_nic *efx, struct ethtool_fecparam *fec);
int efx_mcdi_phy_set_fecparam(struct efx_nic *efx, const struct ethtool_fecparam *fec);
int efx_mcdi_phy_test_alive(struct efx_nic *efx);
int efx_mcdi_port_reconfigure(struct efx_nic *efx);
int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results, unsigned int flags);
const char *efx_mcdi_phy_test_name(struct efx_nic *efx, unsigned int index);
int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx, struct ethtool_eeprom *ee, u8 *data);
int efx_mcdi_phy_get_module_info(struct efx_nic *efx, struct ethtool_modinfo *modinfo);
int efx_mcdi_set_mac(struct efx_nic *efx);
int efx_mcdi_set_mtu(struct efx_nic *efx);
int efx_mcdi_mac_init_stats(struct efx_nic *efx);
void efx_mcdi_mac_fini_stats(struct efx_nic *efx);
int efx_mcdi_port_get_number(struct efx_nic *efx);
void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);

#endif
124	drivers/net/ethernet/sfc/siena/mtd.c (new file)
@@ -0,0 +1,124 @@
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 */

#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>

#include "net_driver.h"
#include "efx.h"

#define to_efx_mtd_partition(mtd)				\
	container_of(mtd, struct efx_mtd_partition, mtd)

/* MTD interface */

static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
{
	struct efx_nic *efx = mtd->priv;

	return efx->type->mtd_erase(mtd, erase->addr, erase->len);
}

static void efx_mtd_sync(struct mtd_info *mtd)
{
	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	int rc;

	rc = efx->type->mtd_sync(mtd);
	if (rc)
		pr_err("%s: %s sync failed (%d)\n",
		       part->name, part->dev_type_name, rc);
}

static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
{
	int rc;

	for (;;) {
		rc = mtd_device_unregister(&part->mtd);
		if (rc != -EBUSY)
			break;
		ssleep(1);
	}
	WARN_ON(rc);
	list_del(&part->node);
}

int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
		size_t n_parts, size_t sizeof_part)
{
	struct efx_mtd_partition *part;
	size_t i;

	for (i = 0; i < n_parts; i++) {
		part = (struct efx_mtd_partition *)((char *)parts +
						    i * sizeof_part);

		part->mtd.writesize = 1;

		if (!(part->mtd.flags & MTD_NO_ERASE))
			part->mtd.flags |= MTD_WRITEABLE;

		part->mtd.owner = THIS_MODULE;
		part->mtd.priv = efx;
		part->mtd.name = part->name;
		part->mtd._erase = efx_mtd_erase;
		part->mtd._read = efx->type->mtd_read;
		part->mtd._write = efx->type->mtd_write;
		part->mtd._sync = efx_mtd_sync;

		efx->type->mtd_rename(part);

		if (mtd_device_register(&part->mtd, NULL, 0))
			goto fail;

		/* Add to list in order - efx_mtd_remove() depends on this */
		list_add_tail(&part->node, &efx->mtd_list);
	}

	return 0;

fail:
	while (i--) {
		part = (struct efx_mtd_partition *)((char *)parts +
						    i * sizeof_part);
		efx_mtd_remove_partition(part);
	}
	/* Failure is unlikely here, but probably means we're out of memory */
	return -ENOMEM;
}

void efx_mtd_remove(struct efx_nic *efx)
{
	struct efx_mtd_partition *parts, *part, *next;

	WARN_ON(efx_dev_registered(efx));

	if (list_empty(&efx->mtd_list))
		return;

	parts = list_first_entry(&efx->mtd_list, struct efx_mtd_partition,
				 node);

	list_for_each_entry_safe(part, next, &efx->mtd_list, node)
		efx_mtd_remove_partition(part);

	kfree(parts);
}

void efx_mtd_rename(struct efx_nic *efx)
{
	struct efx_mtd_partition *part;

	ASSERT_RTNL();

	list_for_each_entry(part, &efx->mtd_list, node)
		efx->type->mtd_rename(part);
}
1716	drivers/net/ethernet/sfc/siena/net_driver.h (new file)
File diff suppressed because it is too large
580	drivers/net/ethernet/sfc/siena/nic.c (new file)
@@ -0,0 +1,580 @@
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/cpu_rmap.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "ef10_regs.h"
#include "farch_regs.h"
#include "io.h"
#include "workarounds.h"
#include "mcdi_pcol.h"

/**************************************************************************
 *
 * Generic buffer handling
 * These buffers are used for interrupt status, MAC stats, etc.
 *
 **************************************************************************/

int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
			 unsigned int len, gfp_t gfp_flags)
{
	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
					  &buffer->dma_addr, gfp_flags);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	return 0;
}

void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
	if (buffer->addr) {
		dma_free_coherent(&efx->pci_dev->dev, buffer->len,
				  buffer->addr, buffer->dma_addr);
		buffer->addr = NULL;
	}
}

/* Check whether an event is present in the eventq at the current
 * read pointer.  Only useful for self-test.
 */
bool efx_nic_event_present(struct efx_channel *channel)
{
	return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
}

void efx_nic_event_test_start(struct efx_channel *channel)
{
	channel->event_test_cpu = -1;
	smp_wmb();
	channel->efx->type->ev_test_generate(channel);
}

int efx_nic_irq_test_start(struct efx_nic *efx)
{
	efx->last_irq_cpu = -1;
	smp_wmb();
	return efx->type->irq_test_generate(efx);
}

/* Hook interrupt handler(s)
 * Try MSI and then legacy interrupts.
 */
int efx_nic_init_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	unsigned int n_irqs;
	int rc;

	if (!EFX_INT_MODE_USE_MSI(efx)) {
		rc = request_irq(efx->legacy_irq,
				 efx->type->irq_handle_legacy, IRQF_SHARED,
				 efx->name, efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook legacy IRQ %d\n",
				  efx->pci_dev->irq);
			goto fail1;
		}
		efx->irqs_hooked = true;
		return 0;
	}

#ifdef CONFIG_RFS_ACCEL
	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		efx->net_dev->rx_cpu_rmap =
			alloc_irq_cpu_rmap(efx->n_rx_channels);
		if (!efx->net_dev->rx_cpu_rmap) {
			rc = -ENOMEM;
			goto fail1;
		}
	}
#endif

	/* Hook MSI or MSI-X interrupt */
	n_irqs = 0;
	efx_for_each_channel(channel, efx) {
		rc = request_irq(channel->irq, efx->type->irq_handle_msi,
				 IRQF_PROBE_SHARED, /* Not shared */
				 efx->msi_context[channel->channel].name,
				 &efx->msi_context[channel->channel]);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook IRQ %d\n", channel->irq);
			goto fail2;
		}
		++n_irqs;

#ifdef CONFIG_RFS_ACCEL
		if (efx->interrupt_mode == EFX_INT_MODE_MSIX &&
		    channel->channel < efx->n_rx_channels) {
			rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
					      channel->irq);
			if (rc)
				goto fail2;
		}
#endif
	}

	efx->irqs_hooked = true;
	return 0;

 fail2:
#ifdef CONFIG_RFS_ACCEL
	free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
	efx->net_dev->rx_cpu_rmap = NULL;
#endif
	efx_for_each_channel(channel, efx) {
		if (n_irqs-- == 0)
			break;
		free_irq(channel->irq, &efx->msi_context[channel->channel]);
	}
 fail1:
	return rc;
}

void efx_nic_fini_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;

#ifdef CONFIG_RFS_ACCEL
	free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
	efx->net_dev->rx_cpu_rmap = NULL;
#endif

	if (!efx->irqs_hooked)
		return;
	if (EFX_INT_MODE_USE_MSI(efx)) {
		/* Disable MSI/MSI-X interrupts */
		efx_for_each_channel(channel, efx)
			free_irq(channel->irq,
				 &efx->msi_context[channel->channel]);
	} else {
		/* Disable legacy interrupt */
		free_irq(efx->legacy_irq, efx);
	}
	efx->irqs_hooked = false;
}

/* Register dump */

#define REGISTER_REVISION_FA	1
#define REGISTER_REVISION_FB	2
#define REGISTER_REVISION_FC	3
#define REGISTER_REVISION_FZ	3	/* last Falcon arch revision */
#define REGISTER_REVISION_ED	4
#define REGISTER_REVISION_EZ	4	/* latest EF10 revision */

struct efx_nic_reg {
	u32 offset:24;
	u32 min_revision:3, max_revision:3;
};

#define REGISTER(name, arch, min_rev, max_rev) {			\
	arch ## R_ ## min_rev ## max_rev ## _ ## name,			\
	REGISTER_REVISION_ ## arch ## min_rev,				\
	REGISTER_REVISION_ ## arch ## max_rev				\
}
#define REGISTER_AA(name) REGISTER(name, F, A, A)
#define REGISTER_AB(name) REGISTER(name, F, A, B)
#define REGISTER_AZ(name) REGISTER(name, F, A, Z)
#define REGISTER_BB(name) REGISTER(name, F, B, B)
#define REGISTER_BZ(name) REGISTER(name, F, B, Z)
#define REGISTER_CZ(name) REGISTER(name, F, C, Z)
#define REGISTER_DZ(name) REGISTER(name, E, D, Z)
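/* Expansion sketch (illustrative editorial comment, not part of the
 * original file): REGISTER_AB(GPIO_CTL) pastes tokens to produce
 *	{ FR_AB_GPIO_CTL, REGISTER_REVISION_FA, REGISTER_REVISION_FB }
 * i.e. the GPIO_CTL register is readable on Falcon revisions A through B;
 * the FR_* offsets come from farch_regs.h.
 */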
|
static const struct efx_nic_reg efx_nic_regs[] = {
	REGISTER_AZ(ADR_REGION),
	REGISTER_AZ(INT_EN_KER),
	REGISTER_BZ(INT_EN_CHAR),
	REGISTER_AZ(INT_ADR_KER),
	REGISTER_BZ(INT_ADR_CHAR),
	/* INT_ACK_KER is WO */
	/* INT_ISR0 is RC */
	REGISTER_AZ(HW_INIT),
	REGISTER_CZ(USR_EV_CFG),
	REGISTER_AB(EE_SPI_HCMD),
	REGISTER_AB(EE_SPI_HADR),
	REGISTER_AB(EE_SPI_HDATA),
	REGISTER_AB(EE_BASE_PAGE),
	REGISTER_AB(EE_VPD_CFG0),
	/* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
	/* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
	/* PCIE_CORE_INDIRECT is indirect */
	REGISTER_AB(NIC_STAT),
	REGISTER_AB(GPIO_CTL),
	REGISTER_AB(GLB_CTL),
	/* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
	REGISTER_BZ(DP_CTRL),
	REGISTER_AZ(MEM_STAT),
	REGISTER_AZ(CS_DEBUG),
	REGISTER_AZ(ALTERA_BUILD),
	REGISTER_AZ(CSR_SPARE),
	REGISTER_AB(PCIE_SD_CTL0123),
	REGISTER_AB(PCIE_SD_CTL45),
	REGISTER_AB(PCIE_PCS_CTL_STAT),
	/* DEBUG_DATA_OUT is not used */
	/* DRV_EV is WO */
	REGISTER_AZ(EVQ_CTL),
	REGISTER_AZ(EVQ_CNT1),
	REGISTER_AZ(EVQ_CNT2),
	REGISTER_AZ(BUF_TBL_CFG),
	REGISTER_AZ(SRM_RX_DC_CFG),
	REGISTER_AZ(SRM_TX_DC_CFG),
	REGISTER_AZ(SRM_CFG),
	/* BUF_TBL_UPD is WO */
	REGISTER_AZ(SRM_UPD_EVQ),
	REGISTER_AZ(SRAM_PARITY),
	REGISTER_AZ(RX_CFG),
	REGISTER_BZ(RX_FILTER_CTL),
	/* RX_FLUSH_DESCQ is WO */
	REGISTER_AZ(RX_DC_CFG),
	REGISTER_AZ(RX_DC_PF_WM),
	REGISTER_BZ(RX_RSS_TKEY),
	/* RX_NODESC_DROP is RC */
	REGISTER_AA(RX_SELF_RST),
	/* RX_DEBUG, RX_PUSH_DROP are not used */
	REGISTER_CZ(RX_RSS_IPV6_REG1),
	REGISTER_CZ(RX_RSS_IPV6_REG2),
	REGISTER_CZ(RX_RSS_IPV6_REG3),
	/* TX_FLUSH_DESCQ is WO */
	REGISTER_AZ(TX_DC_CFG),
	REGISTER_AA(TX_CHKSM_CFG),
	REGISTER_AZ(TX_CFG),
	/* TX_PUSH_DROP is not used */
	REGISTER_AZ(TX_RESERVED),
	REGISTER_BZ(TX_PACE),
	/* TX_PACE_DROP_QID is RC */
	REGISTER_BB(TX_VLAN),
	REGISTER_BZ(TX_IPFIL_PORTEN),
	REGISTER_AB(MD_TXD),
	REGISTER_AB(MD_RXD),
	REGISTER_AB(MD_CS),
	REGISTER_AB(MD_PHY_ADR),
	REGISTER_AB(MD_ID),
	/* MD_STAT is RC */
	REGISTER_AB(MAC_STAT_DMA),
	REGISTER_AB(MAC_CTRL),
	REGISTER_BB(GEN_MODE),
	REGISTER_AB(MAC_MC_HASH_REG0),
	REGISTER_AB(MAC_MC_HASH_REG1),
	REGISTER_AB(GM_CFG1),
	REGISTER_AB(GM_CFG2),
	/* GM_IPG and GM_HD are not used */
	REGISTER_AB(GM_MAX_FLEN),
	/* GM_TEST is not used */
	REGISTER_AB(GM_ADR1),
	REGISTER_AB(GM_ADR2),
	REGISTER_AB(GMF_CFG0),
	REGISTER_AB(GMF_CFG1),
	REGISTER_AB(GMF_CFG2),
	REGISTER_AB(GMF_CFG3),
	REGISTER_AB(GMF_CFG4),
	REGISTER_AB(GMF_CFG5),
	REGISTER_BB(TX_SRC_MAC_CTL),
	REGISTER_AB(XM_ADR_LO),
	REGISTER_AB(XM_ADR_HI),
	REGISTER_AB(XM_GLB_CFG),
	REGISTER_AB(XM_TX_CFG),
	REGISTER_AB(XM_RX_CFG),
	REGISTER_AB(XM_MGT_INT_MASK),
	REGISTER_AB(XM_FC),
	REGISTER_AB(XM_PAUSE_TIME),
	REGISTER_AB(XM_TX_PARAM),
	REGISTER_AB(XM_RX_PARAM),
	/* XM_MGT_INT_MSK (note no 'A') is RC */
	REGISTER_AB(XX_PWR_RST),
	REGISTER_AB(XX_SD_CTL),
	REGISTER_AB(XX_TXDRV_CTL),
	/* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
	/* XX_CORE_STAT is partly RC */
	REGISTER_DZ(BIU_HW_REV_ID),
	REGISTER_DZ(MC_DB_LWRD),
	REGISTER_DZ(MC_DB_HWRD),
};
struct efx_nic_reg_table {
	u32 offset:24;
	u32 min_revision:3, max_revision:3;
	u32 step:6, rows:21;
};

#define REGISTER_TABLE_DIMENSIONS(_, offset, arch, min_rev, max_rev, step, rows) { \
	offset,								\
	REGISTER_REVISION_ ## arch ## min_rev,				\
	REGISTER_REVISION_ ## arch ## max_rev,				\
	step, rows							\
}
#define REGISTER_TABLE(name, arch, min_rev, max_rev)			\
	REGISTER_TABLE_DIMENSIONS(					\
		name, arch ## R_ ## min_rev ## max_rev ## _ ## name,	\
		arch, min_rev, max_rev,					\
		arch ## R_ ## min_rev ## max_rev ## _ ## name ## _STEP,	\
		arch ## R_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, F, A, A)
#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, F, A, Z)
#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, F, B, B)
#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, F, B, Z)
#define REGISTER_TABLE_BB_CZ(name)					\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, B, B,	\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_BB_ ## name ## _ROWS),		\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, C, Z,	\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_CZ_ ## name ## _ROWS)
#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, F, C, Z)
#define REGISTER_TABLE_DZ(name) REGISTER_TABLE(name, E, D, Z)
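Note that REGISTER_TABLE_BB_CZ deliberately emits two array entries: Falcon B and Siena share an offset and step but differ in row count. An illustrative expansion (identifiers again come from the generated farch_regs.h):

/* REGISTER_TABLE_BB_CZ(TX_PACE_TBL) expands to two initialisers:
 *
 *   { FR_BZ_TX_PACE_TBL, REGISTER_REVISION_FB, REGISTER_REVISION_FB,
 *     FR_BZ_TX_PACE_TBL_STEP, FR_BB_TX_PACE_TBL_ROWS },
 *   { FR_BZ_TX_PACE_TBL, REGISTER_REVISION_FC, REGISTER_REVISION_FZ,
 *     FR_BZ_TX_PACE_TBL_STEP, FR_CZ_TX_PACE_TBL_ROWS }
 */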
static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
	/* DRIVER is not used */
	/* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
	REGISTER_TABLE_BB(TX_IPFIL_TBL),
	REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
	REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
	/* We can't reasonably read all of the buffer table (up to 8MB!).
	 * However this driver will only use a few entries.  Reading
	 * 1K entries allows for some expansion of queue count and
	 * size before we need to change the version. */
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
				  F, A, A, 8, 1024),
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
				  F, B, Z, 8, 1024),
	REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
	REGISTER_TABLE_BB_CZ(TIMER_TBL),
	REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
	REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
	/* TX_FILTER_TBL0 is huge and not used by this driver */
	REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
	REGISTER_TABLE_CZ(MC_TREG_SMEM),
	/* MSIX_PBA_TABLE is not mapped */
	/* SRM_DBG is not mapped (and is redundant with BUF_FULL_TBL) */
	REGISTER_TABLE_BZ(RX_FILTER_TBL0),
	REGISTER_TABLE_DZ(BIU_MC_SFT_STATUS),
};
size_t efx_nic_get_regs_len(struct efx_nic *efx)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;
	size_t len = 0;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++)
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision)
			len += sizeof(efx_oword_t);

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++)
		if (efx->type->revision >= table->min_revision &&
		    efx->type->revision <= table->max_revision)
			len += table->rows * min_t(size_t, table->step, 16);

	return len;
}
void efx_nic_get_regs(struct efx_nic *efx, void *buf)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++) {
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision) {
			efx_reado(efx, (efx_oword_t *)buf, reg->offset);
			buf += sizeof(efx_oword_t);
		}
	}

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++) {
		size_t size, i;

		if (!(efx->type->revision >= table->min_revision &&
		      efx->type->revision <= table->max_revision))
			continue;

		size = min_t(size_t, table->step, 16);

		for (i = 0; i < table->rows; i++) {
			switch (table->step) {
			case 4: /* 32-bit SRAM */
				efx_readd(efx, buf, table->offset + 4 * i);
				break;
			case 8: /* 64-bit SRAM */
				efx_sram_readq(efx,
					       efx->membase + table->offset,
					       buf, i);
				break;
			case 16: /* 128-bit-readable register */
				efx_reado_table(efx, buf, table->offset, i);
				break;
			case 32: /* 128-bit register, interleaved */
				efx_reado_table(efx, buf, table->offset, 2 * i);
				break;
			default:
				WARN_ON(1);
				return;
			}
			buf += size;
		}
	}
}
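These two functions pair up behind the ethtool regs API: the core asks for the length first, allocates a buffer of exactly that size, then asks for the dump. A minimal sketch of the glue, assuming the usual ethtool_ops shape (the wrapper names here are illustrative, not copied from the driver's ethtool.c):

static int demo_get_regs_len(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	return efx_nic_get_regs_len(efx);
}

static void demo_get_regs(struct net_device *net_dev,
			  struct ethtool_regs *regs, void *buf)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	regs->version = efx->type->revision;	/* revision keys the dump layout */
	efx_nic_get_regs(efx, buf);		/* fills exactly get_regs_len() bytes */
}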
/**
 * efx_nic_describe_stats - Describe supported statistics for ethtool
 * @desc: Array of &struct efx_hw_stat_desc describing the statistics
 * @count: Length of the @desc array
 * @mask: Bitmask of which elements of @desc are enabled
 * @names: Buffer to copy names to, or %NULL.  The names are copied
 *	starting at intervals of %ETH_GSTRING_LEN bytes.
 *
 * Returns the number of visible statistics, i.e. the number of set
 * bits in the first @count bits of @mask for which a name is defined.
 */
size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
			      const unsigned long *mask, u8 *names)
{
	size_t visible = 0;
	size_t index;

	for_each_set_bit(index, mask, count) {
		if (desc[index].name) {
			if (names) {
				strlcpy(names, desc[index].name,
					ETH_GSTRING_LEN);
				names += ETH_GSTRING_LEN;
			}
			++visible;
		}
	}

	return visible;
}
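A sketch of how a caller drives this, assuming a two-entry descriptor table. The field order of struct efx_hw_stat_desc (name, dma_width, offset) is taken from net_driver.h; the names and mask below are invented for illustration:

static const struct efx_hw_stat_desc demo_stat_desc[] = {
	[0] = { "demo_tx_bytes", 64, 0 },	/* 64-bit counter at offset 0 */
	[1] = { NULL, 0, 0 },			/* nameless: never reported */
};

static void demo_describe(u8 *names)
{
	DECLARE_BITMAP(mask, ARRAY_SIZE(demo_stat_desc));
	size_t visible;

	bitmap_fill(mask, ARRAY_SIZE(demo_stat_desc));
	/* Copies "demo_tx_bytes" into names[0..ETH_GSTRING_LEN) and
	 * returns 1, since the second entry has no name.
	 */
	visible = efx_nic_describe_stats(demo_stat_desc,
					 ARRAY_SIZE(demo_stat_desc),
					 mask, names);
}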
/**
 * efx_nic_copy_stats - Copy stats from the DMA buffer into an
 *	intermediate buffer.  This is used to get a consistent
 *	set of stats while the DMA buffer can be written at any time
 *	by the NIC.
 * @efx: The associated NIC.
 * @dest: Destination buffer.  Must be the same size as the DMA buffer.
 */
int efx_nic_copy_stats(struct efx_nic *efx, __le64 *dest)
{
	__le64 *dma_stats = efx->stats_buffer.addr;
	__le64 generation_start, generation_end;
	int rc = 0, retry;

	if (!dest)
		return 0;

	if (!dma_stats)
		goto return_zeroes;

	/* If we're unlucky enough to read statistics during the DMA, wait
	 * up to 10ms for it to finish (typically takes <500us)
	 */
	for (retry = 0; retry < 100; ++retry) {
		generation_end = dma_stats[efx->num_mac_stats - 1];
		if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
			goto return_zeroes;
		rmb();
		memcpy(dest, dma_stats, efx->num_mac_stats * sizeof(__le64));
		rmb();
		generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
		if (generation_end == generation_start)
			return 0; /* return good data */
		udelay(100);
	}

	rc = -EIO;

return_zeroes:
	memset(dest, 0, efx->num_mac_stats * sizeof(u64));
	return rc;
}
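The retry loop is the classic generation-count (seqlock-style) consistency pattern: the producer bumps a counter before and after writing, so a reader that sees the same value on both sides of its copy knows no write overlapped it. Stripped of driver detail, the shape is:

/* Producer (firmware):          Reader (this function):
 *   write generation word         read generation_end
 *   write the payload             rmb(); copy the payload; rmb();
 *   write generation word         read generation_start
 *                                 equal? -> copy is consistent, else retry
 */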
/**
 * efx_nic_update_stats - Convert statistics DMA buffer to array of u64
 * @desc: Array of &struct efx_hw_stat_desc describing the DMA buffer
 *	layout.  DMA widths of 0, 16, 32 and 64 are supported; where
 *	the width is specified as 0 the corresponding element of
 *	@stats is not updated.
 * @count: Length of the @desc array
 * @mask: Bitmask of which elements of @desc are enabled
 * @stats: Buffer to update with the converted statistics.  The length
 *	of this array must be at least @count.
 * @dma_buf: DMA buffer containing hardware statistics
 * @accumulate: If set, the converted values will be added rather than
 *	directly stored to the corresponding elements of @stats
 */
void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
			  const unsigned long *mask,
			  u64 *stats, const void *dma_buf, bool accumulate)
{
	size_t index;

	for_each_set_bit(index, mask, count) {
		if (desc[index].dma_width) {
			const void *addr = dma_buf + desc[index].offset;
			u64 val;

			switch (desc[index].dma_width) {
			case 16:
				val = le16_to_cpup((__le16 *)addr);
				break;
			case 32:
				val = le32_to_cpup((__le32 *)addr);
				break;
			case 64:
				val = le64_to_cpup((__le64 *)addr);
				break;
			default:
				WARN_ON(1);
				val = 0;
				break;
			}

			if (accumulate)
				stats[index] += val;
			else
				stats[index] = val;
		}
	}
}
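Putting the two halves together: a NIC type's stats hook typically snapshots the DMA buffer first, then converts at leisure. A condensed sketch, reusing the hypothetical demo_stat_desc table above and assuming a scratch buffer of num_mac_stats words:

static void demo_update_stats(struct efx_nic *efx, __le64 *scratch,
			      const unsigned long *mask, u64 *stats)
{
	/* Snapshot first: a torn read here would corrupt every counter. */
	if (efx_nic_copy_stats(efx, scratch))
		return;		/* DMA never settled; keep the old values */

	/* Convert from the stable snapshot, not the live DMA buffer. */
	efx_nic_update_stats(demo_stat_desc, ARRAY_SIZE(demo_stat_desc),
			     mask, stats, scratch, false);
}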
void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *rx_nodesc_drops)
{
	/* if down, or this is the first update after coming up */
	if (!(efx->net_dev->flags & IFF_UP) || !efx->rx_nodesc_drops_prev_state)
		efx->rx_nodesc_drops_while_down +=
			*rx_nodesc_drops - efx->rx_nodesc_drops_total;
	efx->rx_nodesc_drops_total = *rx_nodesc_drops;
	efx->rx_nodesc_drops_prev_state = !!(efx->net_dev->flags & IFF_UP);
	*rx_nodesc_drops -= efx->rx_nodesc_drops_while_down;
}
392
drivers/net/ethernet/sfc/siena/nic.h
Normal file
@ -0,0 +1,392 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 */

#ifndef EFX_NIC_H
#define EFX_NIC_H

#include "nic_common.h"
#include "efx.h"

u32 efx_farch_fpga_ver(struct efx_nic *efx);

enum {
	PHY_TYPE_NONE = 0,
	PHY_TYPE_TXC43128 = 1,
	PHY_TYPE_88E1111 = 2,
	PHY_TYPE_SFX7101 = 3,
	PHY_TYPE_QT2022C2 = 4,
	PHY_TYPE_PM8358 = 6,
	PHY_TYPE_SFT9001A = 8,
	PHY_TYPE_QT2025C = 9,
	PHY_TYPE_SFT9001B = 10,
};

enum {
	SIENA_STAT_tx_bytes = GENERIC_STAT_COUNT,
	SIENA_STAT_tx_good_bytes,
	SIENA_STAT_tx_bad_bytes,
	SIENA_STAT_tx_packets,
	SIENA_STAT_tx_bad,
	SIENA_STAT_tx_pause,
	SIENA_STAT_tx_control,
	SIENA_STAT_tx_unicast,
	SIENA_STAT_tx_multicast,
	SIENA_STAT_tx_broadcast,
	SIENA_STAT_tx_lt64,
	SIENA_STAT_tx_64,
	SIENA_STAT_tx_65_to_127,
	SIENA_STAT_tx_128_to_255,
	SIENA_STAT_tx_256_to_511,
	SIENA_STAT_tx_512_to_1023,
	SIENA_STAT_tx_1024_to_15xx,
	SIENA_STAT_tx_15xx_to_jumbo,
	SIENA_STAT_tx_gtjumbo,
	SIENA_STAT_tx_collision,
	SIENA_STAT_tx_single_collision,
	SIENA_STAT_tx_multiple_collision,
	SIENA_STAT_tx_excessive_collision,
	SIENA_STAT_tx_deferred,
	SIENA_STAT_tx_late_collision,
	SIENA_STAT_tx_excessive_deferred,
	SIENA_STAT_tx_non_tcpudp,
	SIENA_STAT_tx_mac_src_error,
	SIENA_STAT_tx_ip_src_error,
	SIENA_STAT_rx_bytes,
	SIENA_STAT_rx_good_bytes,
	SIENA_STAT_rx_bad_bytes,
	SIENA_STAT_rx_packets,
	SIENA_STAT_rx_good,
	SIENA_STAT_rx_bad,
	SIENA_STAT_rx_pause,
	SIENA_STAT_rx_control,
	SIENA_STAT_rx_unicast,
	SIENA_STAT_rx_multicast,
	SIENA_STAT_rx_broadcast,
	SIENA_STAT_rx_lt64,
	SIENA_STAT_rx_64,
	SIENA_STAT_rx_65_to_127,
	SIENA_STAT_rx_128_to_255,
	SIENA_STAT_rx_256_to_511,
	SIENA_STAT_rx_512_to_1023,
	SIENA_STAT_rx_1024_to_15xx,
	SIENA_STAT_rx_15xx_to_jumbo,
	SIENA_STAT_rx_gtjumbo,
	SIENA_STAT_rx_bad_gtjumbo,
	SIENA_STAT_rx_overflow,
	SIENA_STAT_rx_false_carrier,
	SIENA_STAT_rx_symbol_error,
	SIENA_STAT_rx_align_error,
	SIENA_STAT_rx_length_error,
	SIENA_STAT_rx_internal_error,
	SIENA_STAT_rx_nodesc_drop_cnt,
	SIENA_STAT_COUNT
};
/**
 * struct siena_nic_data - Siena NIC state
 * @efx: Pointer back to main interface structure
 * @wol_filter_id: Wake-on-LAN packet filter id
 * @stats: Hardware statistics
 * @vf: Array of &struct siena_vf objects
 * @vf_buftbl_base: The zeroth buffer table index used to back VF queues.
 * @vfdi_status: Common VFDI status page to be DMAed to VF address space.
 * @local_addr_list: List of local addresses.  Protected by %local_lock.
 * @local_page_list: List of DMA addressable pages used to broadcast
 *	%local_addr_list.  Protected by %local_lock.
 * @local_lock: Mutex protecting %local_addr_list and %local_page_list.
 * @peer_work: Work item to broadcast peer addresses to VMs.
 */
struct siena_nic_data {
	struct efx_nic *efx;
	int wol_filter_id;
	u64 stats[SIENA_STAT_COUNT];
#ifdef CONFIG_SFC_SRIOV
	struct siena_vf *vf;
	struct efx_channel *vfdi_channel;
	unsigned vf_buftbl_base;
	struct efx_buffer vfdi_status;
	struct list_head local_addr_list;
	struct list_head local_page_list;
	struct mutex local_lock;
	struct work_struct peer_work;
#endif
};
enum {
	EF10_STAT_port_tx_bytes = GENERIC_STAT_COUNT,
	EF10_STAT_port_tx_packets,
	EF10_STAT_port_tx_pause,
	EF10_STAT_port_tx_control,
	EF10_STAT_port_tx_unicast,
	EF10_STAT_port_tx_multicast,
	EF10_STAT_port_tx_broadcast,
	EF10_STAT_port_tx_lt64,
	EF10_STAT_port_tx_64,
	EF10_STAT_port_tx_65_to_127,
	EF10_STAT_port_tx_128_to_255,
	EF10_STAT_port_tx_256_to_511,
	EF10_STAT_port_tx_512_to_1023,
	EF10_STAT_port_tx_1024_to_15xx,
	EF10_STAT_port_tx_15xx_to_jumbo,
	EF10_STAT_port_rx_bytes,
	EF10_STAT_port_rx_bytes_minus_good_bytes,
	EF10_STAT_port_rx_good_bytes,
	EF10_STAT_port_rx_bad_bytes,
	EF10_STAT_port_rx_packets,
	EF10_STAT_port_rx_good,
	EF10_STAT_port_rx_bad,
	EF10_STAT_port_rx_pause,
	EF10_STAT_port_rx_control,
	EF10_STAT_port_rx_unicast,
	EF10_STAT_port_rx_multicast,
	EF10_STAT_port_rx_broadcast,
	EF10_STAT_port_rx_lt64,
	EF10_STAT_port_rx_64,
	EF10_STAT_port_rx_65_to_127,
	EF10_STAT_port_rx_128_to_255,
	EF10_STAT_port_rx_256_to_511,
	EF10_STAT_port_rx_512_to_1023,
	EF10_STAT_port_rx_1024_to_15xx,
	EF10_STAT_port_rx_15xx_to_jumbo,
	EF10_STAT_port_rx_gtjumbo,
	EF10_STAT_port_rx_bad_gtjumbo,
	EF10_STAT_port_rx_overflow,
	EF10_STAT_port_rx_align_error,
	EF10_STAT_port_rx_length_error,
	EF10_STAT_port_rx_nodesc_drops,
	EF10_STAT_port_rx_pm_trunc_bb_overflow,
	EF10_STAT_port_rx_pm_discard_bb_overflow,
	EF10_STAT_port_rx_pm_trunc_vfifo_full,
	EF10_STAT_port_rx_pm_discard_vfifo_full,
	EF10_STAT_port_rx_pm_trunc_qbb,
	EF10_STAT_port_rx_pm_discard_qbb,
	EF10_STAT_port_rx_pm_discard_mapping,
	EF10_STAT_port_rx_dp_q_disabled_packets,
	EF10_STAT_port_rx_dp_di_dropped_packets,
	EF10_STAT_port_rx_dp_streaming_packets,
	EF10_STAT_port_rx_dp_hlb_fetch,
	EF10_STAT_port_rx_dp_hlb_wait,
	EF10_STAT_rx_unicast,
	EF10_STAT_rx_unicast_bytes,
	EF10_STAT_rx_multicast,
	EF10_STAT_rx_multicast_bytes,
	EF10_STAT_rx_broadcast,
	EF10_STAT_rx_broadcast_bytes,
	EF10_STAT_rx_bad,
	EF10_STAT_rx_bad_bytes,
	EF10_STAT_rx_overflow,
	EF10_STAT_tx_unicast,
	EF10_STAT_tx_unicast_bytes,
	EF10_STAT_tx_multicast,
	EF10_STAT_tx_multicast_bytes,
	EF10_STAT_tx_broadcast,
	EF10_STAT_tx_broadcast_bytes,
	EF10_STAT_tx_bad,
	EF10_STAT_tx_bad_bytes,
	EF10_STAT_tx_overflow,
	EF10_STAT_V1_COUNT,
	EF10_STAT_fec_uncorrected_errors = EF10_STAT_V1_COUNT,
	EF10_STAT_fec_corrected_errors,
	EF10_STAT_fec_corrected_symbols_lane0,
	EF10_STAT_fec_corrected_symbols_lane1,
	EF10_STAT_fec_corrected_symbols_lane2,
	EF10_STAT_fec_corrected_symbols_lane3,
	EF10_STAT_ctpio_vi_busy_fallback,
	EF10_STAT_ctpio_long_write_success,
	EF10_STAT_ctpio_missing_dbell_fail,
	EF10_STAT_ctpio_overflow_fail,
	EF10_STAT_ctpio_underflow_fail,
	EF10_STAT_ctpio_timeout_fail,
	EF10_STAT_ctpio_noncontig_wr_fail,
	EF10_STAT_ctpio_frm_clobber_fail,
	EF10_STAT_ctpio_invalid_wr_fail,
	EF10_STAT_ctpio_vi_clobber_fallback,
	EF10_STAT_ctpio_unqualified_fallback,
	EF10_STAT_ctpio_runt_fallback,
	EF10_STAT_ctpio_success,
	EF10_STAT_ctpio_fallback,
	EF10_STAT_ctpio_poison,
	EF10_STAT_ctpio_erase,
	EF10_STAT_COUNT
};

/* Maximum number of TX PIO buffers we may allocate to a function.
 * This matches the total number of buffers on each SFC9100-family
 * controller.
 */
#define EF10_TX_PIOBUF_COUNT 16
/**
 * struct efx_ef10_nic_data - EF10 architecture NIC state
 * @mcdi_buf: DMA buffer for MCDI
 * @warm_boot_count: Last seen MC warm boot count
 * @vi_base: Absolute index of first VI in this function
 * @n_allocated_vis: Number of VIs allocated to this function
 * @n_piobufs: Number of PIO buffers allocated to this function
 * @wc_membase: Base address of write-combining mapping of the memory BAR
 * @pio_write_base: Base address for writing PIO buffers
 * @pio_write_vi_base: Relative VI number for @pio_write_base
 * @piobuf_handle: Handle of each PIO buffer allocated
 * @piobuf_size: size of a single PIO buffer
 * @must_restore_piobufs: Flag: PIO buffers have yet to be restored after MC
 *	reboot
 * @mc_stats: Scratch buffer for converting statistics to the kernel's format
 * @stats: Hardware statistics
 * @workaround_35388: Flag: firmware supports workaround for bug 35388
 * @workaround_26807: Flag: firmware supports workaround for bug 26807
 * @workaround_61265: Flag: firmware supports workaround for bug 61265
 * @must_check_datapath_caps: Flag: @datapath_caps needs to be revalidated
 *	after MC reboot
 * @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of
 *	%MC_CMD_GET_CAPABILITIES response)
 * @datapath_caps2: Further Capabilities of datapath firmware (FLAGS2 field of
 *	%MC_CMD_GET_CAPABILITIES response)
 * @rx_dpcpu_fw_id: Firmware ID of the RxDPCPU
 * @tx_dpcpu_fw_id: Firmware ID of the TxDPCPU
 * @must_probe_vswitching: Flag: vswitching has yet to be setup after MC reboot
 * @pf_index: The number for this PF, or the parent PF if this is a VF
#ifdef CONFIG_SFC_SRIOV
 * @vf: Pointer to VF data structure
#endif
 * @vport_mac: The MAC address on the vport, only for PFs; VFs will be zero
 * @vlan_list: List of VLANs added over the interface. Serialised by vlan_lock.
 * @vlan_lock: Lock to serialize access to vlan_list.
 * @udp_tunnels: UDP tunnel port numbers and types.
 * @udp_tunnels_dirty: flag indicating a reboot occurred while pushing
 *	@udp_tunnels to hardware and thus the push must be re-done.
 * @udp_tunnels_lock: Serialises writes to @udp_tunnels and @udp_tunnels_dirty.
 */
struct efx_ef10_nic_data {
	struct efx_buffer mcdi_buf;
	u16 warm_boot_count;
	unsigned int vi_base;
	unsigned int n_allocated_vis;
	unsigned int n_piobufs;
	void __iomem *wc_membase, *pio_write_base;
	unsigned int pio_write_vi_base;
	unsigned int piobuf_handle[EF10_TX_PIOBUF_COUNT];
	u16 piobuf_size;
	bool must_restore_piobufs;
	__le64 *mc_stats;
	u64 stats[EF10_STAT_COUNT];
	bool workaround_35388;
	bool workaround_26807;
	bool workaround_61265;
	bool must_check_datapath_caps;
	u32 datapath_caps;
	u32 datapath_caps2;
	unsigned int rx_dpcpu_fw_id;
	unsigned int tx_dpcpu_fw_id;
	bool must_probe_vswitching;
	unsigned int pf_index;
	u8 port_id[ETH_ALEN];
#ifdef CONFIG_SFC_SRIOV
	unsigned int vf_index;
	struct ef10_vf *vf;
#endif
	u8 vport_mac[ETH_ALEN];
	struct list_head vlan_list;
	struct mutex vlan_lock;
	struct efx_udp_tunnel udp_tunnels[16];
	bool udp_tunnels_dirty;
	struct mutex udp_tunnels_lock;
	u64 licensed_features;
};
/* TSOv2 */
int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
			 bool *data_mapped);

extern const struct efx_nic_type efx_hunt_a0_nic_type;
extern const struct efx_nic_type efx_hunt_a0_vf_nic_type;

int falcon_probe_board(struct efx_nic *efx, u16 revision_info);

/* Falcon/Siena queue operations */
int efx_farch_tx_probe(struct efx_tx_queue *tx_queue);
void efx_farch_tx_init(struct efx_tx_queue *tx_queue);
void efx_farch_tx_fini(struct efx_tx_queue *tx_queue);
void efx_farch_tx_remove(struct efx_tx_queue *tx_queue);
void efx_farch_tx_write(struct efx_tx_queue *tx_queue);
unsigned int efx_farch_tx_limit_len(struct efx_tx_queue *tx_queue,
				    dma_addr_t dma_addr, unsigned int len);
int efx_farch_rx_probe(struct efx_rx_queue *rx_queue);
void efx_farch_rx_init(struct efx_rx_queue *rx_queue);
void efx_farch_rx_fini(struct efx_rx_queue *rx_queue);
void efx_farch_rx_remove(struct efx_rx_queue *rx_queue);
void efx_farch_rx_write(struct efx_rx_queue *rx_queue);
void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue);
int efx_farch_ev_probe(struct efx_channel *channel);
int efx_farch_ev_init(struct efx_channel *channel);
void efx_farch_ev_fini(struct efx_channel *channel);
void efx_farch_ev_remove(struct efx_channel *channel);
int efx_farch_ev_process(struct efx_channel *channel, int quota);
void efx_farch_ev_read_ack(struct efx_channel *channel);
void efx_farch_ev_test_generate(struct efx_channel *channel);

/* Falcon/Siena filter operations */
int efx_farch_filter_table_probe(struct efx_nic *efx);
void efx_farch_filter_table_restore(struct efx_nic *efx);
void efx_farch_filter_table_remove(struct efx_nic *efx);
void efx_farch_filter_update_rx_scatter(struct efx_nic *efx);
s32 efx_farch_filter_insert(struct efx_nic *efx, struct efx_filter_spec *spec,
			    bool replace);
int efx_farch_filter_remove_safe(struct efx_nic *efx,
				 enum efx_filter_priority priority,
				 u32 filter_id);
int efx_farch_filter_get_safe(struct efx_nic *efx,
			      enum efx_filter_priority priority, u32 filter_id,
			      struct efx_filter_spec *);
int efx_farch_filter_clear_rx(struct efx_nic *efx,
			      enum efx_filter_priority priority);
u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
				   enum efx_filter_priority priority);
u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx);
s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
				enum efx_filter_priority priority, u32 *buf,
				u32 size);
#ifdef CONFIG_RFS_ACCEL
bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
				     unsigned int index);
#endif
void efx_farch_filter_sync_rx_mode(struct efx_nic *efx);

/* Falcon/Siena interrupts */
void efx_farch_irq_enable_master(struct efx_nic *efx);
int efx_farch_irq_test_generate(struct efx_nic *efx);
void efx_farch_irq_disable_master(struct efx_nic *efx);
irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id);
irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id);
irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx);

/* Global Resources */
void siena_prepare_flush(struct efx_nic *efx);
int efx_farch_fini_dmaq(struct efx_nic *efx);
void efx_farch_finish_flr(struct efx_nic *efx);
void siena_finish_flush(struct efx_nic *efx);
void falcon_start_nic_stats(struct efx_nic *efx);
void falcon_stop_nic_stats(struct efx_nic *efx);
int falcon_reset_xaui(struct efx_nic *efx);
void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
void efx_farch_init_common(struct efx_nic *efx);
void efx_farch_rx_push_indir_table(struct efx_nic *efx);
void efx_farch_rx_pull_indir_table(struct efx_nic *efx);

/* Tests */
struct efx_farch_register_test {
	unsigned address;
	efx_oword_t mask;
};

int efx_farch_test_registers(struct efx_nic *efx,
			     const struct efx_farch_register_test *regs,
			     size_t n_regs);

void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
			      efx_qword_t *event);

#endif /* EFX_NIC_H */
262
drivers/net/ethernet/sfc/siena/nic_common.h
Normal file
@ -0,0 +1,262 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 * Copyright 2019-2020 Xilinx Inc.
 */

#ifndef EFX_NIC_COMMON_H
#define EFX_NIC_COMMON_H

#include "net_driver.h"
#include "efx_common.h"
#include "mcdi.h"
#include "ptp.h"

enum {
	/* Revisions 0-2 were Falcon A0, A1 and B0 respectively.
	 * They are not supported by this driver but these revision numbers
	 * form part of the ethtool API for register dumping.
	 */
	EFX_REV_SIENA_A0 = 3,
	EFX_REV_HUNT_A0 = 4,
	EFX_REV_EF100 = 5,
};

static inline int efx_nic_rev(struct efx_nic *efx)
{
	return efx->type->revision;
}

/* Read the current event from the event queue */
static inline efx_qword_t *efx_event(struct efx_channel *channel,
				     unsigned int index)
{
	return ((efx_qword_t *) (channel->eventq.buf.addr)) +
		(index & channel->eventq_mask);
}

/* See if an event is present
 *
 * We check both the high and low dword of the event for all ones.  We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords.  This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 */
static inline int efx_event_present(efx_qword_t *event)
{
	return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
		 EFX_DWORD_IS_ALL_ONES(event->dword[1]));
}
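The all-ones convention implies the consumer must re-poison each entry after handling it, so the next pass of efx_event_present() sees the slot as empty. A minimal sketch of that loop, assuming a hypothetical handle_event() and the channel's eventq_read_ptr cursor:

static void demo_drain_eventq(struct efx_channel *channel)
{
	efx_qword_t *p_event;

	while (efx_event_present(p_event = efx_event(channel,
						     channel->eventq_read_ptr))) {
		handle_event(channel, p_event);	/* hypothetical handler */
		/* Re-poison: all-ones marks the slot empty again. */
		memset(p_event, 0xff, sizeof(*p_event));
		++channel->eventq_read_ptr;
	}
}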
/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
 */
static inline efx_qword_t *
efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
	return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index;
}

/* Report whether this TX queue would be empty for the given write_count.
 * May return false negative.
 */
static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue, unsigned int write_count)
{
	unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count);

	if (empty_read_count == 0)
		return false;

	return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
}
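The encoding packs a valid flag into empty_read_count: 0 means "never observed empty", anything else is the write_count at which the completion path last saw the queue empty, with the EFX_EMPTY_COUNT_VALID bit set. A worked example, assuming EFX_EMPTY_COUNT_VALID is the top bit as defined in net_driver.h:

/* Suppose the completion path saw the queue empty at write_count == 42:
 *
 *	empty_read_count = 42 | EFX_EMPTY_COUNT_VALID;
 *
 * Then efx_nic_tx_is_empty(tx_queue, 42) computes
 *
 *	(42 | EFX_EMPTY_COUNT_VALID) ^ 42  == EFX_EMPTY_COUNT_VALID
 *	masked with ~EFX_EMPTY_COUNT_VALID == 0   -> true
 *
 * whereas any other write_count leaves low bits set after the XOR,
 * so the function conservatively reports "not empty".
 */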
int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
			bool *data_mapped);

/* Decide whether to push a TX descriptor to the NIC vs merely writing
 * the doorbell.  This can reduce latency when we are adding a single
 * descriptor to an empty queue, but is otherwise pointless.  Further,
 * Falcon and Siena have hardware bugs (SF bug 33851) that may be
 * triggered if we don't check this.
 * We use the write_count used for the last doorbell push, to get the
 * NIC's view of the tx queue.
 */
static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue,
					    unsigned int write_count)
{
	bool was_empty = efx_nic_tx_is_empty(tx_queue, write_count);

	tx_queue->empty_read_count = 0;
	return was_empty && tx_queue->write_count - write_count == 1;
}
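In the datapath this is called from the queue write routine with the pre-doorbell write_count. A condensed sketch of that call site (the shape follows the farch TX write path; details are elided, not quoted):

static void demo_tx_write(struct efx_tx_queue *tx_queue)
{
	unsigned int old_write_count = tx_queue->write_count;

	/* ... fill hardware descriptors, advancing tx_queue->write_count ... */

	if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
		/* exactly one new descriptor on an empty queue: push it
		 * inline with the doorbell write for lower latency
		 */
	} else {
		/* otherwise just ring the doorbell with the new write_count */
	}
}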
/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *
efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
	return ((efx_qword_t *) (rx_queue->rxd.buf.addr)) + index;
}

/* Alignment of PCIe DMA boundaries (4KB) */
#define EFX_PAGE_SIZE	4096
/* Size and alignment of buffer table entries (same) */
#define EFX_BUF_SIZE	EFX_PAGE_SIZE

/* NIC-generic software stats */
enum {
	GENERIC_STAT_rx_noskb_drops,
	GENERIC_STAT_rx_nodesc_trunc,
	GENERIC_STAT_COUNT
};

#define EFX_GENERIC_SW_STAT(ext_name)				\
	[GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }

/* TX data path */
static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
{
	return tx_queue->efx->type->tx_probe(tx_queue);
}
static inline void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
{
	tx_queue->efx->type->tx_init(tx_queue);
}
static inline void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
{
	if (tx_queue->efx->type->tx_remove)
		tx_queue->efx->type->tx_remove(tx_queue);
}
static inline void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
{
	tx_queue->efx->type->tx_write(tx_queue);
}

/* RX data path */
static inline int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
{
	return rx_queue->efx->type->rx_probe(rx_queue);
}
static inline void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
{
	rx_queue->efx->type->rx_init(rx_queue);
}
static inline void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
{
	rx_queue->efx->type->rx_remove(rx_queue);
}
static inline void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
	rx_queue->efx->type->rx_write(rx_queue);
}
static inline void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
{
	rx_queue->efx->type->rx_defer_refill(rx_queue);
}

/* Event data path */
static inline int efx_nic_probe_eventq(struct efx_channel *channel)
{
	return channel->efx->type->ev_probe(channel);
}
static inline int efx_nic_init_eventq(struct efx_channel *channel)
{
	return channel->efx->type->ev_init(channel);
}
static inline void efx_nic_fini_eventq(struct efx_channel *channel)
{
	channel->efx->type->ev_fini(channel);
}
static inline void efx_nic_remove_eventq(struct efx_channel *channel)
{
	channel->efx->type->ev_remove(channel);
}
static inline int
efx_nic_process_eventq(struct efx_channel *channel, int quota)
{
	return channel->efx->type->ev_process(channel, quota);
}
static inline void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
	channel->efx->type->ev_read_ack(channel);
}

void efx_nic_event_test_start(struct efx_channel *channel);

bool efx_nic_event_present(struct efx_channel *channel);

static inline void efx_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
{
	if (efx->type->sensor_event)
		efx->type->sensor_event(efx, ev);
}

static inline unsigned int efx_rx_recycle_ring_size(const struct efx_nic *efx)
{
	return efx->type->rx_recycle_ring_size(efx);
}
/* Some statistics are computed as A - B where A and B each increase
 * linearly with some hardware counter(s) and the counters are read
 * asynchronously.  If the counters contributing to B are always read
 * after those contributing to A, the computed value may be lower than
 * the true value by some variable amount, and may decrease between
 * subsequent computations.
 *
 * We should never allow statistics to decrease or to exceed the true
 * value.  Since the computed value will never be greater than the
 * true value, we can achieve this by only storing the computed value
 * when it increases.
 */
static inline void efx_update_diff_stat(u64 *stat, u64 diff)
{
	if ((s64)(diff - *stat) > 0)
		*stat = diff;
}
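A quick numeric check of why the signed comparison suffices (values invented):

/* stats[i] == 1000; counters for B read after those for A:
 *	efx_update_diff_stat(&stats[i], 990);   (s64)(990 - 1000) = -10 -> keep 1000
 *	efx_update_diff_stat(&stats[i], 1012);  (s64)(1012 - 1000) = 12 -> store 1012
 * The signed cast also behaves across u64 wraparound, because the delta
 * between two nearby counter values is still a small signed quantity.
 */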
/* Interrupts */
int efx_nic_init_interrupt(struct efx_nic *efx);
int efx_nic_irq_test_start(struct efx_nic *efx);
void efx_nic_fini_interrupt(struct efx_nic *efx);

static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
{
	return READ_ONCE(channel->event_test_cpu);
}
static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
{
	return READ_ONCE(efx->last_irq_cpu);
}

/* Global Resources */
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
			 unsigned int len, gfp_t gfp_flags);
void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer);

size_t efx_nic_get_regs_len(struct efx_nic *efx);
void efx_nic_get_regs(struct efx_nic *efx, void *buf);

#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))

size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
			      const unsigned long *mask, u8 *names);
int efx_nic_copy_stats(struct efx_nic *efx, __le64 *dest);
void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
			  const unsigned long *mask, u64 *stats,
			  const void *dma_buf, bool accumulate);
void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *stat);
static inline size_t efx_nic_update_stats_atomic(struct efx_nic *efx, u64 *full_stats,
						 struct rtnl_link_stats64 *core_stats)
{
	if (efx->type->update_stats_atomic)
		return efx->type->update_stats_atomic(efx, full_stats, core_stats);
	return efx->type->update_stats(efx, full_stats, core_stats);
}

#define EFX_MAX_FLUSH_TIME 5000

#endif /* EFX_NIC_COMMON_H */
2210
drivers/net/ethernet/sfc/siena/ptp.c
Normal file
File diff suppressed because it is too large
45
drivers/net/ethernet/sfc/siena/ptp.h
Normal file
@ -0,0 +1,45 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 * Copyright 2019-2020 Xilinx Inc.
 */

#ifndef EFX_PTP_H
#define EFX_PTP_H

#include <linux/net_tstamp.h>
#include "net_driver.h"

struct ethtool_ts_info;
int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel);
void efx_ptp_defer_probe_with_channel(struct efx_nic *efx);
struct efx_channel *efx_ptp_channel(struct efx_nic *efx);
void efx_ptp_remove(struct efx_nic *efx);
int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr);
int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr);
void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info);
bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
int efx_ptp_get_mode(struct efx_nic *efx);
int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
			unsigned int new_mode);
int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings);
size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats);
void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev);
void __efx_rx_skb_attach_timestamp(struct efx_channel *channel,
				   struct sk_buff *skb);
static inline void efx_rx_skb_attach_timestamp(struct efx_channel *channel,
					       struct sk_buff *skb)
{
	if (channel->sync_events_state == SYNC_EVENTS_VALID)
		__efx_rx_skb_attach_timestamp(channel, skb);
}
void efx_ptp_start_datapath(struct efx_nic *efx);
void efx_ptp_stop_datapath(struct efx_nic *efx);
bool efx_ptp_use_mac_tx_timestamps(struct efx_nic *efx);
ktime_t efx_ptp_nic_to_kernel_time(struct efx_tx_queue *tx_queue);

#endif /* EFX_PTP_H */
399
drivers/net/ethernet/sfc/siena/rx.c
Normal file
@ -0,0 +1,399 @@
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/checksum.h>
#include <net/xdp.h>
#include <linux/bpf_trace.h>
#include "net_driver.h"
#include "efx.h"
#include "rx_common.h"
#include "filter.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* Preferred number of descriptors to fill at once */
#define EFX_RX_PREFERRED_BATCH 8U

/* Maximum rx prefix used by any architecture. */
#define EFX_MAX_RX_PREFIX_SIZE 16

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS  128u

/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
				      EFX_RX_USR_BUF_SIZE)

static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
				     struct efx_rx_buffer *rx_buf,
				     int len)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

	if (likely(len <= max_len))
		return;

	/* The packet must be discarded, but this is only a fatal error
	 * if the caller indicated it was
	 */
	rx_buf->flags |= EFX_RX_PKT_DISCARD;

	if (net_ratelimit())
		netif_err(efx, rx_err, efx->net_dev,
			  "RX queue %d overlength RX event (%#x > %#x)\n",
			  efx_rx_queue_index(rx_queue), len, max_len);

	efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}

/* Allocate and construct an SKB around page fragments */
static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
				     struct efx_rx_buffer *rx_buf,
				     unsigned int n_frags,
				     u8 *eh, int hdr_len)
{
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	/* Allocate an SKB to store the headers */
	skb = netdev_alloc_skb(efx->net_dev,
			       efx->rx_ip_align + efx->rx_prefix_size +
			       hdr_len);
	if (unlikely(skb == NULL)) {
		atomic_inc(&efx->n_rx_noskb_drops);
		return NULL;
	}

	EFX_WARN_ON_ONCE_PARANOID(rx_buf->len < hdr_len);

	memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
	       efx->rx_prefix_size + hdr_len);
	skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
	__skb_put(skb, hdr_len);

	/* Append the remaining page(s) onto the frag list */
	if (rx_buf->len > hdr_len) {
		rx_buf->page_offset += hdr_len;
		rx_buf->len -= hdr_len;

		for (;;) {
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
					rx_buf->page, rx_buf->page_offset,
					rx_buf->len, efx->rx_buffer_truesize);
			rx_buf->page = NULL;

			if (skb_shinfo(skb)->nr_frags == n_frags)
				break;

			rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
		}
	} else {
		__free_pages(rx_buf->page, efx->rx_buffer_order);
		rx_buf->page = NULL;
		n_frags = 0;
	}

	/* Move past the ethernet header */
	skb->protocol = eth_type_trans(skb, efx->net_dev);

	skb_mark_napi_id(skb, &channel->napi_str);

	return skb;
}

void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
		   unsigned int n_frags, unsigned int len, u16 flags)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_rx_buffer *rx_buf;

	rx_queue->rx_packets++;

	rx_buf = efx_rx_buffer(rx_queue, index);
	rx_buf->flags |= flags;

	/* Validate the number of fragments and completed length */
	if (n_frags == 1) {
		if (!(flags & EFX_RX_PKT_PREFIX_LEN))
			efx_rx_packet__check_len(rx_queue, rx_buf, len);
	} else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
		   unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
		   unlikely(len > n_frags * efx->rx_dma_len) ||
		   unlikely(!efx->rx_scatter)) {
		/* If this isn't an explicit discard request, either
		 * the hardware or the driver is broken.
		 */
		WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
		rx_buf->flags |= EFX_RX_PKT_DISCARD;
	}

	netif_vdbg(efx, rx_status, efx->net_dev,
		   "RX queue %d received ids %x-%x len %d %s%s\n",
		   efx_rx_queue_index(rx_queue), index,
		   (index + n_frags - 1) & rx_queue->ptr_mask, len,
		   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
		   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");

	/* Discard packet, if instructed to do so.  Process the
	 * previous receive first.
	 */
	if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
		efx_rx_flush_packet(channel);
		efx_discard_rx_packet(channel, rx_buf, n_frags);
		return;
	}

	if (n_frags == 1 && !(flags & EFX_RX_PKT_PREFIX_LEN))
		rx_buf->len = len;

	/* Release and/or sync the DMA mapping - assumes all RX buffers
	 * consumed in-order per RX queue.
	 */
	efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);

	/* Prefetch nice and early so data will (hopefully) be in cache by
	 * the time we look at it.
	 */
	prefetch(efx_rx_buf_va(rx_buf));

	rx_buf->page_offset += efx->rx_prefix_size;
	rx_buf->len -= efx->rx_prefix_size;

	if (n_frags > 1) {
		/* Release/sync DMA mapping for additional fragments.
		 * Fix length for last fragment.
		 */
		unsigned int tail_frags = n_frags - 1;

		for (;;) {
			rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
			if (--tail_frags == 0)
				break;
			efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
		}
		rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
		efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
	}

	/* All fragments have been DMA-synced, so recycle pages. */
	rx_buf = efx_rx_buffer(rx_queue, index);
	efx_recycle_rx_pages(channel, rx_buf, n_frags);

	/* Pipeline receives so that we give time for packet headers to be
	 * prefetched into cache.
	 */
	efx_rx_flush_packet(channel);
	channel->rx_pkt_n_frags = n_frags;
	channel->rx_pkt_index = index;
}

static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
			   struct efx_rx_buffer *rx_buf,
			   unsigned int n_frags)
{
	struct sk_buff *skb;
	u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);

	skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
	if (unlikely(skb == NULL)) {
		struct efx_rx_queue *rx_queue;

		rx_queue = efx_channel_get_rx_queue(channel);
		efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
		return;
	}
	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	/* Set the SKB flags */
	skb_checksum_none_assert(skb);
	if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);
	}

	efx_rx_skb_attach_timestamp(channel, skb);

	if (channel->type->receive_skb)
		if (channel->type->receive_skb(channel, skb))
			return;

	/* Pass the packet up */
	if (channel->rx_list != NULL)
		/* Add to list, will pass up later */
		list_add_tail(&skb->list, channel->rx_list);
	else
		/* No list, so pass it up now */
		netif_receive_skb(skb);
}

/** efx_do_xdp: perform XDP processing on a received packet
 *
 * Returns true if packet should still be delivered.
 */
static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
		       struct efx_rx_buffer *rx_buf, u8 **ehp)
{
	u8 rx_prefix[EFX_MAX_RX_PREFIX_SIZE];
	struct efx_rx_queue *rx_queue;
	struct bpf_prog *xdp_prog;
	struct xdp_frame *xdpf;
	struct xdp_buff xdp;
	u32 xdp_act;
	s16 offset;
	int err;

	xdp_prog = rcu_dereference_bh(efx->xdp_prog);
	if (!xdp_prog)
		return true;

	rx_queue = efx_channel_get_rx_queue(channel);

	if (unlikely(channel->rx_pkt_n_frags > 1)) {
		/* We can't do XDP on fragmented packets - drop. */
		efx_free_rx_buffers(rx_queue, rx_buf,
				    channel->rx_pkt_n_frags);
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  "XDP is not possible with multiple receive fragments (%d)\n",
				  channel->rx_pkt_n_frags);
		channel->n_rx_xdp_bad_drops++;
		return false;
	}

	dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr,
				rx_buf->len, DMA_FROM_DEVICE);

	/* Save the rx prefix. */
	EFX_WARN_ON_PARANOID(efx->rx_prefix_size > EFX_MAX_RX_PREFIX_SIZE);
	memcpy(rx_prefix, *ehp - efx->rx_prefix_size,
	       efx->rx_prefix_size);

	xdp_init_buff(&xdp, efx->rx_page_buf_step, &rx_queue->xdp_rxq_info);
	/* No support yet for XDP metadata */
	xdp_prepare_buff(&xdp, *ehp - EFX_XDP_HEADROOM, EFX_XDP_HEADROOM,
			 rx_buf->len, false);

	xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);

	offset = (u8 *)xdp.data - *ehp;

	switch (xdp_act) {
	case XDP_PASS:
		/* Fix up rx prefix. */
		if (offset) {
			*ehp += offset;
			rx_buf->page_offset += offset;
			rx_buf->len -= offset;
			memcpy(*ehp - efx->rx_prefix_size, rx_prefix,
			       efx->rx_prefix_size);
		}
		break;

	case XDP_TX:
		/* Buffer ownership passes to tx on success. */
		xdpf = xdp_convert_buff_to_frame(&xdp);
		err = efx_xdp_tx_buffers(efx, 1, &xdpf, true);
		if (unlikely(err != 1)) {
			efx_free_rx_buffers(rx_queue, rx_buf, 1);
			if (net_ratelimit())
				netif_err(efx, rx_err, efx->net_dev,
					  "XDP TX failed (%d)\n", err);
			channel->n_rx_xdp_bad_drops++;
			trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
		} else {
			channel->n_rx_xdp_tx++;
		}
		break;

	case XDP_REDIRECT:
		err = xdp_do_redirect(efx->net_dev, &xdp, xdp_prog);
		if (unlikely(err)) {
			efx_free_rx_buffers(rx_queue, rx_buf, 1);
			if (net_ratelimit())
				netif_err(efx, rx_err, efx->net_dev,
					  "XDP redirect failed (%d)\n", err);
			channel->n_rx_xdp_bad_drops++;
			trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
		} else {
			channel->n_rx_xdp_redirect++;
		}
		break;

	default:
		bpf_warn_invalid_xdp_action(efx->net_dev, xdp_prog, xdp_act);
		efx_free_rx_buffers(rx_queue, rx_buf, 1);
		channel->n_rx_xdp_bad_drops++;
		trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
		break;

	case XDP_ABORTED:
		trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
		fallthrough;
	case XDP_DROP:
		efx_free_rx_buffers(rx_queue, rx_buf, 1);
		channel->n_rx_xdp_drops++;
		break;
	}

	return xdp_act == XDP_PASS;
}

/* Handle a received packet.  Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_buffer *rx_buf =
		efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
	u8 *eh = efx_rx_buf_va(rx_buf);

	/* Read length from the prefix if necessary.  This already
	 * excludes the length of the prefix itself.
	 */
	if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN)
		rx_buf->len = le16_to_cpup((__le16 *)
					   (eh + efx->rx_packet_len_offset));

	/* If we're in loopback test, then pass the packet directly to the
	 * loopback layer, and free the rx_buf here
	 */
	if (unlikely(efx->loopback_selftest)) {
		struct efx_rx_queue *rx_queue;

		efx_loopback_rx_packet(efx, eh, rx_buf->len);
		rx_queue = efx_channel_get_rx_queue(channel);
		efx_free_rx_buffers(rx_queue, rx_buf,
				    channel->rx_pkt_n_frags);
		goto out;
	}

	if (!efx_do_xdp(efx, channel, rx_buf, &eh))
		goto out;

	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
		rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;

	if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb)
		efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh, 0);
	else
		efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
out:
	channel->rx_pkt_n_frags = 0;
}
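Note the two-stage pipeline here: efx_rx_packet() (stage one) only parks the packet in channel->rx_pkt_n_frags / rx_pkt_index, and __efx_rx_packet() (stage two) touches the payload one event later, by which time the prefetch has usually landed. The event loop must therefore flush the final parked packet when it runs out of events; a sketch of that caller, assuming efx_rx_flush_packet() is the inline wrapper from rx_common.h that calls __efx_rx_packet() if a packet is pending:

/* At the end of a NAPI poll / event batch (shape only, not driver code): */
static void demo_ev_batch_done(struct efx_channel *channel)
{
	/* Deliver the final pipelined packet, if one is still parked. */
	efx_rx_flush_packet(channel);
}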
1086
drivers/net/ethernet/sfc/siena/rx_common.c
Normal file
File diff suppressed because it is too large
116
drivers/net/ethernet/sfc/siena/rx_common.h
Normal file
@ -0,0 +1,116 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#ifndef EFX_RX_COMMON_H
#define EFX_RX_COMMON_H

/* Preferred number of descriptors to fill at once */
#define EFX_RX_PREFERRED_BATCH 8U

/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
				      EFX_RX_USR_BUF_SIZE)

/* Number of RX buffers to recycle pages for.  When creating the RX page recycle
 * ring, this number is divided by the number of buffers per page to calculate
 * the number of pages to store in the RX page recycle ring.
 */
#define EFX_RECYCLE_RING_SIZE_10G	256

static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
{
	return page_address(buf->page) + buf->page_offset;
}

static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
#else
	const u8 *data = eh + efx->rx_packet_hash_offset;

	return (u32)data[0]	  |
	       (u32)data[1] << 8  |
	       (u32)data[2] << 16 |
	       (u32)data[3] << 24;
#endif
}

void efx_rx_slow_fill(struct timer_list *t);

void efx_recycle_rx_pages(struct efx_channel *channel,
			  struct efx_rx_buffer *rx_buf,
			  unsigned int n_frags);
void efx_discard_rx_packet(struct efx_channel *channel,
			   struct efx_rx_buffer *rx_buf,
			   unsigned int n_frags);

int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
void efx_destroy_rx_queue(struct efx_rx_queue *rx_queue);

void efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
			struct page *page,
			unsigned int page_offset,
			u16 flags);
void efx_unmap_rx_buffer(struct efx_nic *efx, struct efx_rx_buffer *rx_buf);

static inline void efx_sync_rx_buffer(struct efx_nic *efx,
				      struct efx_rx_buffer *rx_buf,
				      unsigned int len)
{
	dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
				DMA_FROM_DEVICE);
}

void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
			 struct efx_rx_buffer *rx_buf,
			 unsigned int num_bufs);

void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
void efx_rx_config_page_split(struct efx_nic *efx);
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic);

void
efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
		  unsigned int n_frags, u8 *eh, __wsum csum);

struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx);
struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id);
void efx_free_rss_context_entry(struct efx_rss_context *ctx);
void efx_set_default_rx_indir_table(struct efx_nic *efx,
				    struct efx_rss_context *ctx);

bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
bool efx_filter_spec_equal(const struct efx_filter_spec *left,
			   const struct efx_filter_spec *right);
u32 efx_filter_spec_hash(const struct efx_filter_spec *spec);

#ifdef CONFIG_RFS_ACCEL
bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
			bool *force);
struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
					const struct efx_filter_spec *spec);
struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
				       const struct efx_filter_spec *spec,
				       bool *new);
void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec);

int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id);
bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota);
#endif

int efx_probe_filters(struct efx_nic *efx);
void efx_remove_filters(struct efx_nic *efx);

#endif
807
drivers/net/ethernet/sfc/siena/selftest.c
Normal file
@@ -0,0 +1,807 @@
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2012 Solarflare Communications Inc.
 */

#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/kernel_stat.h>
#include <linux/pci.h>
#include <linux/ethtool.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/udp.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include "net_driver.h"
#include "efx.h"
#include "efx_common.h"
#include "efx_channels.h"
#include "nic.h"
#include "mcdi_port_common.h"
#include "selftest.h"
#include "workarounds.h"

/* IRQ latency can be enormous because:
 * - All IRQs may be disabled on a CPU for a *long* time by e.g. a
 *   slow serial console or an old IDE driver doing error recovery
 * - The PREEMPT_RT patches mostly deal with this, but also allow a
 *   tasklet or normal task to be given higher priority than our IRQ
 *   threads
 * Try to avoid blaming the hardware for this.
 */
#define IRQ_TIMEOUT HZ
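/* i.e. allow up to one second's worth of jiffies for the test interrupt to
 * arrive before declaring failure.
 */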

/*
 * Loopback test packet structure
 *
 * The self-test should stress every RSS vector, and unfortunately
 * Falcon only performs RSS on TCP/UDP packets.
 */
struct efx_loopback_payload {
	struct ethhdr header;
	struct iphdr ip;
	struct udphdr udp;
	__be16 iteration;
	char msg[64];
} __packed;

/* Loopback test source MAC address */
static const u8 payload_source[ETH_ALEN] __aligned(2) = {
	0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b,
};

static const char payload_msg[] =
	"Hello world! This is an Efx loopback test in progress!";

/* Interrupt mode names */
static const unsigned int efx_interrupt_mode_max = EFX_INT_MODE_MAX;
static const char *const efx_interrupt_mode_names[] = {
	[EFX_INT_MODE_MSIX]   = "MSI-X",
	[EFX_INT_MODE_MSI]    = "MSI",
	[EFX_INT_MODE_LEGACY] = "legacy",
};
#define INT_MODE(efx) \
	STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_interrupt_mode)

/**
 * struct efx_loopback_state - persistent state during a loopback selftest
 * @flush: Drop all packets in efx_loopback_rx_packet
 * @packet_count: Number of packets being used in this test
 * @skbs: An array of skbs transmitted
 * @offload_csum: Checksums are being offloaded
 * @rx_good: RX good packet count
 * @rx_bad: RX bad packet count
 * @payload: Payload used in tests
 */
struct efx_loopback_state {
	bool flush;
	int packet_count;
	struct sk_buff **skbs;
	bool offload_csum;
	atomic_t rx_good;
	atomic_t rx_bad;
	struct efx_loopback_payload payload;
};

/* How long to wait for all the packets to arrive (in ms) */
#define LOOPBACK_TIMEOUT_MS 1000

/**************************************************************************
 *
 * MII, NVRAM and register tests
 *
 **************************************************************************/

static int efx_test_phy_alive(struct efx_nic *efx, struct efx_self_tests *tests)
{
	int rc = 0;

	rc = efx_mcdi_phy_test_alive(efx);
	tests->phy_alive = rc ? -1 : 1;

	return rc;
}

static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests)
{
	int rc = 0;

	if (efx->type->test_nvram) {
		rc = efx->type->test_nvram(efx);
		if (rc == -EPERM)
			rc = 0;
		else
			tests->nvram = rc ? -1 : 1;
	}

	return rc;
}

/**************************************************************************
 *
 * Interrupt and event queue testing
 *
 **************************************************************************/

/* Test generation and receipt of interrupts */
static int efx_test_interrupts(struct efx_nic *efx,
			       struct efx_self_tests *tests)
{
	unsigned long timeout, wait;
	int cpu;
	int rc;

	netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
	tests->interrupt = -1;

	rc = efx_nic_irq_test_start(efx);
	if (rc == -ENOTSUPP) {
		netif_dbg(efx, drv, efx->net_dev,
			  "direct interrupt testing not supported\n");
		tests->interrupt = 0;
		return 0;
	}

	timeout = jiffies + IRQ_TIMEOUT;
	wait = 1;

	/* Wait for arrival of test interrupt. */
	netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n");
	do {
		schedule_timeout_uninterruptible(wait);
		cpu = efx_nic_irq_test_irq_cpu(efx);
		if (cpu >= 0)
			goto success;
		wait *= 2;
	} while (time_before(jiffies, timeout));

	netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n");
	return -ETIMEDOUT;

 success:
	netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n",
		  INT_MODE(efx), cpu);
	tests->interrupt = 1;
	return 0;
}

/* Test generation and receipt of interrupting events */
static int efx_test_eventq_irq(struct efx_nic *efx,
			       struct efx_self_tests *tests)
{
	struct efx_channel *channel;
	unsigned int read_ptr[EFX_MAX_CHANNELS];
	unsigned long napi_ran = 0, dma_pend = 0, int_pend = 0;
	unsigned long timeout, wait;

	BUILD_BUG_ON(EFX_MAX_CHANNELS > BITS_PER_LONG);
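
	/* napi_ran, dma_pend and int_pend are used as per-channel bitmasks
	 * below, so every channel must fit into a single unsigned long.
	 */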
	efx_for_each_channel(channel, efx) {
		read_ptr[channel->channel] = channel->eventq_read_ptr;
		set_bit(channel->channel, &dma_pend);
		set_bit(channel->channel, &int_pend);
		efx_nic_event_test_start(channel);
	}

	timeout = jiffies + IRQ_TIMEOUT;
	wait = 1;

	/* Wait for arrival of interrupts.  NAPI processing may or may
	 * not complete in time, but we can cope in any case.
	 */
	do {
		schedule_timeout_uninterruptible(wait);

		efx_for_each_channel(channel, efx) {
			efx_stop_eventq(channel);
			if (channel->eventq_read_ptr !=
			    read_ptr[channel->channel]) {
				set_bit(channel->channel, &napi_ran);
				clear_bit(channel->channel, &dma_pend);
				clear_bit(channel->channel, &int_pend);
			} else {
				if (efx_nic_event_present(channel))
					clear_bit(channel->channel, &dma_pend);
				if (efx_nic_event_test_irq_cpu(channel) >= 0)
					clear_bit(channel->channel, &int_pend);
			}
			efx_start_eventq(channel);
		}

		wait *= 2;
	} while ((dma_pend || int_pend) && time_before(jiffies, timeout));

	efx_for_each_channel(channel, efx) {
		bool dma_seen = !test_bit(channel->channel, &dma_pend);
		bool int_seen = !test_bit(channel->channel, &int_pend);

		tests->eventq_dma[channel->channel] = dma_seen ? 1 : -1;
		tests->eventq_int[channel->channel] = int_seen ? 1 : -1;

		if (dma_seen && int_seen) {
			netif_dbg(efx, drv, efx->net_dev,
				  "channel %d event queue passed (with%s NAPI)\n",
				  channel->channel,
				  test_bit(channel->channel, &napi_ran) ?
				  "" : "out");
		} else {
			/* Report failure and whether either interrupt or DMA
			 * worked
			 */
			netif_err(efx, drv, efx->net_dev,
				  "channel %d timed out waiting for event queue\n",
				  channel->channel);
			if (int_seen)
				netif_err(efx, drv, efx->net_dev,
					  "channel %d saw interrupt "
					  "during event queue test\n",
					  channel->channel);
			if (dma_seen)
				netif_err(efx, drv, efx->net_dev,
					  "channel %d event was generated, but "
					  "failed to trigger an interrupt\n",
					  channel->channel);
		}
	}

	return (dma_pend || int_pend) ? -ETIMEDOUT : 0;
}

static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests,
			unsigned flags)
{
	int rc;

	mutex_lock(&efx->mac_lock);
	rc = efx_mcdi_phy_run_tests(efx, tests->phy_ext, flags);
	mutex_unlock(&efx->mac_lock);
	if (rc == -EPERM)
		rc = 0;
	else
		netif_info(efx, drv, efx->net_dev,
			   "%s phy selftest\n", rc ? "Failed" : "Passed");

	return rc;
}

/**************************************************************************
 *
 * Loopback testing
 * NB Only one loopback test can be executing concurrently.
 *
 **************************************************************************/

/* Loopback test RX callback
 * This is called for each received packet during loopback testing.
 */
void efx_loopback_rx_packet(struct efx_nic *efx,
			    const char *buf_ptr, int pkt_len)
{
	struct efx_loopback_state *state = efx->loopback_selftest;
	struct efx_loopback_payload *received;
	struct efx_loopback_payload *payload;

	BUG_ON(!buf_ptr);

	/* If we are just flushing, then drop the packet */
	if ((state == NULL) || state->flush)
		return;

	payload = &state->payload;

	received = (struct efx_loopback_payload *) buf_ptr;
	received->ip.saddr = payload->ip.saddr;
	if (state->offload_csum)
		received->ip.check = payload->ip.check;

	/* Check that header exists */
	if (pkt_len < sizeof(received->header)) {
		netif_err(efx, drv, efx->net_dev,
			  "saw runt RX packet (length %d) in %s loopback "
			  "test\n", pkt_len, LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that the ethernet header exists */
	if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) {
		netif_err(efx, drv, efx->net_dev,
			  "saw non-loopback RX packet in %s loopback test\n",
			  LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check packet length */
	if (pkt_len != sizeof(*payload)) {
		netif_err(efx, drv, efx->net_dev,
			  "saw incorrect RX packet length %d (wanted %d) in "
			  "%s loopback test\n", pkt_len, (int)sizeof(*payload),
			  LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that IP header matches */
	if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) {
		netif_err(efx, drv, efx->net_dev,
			  "saw corrupted IP header in %s loopback test\n",
			  LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that msg and padding matches */
	if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) {
		netif_err(efx, drv, efx->net_dev,
			  "saw corrupted RX packet in %s loopback test\n",
			  LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that iteration matches */
	if (received->iteration != payload->iteration) {
		netif_err(efx, drv, efx->net_dev,
			  "saw RX packet from iteration %d (wanted %d) in "
			  "%s loopback test\n", ntohs(received->iteration),
			  ntohs(payload->iteration), LOOPBACK_MODE(efx));
		goto err;
	}

	/* Increase correct RX count */
	netif_vdbg(efx, drv, efx->net_dev,
		   "got loopback RX in %s loopback test\n", LOOPBACK_MODE(efx));

	atomic_inc(&state->rx_good);
	return;

 err:
#ifdef DEBUG
	if (atomic_read(&state->rx_bad) == 0) {
		netif_err(efx, drv, efx->net_dev, "received packet:\n");
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
			       buf_ptr, pkt_len, 0);
		netif_err(efx, drv, efx->net_dev, "expected packet:\n");
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
			       &state->payload, sizeof(state->payload), 0);
	}
#endif
	atomic_inc(&state->rx_bad);
}

/* Initialise an efx_selftest_state for a new iteration */
static void efx_iterate_state(struct efx_nic *efx)
{
	struct efx_loopback_state *state = efx->loopback_selftest;
	struct net_device *net_dev = efx->net_dev;
	struct efx_loopback_payload *payload = &state->payload;

	/* Initialise the layer II header */
	ether_addr_copy((u8 *)&payload->header.h_dest, net_dev->dev_addr);
	ether_addr_copy((u8 *)&payload->header.h_source, payload_source);
	payload->header.h_proto = htons(ETH_P_IP);

	/* saddr set later and used as incrementing count */
	payload->ip.daddr = htonl(INADDR_LOOPBACK);
	payload->ip.ihl = 5;
	payload->ip.check = (__force __sum16) htons(0xdead);
	payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr));
	payload->ip.version = IPVERSION;
	payload->ip.protocol = IPPROTO_UDP;

	/* Initialise udp header */
	payload->udp.source = 0;
	payload->udp.len = htons(sizeof(*payload) - sizeof(struct ethhdr) -
				 sizeof(struct iphdr));
	payload->udp.check = 0;	/* checksum ignored */

	/* Fill out payload */
	payload->iteration = htons(ntohs(payload->iteration) + 1);
	memcpy(&payload->msg, payload_msg, sizeof(payload_msg));

	/* Fill out remaining state members */
	atomic_set(&state->rx_good, 0);
	atomic_set(&state->rx_bad, 0);
	smp_wmb();
}

static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	struct efx_loopback_state *state = efx->loopback_selftest;
	struct efx_loopback_payload *payload;
	struct sk_buff *skb;
	int i;
	netdev_tx_t rc;

	/* Transmit N copies of buffer */
	for (i = 0; i < state->packet_count; i++) {
		/* Allocate an skb, holding an extra reference for
		 * transmit completion counting */
		skb = alloc_skb(sizeof(state->payload), GFP_KERNEL);
		if (!skb)
			return -ENOMEM;
		state->skbs[i] = skb;
		skb_get(skb);

		/* Copy the payload in, incrementing the source address to
		 * exercise the rss vectors */
		payload = skb_put(skb, sizeof(state->payload));
		memcpy(payload, &state->payload, sizeof(state->payload));
		payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2));

		/* Ensure everything we've written is visible to the
		 * interrupt handler. */
		smp_wmb();

		netif_tx_lock_bh(efx->net_dev);
		rc = efx_enqueue_skb(tx_queue, skb);
		netif_tx_unlock_bh(efx->net_dev);

		if (rc != NETDEV_TX_OK) {
			netif_err(efx, drv, efx->net_dev,
				  "TX queue %d could not transmit packet %d of "
				  "%d in %s loopback test\n", tx_queue->label,
				  i + 1, state->packet_count,
				  LOOPBACK_MODE(efx));

			/* Defer cleaning up the other skbs for the caller */
			kfree_skb(skb);
			return -EPIPE;
		}
	}

	return 0;
}

static int efx_poll_loopback(struct efx_nic *efx)
{
	struct efx_loopback_state *state = efx->loopback_selftest;

	return atomic_read(&state->rx_good) == state->packet_count;
}

static int efx_end_loopback(struct efx_tx_queue *tx_queue,
			    struct efx_loopback_self_tests *lb_tests)
{
	struct efx_nic *efx = tx_queue->efx;
	struct efx_loopback_state *state = efx->loopback_selftest;
	struct sk_buff *skb;
	int tx_done = 0, rx_good, rx_bad;
	int i, rc = 0;

	netif_tx_lock_bh(efx->net_dev);

	/* Count the number of tx completions, and decrement the refcnt. Any
	 * skbs not already completed will be free'd when the queue is flushed */
	for (i = 0; i < state->packet_count; i++) {
		skb = state->skbs[i];
		if (skb && !skb_shared(skb))
			++tx_done;
		dev_kfree_skb(skb);
	}

	netif_tx_unlock_bh(efx->net_dev);

	/* Check TX completion and received packet counts */
	rx_good = atomic_read(&state->rx_good);
	rx_bad = atomic_read(&state->rx_bad);
	if (tx_done != state->packet_count) {
		/* Don't free the skbs; they will be picked up on TX
		 * overflow or channel teardown.
		 */
		netif_err(efx, drv, efx->net_dev,
			  "TX queue %d saw only %d out of an expected %d "
			  "TX completion events in %s loopback test\n",
			  tx_queue->label, tx_done, state->packet_count,
			  LOOPBACK_MODE(efx));
		rc = -ETIMEDOUT;
		/* Allow to fall through so we see the RX errors as well */
	}

	/* We may always be up to a flush away from our desired packet total */
	if (rx_good != state->packet_count) {
		netif_dbg(efx, drv, efx->net_dev,
			  "TX queue %d saw only %d out of an expected %d "
			  "received packets in %s loopback test\n",
			  tx_queue->label, rx_good, state->packet_count,
			  LOOPBACK_MODE(efx));
		rc = -ETIMEDOUT;
		/* Fall through */
	}

	/* Update loopback test structure */
	lb_tests->tx_sent[tx_queue->label] += state->packet_count;
	lb_tests->tx_done[tx_queue->label] += tx_done;
	lb_tests->rx_good += rx_good;
	lb_tests->rx_bad += rx_bad;

	return rc;
}

static int
efx_test_loopback(struct efx_tx_queue *tx_queue,
		  struct efx_loopback_self_tests *lb_tests)
{
	struct efx_nic *efx = tx_queue->efx;
	struct efx_loopback_state *state = efx->loopback_selftest;
	int i, begin_rc, end_rc;

	for (i = 0; i < 3; i++) {
		/* Determine how many packets to send */
		state->packet_count = efx->txq_entries / 3;
		state->packet_count = min(1 << (i << 2), state->packet_count);
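		/* i.e. burst lengths of 1, 16 and then up to 256 packets,
		 * each capped at a third of the TX ring size.
		 */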
		state->skbs = kcalloc(state->packet_count,
				      sizeof(state->skbs[0]), GFP_KERNEL);
		if (!state->skbs)
			return -ENOMEM;
		state->flush = false;

		netif_dbg(efx, drv, efx->net_dev,
			  "TX queue %d (hw %d) testing %s loopback with %d packets\n",
			  tx_queue->label, tx_queue->queue, LOOPBACK_MODE(efx),
			  state->packet_count);

		efx_iterate_state(efx);
		begin_rc = efx_begin_loopback(tx_queue);

		/* This will normally complete very quickly, but be
		 * prepared to wait much longer. */
		msleep(1);
		if (!efx_poll_loopback(efx)) {
			msleep(LOOPBACK_TIMEOUT_MS);
			efx_poll_loopback(efx);
		}

		end_rc = efx_end_loopback(tx_queue, lb_tests);
		kfree(state->skbs);

		if (begin_rc || end_rc) {
			/* Wait a while to ensure there are no packets
			 * floating around after a failure. */
			schedule_timeout_uninterruptible(HZ / 10);
			return begin_rc ? begin_rc : end_rc;
		}
	}

	netif_dbg(efx, drv, efx->net_dev,
		  "TX queue %d passed %s loopback test with a burst length "
		  "of %d packets\n", tx_queue->label, LOOPBACK_MODE(efx),
		  state->packet_count);

	return 0;
}

/* Wait for link up. On Falcon, we would prefer to rely on efx_monitor, but
 * any contention on the mac lock (via e.g. efx_mac_mcast_work) causes it
 * to delay and retry. Therefore, it's safer to just poll directly. Wait
 * for link up and any faults to dissipate. */
static int efx_wait_for_link(struct efx_nic *efx)
{
	struct efx_link_state *link_state = &efx->link_state;
	int count, link_up_count = 0;
	bool link_up;

	for (count = 0; count < 40; count++) {
		schedule_timeout_uninterruptible(HZ / 10);

		if (efx->type->monitor != NULL) {
			mutex_lock(&efx->mac_lock);
			efx->type->monitor(efx);
			mutex_unlock(&efx->mac_lock);
		}

		mutex_lock(&efx->mac_lock);
		link_up = link_state->up;
		if (link_up)
			link_up = !efx->type->check_mac_fault(efx);
		mutex_unlock(&efx->mac_lock);

		if (link_up) {
			if (++link_up_count == 2)
				return 0;
		} else {
			link_up_count = 0;
		}
	}

	return -ETIMEDOUT;
}

static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
			      unsigned int loopback_modes)
{
	enum efx_loopback_mode mode;
	struct efx_loopback_state *state;
	struct efx_channel *channel =
		efx_get_channel(efx, efx->tx_channel_offset);
	struct efx_tx_queue *tx_queue;
	int rc = 0;

	/* Set the port loopback_selftest member. From this point on
	 * all received packets will be dropped. Mark the state as
	 * "flushing" so all inflight packets are dropped */
	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state == NULL)
		return -ENOMEM;
	BUG_ON(efx->loopback_selftest);
	state->flush = true;
	efx->loopback_selftest = state;

	/* Test all supported loopback modes */
	for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
		if (!(loopback_modes & (1 << mode)))
			continue;

		/* Move the port into the specified loopback mode. */
		state->flush = true;
		mutex_lock(&efx->mac_lock);
		efx->loopback_mode = mode;
		rc = __efx_reconfigure_port(efx);
		mutex_unlock(&efx->mac_lock);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "unable to move into %s loopback\n",
				  LOOPBACK_MODE(efx));
			goto out;
		}

		rc = efx_wait_for_link(efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "loopback %s never came up\n",
				  LOOPBACK_MODE(efx));
			goto out;
		}

		/* Test all enabled types of TX queue */
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			state->offload_csum = (tx_queue->type &
					       EFX_TXQ_TYPE_OUTER_CSUM);
			rc = efx_test_loopback(tx_queue,
					       &tests->loopback[mode]);
			if (rc)
				goto out;
		}
	}

 out:
	/* Remove the flush. The caller will remove the loopback setting */
	state->flush = true;
	efx->loopback_selftest = NULL;
	wmb();
	kfree(state);

	if (rc == -EPERM)
		rc = 0;

	return rc;
}

/**************************************************************************
 *
 * Entry point
 *
 *************************************************************************/

int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
		 unsigned flags)
{
	enum efx_loopback_mode loopback_mode = efx->loopback_mode;
	int phy_mode = efx->phy_mode;
	int rc_test = 0, rc_reset, rc;

	efx_selftest_async_cancel(efx);

	/* Online (i.e. non-disruptive) testing
	 * This checks interrupt generation, event delivery and PHY presence. */

	rc = efx_test_phy_alive(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	rc = efx_test_nvram(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	rc = efx_test_interrupts(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	rc = efx_test_eventq_irq(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	if (rc_test)
		return rc_test;

	if (!(flags & ETH_TEST_FL_OFFLINE))
		return efx_test_phy(efx, tests, flags);

	/* Offline (i.e. disruptive) testing
	 * This checks MAC and PHY loopback on the specified port. */

	/* Detach the device so the kernel doesn't transmit during the
	 * loopback test and the watchdog timeout doesn't fire.
	 */
	efx_device_detach_sync(efx);

	if (efx->type->test_chip) {
		rc_reset = efx->type->test_chip(efx, tests);
		if (rc_reset) {
			netif_err(efx, hw, efx->net_dev,
				  "Unable to recover from chip test\n");
			efx_schedule_reset(efx, RESET_TYPE_DISABLE);
			return rc_reset;
		}

		if ((tests->memory < 0 || tests->registers < 0) && !rc_test)
			rc_test = -EIO;
	}

	/* Ensure that the phy is powered and out of loopback
	 * for the bist and loopback tests */
	mutex_lock(&efx->mac_lock);
	efx->phy_mode &= ~PHY_MODE_LOW_POWER;
	efx->loopback_mode = LOOPBACK_NONE;
	__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	rc = efx_test_phy(efx, tests, flags);
	if (rc && !rc_test)
		rc_test = rc;

	rc = efx_test_loopbacks(efx, tests, efx->loopback_modes);
	if (rc && !rc_test)
		rc_test = rc;

	/* restore the PHY to the previous state */
	mutex_lock(&efx->mac_lock);
	efx->phy_mode = phy_mode;
	efx->loopback_mode = loopback_mode;
	__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	efx_device_attach_if_not_resetting(efx);

	return rc_test;
}

void efx_selftest_async_start(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_nic_event_test_start(channel);
	schedule_delayed_work(&efx->selftest_work, IRQ_TIMEOUT);
}

void efx_selftest_async_cancel(struct efx_nic *efx)
{
	cancel_delayed_work_sync(&efx->selftest_work);
}

static void efx_selftest_async_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic,
					   selftest_work.work);
	struct efx_channel *channel;
	int cpu;

	efx_for_each_channel(channel, efx) {
		cpu = efx_nic_event_test_irq_cpu(channel);
		if (cpu < 0)
			netif_err(efx, ifup, efx->net_dev,
				  "channel %d failed to trigger an interrupt\n",
				  channel->channel);
		else
			netif_dbg(efx, ifup, efx->net_dev,
				  "channel %d triggered interrupt on CPU %d\n",
				  channel->channel, cpu);
	}
}

void efx_selftest_async_init(struct efx_nic *efx)
{
	INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
}
52
drivers/net/ethernet/sfc/siena/selftest.h
Normal file
@@ -0,0 +1,52 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2012 Solarflare Communications Inc.
 */

#ifndef EFX_SELFTEST_H
#define EFX_SELFTEST_H

#include "net_driver.h"

/*
 * Self tests
 */

struct efx_loopback_self_tests {
	int tx_sent[EFX_MAX_TXQ_PER_CHANNEL];
	int tx_done[EFX_MAX_TXQ_PER_CHANNEL];
	int rx_good;
	int rx_bad;
};

#define EFX_MAX_PHY_TESTS 20

/* Efx self test results
 * For fields which are not counters, 1 indicates success and -1
 * indicates failure; 0 indicates test could not be run.
 */
struct efx_self_tests {
	/* online tests */
	int phy_alive;
	int nvram;
	int interrupt;
	int eventq_dma[EFX_MAX_CHANNELS];
	int eventq_int[EFX_MAX_CHANNELS];
	/* offline tests */
	int memory;
	int registers;
	int phy_ext[EFX_MAX_PHY_TESTS];
	struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1];
};

void efx_loopback_rx_packet(struct efx_nic *efx, const char *buf_ptr,
			    int pkt_len);
int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
		 unsigned flags);
void efx_selftest_async_init(struct efx_nic *efx);
void efx_selftest_async_start(struct efx_nic *efx);
void efx_selftest_async_cancel(struct efx_nic *efx);

#endif /* EFX_SELFTEST_H */
72
drivers/net/ethernet/sfc/siena/sriov.c
Normal file
@@ -0,0 +1,72 @@
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2014-2015 Solarflare Communications Inc.
 */
#include <linux/module.h>
#include "net_driver.h"
#include "nic.h"
#include "sriov.h"

int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	if (efx->type->sriov_set_vf_mac)
		return efx->type->sriov_set_vf_mac(efx, vf_i, mac);
	else
		return -EOPNOTSUPP;
}

int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan,
			  u8 qos, __be16 vlan_proto)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	if (efx->type->sriov_set_vf_vlan) {
		if ((vlan & ~VLAN_VID_MASK) ||
		    (qos & ~(VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT)))
			return -EINVAL;

		if (vlan_proto != htons(ETH_P_8021Q))
			return -EPROTONOSUPPORT;

		return efx->type->sriov_set_vf_vlan(efx, vf_i, vlan, qos);
	} else {
		return -EOPNOTSUPP;
	}
}

int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
			      bool spoofchk)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	if (efx->type->sriov_set_vf_spoofchk)
		return efx->type->sriov_set_vf_spoofchk(efx, vf_i, spoofchk);
	else
		return -EOPNOTSUPP;
}

int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
			    struct ifla_vf_info *ivi)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	if (efx->type->sriov_get_vf_config)
		return efx->type->sriov_get_vf_config(efx, vf_i, ivi);
	else
		return -EOPNOTSUPP;
}

int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i,
				int link_state)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	if (efx->type->sriov_set_vf_link_state)
		return efx->type->sriov_set_vf_link_state(efx, vf_i,
							  link_state);
	else
		return -EOPNOTSUPP;
}
25
drivers/net/ethernet/sfc/siena/sriov.h
Normal file
@@ -0,0 +1,25 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2014-2015 Solarflare Communications Inc.
 */

#ifndef EFX_SRIOV_H
#define EFX_SRIOV_H

#include "net_driver.h"

#ifdef CONFIG_SFC_SRIOV

int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac);
int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan,
			  u8 qos, __be16 vlan_proto);
int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
			      bool spoofchk);
int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
			    struct ifla_vf_info *ivi);
int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i,
				int link_state);
#endif /* CONFIG_SFC_SRIOV */

#endif /* EFX_SRIOV_H */
643
drivers/net/ethernet/sfc/siena/tx.c
Normal file
@@ -0,0 +1,643 @@
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include <linux/cache.h>
#include "net_driver.h"
#include "efx.h"
#include "io.h"
#include "nic.h"
#include "tx.h"
#include "tx_common.h"
#include "workarounds.h"
#include "ef10_regs.h"

#ifdef EFX_USE_PIO

#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES)
unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;
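/* i.e. by default, packets of up to 256 bytes (rounded up to a whole L1
 * cache line) are eligible for PIO transmission.
 */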

#endif /* EFX_USE_PIO */

static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue,
					 struct efx_tx_buffer *buffer)
{
	unsigned int index = efx_tx_queue_get_insert_index(tx_queue);
	struct efx_buffer *page_buf =
		&tx_queue->cb_page[index >> (PAGE_SHIFT - EFX_TX_CB_ORDER)];
	unsigned int offset =
		((index << EFX_TX_CB_ORDER) + NET_IP_ALIGN) & (PAGE_SIZE - 1);

	if (unlikely(!page_buf->addr) &&
	    efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
				 GFP_ATOMIC))
		return NULL;
	buffer->dma_addr = page_buf->dma_addr + offset;
	buffer->unmap_len = 0;
	return (u8 *)page_buf->addr + offset;
}

u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
				   struct efx_tx_buffer *buffer, size_t len)
{
	if (len > EFX_TX_CB_SIZE)
		return NULL;
	return efx_tx_get_copy_buffer(tx_queue, buffer);
}

static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
{
	/* We need to consider all queues that the net core sees as one */
	struct efx_nic *efx = txq1->efx;
	struct efx_tx_queue *txq2;
	unsigned int fill_level;

	fill_level = efx_channel_tx_old_fill_level(txq1->channel);
	if (likely(fill_level < efx->txq_stop_thresh))
		return;

	/* We used the stale old_read_count above, which gives us a
	 * pessimistic estimate of the fill level (which may even
	 * validly be >= efx->txq_entries).  Now try again using
	 * read_count (more likely to be a cache miss).
	 *
	 * If we read read_count and then conditionally stop the
	 * queue, it is possible for the completion path to race with
	 * us and complete all outstanding descriptors in the middle,
	 * after which there will be no more completions to wake it.
	 * Therefore we stop the queue first, then read read_count
	 * (with a memory barrier to ensure the ordering), then
	 * restart the queue if the fill level turns out to be low
	 * enough.
	 */
	netif_tx_stop_queue(txq1->core_txq);
	smp_mb();
	efx_for_each_channel_tx_queue(txq2, txq1->channel)
		txq2->old_read_count = READ_ONCE(txq2->read_count);

	fill_level = efx_channel_tx_old_fill_level(txq1->channel);
	EFX_WARN_ON_ONCE_PARANOID(fill_level >= efx->txq_entries);
	if (likely(fill_level < efx->txq_stop_thresh)) {
		smp_mb();
		if (likely(!efx->loopback_selftest))
			netif_tx_start_queue(txq1->core_txq);
	}
}

static int efx_enqueue_skb_copy(struct efx_tx_queue *tx_queue,
				struct sk_buff *skb)
{
	unsigned int copy_len = skb->len;
	struct efx_tx_buffer *buffer;
	u8 *copy_buffer;
	int rc;

	EFX_WARN_ON_ONCE_PARANOID(copy_len > EFX_TX_CB_SIZE);

	buffer = efx_tx_queue_get_insert_buffer(tx_queue);

	copy_buffer = efx_tx_get_copy_buffer(tx_queue, buffer);
	if (unlikely(!copy_buffer))
		return -ENOMEM;

	rc = skb_copy_bits(skb, 0, copy_buffer, copy_len);
	EFX_WARN_ON_PARANOID(rc);
	buffer->len = copy_len;

	buffer->skb = skb;
	buffer->flags = EFX_TX_BUF_SKB;

	++tx_queue->insert_count;
	return rc;
}

#ifdef EFX_USE_PIO

struct efx_short_copy_buffer {
	int used;
	u8 buf[L1_CACHE_BYTES];
};

/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
 * Advances piobuf pointer. Leaves additional data in the copy buffer.
 */
static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
				    u8 *data, int len,
				    struct efx_short_copy_buffer *copy_buf)
{
	int block_len = len & ~(sizeof(copy_buf->buf) - 1);
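	/* block_len is len rounded down to a whole number of copy-buffer
	 * (i.e. L1 cache line) sized blocks; the sub-line remainder is
	 * staged in copy_buf for a later write.
	 */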

	__iowrite64_copy(*piobuf, data, block_len >> 3);
	*piobuf += block_len;
	len -= block_len;

	if (len) {
		data += block_len;
		BUG_ON(copy_buf->used);
		BUG_ON(len > sizeof(copy_buf->buf));
		memcpy(copy_buf->buf, data, len);
		copy_buf->used = len;
	}
}

/* Copy to PIO, respecting dword alignment, popping data from copy buffer first.
 * Advances piobuf pointer. Leaves additional data in the copy buffer.
 */
static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
				       u8 *data, int len,
				       struct efx_short_copy_buffer *copy_buf)
{
	if (copy_buf->used) {
		/* if the copy buffer is partially full, fill it up and write */
		int copy_to_buf =
			min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len);

		memcpy(copy_buf->buf + copy_buf->used, data, copy_to_buf);
		copy_buf->used += copy_to_buf;

		/* if we didn't fill it up then we're done for now */
		if (copy_buf->used < sizeof(copy_buf->buf))
			return;

		__iowrite64_copy(*piobuf, copy_buf->buf,
				 sizeof(copy_buf->buf) >> 3);
		*piobuf += sizeof(copy_buf->buf);
		data += copy_to_buf;
		len -= copy_to_buf;
		copy_buf->used = 0;
	}

	efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf);
}

static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
				  struct efx_short_copy_buffer *copy_buf)
{
	/* if there's anything in it, write the whole buffer, including junk */
	if (copy_buf->used)
		__iowrite64_copy(piobuf, copy_buf->buf,
				 sizeof(copy_buf->buf) >> 3);
}

/* Traverse skb structure and copy fragments in to PIO buffer.
 * Advances piobuf pointer.
 */
static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
				     u8 __iomem **piobuf,
				     struct efx_short_copy_buffer *copy_buf)
{
	int i;

	efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb),
				copy_buf);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
		u8 *vaddr;

		vaddr = kmap_atomic(skb_frag_page(f));

		efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + skb_frag_off(f),
					   skb_frag_size(f), copy_buf);
		kunmap_atomic(vaddr);
	}

	EFX_WARN_ON_ONCE_PARANOID(skb_shinfo(skb)->frag_list);
}

static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct efx_tx_buffer *buffer =
		efx_tx_queue_get_insert_buffer(tx_queue);
	u8 __iomem *piobuf = tx_queue->piobuf;

	/* Copy to PIO buffer. Ensure the writes are padded to the end
	 * of a cache line, as this is required for write-combining to be
	 * effective on at least x86.
	 */

	if (skb_shinfo(skb)->nr_frags) {
		/* The size of the copy buffer will ensure all writes
		 * are the size of a cache line.
		 */
		struct efx_short_copy_buffer copy_buf;

		copy_buf.used = 0;

		efx_skb_copy_bits_to_pio(tx_queue->efx, skb,
					 &piobuf, &copy_buf);
		efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf);
	} else {
		/* Pad the write to the size of a cache line.
		 * We can do this because we know the skb_shared_info struct is
		 * after the source, and the destination buffer is big enough.
		 */
		BUILD_BUG_ON(L1_CACHE_BYTES >
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
		__iowrite64_copy(tx_queue->piobuf, skb->data,
				 ALIGN(skb->len, L1_CACHE_BYTES) >> 3);
	}

	buffer->skb = skb;
	buffer->flags = EFX_TX_BUF_SKB | EFX_TX_BUF_OPTION;

	EFX_POPULATE_QWORD_5(buffer->option,
			     ESF_DZ_TX_DESC_IS_OPT, 1,
			     ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_PIO,
			     ESF_DZ_TX_PIO_CONT, 0,
			     ESF_DZ_TX_PIO_BYTE_CNT, skb->len,
			     ESF_DZ_TX_PIO_BUF_ADDR,
			     tx_queue->piobuf_offset);
	++tx_queue->insert_count;
	return 0;
}

/* Decide whether we can use TX PIO, ie. write packet data directly into
 * a buffer on the device.  This can reduce latency at the expense of
 * throughput, so we only do this if both hardware and software TX rings
 * are empty, including all queues for the channel.  This also ensures that
 * only one packet at a time can be using the PIO buffer. If the xmit_more
 * flag is set then we don't use this - there'll be another packet along
 * shortly and we want to hold off the doorbell.
 */
static bool efx_tx_may_pio(struct efx_tx_queue *tx_queue)
{
	struct efx_channel *channel = tx_queue->channel;

	if (!tx_queue->piobuf)
		return false;

	EFX_WARN_ON_ONCE_PARANOID(!channel->efx->type->option_descriptors);

	efx_for_each_channel_tx_queue(tx_queue, channel)
		if (!efx_nic_tx_is_empty(tx_queue, tx_queue->packet_write_count))
			return false;

	return true;
}
#endif /* EFX_USE_PIO */

/* Send any pending traffic for a channel. xmit_more is shared across all
 * queues for a channel, so we must check all of them.
 */
static void efx_tx_send_pending(struct efx_channel *channel)
{
	struct efx_tx_queue *q;

	efx_for_each_channel_tx_queue(q, channel) {
		if (q->xmit_pending)
			efx_nic_push_buffers(q);
	}
}

/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped,
 * the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK.
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	unsigned int old_insert_count = tx_queue->insert_count;
	bool xmit_more = netdev_xmit_more();
	bool data_mapped = false;
	unsigned int segments;
	unsigned int skb_len;
	int rc;

	skb_len = skb->len;
	segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0;
	if (segments == 1)
		segments = 0; /* Don't use TSO for a single segment. */

	/* Handle TSO first - it's *possible* (although unlikely) that we might
	 * be passed a packet to segment that's smaller than the copybreak/PIO
	 * size limit.
	 */
	if (segments) {
		switch (tx_queue->tso_version) {
		case 1:
			rc = efx_enqueue_skb_tso(tx_queue, skb, &data_mapped);
			break;
		case 2:
			rc = efx_ef10_tx_tso_desc(tx_queue, skb, &data_mapped);
			break;
		case 0: /* No TSO on this queue, SW fallback needed */
		default:
			rc = -EINVAL;
			break;
		}
		if (rc == -EINVAL) {
			rc = efx_tx_tso_fallback(tx_queue, skb);
			tx_queue->tso_fallbacks++;
			if (rc == 0)
				return 0;
		}
		if (rc)
			goto err;
#ifdef EFX_USE_PIO
	} else if (skb_len <= efx_piobuf_size && !xmit_more &&
		   efx_tx_may_pio(tx_queue)) {
		/* Use PIO for short packets with an empty queue. */
		if (efx_enqueue_skb_pio(tx_queue, skb))
			goto err;
		tx_queue->pio_packets++;
		data_mapped = true;
#endif
	} else if (skb->data_len && skb_len <= EFX_TX_CB_SIZE) {
		/* Pad short packets or coalesce short fragmented packets. */
		if (efx_enqueue_skb_copy(tx_queue, skb))
			goto err;
		tx_queue->cb_packets++;
		data_mapped = true;
	}

	/* Map for DMA and create descriptors if we haven't done so already. */
	if (!data_mapped && (efx_tx_map_data(tx_queue, skb, segments)))
		goto err;

	efx_tx_maybe_stop_queue(tx_queue);

	tx_queue->xmit_pending = true;

	/* Pass off to hardware */
	if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more))
		efx_tx_send_pending(tx_queue->channel);

	if (segments) {
		tx_queue->tso_bursts++;
		tx_queue->tso_packets += segments;
		tx_queue->tx_packets += segments;
	} else {
		tx_queue->tx_packets++;
	}

	return NETDEV_TX_OK;


 err:
	efx_enqueue_unwind(tx_queue, old_insert_count);
	dev_kfree_skb_any(skb);

	/* If we're not expecting another transmit and we had something to push
	 * on this queue or a partner queue then we need to push here to get the
	 * previous packets out.
	 */
	if (!xmit_more)
		efx_tx_send_pending(tx_queue->channel);

	return NETDEV_TX_OK;
}

/* Transmit a packet from an XDP buffer
 *
 * Returns number of packets sent on success, error code otherwise.
 * Runs in NAPI context, either in our poll (for XDP TX) or a different NIC
 * (for XDP redirect).
 */
int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
		       bool flush)
{
	struct efx_tx_buffer *tx_buffer;
	struct efx_tx_queue *tx_queue;
	struct xdp_frame *xdpf;
	dma_addr_t dma_addr;
	unsigned int len;
	int space;
	int cpu;
	int i = 0;

	if (unlikely(n && !xdpfs))
		return -EINVAL;
	if (unlikely(!n))
		return 0;

	cpu = raw_smp_processor_id();
	if (unlikely(cpu >= efx->xdp_tx_queue_count))
		return -EINVAL;

	tx_queue = efx->xdp_tx_queues[cpu];
	if (unlikely(!tx_queue))
		return -EINVAL;

	if (!tx_queue->initialised)
		return -EINVAL;

	if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED)
		HARD_TX_LOCK(efx->net_dev, tx_queue->core_txq, cpu);

	/* If we're borrowing net stack queues we have to handle stop-restart
	 * or we might block the queue and it will be considered as frozen
	 */
	if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) {
		if (netif_tx_queue_stopped(tx_queue->core_txq))
			goto unlock;
		efx_tx_maybe_stop_queue(tx_queue);
	}

	/* Check for available space. We should never need multiple
	 * descriptors per frame.
	 */
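	/* read_count trails insert_count by the number of in-flight
	 * descriptors, so the difference below is the free ring space.
	 */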
	space = efx->txq_entries +
		tx_queue->read_count - tx_queue->insert_count;

	for (i = 0; i < n; i++) {
		xdpf = xdpfs[i];

		if (i >= space)
			break;

		/* We'll want a descriptor for this tx. */
		prefetchw(__efx_tx_queue_get_insert_buffer(tx_queue));

		len = xdpf->len;

		/* Map for DMA. */
		dma_addr = dma_map_single(&efx->pci_dev->dev,
					  xdpf->data, len,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(&efx->pci_dev->dev, dma_addr))
			break;

		/* Create descriptor and set up for unmapping DMA. */
		tx_buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);
		tx_buffer->xdpf = xdpf;
		tx_buffer->flags = EFX_TX_BUF_XDP |
				   EFX_TX_BUF_MAP_SINGLE;
		tx_buffer->dma_offset = 0;
		tx_buffer->unmap_len = len;
		tx_queue->tx_packets++;
	}

	/* Pass mapped frames to hardware. */
	if (flush && i > 0)
		efx_nic_push_buffers(tx_queue);

unlock:
	if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED)
		HARD_TX_UNLOCK(efx->net_dev, tx_queue->core_txq);

	return i == 0 ? -EIO : i;
}

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).
 *
 * Context: non-blocking.
 * Should always return NETDEV_TX_OK and consume the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_tx_queue *tx_queue;
	unsigned index, type;

	EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));

	index = skb_get_queue_mapping(skb);
	type = efx_tx_csum_type_skb(skb);
	if (index >= efx->n_tx_channels) {
		index -= efx->n_tx_channels;
		type |= EFX_TXQ_TYPE_HIGHPRI;
	}

	/* PTP "event" packet */
	if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
	    ((efx_ptp_use_mac_tx_timestamps(efx) && efx->ptp_data) ||
	     unlikely(efx_ptp_is_ptp_tx(efx, skb)))) {
		/* There may be existing transmits on the channel that are
		 * waiting for this packet to trigger the doorbell write.
		 * We need to send the packets at this point.
		 */
		efx_tx_send_pending(efx_get_tx_channel(efx, index));
		return efx_ptp_tx(efx, skb);
	}

	tx_queue = efx_get_tx_queue(efx, index, type);
	if (WARN_ON_ONCE(!tx_queue)) {
		/* We don't have a TXQ of the right type.
		 * This should never happen, as we don't advertise offload
		 * features unless we can support them.
		 */
		dev_kfree_skb_any(skb);
		/* If we're not expecting another transmit and we had something to push
		 * on this queue or a partner queue then we need to push here to get the
		 * previous packets out.  tx_queue is NULL here, so look the
		 * channel up by index instead.
		 */
		if (!netdev_xmit_more())
			efx_tx_send_pending(efx_get_tx_channel(efx, index));
		return NETDEV_TX_OK;
	}

	return __efx_enqueue_skb(tx_queue, skb);
}

void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;
	unsigned int read_ptr;
	bool finished = false;

	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (!finished) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

		if (!efx_tx_buffer_in_use(buffer)) {
			struct efx_nic *efx = tx_queue->efx;

			netif_err(efx, hw, efx->net_dev,
				  "TX queue %d spurious single TX completion\n",
				  tx_queue->queue);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		/* Need to check the flag before dequeueing. */
		if (buffer->flags & EFX_TX_BUF_SKB)
			finished = true;
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}

	tx_queue->pkts_compl += pkts_compl;
	tx_queue->bytes_compl += bytes_compl;

	EFX_WARN_ON_PARANOID(pkts_compl != 1);

	efx_xmit_done_check_empty(tx_queue);
}

void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	/* Must be inverse of queue lookup in efx_hard_start_xmit() */
	tx_queue->core_txq =
		netdev_get_tx_queue(efx->net_dev,
				    tx_queue->channel->channel +
				    ((tx_queue->type & EFX_TXQ_TYPE_HIGHPRI) ?
				     efx->n_tx_channels : 0));
}

int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
		 void *type_data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct tc_mqprio_qopt *mqprio = type_data;
	unsigned tc, num_tc;

	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	/* Only Siena supported highpri queues */
	if (efx_nic_rev(efx) > EFX_REV_SIENA_A0)
		return -EOPNOTSUPP;

	num_tc = mqprio->num_tc;

	if (num_tc > EFX_MAX_TX_TC)
		return -EINVAL;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

	if (num_tc == net_dev->num_tc)
		return 0;

	for (tc = 0; tc < num_tc; tc++) {
		net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
		net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
	}

	net_dev->num_tc = num_tc;

	return netif_set_real_num_tx_queues(net_dev,
					    max_t(int, num_tc, 1) *
					    efx->n_tx_channels);
}
47
drivers/net/ethernet/sfc/siena/tx.h
Normal file
@@ -0,0 +1,47 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2015 Solarflare Communications Inc.
 */

#ifndef EFX_TX_H
#define EFX_TX_H

#include <linux/types.h>

/* Driver internal tx-path related declarations. */

unsigned int efx_tx_limit_len(struct efx_tx_queue *tx_queue,
			      dma_addr_t dma_addr, unsigned int len);

u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
				   struct efx_tx_buffer *buffer, size_t len);

/* What TXQ type will satisfy the checksum offloads required for this skb? */
static inline unsigned int efx_tx_csum_type_skb(struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0; /* no checksum offload */

	if (skb->encapsulation &&
	    skb_checksum_start_offset(skb) == skb_inner_transport_offset(skb)) {
		/* we only advertise features for IPv4 and IPv6 checksums on
		 * encapsulated packets, so if the checksum is for the inner
		 * packet, it must be one of them; no further checking required.
		 */

		/* Do we also need to offload the outer header checksum? */
		if (skb_shinfo(skb)->gso_segs > 1 &&
		    !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
			return EFX_TXQ_TYPE_OUTER_CSUM | EFX_TXQ_TYPE_INNER_CSUM;
		return EFX_TXQ_TYPE_INNER_CSUM;
	}

	/* similarly, we only advertise features for IPv4 and IPv6 checksums,
	 * so it must be one of them. No need for further checks.
	 */
	return EFX_TXQ_TYPE_OUTER_CSUM;
}
#endif /* EFX_TX_H */
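
Editorial note: efx_tx_csum_type_skb() is a pure decision tree over skb state. A userspace model of the same logic, with the skb fields reduced to booleans and stand-in flag values (not the driver's EFX_TXQ_TYPE_* definitions), may make the three outcomes easier to see:

#include <stdio.h>

#define OUTER_CSUM 0x1	/* stand-in for EFX_TXQ_TYPE_OUTER_CSUM */
#define INNER_CSUM 0x2	/* stand-in for EFX_TXQ_TYPE_INNER_CSUM */

static unsigned int csum_type(int csum_partial, int encap_inner_csum,
			      int tunnel_csum_gso)
{
	if (!csum_partial)
		return 0;			/* no offload needed */
	if (encap_inner_csum) {			/* checksum is for the inner packet */
		if (tunnel_csum_gso)		/* outer UDP checksum also required */
			return OUTER_CSUM | INNER_CSUM;
		return INNER_CSUM;
	}
	return OUTER_CSUM;			/* plain (outer) checksum */
}

int main(void)
{
	printf("plain TCP skb           -> %#x\n", csum_type(1, 0, 0));
	printf("tunnel, inner csum only -> %#x\n", csum_type(1, 1, 0));
	printf("tunnel GSO + outer csum -> %#x\n", csum_type(1, 1, 1));
	return 0;
}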
449
drivers/net/ethernet/sfc/siena/tx_common.c
Normal file
@@ -0,0 +1,449 @@
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include "efx.h"
#include "nic_common.h"
#include "tx_common.h"

static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
{
	return DIV_ROUND_UP(tx_queue->ptr_mask + 1,
			    PAGE_SIZE >> EFX_TX_CB_ORDER);
}

int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;

	tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),
				    sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
	if (!tx_queue->cb_page) {
		rc = -ENOMEM;
		goto fail1;
	}

	/* Allocate hardware ring, determine TXQ type */
	rc = efx_nic_probe_tx(tx_queue);
	if (rc)
		goto fail2;

	tx_queue->channel->tx_queue_by_type[tx_queue->type] = tx_queue;
	return 0;

fail2:
	kfree(tx_queue->cb_page);
	tx_queue->cb_page = NULL;
fail1:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}
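
Editorial note: the ring is sized to a power of two so that free-running 32-bit counters can be reduced to ring indices with a single AND against ptr_mask. A minimal userspace sketch of the scheme (the sizes are illustrative, not the driver's EFX_MIN/MAX_DMAQ_SIZE):

#include <stdio.h>

static unsigned int roundup_pow_of_two(unsigned int v)
{
	unsigned int p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int entries = roundup_pow_of_two(1000);	/* -> 1024 */
	unsigned int ptr_mask = entries - 1;
	unsigned int read_count = 1030;	/* counters are never wrapped explicitly */

	/* 1030 & 1023 = 6: the counter wraps into the ring for free */
	printf("entries %u, mask %#x, read_ptr %u\n",
	       entries, ptr_mask, read_count & ptr_mask);
	return 0;
}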

void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	netif_dbg(efx, drv, efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->notify_count = 0;
	tx_queue->write_count = 0;
	tx_queue->packet_write_count = 0;
	tx_queue->old_write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
	tx_queue->xmit_pending = false;
	tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) &&
				  tx_queue->channel == efx_ptp_channel(efx));
	tx_queue->completed_timestamp_major = 0;
	tx_queue->completed_timestamp_minor = 0;

	tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel);
	tx_queue->tso_version = 0;

	/* Set up TX descriptor ring */
	efx_nic_init_tx(tx_queue);

	tx_queue->initialised = true;
}

void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	tx_queue->initialised = false;

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;

		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);

		++tx_queue->read_count;
	}
	tx_queue->xmit_pending = false;
	netdev_tx_reset_queue(tx_queue->core_txq);
}

void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	int i;

	if (!tx_queue->buffer)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	efx_nic_remove_tx(tx_queue);

	if (tx_queue->cb_page) {
		for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++)
			efx_nic_free_buffer(tx_queue->efx,
					    &tx_queue->cb_page[i]);
		kfree(tx_queue->cb_page);
		tx_queue->cb_page = NULL;
	}

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	tx_queue->channel->tx_queue_by_type[tx_queue->type] = NULL;
}

void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			struct efx_tx_buffer *buffer,
			unsigned int *pkts_compl,
			unsigned int *bytes_compl)
{
	if (buffer->unmap_len) {
		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
		dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;

		if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
				       DMA_TO_DEVICE);
		buffer->unmap_len = 0;
	}

	if (buffer->flags & EFX_TX_BUF_SKB) {
		struct sk_buff *skb = (struct sk_buff *)buffer->skb;

		EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
		if (tx_queue->timestamping &&
		    (tx_queue->completed_timestamp_major ||
		     tx_queue->completed_timestamp_minor)) {
			struct skb_shared_hwtstamps hwtstamp;

			hwtstamp.hwtstamp =
				efx_ptp_nic_to_kernel_time(tx_queue);
			skb_tstamp_tx(skb, &hwtstamp);

			tx_queue->completed_timestamp_major = 0;
			tx_queue->completed_timestamp_minor = 0;
		}
		dev_consume_skb_any((struct sk_buff *)buffer->skb);
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	} else if (buffer->flags & EFX_TX_BUF_XDP) {
		xdp_return_frame_rx_napi(buffer->xdpf);
	}

	buffer->len = 0;
	buffer->flags = 0;
}
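
Editorial note: when one mapped fragment is split across several descriptors (see efx_tx_map_chunk() below), only the final descriptor carries unmap_len, and the original mapping address is recovered as dma_addr - dma_offset, so the fragment is unmapped exactly once. A userspace sketch of that bookkeeping (all addresses and sizes are invented for illustration):

#include <stdio.h>
#include <stdint.h>

struct desc {
	uint64_t dma_addr;	/* start of this descriptor's slice */
	unsigned int len;
	unsigned int unmap_len;	/* non-zero only on the final slice */
	uint64_t dma_offset;	/* dma_addr - original mapping address */
};

int main(void)
{
	uint64_t map_addr = 0x10000;	/* one 7 KB fragment, mapped once */
	struct desc d[2] = {
		{ map_addr,        4096, 0,    0    },
		{ map_addr + 4096, 3072, 7168, 4096 },	/* final slice */
	};

	for (int i = 0; i < 2; i++)
		if (d[i].unmap_len)	/* as in efx_dequeue_buffer() */
			printf("unmap %u bytes at %#llx\n", d[i].unmap_len,
			       (unsigned long long)(d[i].dma_addr - d[i].dma_offset));
	return 0;
}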

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index,
				unsigned int *pkts_compl,
				unsigned int *bytes_compl)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

		if (!efx_tx_buffer_in_use(buffer)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %d\n",
				  tx_queue->queue, read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}

void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue)
{
	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
		tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
		if (tx_queue->read_count == tx_queue->old_write_count) {
			/* Ensure that read_count is flushed. */
			smp_mb();
			tx_queue->empty_read_count =
				tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
		}
	}
}
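
Editorial note: empty_read_count packs two facts into one word: a high "valid" flag saying the queue was observed empty, and the read_count at which that happened. A minimal userspace sketch of the encoding (the flag's bit position is an assumption here, standing in for the driver's EFX_EMPTY_COUNT_VALID):

#include <stdio.h>
#include <stdint.h>

#define EMPTY_COUNT_VALID (1u << 31)	/* illustrative flag bit */

int main(void)
{
	uint32_t read_count = 12345;
	uint32_t empty_read_count = read_count | EMPTY_COUNT_VALID;

	if (empty_read_count & EMPTY_COUNT_VALID)
		printf("queue observed empty at read_count %u\n",
		       empty_read_count & ~EMPTY_COUNT_VALID);
	return 0;
}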

void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
	struct efx_nic *efx = tx_queue->efx;

	EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);

	efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
	tx_queue->pkts_compl += pkts_compl;
	tx_queue->bytes_compl += bytes_compl;

	if (pkts_compl > 1)
		++tx_queue->merge_events;

	/* See if we need to restart the netif queue. This memory
	 * barrier ensures that we write read_count (inside
	 * efx_dequeue_buffers()) before reading the queue status.
	 */
	smp_mb();
	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
	    likely(efx->port_enabled) &&
	    likely(netif_device_present(efx->net_dev))) {
		fill_level = efx_channel_tx_fill_level(tx_queue->channel);
		if (fill_level <= efx->txq_wake_thresh)
			netif_tx_wake_queue(tx_queue->core_txq);
	}

	efx_xmit_done_check_empty(tx_queue);
}

/* Remove buffers put into a tx_queue for the current packet.
 * None of the buffers must have an skb attached.
 */
void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
			unsigned int insert_count)
{
	struct efx_tx_buffer *buffer;
	unsigned int bytes_compl = 0;
	unsigned int pkts_compl = 0;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != insert_count) {
		--tx_queue->insert_count;
		buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
	}
}

struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
				       dma_addr_t dma_addr, size_t len)
{
	const struct efx_nic_type *nic_type = tx_queue->efx->type;
	struct efx_tx_buffer *buffer;
	unsigned int dma_len;

	/* Map the fragment taking account of NIC-dependent DMA limits. */
	do {
		buffer = efx_tx_queue_get_insert_buffer(tx_queue);

		if (nic_type->tx_limit_len)
			dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);
		else
			dma_len = len;

		buffer->len = dma_len;
		buffer->dma_addr = dma_addr;
		buffer->flags = EFX_TX_BUF_CONT;
		len -= dma_len;
		dma_addr += dma_len;
		++tx_queue->insert_count;
	} while (len);

	return buffer;
}
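
Editorial note: the heart of efx_tx_map_chunk() is the loop that slices one mapped region into descriptors no longer than a NIC-dependent limit. A userspace sketch of the same loop, with a fixed 4 KB limit standing in for nic_type->tx_limit_len():

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define DMA_LIMIT 4096	/* assumed per-descriptor DMA length limit */

int main(void)
{
	uint64_t dma_addr = 0x2000;
	size_t len = 10000;	/* illustrative region: yields 4096+4096+1808 */

	do {
		size_t dma_len = len < DMA_LIMIT ? len : DMA_LIMIT;

		printf("desc: addr %#llx len %zu\n",
		       (unsigned long long)dma_addr, dma_len);
		len -= dma_len;
		dma_addr += dma_len;
	} while (len);
	return 0;
}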

int efx_tx_tso_header_length(struct sk_buff *skb)
{
	size_t header_len;

	if (skb->encapsulation)
		header_len = skb_inner_transport_header(skb) -
				skb->data +
				(inner_tcp_hdr(skb)->doff << 2u);
	else
		header_len = skb_transport_header(skb) - skb->data +
				(tcp_hdr(skb)->doff << 2u);
	return header_len;
}

/* Map all data from an SKB for DMA and create descriptors on the queue. */
int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
		    unsigned int segment_count)
{
	struct efx_nic *efx = tx_queue->efx;
	struct device *dma_dev = &efx->pci_dev->dev;
	unsigned int frag_index, nr_frags;
	dma_addr_t dma_addr, unmap_addr;
	unsigned short dma_flags;
	size_t len, unmap_len;

	nr_frags = skb_shinfo(skb)->nr_frags;
	frag_index = 0;

	/* Map header data. */
	len = skb_headlen(skb);
	dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
	dma_flags = EFX_TX_BUF_MAP_SINGLE;
	unmap_len = len;
	unmap_addr = dma_addr;

	if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
		return -EIO;

	if (segment_count) {
		/* For TSO we need to put the header into a separate
		 * descriptor. Map this separately if necessary.
		 */
		size_t header_len = efx_tx_tso_header_length(skb);

		if (header_len != len) {
			tx_queue->tso_long_headers++;
			efx_tx_map_chunk(tx_queue, dma_addr, header_len);
			len -= header_len;
			dma_addr += header_len;
		}
	}

	/* Add descriptors for each fragment. */
	do {
		struct efx_tx_buffer *buffer;
		skb_frag_t *fragment;

		buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);

		/* The final descriptor for a fragment is responsible for
		 * unmapping the whole fragment.
		 */
		buffer->flags = EFX_TX_BUF_CONT | dma_flags;
		buffer->unmap_len = unmap_len;
		buffer->dma_offset = buffer->dma_addr - unmap_addr;

		if (frag_index >= nr_frags) {
			/* Store SKB details with the final buffer for
			 * the completion.
			 */
			buffer->skb = skb;
			buffer->flags = EFX_TX_BUF_SKB | dma_flags;
			return 0;
		}

		/* Move on to the next fragment. */
		fragment = &skb_shinfo(skb)->frags[frag_index++];
		len = skb_frag_size(fragment);
		dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
					    DMA_TO_DEVICE);
		dma_flags = 0;
		unmap_len = len;
		unmap_addr = dma_addr;

		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
			return -EIO;
	} while (1);
}

unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
{
	/* Header and payload descriptor for each output segment, plus
	 * one for every input fragment boundary within a segment
	 */
	unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

	/* Possibly one more per segment for option descriptors */
	if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
		max_descs += EFX_TSO_MAX_SEGS;

	/* Possibly more for PCIe page boundaries within input fragments */
	if (PAGE_SIZE > EFX_PAGE_SIZE)
		max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
				   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));

	return max_descs;
}
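
Editorial note: a worked instance of the efx_tx_max_skb_descs() arithmetic. The constants EFX_TSO_MAX_SEGS = 100 and MAX_SKB_FRAGS = 17 are assumed typical values, taken here purely for illustration:

#include <stdio.h>

int main(void)
{
	unsigned int tso_max_segs = 100, max_skb_frags = 17;	/* assumed */
	unsigned int max_descs = tso_max_segs * 2 + max_skb_frags;	/* 217 */

	max_descs += tso_max_segs;	/* + option descriptors -> 317 */
	printf("max descriptors per TSO skb: %u\n", max_descs);
	return 0;
}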

/*
 * Fallback to software TSO.
 *
 * This is used if we are unable to send a GSO packet through hardware TSO.
 * This should only ever happen due to per-queue restrictions - unsupported
 * packets should first be filtered by the feature flags.
 *
 * Returns 0 on success, error code otherwise.
 */
int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct sk_buff *segments, *next;

	segments = skb_gso_segment(skb, 0);
	if (IS_ERR(segments))
		return PTR_ERR(segments);

	dev_consume_skb_any(skb);

	skb_list_walk_safe(segments, skb, next) {
		skb_mark_not_on_list(skb);
		efx_enqueue_skb(tx_queue, skb);
	}

	return 0;
}
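
Editorial note: the fallback path segments the oversized packet into a list and enqueues each segment after unlinking it. A userspace sketch of the walk-and-unlink pattern, with invented types standing in for sk_buff and efx_enqueue_skb():

#include <stdio.h>

struct seg {
	int id;
	struct seg *next;
};

static void enqueue(struct seg *s)	/* stands in for efx_enqueue_skb() */
{
	printf("enqueue segment %d\n", s->id);
}

int main(void)
{
	struct seg c = { 2, NULL }, b = { 1, &c }, a = { 0, &b };
	struct seg *skb = &a, *next;

	for (; skb; skb = next) {	/* models skb_list_walk_safe() */
		next = skb->next;
		skb->next = NULL;	/* models skb_mark_not_on_list() */
		enqueue(skb);
	}
	return 0;
}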
45
drivers/net/ethernet/sfc/siena/tx_common.h
Normal file
@@ -0,0 +1,45 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#ifndef EFX_TX_COMMON_H
#define EFX_TX_COMMON_H

int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);

void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			struct efx_tx_buffer *buffer,
			unsigned int *pkts_compl,
			unsigned int *bytes_compl);

static inline bool efx_tx_buffer_in_use(struct efx_tx_buffer *buffer)
{
	return buffer->len || (buffer->flags & EFX_TX_BUF_OPTION);
}

void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue);
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);

void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
			unsigned int insert_count);

struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
				       dma_addr_t dma_addr, size_t len);
int efx_tx_tso_header_length(struct sk_buff *skb);
int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
		    unsigned int segment_count);

unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb);

extern bool efx_separate_tx_channels;
#endif
252
drivers/net/ethernet/sfc/siena/vfdi.h
Normal file
@@ -0,0 +1,252 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2010-2012 Solarflare Communications Inc.
 */
#ifndef _VFDI_H
#define _VFDI_H

/**
 * DOC: Virtual Function Driver Interface
 *
 * This file contains software structures used to form a two way
 * communication channel between the VF driver and the PF driver,
 * named Virtual Function Driver Interface (VFDI).
 *
 * For the purposes of VFDI, a page is a memory region with size and
 * alignment of 4K.  All addresses are DMA addresses to be used within
 * the domain of the relevant VF.
 *
 * The only hardware-defined channels for a VF driver to communicate
 * with the PF driver are the event mailboxes (%FR_CZ_USR_EV
 * registers).  Writing to these registers generates an event with
 * EV_CODE = EV_CODE_USR_EV, USER_QID set to the index of the mailbox
 * and USER_EV_REG_VALUE set to the value written.  The PF driver may
 * direct or disable delivery of these events by setting
 * %FR_CZ_USR_EV_CFG.
 *
 * The PF driver can send arbitrary events to arbitrary event queues.
 * However, for consistency, VFDI events from the PF are defined to
 * follow the same form and be sent to the first event queue assigned
 * to the VF while that queue is enabled by the VF driver.
 *
 * The general form of the variable bits of VFDI events is:
 *
 *       0             16                       24   31
 *      | DATA        | TYPE                   | SEQ   |
 *
 * SEQ is a sequence number which should be incremented by 1 (modulo
 * 256) for each event.  The sequence numbers used in each direction
 * are independent.
 *
 * The VF submits requests of type &struct vfdi_req by sending the
 * address of the request (ADDR) in a series of 4 events:
 *
 *       0             16                       24   31
 *      | ADDR[0:15]  | VFDI_EV_TYPE_REQ_WORD0 | SEQ   |
 *      | ADDR[16:31] | VFDI_EV_TYPE_REQ_WORD1 | SEQ+1 |
 *      | ADDR[32:47] | VFDI_EV_TYPE_REQ_WORD2 | SEQ+2 |
 *      | ADDR[48:63] | VFDI_EV_TYPE_REQ_WORD3 | SEQ+3 |
 *
 * The address must be page-aligned.  After receiving such a valid
 * series of events, the PF driver will attempt to read the request
 * and write a response to the same address.  In case of an invalid
 * sequence of events or a DMA error, there will be no response.
 *
 * The VF driver may request that the PF driver writes status
 * information into its domain asynchronously.  After writing the
 * status, the PF driver will send an event of the form:
 *
 *       0             16                       24   31
 *      | reserved    | VFDI_EV_TYPE_STATUS    | SEQ   |
 *
 * In case the VF must be reset for any reason, the PF driver will
 * send an event of the form:
 *
 *       0             16                       24   31
 *      | reserved    | VFDI_EV_TYPE_RESET     | SEQ   |
 *
 * It is then the responsibility of the VF driver to request
 * reinitialisation of its queues.
 */
#define VFDI_EV_SEQ_LBN 24
#define VFDI_EV_SEQ_WIDTH 8
#define VFDI_EV_TYPE_LBN 16
#define VFDI_EV_TYPE_WIDTH 8
#define VFDI_EV_TYPE_REQ_WORD0 0
#define VFDI_EV_TYPE_REQ_WORD1 1
#define VFDI_EV_TYPE_REQ_WORD2 2
#define VFDI_EV_TYPE_REQ_WORD3 3
#define VFDI_EV_TYPE_STATUS 4
#define VFDI_EV_TYPE_RESET 5
#define VFDI_EV_DATA_LBN 0
#define VFDI_EV_DATA_WIDTH 16

struct vfdi_endpoint {
	u8 mac_addr[ETH_ALEN];
	__be16 tci;
};

/**
 * enum vfdi_op - VFDI operation enumeration
 * @VFDI_OP_RESPONSE: Indicates a response to the request.
 * @VFDI_OP_INIT_EVQ: Initialize SRAM entries and initialize an EVQ.
 * @VFDI_OP_INIT_RXQ: Initialize SRAM entries and initialize an RXQ.
 * @VFDI_OP_INIT_TXQ: Initialize SRAM entries and initialize a TXQ.
 * @VFDI_OP_FINI_ALL_QUEUES: Flush all queues, finalize all queues, then
 *	finalize the SRAM entries.
 * @VFDI_OP_INSERT_FILTER: Insert a MAC filter targeting the given RXQ.
 * @VFDI_OP_REMOVE_ALL_FILTERS: Remove all filters.
 * @VFDI_OP_SET_STATUS_PAGE: Set the DMA page(s) used for status updates
 *	from PF and write the initial status.
 * @VFDI_OP_CLEAR_STATUS_PAGE: Clear the DMA page(s) used for status
 *	updates from PF.
 */
enum vfdi_op {
	VFDI_OP_RESPONSE = 0,
	VFDI_OP_INIT_EVQ = 1,
	VFDI_OP_INIT_RXQ = 2,
	VFDI_OP_INIT_TXQ = 3,
	VFDI_OP_FINI_ALL_QUEUES = 4,
	VFDI_OP_INSERT_FILTER = 5,
	VFDI_OP_REMOVE_ALL_FILTERS = 6,
	VFDI_OP_SET_STATUS_PAGE = 7,
	VFDI_OP_CLEAR_STATUS_PAGE = 8,
	VFDI_OP_LIMIT,
};

/* Response codes for VFDI operations. Other values may be used in future. */
#define VFDI_RC_SUCCESS 0
#define VFDI_RC_ENOMEM (-12)
#define VFDI_RC_EINVAL (-22)
#define VFDI_RC_EOPNOTSUPP (-95)
#define VFDI_RC_ETIMEDOUT (-110)

/**
 * struct vfdi_req - Request from VF driver to PF driver
 * @op: Operation code or response indicator, taken from &enum vfdi_op.
 * @rc: Response code.  Set to 0 on success or a negative error code on failure.
 * @u.init_evq.index: Index of event queue to create.
 * @u.init_evq.buf_count: Number of 4k buffers backing event queue.
 * @u.init_evq.addr: Array of length %u.init_evq.buf_count containing DMA
 *	address of each page backing the event queue.
 * @u.init_rxq.index: Index of receive queue to create.
 * @u.init_rxq.buf_count: Number of 4k buffers backing receive queue.
 * @u.init_rxq.evq: Instance of event queue to target receive events at.
 * @u.init_rxq.label: Label used in receive events.
 * @u.init_rxq.flags: Unused.
 * @u.init_rxq.addr: Array of length %u.init_rxq.buf_count containing DMA
 *	address of each page backing the receive queue.
 * @u.init_txq.index: Index of transmit queue to create.
 * @u.init_txq.buf_count: Number of 4k buffers backing transmit queue.
 * @u.init_txq.evq: Instance of event queue to target transmit completion
 *	events at.
 * @u.init_txq.label: Label used in transmit completion events.
 * @u.init_txq.flags: Checksum offload flags.
 * @u.init_txq.addr: Array of length %u.init_txq.buf_count containing DMA
 *	address of each page backing the transmit queue.
 * @u.mac_filter.rxq: Insert MAC filter at VF local address/VLAN targeting
 *	all traffic at this receive queue.
 * @u.mac_filter.flags: MAC filter flags.
 * @u.set_status_page.dma_addr: Base address for the &struct vfdi_status.
 *	This address must be page-aligned and the PF may write up to a
 *	whole page (allowing for extension of the structure).
 * @u.set_status_page.peer_page_count: Number of additional pages the VF
 *	has provided into which peer addresses may be DMAd.
 * @u.set_status_page.peer_page_addr: Array of DMA addresses of pages.
 *	If the number of peers exceeds 256, then the VF must provide
 *	additional pages in this array.  The PF will then DMA up to
 *	512 vfdi_endpoint structures into each page.  These addresses
 *	must be page-aligned.
 */
struct vfdi_req {
	u32 op;
	u32 reserved1;
	s32 rc;
	u32 reserved2;
	union {
		struct {
			u32 index;
			u32 buf_count;
			u64 addr[];
		} init_evq;
		struct {
			u32 index;
			u32 buf_count;
			u32 evq;
			u32 label;
			u32 flags;
#define VFDI_RXQ_FLAG_SCATTER_EN 1
			u32 reserved;
			u64 addr[];
		} init_rxq;
		struct {
			u32 index;
			u32 buf_count;
			u32 evq;
			u32 label;
			u32 flags;
#define VFDI_TXQ_FLAG_IP_CSUM_DIS 1
#define VFDI_TXQ_FLAG_TCPUDP_CSUM_DIS 2
			u32 reserved;
			u64 addr[];
		} init_txq;
		struct {
			u32 rxq;
			u32 flags;
#define VFDI_MAC_FILTER_FLAG_RSS 1
#define VFDI_MAC_FILTER_FLAG_SCATTER 2
		} mac_filter;
		struct {
			u64 dma_addr;
			u64 peer_page_count;
			u64 peer_page_addr[];
		} set_status_page;
	} u;
};

/**
 * struct vfdi_status - Status provided by PF driver to VF driver
 * @generation_start: A generation count DMA'd to VF *before* the
 *	rest of the structure.
 * @generation_end: A generation count DMA'd to VF *after* the
 *	rest of the structure.
 * @version: Version of this structure; currently set to 1.  Later
 *	versions must either be layout-compatible or only be sent to VFs
 *	that specifically request them.
 * @length: Total length of this structure including embedded tables
 * @vi_scale: log2 the number of VIs available on this VF. This quantity
 *	is used by the hardware for register decoding.
 * @max_tx_channels: The maximum number of transmit queues the VF can use.
 * @rss_rxq_count: The number of receive queues present in the shared RSS
 *	indirection table.
 * @peer_count: Total number of peers in the complete peer list.  If larger
 *	than ARRAY_SIZE(%peers), then the VF must provide sufficient
 *	additional pages each of which is filled with vfdi_endpoint structures.
 * @local: The MAC address and outer VLAN tag of *this* VF
 * @peers: Table of peer addresses.  The @tci fields in these structures
 *	are currently unused and must be ignored.  Additional peers are
 *	written into any additional pages provided by the VF.
 * @timer_quantum_ns: Timer quantum (nominal period between timer ticks)
 *	for interrupt moderation timers, in nanoseconds. This member is only
 *	present if @length is sufficiently large.
 */
struct vfdi_status {
	u32 generation_start;
	u32 generation_end;
	u32 version;
	u32 length;
	u8 vi_scale;
	u8 max_tx_channels;
	u8 rss_rxq_count;
	u8 reserved1;
	u16 peer_count;
	u16 reserved2;
	struct vfdi_endpoint local;
	struct vfdi_endpoint peers[256];

	/* Members below here extend version 1 of this structure */
	u32 timer_quantum_ns;
};

#endif
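
Editorial note: the DOC block above describes how a VF publishes a request address as four mailbox events, each carrying 16 address bits, an event type and a running 8-bit sequence number. A userspace sketch of that packing, built directly from the LBN/WIDTH values defined in this header (the address and sequence number are invented for illustration):

#include <stdio.h>
#include <stdint.h>

#define VFDI_EV_DATA_LBN 0
#define VFDI_EV_TYPE_LBN 16
#define VFDI_EV_SEQ_LBN 24
#define VFDI_EV_TYPE_REQ_WORD0 0

int main(void)
{
	uint64_t addr = 0x1234567890ab0000ull;	/* page-aligned request address */
	uint8_t seq = 42;

	for (int word = 0; word < 4; word++) {
		uint32_t ev =
			((uint32_t)((addr >> (16 * word)) & 0xffff)
					<< VFDI_EV_DATA_LBN) |
			((uint32_t)(VFDI_EV_TYPE_REQ_WORD0 + word)
					<< VFDI_EV_TYPE_LBN) |
			((uint32_t)((seq + word) & 0xff)
					<< VFDI_EV_SEQ_LBN);
		printf("REQ_WORD%d event: %#010x\n", word, ev);
	}
	return 0;
}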
34
drivers/net/ethernet/sfc/siena/workarounds.h
Normal file
@@ -0,0 +1,34 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2006-2013 Solarflare Communications Inc.
 */

#ifndef EFX_WORKAROUNDS_H
#define EFX_WORKAROUNDS_H

/*
 * Hardware workarounds.
 * Bug numbers are from Solarflare's Bugzilla.
 */

#define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0)
#define EFX_WORKAROUND_EF10(efx) (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
#define EFX_WORKAROUND_10G(efx) 1

/* Bit-bashed I2C reads cause performance drop */
#define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G
/* Legacy interrupt storm when interrupt fifo fills */
#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA

/* Lockup when writing event block registers at gen2/gen3 */
#define EFX_EF10_WORKAROUND_35388(efx) \
	(((struct efx_ef10_nic_data *)efx->nic_data)->workaround_35388)
#define EFX_WORKAROUND_35388(efx) \
	(efx_nic_rev(efx) == EFX_REV_HUNT_A0 && EFX_EF10_WORKAROUND_35388(efx))

/* Moderation timer access must go through MCDI */
#define EFX_EF10_WORKAROUND_61265(efx) \
	(((struct efx_ef10_nic_data *)efx->nic_data)->workaround_61265)

#endif /* EFX_WORKAROUNDS_H */
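
Editorial note: these macros gate call sites on NIC revision or on a per-NIC capability flag. A standalone userspace model of the per-NIC flag pattern (the stub structs and parenthesised macro here are illustrative, not the commit's code):

#include <stdio.h>
#include <stdbool.h>

struct efx_ef10_nic_data { bool workaround_61265; };	/* stub */
struct efx_nic { void *nic_data; };			/* stub */

#define EFX_EF10_WORKAROUND_61265(efx) \
	(((struct efx_ef10_nic_data *)(efx)->nic_data)->workaround_61265)

int main(void)
{
	struct efx_ef10_nic_data data = { .workaround_61265 = true };
	struct efx_nic nic = { .nic_data = &data };

	if (EFX_EF10_WORKAROUND_61265(&nic))
		printf("moderation timer writes must go via MCDI\n");
	else
		printf("direct timer register writes are safe\n");
	return 0;
}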