net: bcmasp: Add support for wake on net filters
Add support for wake on network filters. The max match is 256 bytes.

Signed-off-by: Justin Chen <justin.chen@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit c5d511c495
parent a2f0751206
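Editor's note (not part of this commit): the wake filters added here are installed through the standard ethtool RX NFC ioctl path that this patch wires up (ETHTOOL_SRXCLSRLINS with ring_cookie set to RX_CLS_FLOW_WAKE), and WAKE_FILTER must also be enabled in the interface's Wake-on-LAN options for the rule to be programmed at suspend. The following is a minimal userspace sketch under those assumptions; the interface name "eth0" and the UDP destination port are illustrative, not taken from the patch.

/* Editor's sketch: install a wake network filter via ETHTOOL_SRXCLSRLINS.
 * "eth0" and UDP destination port 9 are assumptions for illustration.
 */
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc = { 0 };
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	nfc.cmd = ETHTOOL_SRXCLSRLINS;
	nfc.fs.flow_type = UDP_V4_FLOW;
	nfc.fs.h_u.udp_ip4_spec.pdst = htons(9);	/* match UDP dst port 9 */
	nfc.fs.m_u.udp_ip4_spec.pdst = htons(0xFFFF);
	nfc.fs.ring_cookie = RX_CLS_FLOW_WAKE;		/* mark as a wake filter */
	nfc.fs.location = RX_CLS_LOC_ANY;		/* let the driver pick a slot */

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_SRXCLSRLINS");
		return 1;
	}

	printf("wake filter installed at location %u\n", nfc.fs.location);
	return 0;
}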
@@ -127,6 +127,597 @@ void bcmasp_flush_rx_port(struct bcmasp_intf *intf)
	rx_ctrl_core_wl(priv, mask, priv->hw_info->rx_ctrl_flush);
}

static void bcmasp_netfilt_hw_en_wake(struct bcmasp_priv *priv,
				      struct bcmasp_net_filter *nfilt)
{
	rx_filter_core_wl(priv, ASP_RX_FILTER_NET_OFFSET_L3_1(64),
			  ASP_RX_FILTER_NET_OFFSET(nfilt->hw_index));

	rx_filter_core_wl(priv, ASP_RX_FILTER_NET_OFFSET_L2(32) |
			  ASP_RX_FILTER_NET_OFFSET_L3_0(32) |
			  ASP_RX_FILTER_NET_OFFSET_L3_1(96) |
			  ASP_RX_FILTER_NET_OFFSET_L4(32),
			  ASP_RX_FILTER_NET_OFFSET(nfilt->hw_index + 1));

	rx_filter_core_wl(priv, ASP_RX_FILTER_NET_CFG_CH(nfilt->port + 8) |
			  ASP_RX_FILTER_NET_CFG_EN |
			  ASP_RX_FILTER_NET_CFG_L2_EN |
			  ASP_RX_FILTER_NET_CFG_L3_EN |
			  ASP_RX_FILTER_NET_CFG_L4_EN |
			  ASP_RX_FILTER_NET_CFG_L3_FRM(2) |
			  ASP_RX_FILTER_NET_CFG_L4_FRM(2) |
			  ASP_RX_FILTER_NET_CFG_UMC(nfilt->port),
			  ASP_RX_FILTER_NET_CFG(nfilt->hw_index));

	rx_filter_core_wl(priv, ASP_RX_FILTER_NET_CFG_CH(nfilt->port + 8) |
			  ASP_RX_FILTER_NET_CFG_EN |
			  ASP_RX_FILTER_NET_CFG_L2_EN |
			  ASP_RX_FILTER_NET_CFG_L3_EN |
			  ASP_RX_FILTER_NET_CFG_L4_EN |
			  ASP_RX_FILTER_NET_CFG_L3_FRM(2) |
			  ASP_RX_FILTER_NET_CFG_L4_FRM(2) |
			  ASP_RX_FILTER_NET_CFG_UMC(nfilt->port),
			  ASP_RX_FILTER_NET_CFG(nfilt->hw_index + 1));
}

#define MAX_WAKE_FILTER_SIZE		256
enum asp_netfilt_reg_type {
	ASP_NETFILT_MATCH = 0,
	ASP_NETFILT_MASK,
	ASP_NETFILT_MAX
};

static int bcmasp_netfilt_get_reg_offset(struct bcmasp_priv *priv,
					 struct bcmasp_net_filter *nfilt,
					 enum asp_netfilt_reg_type reg_type,
					 u32 offset)
{
	u32 block_index, filter_sel;

	if (offset < 32) {
		block_index = ASP_RX_FILTER_NET_L2;
		filter_sel = nfilt->hw_index;
	} else if (offset < 64) {
		block_index = ASP_RX_FILTER_NET_L2;
		filter_sel = nfilt->hw_index + 1;
	} else if (offset < 96) {
		block_index = ASP_RX_FILTER_NET_L3_0;
		filter_sel = nfilt->hw_index;
	} else if (offset < 128) {
		block_index = ASP_RX_FILTER_NET_L3_0;
		filter_sel = nfilt->hw_index + 1;
	} else if (offset < 160) {
		block_index = ASP_RX_FILTER_NET_L3_1;
		filter_sel = nfilt->hw_index;
	} else if (offset < 192) {
		block_index = ASP_RX_FILTER_NET_L3_1;
		filter_sel = nfilt->hw_index + 1;
	} else if (offset < 224) {
		block_index = ASP_RX_FILTER_NET_L4;
		filter_sel = nfilt->hw_index;
	} else if (offset < 256) {
		block_index = ASP_RX_FILTER_NET_L4;
		filter_sel = nfilt->hw_index + 1;
	} else {
		return -EINVAL;
	}

	switch (reg_type) {
	case ASP_NETFILT_MATCH:
		return ASP_RX_FILTER_NET_PAT(filter_sel, block_index,
					     (offset % 32));
	case ASP_NETFILT_MASK:
		return ASP_RX_FILTER_NET_MASK(filter_sel, block_index,
					      (offset % 32));
	default:
		return -EINVAL;
	}
}

static void bcmasp_netfilt_wr(struct bcmasp_priv *priv,
			      struct bcmasp_net_filter *nfilt,
			      enum asp_netfilt_reg_type reg_type,
			      u32 val, u32 offset)
{
	int reg_offset;

	/* HW only accepts 4 byte aligned writes */
	if (!IS_ALIGNED(offset, 4) || offset > MAX_WAKE_FILTER_SIZE)
		return;

	reg_offset = bcmasp_netfilt_get_reg_offset(priv, nfilt, reg_type,
						   offset);

	rx_filter_core_wl(priv, val, reg_offset);
}

static u32 bcmasp_netfilt_rd(struct bcmasp_priv *priv,
			     struct bcmasp_net_filter *nfilt,
			     enum asp_netfilt_reg_type reg_type,
			     u32 offset)
{
	int reg_offset;

	/* HW only accepts 4 byte aligned writes */
	if (!IS_ALIGNED(offset, 4) || offset > MAX_WAKE_FILTER_SIZE)
		return 0;

	reg_offset = bcmasp_netfilt_get_reg_offset(priv, nfilt, reg_type,
						   offset);

	return rx_filter_core_rl(priv, reg_offset);
}

static int bcmasp_netfilt_wr_m_wake(struct bcmasp_priv *priv,
				    struct bcmasp_net_filter *nfilt,
				    u32 offset, void *match, void *mask,
				    size_t size)
{
	u32 shift, mask_val = 0, match_val = 0;
	bool first_byte = true;

	if ((offset + size) > MAX_WAKE_FILTER_SIZE)
		return -EINVAL;

	while (size--) {
		/* The HW only accepts 4 byte aligned writes, so if we
		 * begin unaligned or if remaining bytes less than 4,
		 * we need to read then write to avoid losing current
		 * register state
		 */
		if (first_byte && (!IS_ALIGNED(offset, 4) || size < 3)) {
			match_val = bcmasp_netfilt_rd(priv, nfilt,
						      ASP_NETFILT_MATCH,
						      ALIGN_DOWN(offset, 4));
			mask_val = bcmasp_netfilt_rd(priv, nfilt,
						     ASP_NETFILT_MASK,
						     ALIGN_DOWN(offset, 4));
		}

		shift = (3 - (offset % 4)) * 8;
		match_val &= ~GENMASK(shift + 7, shift);
		mask_val &= ~GENMASK(shift + 7, shift);
		match_val |= (u32)(*((u8 *)match) << shift);
		mask_val |= (u32)(*((u8 *)mask) << shift);

		/* If last byte or last byte of word, write to reg */
		if (!size || ((offset % 4) == 3)) {
			bcmasp_netfilt_wr(priv, nfilt, ASP_NETFILT_MATCH,
					  match_val, ALIGN_DOWN(offset, 4));
			bcmasp_netfilt_wr(priv, nfilt, ASP_NETFILT_MASK,
					  mask_val, ALIGN_DOWN(offset, 4));
			first_byte = true;
		} else {
			first_byte = false;
		}

		offset++;
		match++;
		mask++;
	}

	return 0;
}

static void bcmasp_netfilt_reset_hw(struct bcmasp_priv *priv,
				    struct bcmasp_net_filter *nfilt)
{
	int i;

	for (i = 0; i < MAX_WAKE_FILTER_SIZE; i += 4) {
		bcmasp_netfilt_wr(priv, nfilt, ASP_NETFILT_MATCH, 0, i);
		bcmasp_netfilt_wr(priv, nfilt, ASP_NETFILT_MASK, 0, i);
	}
}

static void bcmasp_netfilt_tcpip4_wr(struct bcmasp_priv *priv,
				     struct bcmasp_net_filter *nfilt,
				     struct ethtool_tcpip4_spec *match,
				     struct ethtool_tcpip4_spec *mask,
				     u32 offset)
{
	__be16 val_16, mask_16;

	val_16 = htons(ETH_P_IP);
	mask_16 = htons(0xFFFF);
	bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2) + offset,
				 &val_16, &mask_16, sizeof(val_16));
	bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 1,
				 &match->tos, &mask->tos,
				 sizeof(match->tos));
	bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 12,
				 &match->ip4src, &mask->ip4src,
				 sizeof(match->ip4src));
	bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 16,
				 &match->ip4dst, &mask->ip4dst,
				 sizeof(match->ip4dst));
	bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 20,
				 &match->psrc, &mask->psrc,
				 sizeof(match->psrc));
	bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 22,
				 &match->pdst, &mask->pdst,
				 sizeof(match->pdst));
}

static void bcmasp_netfilt_tcpip6_wr(struct bcmasp_priv *priv,
				     struct bcmasp_net_filter *nfilt,
				     struct ethtool_tcpip6_spec *match,
				     struct ethtool_tcpip6_spec *mask,
				     u32 offset)
{
	__be16 val_16, mask_16;

	val_16 = htons(ETH_P_IPV6);
	mask_16 = htons(0xFFFF);
	bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2) + offset,
				 &val_16, &mask_16, sizeof(val_16));
	val_16 = htons(match->tclass << 4);
	mask_16 = htons(mask->tclass << 4);
	bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset,
				 &val_16, &mask_16, sizeof(val_16));
	bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 8,
				 &match->ip6src, &mask->ip6src,
				 sizeof(match->ip6src));
	bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 24,
				 &match->ip6dst, &mask->ip6dst,
				 sizeof(match->ip6dst));
	bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 40,
				 &match->psrc, &mask->psrc,
				 sizeof(match->psrc));
	bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 42,
				 &match->pdst, &mask->pdst,
				 sizeof(match->pdst));
}

static int bcmasp_netfilt_wr_to_hw(struct bcmasp_priv *priv,
				   struct bcmasp_net_filter *nfilt)
{
	struct ethtool_rx_flow_spec *fs = &nfilt->fs;
	unsigned int offset = 0;
	__be16 val_16, mask_16;
	u8 val_8, mask_8;

	/* Currently only supports wake filters */
	if (!nfilt->wake_filter)
		return -EINVAL;

	bcmasp_netfilt_reset_hw(priv, nfilt);

	if (fs->flow_type & FLOW_MAC_EXT) {
		bcmasp_netfilt_wr_m_wake(priv, nfilt, 0, &fs->h_ext.h_dest,
					 &fs->m_ext.h_dest,
					 sizeof(fs->h_ext.h_dest));
	}

	if ((fs->flow_type & FLOW_EXT) &&
	    (fs->m_ext.vlan_etype || fs->m_ext.vlan_tci)) {
		bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2),
					 &fs->h_ext.vlan_etype,
					 &fs->m_ext.vlan_etype,
					 sizeof(fs->h_ext.vlan_etype));
		bcmasp_netfilt_wr_m_wake(priv, nfilt, ((ETH_ALEN * 2) + 2),
					 &fs->h_ext.vlan_tci,
					 &fs->m_ext.vlan_tci,
					 sizeof(fs->h_ext.vlan_tci));
		offset += VLAN_HLEN;
	}

	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case ETHER_FLOW:
		bcmasp_netfilt_wr_m_wake(priv, nfilt, 0,
					 &fs->h_u.ether_spec.h_dest,
					 &fs->m_u.ether_spec.h_dest,
					 sizeof(fs->h_u.ether_spec.h_dest));
		bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_ALEN,
					 &fs->h_u.ether_spec.h_source,
					 &fs->m_u.ether_spec.h_source,
					 sizeof(fs->h_u.ether_spec.h_source));
		bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2) + offset,
					 &fs->h_u.ether_spec.h_proto,
					 &fs->m_u.ether_spec.h_proto,
					 sizeof(fs->h_u.ether_spec.h_proto));

		break;
	case IP_USER_FLOW:
		val_16 = htons(ETH_P_IP);
		mask_16 = htons(0xFFFF);
		bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2) + offset,
					 &val_16, &mask_16, sizeof(val_16));
		bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 1,
					 &fs->h_u.usr_ip4_spec.tos,
					 &fs->m_u.usr_ip4_spec.tos,
					 sizeof(fs->h_u.usr_ip4_spec.tos));
		bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 9,
					 &fs->h_u.usr_ip4_spec.proto,
					 &fs->m_u.usr_ip4_spec.proto,
					 sizeof(fs->h_u.usr_ip4_spec.proto));
		bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 12,
					 &fs->h_u.usr_ip4_spec.ip4src,
					 &fs->m_u.usr_ip4_spec.ip4src,
					 sizeof(fs->h_u.usr_ip4_spec.ip4src));
		bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 16,
					 &fs->h_u.usr_ip4_spec.ip4dst,
					 &fs->m_u.usr_ip4_spec.ip4dst,
					 sizeof(fs->h_u.usr_ip4_spec.ip4dst));
		if (!fs->m_u.usr_ip4_spec.l4_4_bytes)
			break;

		/* Only supports 20 byte IPv4 header */
		val_8 = 0x45;
		mask_8 = 0xFF;
		bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset,
					 &val_8, &mask_8, sizeof(val_8));
		bcmasp_netfilt_wr_m_wake(priv, nfilt,
					 ETH_HLEN + 20 + offset,
					 &fs->h_u.usr_ip4_spec.l4_4_bytes,
					 &fs->m_u.usr_ip4_spec.l4_4_bytes,
					 sizeof(fs->h_u.usr_ip4_spec.l4_4_bytes)
					 );
		break;
	case TCP_V4_FLOW:
		val_8 = IPPROTO_TCP;
		mask_8 = 0xFF;
		bcmasp_netfilt_tcpip4_wr(priv, nfilt, &fs->h_u.tcp_ip4_spec,
					 &fs->m_u.tcp_ip4_spec, offset);
		bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 9,
					 &val_8, &mask_8, sizeof(val_8));
		break;
	case UDP_V4_FLOW:
		val_8 = IPPROTO_UDP;
		mask_8 = 0xFF;
		bcmasp_netfilt_tcpip4_wr(priv, nfilt, &fs->h_u.udp_ip4_spec,
					 &fs->m_u.udp_ip4_spec, offset);

		bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 9,
					 &val_8, &mask_8, sizeof(val_8));
		break;
	case TCP_V6_FLOW:
		val_8 = IPPROTO_TCP;
		mask_8 = 0xFF;
		bcmasp_netfilt_tcpip6_wr(priv, nfilt, &fs->h_u.tcp_ip6_spec,
					 &fs->m_u.tcp_ip6_spec, offset);
		bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 6,
					 &val_8, &mask_8, sizeof(val_8));
		break;
	case UDP_V6_FLOW:
		val_8 = IPPROTO_UDP;
		mask_8 = 0xFF;
		bcmasp_netfilt_tcpip6_wr(priv, nfilt, &fs->h_u.udp_ip6_spec,
					 &fs->m_u.udp_ip6_spec, offset);
		bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 6,
					 &val_8, &mask_8, sizeof(val_8));
		break;
	}

	bcmasp_netfilt_hw_en_wake(priv, nfilt);

	return 0;
}

void bcmasp_netfilt_suspend(struct bcmasp_intf *intf)
{
	struct bcmasp_priv *priv = intf->parent;
	bool write = false;
	int ret, i;

	/* Write all filters to HW */
	for (i = 0; i < NUM_NET_FILTERS; i++) {
		/* If the filter does not match the port, skip programming. */
		if (!priv->net_filters[i].claimed ||
		    priv->net_filters[i].port != intf->port)
			continue;

		if (i > 0 && (i % 2) &&
		    priv->net_filters[i].wake_filter &&
		    priv->net_filters[i - 1].wake_filter)
			continue;

		ret = bcmasp_netfilt_wr_to_hw(priv, &priv->net_filters[i]);
		if (!ret)
			write = true;
	}

	/* Successfully programmed at least one wake filter
	 * so enable top level wake config
	 */
	if (write)
		rx_filter_core_wl(priv, (ASP_RX_FILTER_OPUT_EN |
				  ASP_RX_FILTER_LNR_MD |
				  ASP_RX_FILTER_GEN_WK_EN |
				  ASP_RX_FILTER_NT_FLT_EN),
				  ASP_RX_FILTER_BLK_CTRL);
}

void bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
				   u32 *rule_cnt)
{
	struct bcmasp_priv *priv = intf->parent;
	int j = 0, i;

	for (i = 0; i < NUM_NET_FILTERS; i++) {
		if (!priv->net_filters[i].claimed ||
		    priv->net_filters[i].port != intf->port)
			continue;

		if (i > 0 && (i % 2) &&
		    priv->net_filters[i].wake_filter &&
		    priv->net_filters[i - 1].wake_filter)
			continue;

		rule_locs[j++] = priv->net_filters[i].fs.location;
	}

	*rule_cnt = j;
}

int bcmasp_netfilt_get_active(struct bcmasp_intf *intf)
{
	struct bcmasp_priv *priv = intf->parent;
	int cnt = 0, i;

	for (i = 0; i < NUM_NET_FILTERS; i++) {
		if (!priv->net_filters[i].claimed ||
		    priv->net_filters[i].port != intf->port)
			continue;

		/* Skip over a wake filter pair */
		if (i > 0 && (i % 2) &&
		    priv->net_filters[i].wake_filter &&
		    priv->net_filters[i - 1].wake_filter)
			continue;

		cnt++;
	}

	return cnt;
}

bool bcmasp_netfilt_check_dup(struct bcmasp_intf *intf,
			      struct ethtool_rx_flow_spec *fs)
{
	struct bcmasp_priv *priv = intf->parent;
	struct ethtool_rx_flow_spec *cur;
	size_t fs_size = 0;
	int i;

	for (i = 0; i < NUM_NET_FILTERS; i++) {
		if (!priv->net_filters[i].claimed ||
		    priv->net_filters[i].port != intf->port)
			continue;

		cur = &priv->net_filters[i].fs;

		if (cur->flow_type != fs->flow_type ||
		    cur->ring_cookie != fs->ring_cookie)
			continue;

		switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
		case ETHER_FLOW:
			fs_size = sizeof(struct ethhdr);
			break;
		case IP_USER_FLOW:
			fs_size = sizeof(struct ethtool_usrip4_spec);
			break;
		case TCP_V6_FLOW:
		case UDP_V6_FLOW:
			fs_size = sizeof(struct ethtool_tcpip6_spec);
			break;
		case TCP_V4_FLOW:
		case UDP_V4_FLOW:
			fs_size = sizeof(struct ethtool_tcpip4_spec);
			break;
		default:
			continue;
		}

		if (memcmp(&cur->h_u, &fs->h_u, fs_size) ||
		    memcmp(&cur->m_u, &fs->m_u, fs_size))
			continue;

		if (cur->flow_type & FLOW_EXT) {
			if (cur->h_ext.vlan_etype != fs->h_ext.vlan_etype ||
			    cur->m_ext.vlan_etype != fs->m_ext.vlan_etype ||
			    cur->h_ext.vlan_tci != fs->h_ext.vlan_tci ||
			    cur->m_ext.vlan_tci != fs->m_ext.vlan_tci ||
			    cur->h_ext.data[0] != fs->h_ext.data[0])
				continue;
		}
		if (cur->flow_type & FLOW_MAC_EXT) {
			if (memcmp(&cur->h_ext.h_dest,
				   &fs->h_ext.h_dest, ETH_ALEN) ||
			    memcmp(&cur->m_ext.h_dest,
				   &fs->m_ext.h_dest, ETH_ALEN))
				continue;
		}

		return true;
	}

	return false;
}

/* If no network filter found, return open filter.
 * If no more open filters return NULL
 */
struct bcmasp_net_filter *bcmasp_netfilt_get_init(struct bcmasp_intf *intf,
						  int loc, bool wake_filter,
						  bool init)
{
	struct bcmasp_net_filter *nfilter = NULL;
	struct bcmasp_priv *priv = intf->parent;
	int i, open_index = -1;

	/* Check whether we exceed the filter table capacity */
	if (loc != RX_CLS_LOC_ANY && loc >= NUM_NET_FILTERS)
		return ERR_PTR(-EINVAL);

	/* If the filter location is busy (already claimed) and we are initializing
	 * the filter (insertion), return a busy error code.
	 */
	if (loc != RX_CLS_LOC_ANY && init && priv->net_filters[loc].claimed)
		return ERR_PTR(-EBUSY);

	/* We need two filters for wake-up, so we cannot use an odd filter */
	if (wake_filter && loc != RX_CLS_LOC_ANY && (loc % 2))
		return ERR_PTR(-EINVAL);

	/* Initialize the loop index based on the desired location or from 0 */
	i = loc == RX_CLS_LOC_ANY ? 0 : loc;

	for ( ; i < NUM_NET_FILTERS; i++) {
		/* Found matching network filter */
		if (!init &&
		    priv->net_filters[i].claimed &&
		    priv->net_filters[i].hw_index == i &&
		    priv->net_filters[i].port == intf->port)
			return &priv->net_filters[i];

		/* If we don't need a new filter or new filter already found */
		if (!init || open_index >= 0)
			continue;

		/* A wake filter consolidates two filters to cover more bytes.
		 * A wake filter slot is open if...
		 * 1. It is an even filter
		 * 2. The current and next filter are not claimed
		 */
		if (wake_filter && !(i % 2) && !priv->net_filters[i].claimed &&
		    !priv->net_filters[i + 1].claimed)
			open_index = i;
		else if (!priv->net_filters[i].claimed)
			open_index = i;
	}

	if (open_index >= 0) {
		nfilter = &priv->net_filters[open_index];
		nfilter->claimed = true;
		nfilter->port = intf->port;
		nfilter->hw_index = open_index;
	}

	if (wake_filter && open_index >= 0) {
		/* Claim next filter */
		priv->net_filters[open_index + 1].claimed = true;
		priv->net_filters[open_index + 1].wake_filter = true;
		nfilter->wake_filter = true;
	}

	return nfilter ? nfilter : ERR_PTR(-EINVAL);
}

void bcmasp_netfilt_release(struct bcmasp_intf *intf,
			    struct bcmasp_net_filter *nfilt)
{
	struct bcmasp_priv *priv = intf->parent;

	if (nfilt->wake_filter) {
		memset(&priv->net_filters[nfilt->hw_index + 1], 0,
		       sizeof(struct bcmasp_net_filter));
	}

	memset(nfilt, 0, sizeof(struct bcmasp_net_filter));
}

static void bcmasp_addr_to_uint(unsigned char *addr, u32 *high, u32 *low)
{
	*high = (u32)(addr[0] << 8 | addr[1]);
@@ -334,6 +925,9 @@ static void bcmasp_core_init_filters(struct bcmasp_priv *priv)
		priv->mda_filters[i].en = 0;
	}

	for (i = 0; i < NUM_NET_FILTERS; i++)
		rx_filter_core_wl(priv, 0x0, ASP_RX_FILTER_NET_CFG(i));

	/* Top level filter enable bit should be enabled at all times, set
	 * GEN_WAKE_CLEAR to clear the network filter wake-up which would
	 * otherwise be sticky
@@ -657,6 +1251,7 @@ static int bcmasp_probe(struct platform_device *pdev)
	spin_lock_init(&priv->mda_lock);
	spin_lock_init(&priv->clk_lock);
	mutex_init(&priv->wol_lock);
	mutex_init(&priv->net_lock);
	INIT_LIST_HEAD(&priv->intfs);

	pdata = device_get_match_data(&pdev->dev);

@@ -106,6 +106,14 @@
#define ASP_RX_FILTER_NET_OFFSET_L3_1(val)	((val) << 16)
#define ASP_RX_FILTER_NET_OFFSET_L4(val)	((val) << 24)

enum asp_rx_net_filter_block {
	ASP_RX_FILTER_NET_L2 = 0,
	ASP_RX_FILTER_NET_L3_0,
	ASP_RX_FILTER_NET_L3_1,
	ASP_RX_FILTER_NET_L4,
	ASP_RX_FILTER_NET_BLOCK_MAX
};

#define ASP_EDPKT_OFFSET			0x9c000
#define ASP_EDPKT_ENABLE			0x4
#define ASP_EDPKT_ENABLE_EN			BIT(0)
@@ -309,6 +317,17 @@ struct bcmasp_intf {
	unsigned int			wol_irq_enabled:1;
};

#define NUM_NET_FILTERS				32
struct bcmasp_net_filter {
	struct ethtool_rx_flow_spec	fs;

	bool				claimed;
	bool				wake_filter;

	int				port;
	unsigned int			hw_index;
};

#define NUM_MDA_FILTERS				32
struct bcmasp_mda_filter {
	/* Current owner of this filter */
@@ -361,6 +380,11 @@ struct bcmasp_priv {

	/* Protects accesses to ASP_CTRL_CLOCK_CTRL */
	spinlock_t			clk_lock;

	struct bcmasp_net_filter	net_filters[NUM_NET_FILTERS];

	/* Network filter lock */
	struct mutex			net_lock;
};

static inline unsigned long bcmasp_intf_rx_desc_read(struct bcmasp_intf *intf)
@@ -518,4 +542,20 @@ void bcmasp_disable_all_filters(struct bcmasp_intf *intf);

void bcmasp_core_clock_set_intf(struct bcmasp_intf *intf, bool en);

struct bcmasp_net_filter *bcmasp_netfilt_get_init(struct bcmasp_intf *intf,
						  int loc, bool wake_filter,
						  bool init);

bool bcmasp_netfilt_check_dup(struct bcmasp_intf *intf,
			      struct ethtool_rx_flow_spec *fs);

void bcmasp_netfilt_release(struct bcmasp_intf *intf,
			    struct bcmasp_net_filter *nfilt);

int bcmasp_netfilt_get_active(struct bcmasp_intf *intf);

void bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
				   u32 *rule_cnt);

void bcmasp_netfilt_suspend(struct bcmasp_intf *intf);
#endif

@@ -30,7 +30,7 @@ static void bcmasp_set_msglevel(struct net_device *dev, u32 level)
	intf->msg_enable = level;
}

-#define BCMASP_SUPPORTED_WAKE (WAKE_MAGIC | WAKE_MAGICSECURE)
+#define BCMASP_SUPPORTED_WAKE (WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER)
static void bcmasp_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
@@ -64,6 +64,133 @@ static int bcmasp_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
	return 0;
}

static int bcmasp_flow_insert(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	struct bcmasp_net_filter *nfilter;
	u32 loc = cmd->fs.location;
	bool wake = false;

	if (cmd->fs.ring_cookie == RX_CLS_FLOW_WAKE)
		wake = true;

	/* Currently only supports WAKE filters */
	if (!wake)
		return -EOPNOTSUPP;

	switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case ETHER_FLOW:
	case IP_USER_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* Check if filter already exists */
	if (bcmasp_netfilt_check_dup(intf, &cmd->fs))
		return -EINVAL;

	nfilter = bcmasp_netfilt_get_init(intf, loc, wake, true);
	if (IS_ERR(nfilter))
		return PTR_ERR(nfilter);

	/* Return the location where we did insert the filter */
	cmd->fs.location = nfilter->hw_index;
	memcpy(&nfilter->fs, &cmd->fs, sizeof(struct ethtool_rx_flow_spec));

	/* Since we only support wake filters, defer register programming till
	 * suspend time.
	 */
	return 0;
}

static int bcmasp_flow_delete(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	struct bcmasp_net_filter *nfilter;

	nfilter = bcmasp_netfilt_get_init(intf, cmd->fs.location, false, false);
	if (IS_ERR(nfilter))
		return PTR_ERR(nfilter);

	bcmasp_netfilt_release(intf, nfilter);

	return 0;
}

static int bcmasp_flow_get(struct bcmasp_intf *intf, struct ethtool_rxnfc *cmd)
{
	struct bcmasp_net_filter *nfilter;

	nfilter = bcmasp_netfilt_get_init(intf, cmd->fs.location, false, false);
	if (IS_ERR(nfilter))
		return PTR_ERR(nfilter);

	memcpy(&cmd->fs, &nfilter->fs, sizeof(nfilter->fs));

	cmd->data = NUM_NET_FILTERS;

	return 0;
}

static int bcmasp_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	mutex_lock(&intf->parent->net_lock);

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = bcmasp_flow_insert(dev, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = bcmasp_flow_delete(dev, cmd);
		break;
	default:
		break;
	}

	mutex_unlock(&intf->parent->net_lock);

	return ret;
}

static int bcmasp_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			    u32 *rule_locs)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	int err = 0;

	mutex_lock(&intf->parent->net_lock);

	switch (cmd->cmd) {
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = bcmasp_netfilt_get_active(intf);
		/* We support specifying rule locations */
		cmd->data |= RX_CLS_LOC_SPECIAL;
		break;
	case ETHTOOL_GRXCLSRULE:
		err = bcmasp_flow_get(intf, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		bcmasp_netfilt_get_all_active(intf, rule_locs, &cmd->rule_cnt);
		cmd->data = NUM_NET_FILTERS;
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	mutex_unlock(&intf->parent->net_lock);

	return err;
}

const struct ethtool_ops bcmasp_ethtool_ops = {
	.get_drvinfo		= bcmasp_get_drvinfo,
	.get_link		= ethtool_op_get_link,
@@ -73,4 +200,6 @@ const struct ethtool_ops bcmasp_ethtool_ops = {
	.set_msglevel		= bcmasp_set_msglevel,
	.get_wol		= bcmasp_get_wol,
	.set_wol		= bcmasp_set_wol,
	.get_rxnfc		= bcmasp_get_rxnfc,
	.set_rxnfc		= bcmasp_set_rxnfc,
};

@@ -1300,6 +1300,9 @@ static void bcmasp_suspend_to_wol(struct bcmasp_intf *intf)
	}
	umac_wl(intf, reg, UMC_MPD_CTRL);

	if (intf->wolopts & WAKE_FILTER)
		bcmasp_netfilt_suspend(intf);

	/* UniMAC receive needs to be turned on */
	umac_enable_set(intf, UMC_CMD_RX_EN, 1);