Merge branch 'add-support-to-offload-macsec-using-netlink-update'
Emeel Hakim says:

====================
Add support to offload macsec using netlink update

This series adds support for offloading macsec as part of the netlink
update routine. Command example:

  $ ip link set link eth2 macsec0 type macsec offload mac

The above is done using the IFLA_MACSEC_OFFLOAD attribute, hence the
second patch, which dumps this attribute as part of the macsec dump.
====================

Link: https://lore.kernel.org/r/20230111150210.8246-1-ehakim@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 72863e08c3
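For reference, the command above is just an RTM_NEWLINK ("ip link set") request whose IFLA_MACSEC_OFFLOAD attribute is nested under IFLA_LINKINFO / IFLA_INFO_DATA; macsec_changelink() picks it up and calls the new macsec_update_offload(). The sketch below builds such a request with libmnl. It is not part of the series: the device name "macsec0", the choice of libmnl (iproute2 uses its own netlink helpers) and the bare-bones error handling are assumptions for illustration only.

/* Minimal sketch (not part of the series): the RTM_NEWLINK request that an
 * "ip link set ... type macsec offload mac" style command puts on the wire,
 * built with libmnl. "macsec0" is an assumed, already-existing macsec device.
 */
#include <stdio.h>
#include <time.h>
#include <net/if.h>
#include <sys/socket.h>
#include <libmnl/libmnl.h>
#include <linux/if_link.h>	/* IFLA_MACSEC_OFFLOAD, MACSEC_OFFLOAD_MAC */
#include <linux/rtnetlink.h>

int main(void)
{
	char buf[MNL_SOCKET_BUFFER_SIZE];
	struct nlattr *linkinfo, *infodata;
	struct mnl_socket *nl;
	struct ifinfomsg *ifm;
	struct nlmsghdr *nlh;

	/* RTM_NEWLINK on an existing device is the "ip link set" path */
	nlh = mnl_nlmsg_put_header(buf);
	nlh->nlmsg_type = RTM_NEWLINK;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	nlh->nlmsg_seq = time(NULL);

	ifm = mnl_nlmsg_put_extra_header(nlh, sizeof(*ifm));
	ifm->ifi_family = AF_UNSPEC;
	ifm->ifi_index = if_nametoindex("macsec0");	/* existing macsec device */

	/* IFLA_LINKINFO -> IFLA_INFO_KIND("macsec") + IFLA_INFO_DATA -> IFLA_MACSEC_OFFLOAD */
	linkinfo = mnl_attr_nest_start(nlh, IFLA_LINKINFO);
	mnl_attr_put_strz(nlh, IFLA_INFO_KIND, "macsec");
	infodata = mnl_attr_nest_start(nlh, IFLA_INFO_DATA);
	mnl_attr_put_u8(nlh, IFLA_MACSEC_OFFLOAD, MACSEC_OFFLOAD_MAC);
	mnl_attr_nest_end(nlh, infodata);
	mnl_attr_nest_end(nlh, linkinfo);

	nl = mnl_socket_open(NETLINK_ROUTE);
	if (!nl || mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID) < 0) {
		perror("mnl_socket");
		return 1;
	}
	if (mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) < 0) {
		perror("mnl_socket_sendto");
		return 1;
	}
	/* The ACK carries the verdict from macsec_changelink()/macsec_update_offload(),
	 * e.g. -EOPNOTSUPP if the lower device cannot offload, or -EBUSY if the
	 * device is running or already has rules configured. A complete client
	 * would read and check it here. */
	mnl_socket_close(nl);
	return 0;
}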
drivers/net/macsec.c
@@ -2583,18 +2583,58 @@ static bool macsec_is_configured(struct macsec_dev *macsec)
 	return false;
 }
 
-static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
+static int macsec_update_offload(struct net_device *dev, enum macsec_offload offload)
 {
-	struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1];
-	enum macsec_offload offload, prev_offload;
-	int (*func)(struct macsec_context *ctx);
-	struct nlattr **attrs = info->attrs;
-	struct net_device *dev;
+	enum macsec_offload prev_offload;
 	const struct macsec_ops *ops;
 	struct macsec_context ctx;
 	struct macsec_dev *macsec;
 	int ret = 0;
 
+	macsec = macsec_priv(dev);
+
+	/* Check if the offloading mode is supported by the underlying layers */
+	if (offload != MACSEC_OFFLOAD_OFF &&
+	    !macsec_check_offload(offload, macsec))
+		return -EOPNOTSUPP;
+
+	/* Check if the net device is busy. */
+	if (netif_running(dev))
+		return -EBUSY;
+
+	/* Check if the device already has rules configured: we do not support
+	 * rules migration.
+	 */
+	if (macsec_is_configured(macsec))
+		return -EBUSY;
+
+	prev_offload = macsec->offload;
+
+	ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload,
+			       macsec, &ctx);
+	if (!ops)
+		return -EOPNOTSUPP;
+
+	macsec->offload = offload;
+
+	ctx.secy = &macsec->secy;
+	ret = offload == MACSEC_OFFLOAD_OFF ? macsec_offload(ops->mdo_del_secy, &ctx)
+					    : macsec_offload(ops->mdo_add_secy, &ctx);
+	if (ret)
+		macsec->offload = prev_offload;
+
+	return ret;
+}
+
+static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
+{
+	struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1];
+	struct nlattr **attrs = info->attrs;
+	enum macsec_offload offload;
+	struct macsec_dev *macsec;
+	struct net_device *dev;
+	int ret = 0;
+
 	if (!attrs[MACSEC_ATTR_IFINDEX])
 		return -EINVAL;
 
@@ -2621,55 +2661,9 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
 	}
 
 	offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]);
-	if (macsec->offload == offload)
-		goto out;
 
-	/* Check if the offloading mode is supported by the underlying layers */
-	if (offload != MACSEC_OFFLOAD_OFF &&
-	    !macsec_check_offload(offload, macsec)) {
-		ret = -EOPNOTSUPP;
-		goto out;
-	}
-
-	/* Check if the net device is busy. */
-	if (netif_running(dev)) {
-		ret = -EBUSY;
-		goto out;
-	}
-
-	prev_offload = macsec->offload;
-	macsec->offload = offload;
-
-	/* Check if the device already has rules configured: we do not support
-	 * rules migration.
-	 */
-	if (macsec_is_configured(macsec)) {
-		ret = -EBUSY;
-		goto rollback;
-	}
-
-	ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload,
-			       macsec, &ctx);
-	if (!ops) {
-		ret = -EOPNOTSUPP;
-		goto rollback;
-	}
-
-	if (prev_offload == MACSEC_OFFLOAD_OFF)
-		func = ops->mdo_add_secy;
-	else
-		func = ops->mdo_del_secy;
-
-	ctx.secy = &macsec->secy;
-	ret = macsec_offload(func, &ctx);
-	if (ret)
-		goto rollback;
-
-	rtnl_unlock();
-	return 0;
-
-rollback:
-	macsec->offload = prev_offload;
+	if (macsec->offload != offload)
+		ret = macsec_update_offload(dev, offload);
 out:
 	rtnl_unlock();
 	return ret;
@@ -3817,6 +3811,8 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
 			     struct netlink_ext_ack *extack)
 {
 	struct macsec_dev *macsec = macsec_priv(dev);
+	bool macsec_offload_state_change = false;
+	enum macsec_offload offload;
 	struct macsec_tx_sc tx_sc;
 	struct macsec_secy secy;
 	int ret;
@@ -3840,8 +3836,18 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
 	if (ret)
 		goto cleanup;
 
+	if (data[IFLA_MACSEC_OFFLOAD]) {
+		offload = nla_get_u8(data[IFLA_MACSEC_OFFLOAD]);
+		if (macsec->offload != offload) {
+			macsec_offload_state_change = true;
+			ret = macsec_update_offload(dev, offload);
+			if (ret)
+				goto cleanup;
+		}
+	}
+
 	/* If h/w offloading is available, propagate to the device */
-	if (macsec_is_offloaded(macsec)) {
+	if (!macsec_offload_state_change && macsec_is_offloaded(macsec)) {
 		const struct macsec_ops *ops;
 		struct macsec_context ctx;
 
@@ -4240,16 +4246,22 @@ static size_t macsec_get_size(const struct net_device *dev)
 		nla_total_size(1) + /* IFLA_MACSEC_SCB */
 		nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
 		nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
+		nla_total_size(1) + /* IFLA_MACSEC_OFFLOAD */
 		0;
 }
 
 static int macsec_fill_info(struct sk_buff *skb,
 			    const struct net_device *dev)
 {
-	struct macsec_secy *secy = &macsec_priv(dev)->secy;
-	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
+	struct macsec_tx_sc *tx_sc;
+	struct macsec_dev *macsec;
+	struct macsec_secy *secy;
 	u64 csid;
 
+	macsec = macsec_priv(dev);
+	secy = &macsec->secy;
+	tx_sc = &secy->tx_sc;
+
 	switch (secy->key_len) {
 	case MACSEC_GCM_AES_128_SAK_LEN:
 		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
@@ -4274,6 +4286,7 @@ static int macsec_fill_info(struct sk_buff *skb,
 	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
 	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
 	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
+	    nla_put_u8(skb, IFLA_MACSEC_OFFLOAD, macsec->offload) ||
 	    0)
 		goto nla_put_failure;
 
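The macsec_get_size()/macsec_fill_info() hunks above make the configured offload mode visible in the regular link dump. Below is a rough sketch of consuming it from userspace, assuming libmnl and the usual RTM_GETLINK dump loop (as in libmnl's rtnl-link-dump example); the helper names are illustrative, not part of the series.

/* Sketch only: walk an RTM_NEWLINK message from an "ip link" style dump down
 * to IFLA_LINKINFO -> IFLA_INFO_DATA and print the IFLA_MACSEC_OFFLOAD value
 * that macsec_fill_info() now emits.
 */
#include <stdio.h>
#include <string.h>
#include <libmnl/libmnl.h>
#include <linux/if_link.h>
#include <linux/rtnetlink.h>

struct tb_info {
	const struct nlattr **tb;	/* table indexed by attribute type */
	int max;			/* highest attribute type we know about */
};

static int index_attr_cb(const struct nlattr *attr, void *data)
{
	struct tb_info *ti = data;
	int type = mnl_attr_get_type(attr);

	if (type <= ti->max)		/* skip attributes newer than our headers */
		ti->tb[type] = attr;
	return MNL_CB_OK;
}

/* Call on each RTM_NEWLINK nlmsghdr returned by the dump. */
void print_macsec_offload(const struct nlmsghdr *nlh)
{
	const struct nlattr *tb[IFLA_MAX + 1] = { 0 };
	const struct nlattr *li[IFLA_INFO_MAX + 1] = { 0 };
	const struct nlattr *md[IFLA_MACSEC_MAX + 1] = { 0 };
	struct tb_info tb_ti = { tb, IFLA_MAX };
	struct tb_info li_ti = { li, IFLA_INFO_MAX };
	struct tb_info md_ti = { md, IFLA_MACSEC_MAX };

	mnl_attr_parse(nlh, sizeof(struct ifinfomsg), index_attr_cb, &tb_ti);
	if (!tb[IFLA_LINKINFO])
		return;

	mnl_attr_parse_nested(tb[IFLA_LINKINFO], index_attr_cb, &li_ti);
	if (!li[IFLA_INFO_KIND] || !li[IFLA_INFO_DATA] ||
	    strcmp(mnl_attr_get_str(li[IFLA_INFO_KIND]), "macsec"))
		return;

	mnl_attr_parse_nested(li[IFLA_INFO_DATA], index_attr_cb, &md_ti);
	if (md[IFLA_MACSEC_OFFLOAD])
		printf("offload mode: %u\n",
		       mnl_attr_get_u8(md[IFLA_MACSEC_OFFLOAD]));
}

With a recent iproute2, `ip -d link show` should report the same offload value for macsec devices.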