ipsec-next-2022-12-09

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEH7ZpcWbFyOOp6OJbrB3Eaf9PW7cFAmOS/ooACgkQrB3Eaf9P
 W7cOVA/+L8rHwLe78DDz/PNESyShtTVCYBDF/ngYMV8AIvjSfPresMbFV3NKqO5E
 3qbMl199QH2eWI7dhQaQ+edynSG0QCx5FmPai0UuHPLxATct1pNPJPpvBryO/4jC
 ZouYBIVjdMbq6Y8vD2gJ8UtA7TZpncP0HYOKTvYyDL9kQ+nUmu9KUYxcEcNHL5w+
 TjL9jJafR+GqczCRiwAoMKIFV7lUrTFzh7slfINNN5DVTuzN33H7Tp70z6IKOfVL
 1LATlZv7mqpLVF6dQuMXOt6kd/BEBl1y4ZHTHow5nstJvwu99P96iKwEfIXuOvWK
 fulhDU61eIik8D9QJWeM7TuZDbYewWI77plwVY/R/zRt0At4VLpq7I1m33CmLLMY
 Fb5fMxJPkM8YAtDID+BknYPrSAcxo8ji04BWFrVqQ6InPmtGfnP83XSSkYfxY7FB
 3hUfz4igsJpV5vrS1EFRhjklNwI+jY2yAvIggQtdkJ97ubSUY3E4ACfNqlJ5lJbv
 2KqWnSKlG21F9ZTR68VzcQVhFIQF6j/EuQqro+4TQUIdZswcml2iK32zrel0rs9C
 iAsgQQaMV9a2vEaScRZqdOJ4HENTbm9wD7Mso/i5vr+lnpr1ThKjQo8osU8YUlbC
 SDTMeWRRos+esFML6SP+YZ7SM/qXMluou204x/llJ/VDMXQ5e8k=
 =enQp
 -----END PGP SIGNATURE-----

Merge tag 'ipsec-next-2022-12-09' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next

Steffen Klassert says:

====================
ipsec-next 2022-12-09

1) Add xfrm packet offload core API.
   From Leon Romanovsky.

2) Add xfrm packet offload support for mlx5.
   From Leon Romanovsky and Raed Salem.

3) Fix a typo in an error message.
   From Colin Ian King.

* tag 'ipsec-next-2022-12-09' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next: (38 commits)
  xfrm: Fix spelling mistake "oflload" -> "offload"
  net/mlx5e: Open mlx5 driver to accept IPsec packet offload
  net/mlx5e: Handle ESN update events
  net/mlx5e: Handle hardware IPsec limits events
  net/mlx5e: Update IPsec soft and hard limits
  net/mlx5e: Store all XFRM SAs in Xarray
  net/mlx5e: Provide intermediate pointer to access IPsec struct
  net/mlx5e: Skip IPsec encryption for TX path without matching policy
  net/mlx5e: Add statistics for Rx/Tx IPsec offloaded flows
  net/mlx5e: Improve IPsec flow steering autogroup
  net/mlx5e: Configure IPsec packet offload flow steering
  net/mlx5e: Use same coding pattern for Rx and Tx flows
  net/mlx5e: Add XFRM policy offload logic
  net/mlx5e: Create IPsec policy offload tables
  net/mlx5e: Generalize creation of default IPsec miss group and rule
  net/mlx5e: Group IPsec miss handles into separate struct
  net/mlx5e: Make clear what IPsec rx_err does
  net/mlx5e: Flatten the IPsec RX add rule path
  net/mlx5e: Refactor FTE setup code to be more clear
  net/mlx5e: Move IPsec flow table creation to separate function
  ...
====================

Link: https://lore.kernel.org/r/20221209093310.4018731-1-steffen.klassert@secunet.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Jakub Kicinski 2022-12-09 20:06:34 -08:00
commit dd8b3a802b
30 changed files with 2161 additions and 532 deletions


@@ -5,6 +5,7 @@ XFRM device - offloading the IPsec computations
===============================================
Shannon Nelson <shannon.nelson@oracle.com>
Leon Romanovsky <leonro@nvidia.com>
Overview
@@ -18,10 +19,21 @@ can radically increase throughput and decrease CPU utilization. The XFRM
Device interface allows NIC drivers to offer to the stack access to the
hardware offload.
Right now, there are two types of hardware offload that the kernel supports.
* IPsec crypto offload:
  * NIC performs encrypt/decrypt
  * Kernel does everything else
* IPsec packet offload:
  * NIC performs encrypt/decrypt
  * NIC does encapsulation
  * Kernel and NIC have SA and policy in sync
  * NIC handles the SA and policy states
  * The Kernel talks to the keymanager
Userland access to the offload is typically through a system such as
libreswan or KAME/raccoon, but the iproute2 'ip xfrm' command set can
be handy when experimenting. An example command might look something
like this::
like this for crypto offload:
ip x s add proto esp dst 14.0.0.70 src 14.0.0.52 spi 0x07 mode transport \
reqid 0x07 replay-window 32 \
@@ -29,6 +41,17 @@ like this::
sel src 14.0.0.52/24 dst 14.0.0.70/24 proto tcp \
offload dev eth4 dir in
and for packet offload
ip x s add proto esp dst 14.0.0.70 src 14.0.0.52 spi 0x07 mode transport \
reqid 0x07 replay-window 32 \
aead 'rfc4106(gcm(aes))' 0x44434241343332312423222114131211f4f3f2f1 128 \
sel src 14.0.0.52/24 dst 14.0.0.70/24 proto tcp \
offload packet dev eth4 dir in
ip x p add src 14.0.0.70 dst 14.0.0.52 offload packet dev eth4 dir in
tmpl src 14.0.0.70 dst 14.0.0.52 proto esp reqid 10000 mode transport
Yes, that's ugly, but that's what shell scripts and/or libreswan are for.
@@ -40,17 +63,24 @@ Callbacks to implement
/* from include/linux/netdevice.h */
struct xfrmdev_ops {
/* Crypto and Packet offload callbacks */
int (*xdo_dev_state_add) (struct xfrm_state *x);
void (*xdo_dev_state_delete) (struct xfrm_state *x);
void (*xdo_dev_state_free) (struct xfrm_state *x);
bool (*xdo_dev_offload_ok) (struct sk_buff *skb,
struct xfrm_state *x);
void (*xdo_dev_state_advance_esn) (struct xfrm_state *x);
/* Solely packet offload callbacks */
void (*xdo_dev_state_update_curlft) (struct xfrm_state *x);
int (*xdo_dev_policy_add) (struct xfrm_policy *x);
void (*xdo_dev_policy_delete) (struct xfrm_policy *x);
void (*xdo_dev_policy_free) (struct xfrm_policy *x);
};
The NIC driver offering ipsec offload will need to implement these
callbacks to make the offload available to the network stack's
XFRM subsystem. Additionally, the feature bits NETIF_F_HW_ESP and
The NIC driver offering ipsec offload will need to implement the callbacks
relevant to the supported offload to make the offload available to the network
stack's XFRM subsystem. Additionally, the feature bits NETIF_F_HW_ESP and
NETIF_F_HW_ESP_TX_CSUM will signal the availability of the offload.
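As a rough sketch of how a driver wires this up (the names my_ipsec_build_netdev,
my_ipsec_caps, MY_IPSEC_CAP_PACKET_OFFLOAD and the two ops tables are
hypothetical; the pattern mirrors mlx5e_ipsec_build_netdev further down in this
series)::

  static void my_ipsec_build_netdev(struct my_priv *priv)
  {
          struct net_device *netdev = priv->netdev;

          /* Packet offload needs the extra policy and lifetime
           * callbacks, so keep two xfrmdev_ops tables and pick one
           * based on device capabilities.
           */
          if (my_ipsec_caps(priv) & MY_IPSEC_CAP_PACKET_OFFLOAD)
                  netdev->xfrmdev_ops = &my_packet_xfrmdev_ops;
          else
                  netdev->xfrmdev_ops = &my_crypto_xfrmdev_ops;

          /* Signal the offload's availability to the XFRM stack. */
          netdev->features |= NETIF_F_HW_ESP;
          netdev->hw_enc_features |= NETIF_F_HW_ESP;
  }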
@@ -79,7 +109,8 @@ and an indication of whether it is for Rx or Tx. The driver should
=========== ===================================
0 success
-EOPNOTSUPP offload not supported, try SW IPsec
-EOPNOTSUPP offload not supported, try SW IPsec,
not applicable for packet offload mode
other fail the request
=========== ===================================
@@ -96,6 +127,7 @@ will be serviceable. This can check the packet information to be sure the
offload can be supported (e.g. IPv4 or IPv6, no IPv4 options, etc) and
return true or false to signify its support.
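A minimal sketch of such a check, assuming a hypothetical device that only
handles IPv4 without IP options (real drivers apply their own hardware's
rules here)::

  static bool my_xdo_dev_offload_ok(struct sk_buff *skb,
                                    struct xfrm_state *x)
  {
          if (x->props.family != AF_INET)
                  return false;           /* assumed: IPv4-only HW */

          if (ip_hdr(skb)->ihl != 5)
                  return false;           /* assumed: no IPv4 options */

          return true;
  }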
Crypto offload mode:
When ready to send, the driver needs to inspect the Tx packet for the
offload information, including the opaque context, and set up the packet
send accordingly::
@@ -139,13 +171,25 @@ the stack in xfrm_input().
In ESN mode, xdo_dev_state_advance_esn() is called from xfrm_replay_advance_esn().
Driver will check packet seq number and update HW ESN state machine if needed.
Packet offload mode:
HW adds and deletes XFRM headers. So in the RX path, the XFRM stack is bypassed
if HW reported success. In the TX path, the packet leaves the kernel without the
extra header and unencrypted; the HW is responsible for adding the header and
performing the encryption.
When the SA is removed by the user, the driver's xdo_dev_state_delete()
is asked to disable the offload. Later, xdo_dev_state_free() is called
from a garbage collection routine after all reference counts to the state
and xdo_dev_policy_delete() are asked to disable the offload. Later,
xdo_dev_state_free() and xdo_dev_policy_free() are called from a garbage
collection routine after all reference counts to the state and policy
have been removed and any remaining resources can be cleared for the
offload state. How these are used by the driver will depend on specific
hardware needs.
As a netdev is set to DOWN the XFRM stack's netdev listener will call
xdo_dev_state_delete() and xdo_dev_state_free() on any remaining offloaded
states.
xdo_dev_state_delete(), xdo_dev_policy_delete(), xdo_dev_state_free() and
xdo_dev_policy_free() on any remaining offloaded states.
Since the HW handles the packets, the XFRM core can't count the hard and soft
limits. The HW/driver are responsible for tracking them and providing accurate
data when xdo_dev_state_update_curlft() is called. If one of these limits is
reached, the driver needs to call xfrm_state_check_expire() to make sure that
XFRM performs the rekeying sequence.
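A sketch of the lifetime callback, where my_read_hw_packet_count() is a
hypothetical stand-in for however the device exposes its counters (in mlx5
this is the ASO query seen later in this series)::

  static void my_xdo_dev_state_update_curlft(struct xfrm_state *x)
  {
          u64 packets;

          if (my_read_hw_packet_count(x, &packets))
                  return;

          /* Report the total number of packets the HW has processed
           * for this SA; the XFRM core compares this against the
           * configured soft/hard limits in xfrm_state_check_expire().
           */
          x->curlft.packets = packets;
  }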


@@ -283,6 +283,10 @@ static int ch_ipsec_xfrm_add_state(struct xfrm_state *x)
pr_debug("Cannot offload xfrm states with geniv other than seqiv\n");
return -EINVAL;
}
if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
pr_debug("Unsupported xfrm offload\n");
return -EINVAL;
}
sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
if (!sa_entry) {


@@ -585,6 +585,11 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
return -EINVAL;
}
if (xs->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
netdev_err(dev, "Unsupported ipsec offload type\n");
return -EINVAL;
}
if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) {
struct rx_sa rsa;


@@ -280,6 +280,11 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
return -EINVAL;
}
if (xs->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
netdev_err(dev, "Unsupported ipsec offload type\n");
return -EINVAL;
}
if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) {
struct rx_sa rsa;


@@ -1245,4 +1245,5 @@ int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, int max_t
int mlx5e_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi);
int mlx5e_get_vf_stats(struct net_device *dev, int vf, struct ifla_vf_stats *vf_stats);
#endif
int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey);
#endif /* __MLX5_EN_H__ */


@@ -84,7 +84,8 @@ enum {
MLX5E_ARFS_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
#endif
#ifdef CONFIG_MLX5_EN_IPSEC
MLX5E_ACCEL_FS_ESP_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
MLX5E_ACCEL_FS_POL_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
MLX5E_ACCEL_FS_ESP_FT_LEVEL,
MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL,
#endif
};


@@ -162,7 +162,6 @@ mlx5e_tc_meter_modify(struct mlx5_core_dev *mdev,
MLX5_ACCESS_ASO_OPC_MOD_FLOW_METER);
aso_ctrl = &aso_wqe->aso_ctrl;
memset(aso_ctrl, 0, sizeof(*aso_ctrl));
aso_ctrl->data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BYTEWISE_64BYTE << 6;
aso_ctrl->condition_1_0_operand = MLX5_ASO_ALWAYS_TRUE |
MLX5_ASO_ALWAYS_TRUE << 4;


@@ -45,55 +45,9 @@ static struct mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(struct xfrm_state *x)
return (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
}
struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *ipsec,
unsigned int handle)
static struct mlx5e_ipsec_pol_entry *to_ipsec_pol_entry(struct xfrm_policy *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry;
struct xfrm_state *ret = NULL;
rcu_read_lock();
hash_for_each_possible_rcu(ipsec->sadb_rx, sa_entry, hlist, handle)
if (sa_entry->handle == handle) {
ret = sa_entry->x;
xfrm_state_hold(ret);
break;
}
rcu_read_unlock();
return ret;
}
static int mlx5e_ipsec_sadb_rx_add(struct mlx5e_ipsec_sa_entry *sa_entry)
{
unsigned int handle = sa_entry->ipsec_obj_id;
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
struct mlx5e_ipsec_sa_entry *_sa_entry;
unsigned long flags;
rcu_read_lock();
hash_for_each_possible_rcu(ipsec->sadb_rx, _sa_entry, hlist, handle)
if (_sa_entry->handle == handle) {
rcu_read_unlock();
return -EEXIST;
}
rcu_read_unlock();
spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
sa_entry->handle = handle;
hash_add_rcu(ipsec->sadb_rx, &sa_entry->hlist, sa_entry->handle);
spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);
return 0;
}
static void mlx5e_ipsec_sadb_rx_del(struct mlx5e_ipsec_sa_entry *sa_entry)
{
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
unsigned long flags;
spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
hash_del_rcu(&sa_entry->hlist);
spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);
return (struct mlx5e_ipsec_pol_entry *)x->xdo.offload_handle;
}
static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
@@ -129,8 +83,32 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
return false;
}
static void
mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
static void mlx5e_ipsec_init_limits(struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5_accel_esp_xfrm_attrs *attrs)
{
struct xfrm_state *x = sa_entry->x;
attrs->hard_packet_limit = x->lft.hard_packet_limit;
if (x->lft.soft_packet_limit == XFRM_INF)
return;
/* Hardware decrements the hard_packet_limit counter during
 * operation and fires an event when soft_packet_limit is
 * reached. It means that we need to substitute the numbers
 * in order to properly count the soft limit.
 *
 * As an example:
 * The XFRM user sets the soft limit to 2 and the hard limit
 * to 9 and expects to see a soft event after 2 packets and a
 * hard event after 9 packets. In our case, the hard limit is
 * set to 9 and the soft limit comparator to 7, so the user
 * gets the soft event after 2 packets.
 */
attrs->soft_packet_limit =
x->lft.hard_packet_limit - x->lft.soft_packet_limit;
}
void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5_accel_esp_xfrm_attrs *attrs)
{
struct xfrm_state *x = sa_entry->x;
@@ -157,33 +135,31 @@ mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
memcpy(&aes_gcm->salt, x->aead->alg_key + key_len,
sizeof(aes_gcm->salt));
attrs->authsize = crypto_aead_authsize(aead) / 4; /* in dwords */
/* iv len */
aes_gcm->icv_len = x->aead->alg_icv_len;
/* esn */
if (sa_entry->esn_state.trigger) {
attrs->flags |= MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED;
attrs->esn_trigger = true;
attrs->esn = sa_entry->esn_state.esn;
if (sa_entry->esn_state.overlap)
attrs->flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
attrs->esn_overlap = sa_entry->esn_state.overlap;
attrs->replay_window = x->replay_esn->replay_window;
}
/* action */
attrs->action = (x->xso.dir == XFRM_DEV_OFFLOAD_OUT) ?
MLX5_ACCEL_ESP_ACTION_ENCRYPT :
MLX5_ACCEL_ESP_ACTION_DECRYPT;
/* flags */
attrs->flags |= (x->props.mode == XFRM_MODE_TRANSPORT) ?
MLX5_ACCEL_ESP_FLAGS_TRANSPORT :
MLX5_ACCEL_ESP_FLAGS_TUNNEL;
attrs->dir = x->xso.dir;
/* spi */
attrs->spi = be32_to_cpu(x->id.spi);
/* source , destination ips */
memcpy(&attrs->saddr, x->props.saddr.a6, sizeof(attrs->saddr));
memcpy(&attrs->daddr, x->id.daddr.a6, sizeof(attrs->daddr));
attrs->is_ipv6 = (x->props.family != AF_INET);
attrs->family = x->props.family;
attrs->type = x->xso.type;
attrs->reqid = x->props.reqid;
mlx5e_ipsec_init_limits(sa_entry, attrs);
}
static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
@@ -215,11 +191,6 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
netdev_info(netdev, "Only IPv4/6 xfrm states may be offloaded\n");
return -EINVAL;
}
if (x->props.mode != XFRM_MODE_TRANSPORT &&
x->props.mode != XFRM_MODE_TUNNEL) {
dev_info(&netdev->dev, "Only transport and tunnel xfrm states may be offloaded\n");
return -EINVAL;
}
if (x->id.proto != IPPROTO_ESP) {
netdev_info(netdev, "Only ESP xfrm state may be offloaded\n");
return -EINVAL;
@@ -253,6 +224,67 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
netdev_info(netdev, "Cannot offload xfrm states with geniv other than seqiv\n");
return -EINVAL;
}
switch (x->xso.type) {
case XFRM_DEV_OFFLOAD_CRYPTO:
if (!(mlx5_ipsec_device_caps(priv->mdev) &
MLX5_IPSEC_CAP_CRYPTO)) {
netdev_info(netdev, "Crypto offload is not supported\n");
return -EINVAL;
}
if (x->props.mode != XFRM_MODE_TRANSPORT &&
x->props.mode != XFRM_MODE_TUNNEL) {
netdev_info(netdev, "Only transport and tunnel xfrm states may be offloaded\n");
return -EINVAL;
}
break;
case XFRM_DEV_OFFLOAD_PACKET:
if (!(mlx5_ipsec_device_caps(priv->mdev) &
MLX5_IPSEC_CAP_PACKET_OFFLOAD)) {
netdev_info(netdev, "Packet offload is not supported\n");
return -EINVAL;
}
if (x->props.mode != XFRM_MODE_TRANSPORT) {
netdev_info(netdev, "Only transport xfrm states may be offloaded in packet mode\n");
return -EINVAL;
}
if (x->replay_esn && x->replay_esn->replay_window != 32 &&
x->replay_esn->replay_window != 64 &&
x->replay_esn->replay_window != 128 &&
x->replay_esn->replay_window != 256) {
netdev_info(netdev,
"Unsupported replay window size %u\n",
x->replay_esn->replay_window);
return -EINVAL;
}
if (!x->props.reqid) {
netdev_info(netdev, "Cannot offload without reqid\n");
return -EINVAL;
}
if (x->lft.hard_byte_limit != XFRM_INF ||
x->lft.soft_byte_limit != XFRM_INF) {
netdev_info(netdev,
"Device doesn't support limits in bytes\n");
return -EINVAL;
}
if (x->lft.soft_packet_limit >= x->lft.hard_packet_limit &&
x->lft.hard_packet_limit != XFRM_INF) {
/* XFRM stack doesn't prevent such configuration :(. */
netdev_info(netdev,
"Hard packet limit must be greater than soft one\n");
return -EINVAL;
}
break;
default:
netdev_info(netdev, "Unsupported xfrm offload type %d\n",
x->xso.type);
return -EINVAL;
}
return 0;
}
@@ -270,6 +302,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
struct net_device *netdev = x->xso.real_dev;
struct mlx5e_ipsec *ipsec;
struct mlx5e_priv *priv;
int err;
@@ -277,6 +310,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
if (!priv->ipsec)
return -EOPNOTSUPP;
ipsec = priv->ipsec;
err = mlx5e_xfrm_validate_state(x);
if (err)
return err;
@@ -288,7 +322,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
}
sa_entry->x = x;
sa_entry->ipsec = priv->ipsec;
sa_entry->ipsec = ipsec;
/* check esn */
mlx5e_ipsec_update_esn_state(sa_entry);
@@ -299,25 +333,29 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
if (err)
goto err_xfrm;
err = mlx5e_accel_ipsec_fs_add_rule(priv, sa_entry);
err = mlx5e_accel_ipsec_fs_add_rule(sa_entry);
if (err)
goto err_hw_ctx;
if (x->xso.dir == XFRM_DEV_OFFLOAD_IN) {
err = mlx5e_ipsec_sadb_rx_add(sa_entry);
/* We use *_bh() variant because xfrm_timer_handler(), which runs
* in softirq context, can reach our state delete logic and we need
* xa_erase_bh() there.
*/
err = xa_insert_bh(&ipsec->sadb, sa_entry->ipsec_obj_id, sa_entry,
GFP_KERNEL);
if (err)
goto err_add_rule;
} else {
if (x->xso.dir == XFRM_DEV_OFFLOAD_OUT)
sa_entry->set_iv_op = (x->props.flags & XFRM_STATE_ESN) ?
mlx5e_ipsec_set_iv_esn : mlx5e_ipsec_set_iv;
}
INIT_WORK(&sa_entry->modify_work.work, _update_xfrm_state);
x->xso.offload_handle = (unsigned long)sa_entry;
goto out;
return 0;
err_add_rule:
mlx5e_accel_ipsec_fs_del_rule(priv, sa_entry);
mlx5e_accel_ipsec_fs_del_rule(sa_entry);
err_hw_ctx:
mlx5_ipsec_free_sa_ctx(sa_entry);
err_xfrm:
@@ -329,18 +367,19 @@ out:
static void mlx5e_xfrm_del_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
struct mlx5e_ipsec_sa_entry *old;
if (x->xso.dir == XFRM_DEV_OFFLOAD_IN)
mlx5e_ipsec_sadb_rx_del(sa_entry);
old = xa_erase_bh(&ipsec->sadb, sa_entry->ipsec_obj_id);
WARN_ON(old != sa_entry);
}
static void mlx5e_xfrm_free_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
struct mlx5e_priv *priv = netdev_priv(x->xso.dev);
cancel_work_sync(&sa_entry->modify_work.work);
mlx5e_accel_ipsec_fs_del_rule(priv, sa_entry);
mlx5e_accel_ipsec_fs_del_rule(sa_entry);
mlx5_ipsec_free_sa_ctx(sa_entry);
kfree(sa_entry);
}
@@ -359,23 +398,33 @@ void mlx5e_ipsec_init(struct mlx5e_priv *priv)
if (!ipsec)
return;
hash_init(ipsec->sadb_rx);
spin_lock_init(&ipsec->sadb_rx_lock);
xa_init_flags(&ipsec->sadb, XA_FLAGS_ALLOC);
ipsec->mdev = priv->mdev;
ipsec->wq = alloc_ordered_workqueue("mlx5e_ipsec: %s", 0,
priv->netdev->name);
if (!ipsec->wq)
goto err_wq;
if (mlx5_ipsec_device_caps(priv->mdev) &
MLX5_IPSEC_CAP_PACKET_OFFLOAD) {
ret = mlx5e_ipsec_aso_init(ipsec);
if (ret)
goto err_aso;
}
ret = mlx5e_accel_ipsec_fs_init(ipsec);
if (ret)
goto err_fs_init;
ipsec->fs = priv->fs;
priv->ipsec = ipsec;
netdev_dbg(priv->netdev, "IPSec attached to netdevice\n");
return;
err_fs_init:
if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
mlx5e_ipsec_aso_cleanup(ipsec);
err_aso:
destroy_workqueue(ipsec->wq);
err_wq:
kfree(ipsec);
@@ -391,6 +440,8 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
return;
mlx5e_accel_ipsec_fs_cleanup(ipsec);
if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
mlx5e_ipsec_aso_cleanup(ipsec);
destroy_workqueue(ipsec->wq);
kfree(ipsec);
priv->ipsec = NULL;
@@ -426,6 +477,122 @@ static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
queue_work(sa_entry->ipsec->wq, &modify_work->work);
}
static void mlx5e_xfrm_update_curlft(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
int err;
lockdep_assert_held(&x->lock);
if (sa_entry->attrs.soft_packet_limit == XFRM_INF)
/* Limits are not configured, as the soft limit
 * must be lower than the hard limit.
*/
return;
err = mlx5e_ipsec_aso_query(sa_entry, NULL);
if (err)
return;
mlx5e_ipsec_aso_update_curlft(sa_entry, &x->curlft.packets);
}
static int mlx5e_xfrm_validate_policy(struct xfrm_policy *x)
{
struct net_device *netdev = x->xdo.real_dev;
if (x->type != XFRM_POLICY_TYPE_MAIN) {
netdev_info(netdev, "Cannot offload non-main policy types\n");
return -EINVAL;
}
/* Note that we support only one template */
if (x->xfrm_nr > 1) {
netdev_info(netdev, "Cannot offload more than one template\n");
return -EINVAL;
}
if (x->xdo.dir != XFRM_DEV_OFFLOAD_IN &&
x->xdo.dir != XFRM_DEV_OFFLOAD_OUT) {
netdev_info(netdev, "Cannot offload forward policy\n");
return -EINVAL;
}
if (!x->xfrm_vec[0].reqid) {
netdev_info(netdev, "Cannot offload policy without reqid\n");
return -EINVAL;
}
if (x->xdo.type != XFRM_DEV_OFFLOAD_PACKET) {
netdev_info(netdev, "Unsupported xfrm offload type\n");
return -EINVAL;
}
return 0;
}
static void
mlx5e_ipsec_build_accel_pol_attrs(struct mlx5e_ipsec_pol_entry *pol_entry,
struct mlx5_accel_pol_xfrm_attrs *attrs)
{
struct xfrm_policy *x = pol_entry->x;
struct xfrm_selector *sel;
sel = &x->selector;
memset(attrs, 0, sizeof(*attrs));
memcpy(&attrs->saddr, sel->saddr.a6, sizeof(attrs->saddr));
memcpy(&attrs->daddr, sel->daddr.a6, sizeof(attrs->daddr));
attrs->family = sel->family;
attrs->dir = x->xdo.dir;
attrs->action = x->action;
attrs->type = XFRM_DEV_OFFLOAD_PACKET;
attrs->reqid = x->xfrm_vec[0].reqid;
}
static int mlx5e_xfrm_add_policy(struct xfrm_policy *x)
{
struct net_device *netdev = x->xdo.real_dev;
struct mlx5e_ipsec_pol_entry *pol_entry;
struct mlx5e_priv *priv;
int err;
priv = netdev_priv(netdev);
if (!priv->ipsec)
return -EOPNOTSUPP;
err = mlx5e_xfrm_validate_policy(x);
if (err)
return err;
pol_entry = kzalloc(sizeof(*pol_entry), GFP_KERNEL);
if (!pol_entry)
return -ENOMEM;
pol_entry->x = x;
pol_entry->ipsec = priv->ipsec;
mlx5e_ipsec_build_accel_pol_attrs(pol_entry, &pol_entry->attrs);
err = mlx5e_accel_ipsec_fs_add_pol(pol_entry);
if (err)
goto err_fs;
x->xdo.offload_handle = (unsigned long)pol_entry;
return 0;
err_fs:
kfree(pol_entry);
return err;
}
static void mlx5e_xfrm_free_policy(struct xfrm_policy *x)
{
struct mlx5e_ipsec_pol_entry *pol_entry = to_ipsec_pol_entry(x);
mlx5e_accel_ipsec_fs_del_pol(pol_entry);
kfree(pol_entry);
}
static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
.xdo_dev_state_add = mlx5e_xfrm_add_state,
.xdo_dev_state_delete = mlx5e_xfrm_del_state,
@@ -434,6 +601,18 @@ static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
};
static const struct xfrmdev_ops mlx5e_ipsec_packet_xfrmdev_ops = {
.xdo_dev_state_add = mlx5e_xfrm_add_state,
.xdo_dev_state_delete = mlx5e_xfrm_del_state,
.xdo_dev_state_free = mlx5e_xfrm_free_state,
.xdo_dev_offload_ok = mlx5e_ipsec_offload_ok,
.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
.xdo_dev_state_update_curlft = mlx5e_xfrm_update_curlft,
.xdo_dev_policy_add = mlx5e_xfrm_add_policy,
.xdo_dev_policy_free = mlx5e_xfrm_free_policy,
};
void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
@@ -443,7 +622,12 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
return;
mlx5_core_info(mdev, "mlx5e: IPSec ESP acceleration enabled\n");
if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
netdev->xfrmdev_ops = &mlx5e_ipsec_packet_xfrmdev_ops;
else
netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
netdev->features |= NETIF_F_HW_ESP;
netdev->hw_enc_features |= NETIF_F_HW_ESP;


@@ -34,27 +34,14 @@
#ifndef __MLX5E_IPSEC_H__
#define __MLX5E_IPSEC_H__
#ifdef CONFIG_MLX5_EN_IPSEC
#include <linux/mlx5/device.h>
#include <net/xfrm.h>
#include <linux/idr.h>
#include "lib/aso.h"
#define MLX5E_IPSEC_SADB_RX_BITS 10
#define MLX5E_IPSEC_ESN_SCOPE_MID 0x80000000L
enum mlx5_accel_esp_flags {
MLX5_ACCEL_ESP_FLAGS_TUNNEL = 0, /* Default */
MLX5_ACCEL_ESP_FLAGS_TRANSPORT = 1UL << 0,
MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED = 1UL << 1,
MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP = 1UL << 2,
};
enum mlx5_accel_esp_action {
MLX5_ACCEL_ESP_ACTION_DECRYPT,
MLX5_ACCEL_ESP_ACTION_ENCRYPT,
};
struct aes_gcm_keymat {
u64 seq_iv;
@@ -66,7 +53,6 @@ struct aes_gcm_keymat {
};
struct mlx5_accel_esp_xfrm_attrs {
enum mlx5_accel_esp_action action;
u32 esn;
u32 spi;
u32 flags;
@@ -82,16 +68,37 @@ struct mlx5_accel_esp_xfrm_attrs {
__be32 a6[4];
} daddr;
u8 is_ipv6;
u8 dir : 2;
u8 esn_overlap : 1;
u8 esn_trigger : 1;
u8 type : 2;
u8 family;
u32 replay_window;
u32 authsize;
u32 reqid;
u64 hard_packet_limit;
u64 soft_packet_limit;
};
enum mlx5_ipsec_cap {
MLX5_IPSEC_CAP_CRYPTO = 1 << 0,
MLX5_IPSEC_CAP_ESN = 1 << 1,
MLX5_IPSEC_CAP_PACKET_OFFLOAD = 1 << 2,
};
struct mlx5e_priv;
struct mlx5e_ipsec_hw_stats {
u64 ipsec_rx_pkts;
u64 ipsec_rx_bytes;
u64 ipsec_rx_drop_pkts;
u64 ipsec_rx_drop_bytes;
u64 ipsec_tx_pkts;
u64 ipsec_tx_bytes;
u64 ipsec_tx_drop_pkts;
u64 ipsec_tx_drop_bytes;
};
struct mlx5e_ipsec_sw_stats {
atomic64_t ipsec_rx_drop_sp_alloc;
atomic64_t ipsec_rx_drop_sadb_miss;
@@ -102,17 +109,38 @@ struct mlx5e_ipsec_sw_stats {
atomic64_t ipsec_tx_drop_trailer;
};
struct mlx5e_accel_fs_esp;
struct mlx5e_ipsec_rx;
struct mlx5e_ipsec_tx;
struct mlx5e_ipsec_work {
struct work_struct work;
struct mlx5e_ipsec *ipsec;
u32 id;
};
struct mlx5e_ipsec_aso {
u8 ctx[MLX5_ST_SZ_BYTES(ipsec_aso)];
dma_addr_t dma_addr;
struct mlx5_aso *aso;
/* IPsec ASO caches data on every query call,
* so in nested calls, we can use this boolean to save
* recursive calls to mlx5e_ipsec_aso_query()
*/
u8 use_cache : 1;
};
struct mlx5e_ipsec {
struct mlx5_core_dev *mdev;
DECLARE_HASHTABLE(sadb_rx, MLX5E_IPSEC_SADB_RX_BITS);
spinlock_t sadb_rx_lock; /* Protects sadb_rx */
struct xarray sadb;
struct mlx5e_ipsec_sw_stats sw_stats;
struct mlx5e_ipsec_hw_stats hw_stats;
struct workqueue_struct *wq;
struct mlx5e_accel_fs_esp *rx_fs;
struct mlx5e_ipsec_tx *tx_fs;
struct mlx5e_flow_steering *fs;
struct mlx5e_ipsec_rx *rx_ipv4;
struct mlx5e_ipsec_rx *rx_ipv6;
struct mlx5e_ipsec_tx *tx;
struct mlx5e_ipsec_aso *aso;
struct notifier_block nb;
};
struct mlx5e_ipsec_esn_state {
@@ -123,7 +151,8 @@ struct mlx5e_ipsec_esn_state {
struct mlx5e_ipsec_rule {
struct mlx5_flow_handle *rule;
struct mlx5_modify_hdr *set_modify_hdr;
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_pkt_reformat *pkt_reformat;
};
struct mlx5e_ipsec_modify_state_work {
@@ -132,9 +161,7 @@ struct mlx5e_ipsec_modify_state_work {
};
struct mlx5e_ipsec_sa_entry {
struct hlist_node hlist; /* Item in SADB_RX hashtable */
struct mlx5e_ipsec_esn_state esn_state;
unsigned int handle; /* Handle in SADB_RX */
struct xfrm_state *x;
struct mlx5e_ipsec *ipsec;
struct mlx5_accel_esp_xfrm_attrs attrs;
@@ -146,19 +173,43 @@ struct mlx5e_ipsec_sa_entry {
struct mlx5e_ipsec_modify_state_work modify_work;
};
struct mlx5_accel_pol_xfrm_attrs {
union {
__be32 a4;
__be32 a6[4];
} saddr;
union {
__be32 a4;
__be32 a6[4];
} daddr;
u8 family;
u8 action;
u8 type : 2;
u8 dir : 2;
u32 reqid;
};
struct mlx5e_ipsec_pol_entry {
struct xfrm_policy *x;
struct mlx5e_ipsec *ipsec;
struct mlx5e_ipsec_rule ipsec_rule;
struct mlx5_accel_pol_xfrm_attrs attrs;
};
#ifdef CONFIG_MLX5_EN_IPSEC
void mlx5e_ipsec_init(struct mlx5e_priv *priv);
void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv);
void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv);
struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *dev,
unsigned int handle);
void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec);
int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec);
int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
struct mlx5e_ipsec_sa_entry *sa_entry);
void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
struct mlx5e_ipsec_sa_entry *sa_entry);
int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry);
void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry);
int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
@@ -168,11 +219,30 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev);
void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry,
const struct mlx5_accel_esp_xfrm_attrs *attrs);
int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec);
void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec);
int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5_wqe_aso_ctrl_seg *data);
void mlx5e_ipsec_aso_update_curlft(struct mlx5e_ipsec_sa_entry *sa_entry,
u64 *packets);
void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv,
void *ipsec_stats);
void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5_accel_esp_xfrm_attrs *attrs);
static inline struct mlx5_core_dev *
mlx5e_ipsec_sa2dev(struct mlx5e_ipsec_sa_entry *sa_entry)
{
return sa_entry->ipsec->mdev;
}
static inline struct mlx5_core_dev *
mlx5e_ipsec_pol2dev(struct mlx5e_ipsec_pol_entry *pol_entry)
{
return pol_entry->ipsec->mdev;
}
#else
static inline void mlx5e_ipsec_init(struct mlx5e_priv *priv)
{

File diff suppressed because it is too large


@@ -2,9 +2,14 @@
/* Copyright (c) 2017, Mellanox Technologies inc. All rights reserved. */
#include "mlx5_core.h"
#include "en.h"
#include "ipsec.h"
#include "lib/mlx5.h"
enum {
MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
};
u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
u32 caps = 0;
@@ -31,6 +36,12 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
MLX5_CAP_ETH(mdev, insert_trailer) && MLX5_CAP_ETH(mdev, swp))
caps |= MLX5_IPSEC_CAP_CRYPTO;
if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload) &&
MLX5_CAP_FLOWTABLE_NIC_TX(mdev, reformat_add_esp_trasport) &&
MLX5_CAP_FLOWTABLE_NIC_RX(mdev, reformat_del_esp_trasport) &&
MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap))
caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD;
if (!caps)
return 0;
@@ -46,6 +57,52 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
}
EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps);
static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
struct mlx5_accel_esp_xfrm_attrs *attrs)
{
void *aso_ctx;
aso_ctx = MLX5_ADDR_OF(ipsec_obj, obj, ipsec_aso);
if (attrs->esn_trigger) {
MLX5_SET(ipsec_aso, aso_ctx, esn_event_arm, 1);
if (attrs->dir == XFRM_DEV_OFFLOAD_IN) {
MLX5_SET(ipsec_aso, aso_ctx, window_sz,
attrs->replay_window / 64);
MLX5_SET(ipsec_aso, aso_ctx, mode,
MLX5_IPSEC_ASO_REPLAY_PROTECTION);
}
}
/* ASO context */
MLX5_SET(ipsec_obj, obj, ipsec_aso_access_pd, pdn);
MLX5_SET(ipsec_obj, obj, full_offload, 1);
MLX5_SET(ipsec_aso, aso_ctx, valid, 1);
/* MLX5_IPSEC_ASO_REG_C_4_5 is a type C register that is used
 * in flow steering to perform matching against. Please be
 * aware that this register was chosen arbitrarily and can't
 * be used in other places as long as IPsec packet offload is
 * active.
 */
MLX5_SET(ipsec_obj, obj, aso_return_reg, MLX5_IPSEC_ASO_REG_C_4_5);
if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_INC_SN);
if (attrs->hard_packet_limit != XFRM_INF) {
MLX5_SET(ipsec_aso, aso_ctx, remove_flow_pkt_cnt,
lower_32_bits(attrs->hard_packet_limit));
MLX5_SET(ipsec_aso, aso_ctx, hard_lft_arm, 1);
MLX5_SET(ipsec_aso, aso_ctx, remove_flow_enable, 1);
}
if (attrs->soft_packet_limit != XFRM_INF) {
MLX5_SET(ipsec_aso, aso_ctx, remove_flow_soft_lft,
lower_32_bits(attrs->soft_packet_limit));
MLX5_SET(ipsec_aso, aso_ctx, soft_lft_arm, 1);
}
}
static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
{
struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
@@ -54,6 +111,7 @@ static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
u32 in[MLX5_ST_SZ_DW(create_ipsec_obj_in)] = {};
void *obj, *salt_p, *salt_iv_p;
struct mlx5e_hw_objs *res;
int err;
obj = MLX5_ADDR_OF(create_ipsec_obj_in, in, ipsec_object);
@@ -66,11 +124,10 @@ static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
salt_iv_p = MLX5_ADDR_OF(ipsec_obj, obj, implicit_iv);
memcpy(salt_iv_p, &aes_gcm->seq_iv, sizeof(aes_gcm->seq_iv));
/* esn */
if (attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) {
if (attrs->esn_trigger) {
MLX5_SET(ipsec_obj, obj, esn_en, 1);
MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn);
if (attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP)
MLX5_SET(ipsec_obj, obj, esn_overlap, 1);
MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->esn_overlap);
}
MLX5_SET(ipsec_obj, obj, dekn, sa_entry->enc_key_id);
@@ -81,6 +138,10 @@ static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
MLX5_GENERAL_OBJECT_TYPES_IPSEC);
res = &mdev->mlx5e_res.hw_objs;
if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
mlx5e_ipsec_packet_setup(obj, res->pdn, attrs);
err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (!err)
sa_entry->ipsec_obj_id =
@@ -152,7 +213,7 @@ static int mlx5_modify_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
void *obj;
int err;
if (!(attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED))
if (!attrs->esn_trigger)
return 0;
general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
@@ -183,8 +244,7 @@ static int mlx5_modify_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP |
MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB);
MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn);
if (attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP)
MLX5_SET(ipsec_obj, obj, esn_overlap, 1);
MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->esn_overlap);
/* general object fields set */
MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
@@ -203,3 +263,234 @@ void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry,
memcpy(&sa_entry->attrs, attrs, sizeof(sa_entry->attrs));
}
static void
mlx5e_ipsec_aso_update_esn(struct mlx5e_ipsec_sa_entry *sa_entry,
const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
struct mlx5_wqe_aso_ctrl_seg data = {};
data.data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT << 6;
data.condition_1_0_operand = MLX5_ASO_ALWAYS_TRUE | MLX5_ASO_ALWAYS_TRUE
<< 4;
data.data_offset_condition_operand = MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
data.bitwise_data = cpu_to_be64(BIT_ULL(54));
data.data_mask = data.bitwise_data;
mlx5e_ipsec_aso_query(sa_entry, &data);
}
static void mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry,
u32 mode_param)
{
struct mlx5_accel_esp_xfrm_attrs attrs = {};
if (mode_param < MLX5E_IPSEC_ESN_SCOPE_MID) {
sa_entry->esn_state.esn++;
sa_entry->esn_state.overlap = 0;
} else {
sa_entry->esn_state.overlap = 1;
}
mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs);
mlx5_accel_esp_modify_xfrm(sa_entry, &attrs);
mlx5e_ipsec_aso_update_esn(sa_entry, &attrs);
}
static void mlx5e_ipsec_handle_event(struct work_struct *_work)
{
struct mlx5e_ipsec_work *work =
container_of(_work, struct mlx5e_ipsec_work, work);
struct mlx5_accel_esp_xfrm_attrs *attrs;
struct mlx5e_ipsec_sa_entry *sa_entry;
struct mlx5e_ipsec_aso *aso;
struct mlx5e_ipsec *ipsec;
int ret;
sa_entry = xa_load(&work->ipsec->sadb, work->id);
if (!sa_entry)
goto out;
ipsec = sa_entry->ipsec;
aso = ipsec->aso;
attrs = &sa_entry->attrs;
spin_lock(&sa_entry->x->lock);
ret = mlx5e_ipsec_aso_query(sa_entry, NULL);
if (ret)
goto unlock;
aso->use_cache = true;
if (attrs->esn_trigger &&
!MLX5_GET(ipsec_aso, aso->ctx, esn_event_arm)) {
u32 mode_param = MLX5_GET(ipsec_aso, aso->ctx, mode_parameter);
mlx5e_ipsec_update_esn_state(sa_entry, mode_param);
}
if (attrs->soft_packet_limit != XFRM_INF)
if (!MLX5_GET(ipsec_aso, aso->ctx, soft_lft_arm) ||
!MLX5_GET(ipsec_aso, aso->ctx, hard_lft_arm) ||
!MLX5_GET(ipsec_aso, aso->ctx, remove_flow_enable))
xfrm_state_check_expire(sa_entry->x);
aso->use_cache = false;
unlock:
spin_unlock(&sa_entry->x->lock);
out:
kfree(work);
}
static int mlx5e_ipsec_event(struct notifier_block *nb, unsigned long event,
void *data)
{
struct mlx5e_ipsec *ipsec = container_of(nb, struct mlx5e_ipsec, nb);
struct mlx5_eqe_obj_change *object;
struct mlx5e_ipsec_work *work;
struct mlx5_eqe *eqe = data;
u16 type;
if (event != MLX5_EVENT_TYPE_OBJECT_CHANGE)
return NOTIFY_DONE;
object = &eqe->data.obj_change;
type = be16_to_cpu(object->obj_type);
if (type != MLX5_GENERAL_OBJECT_TYPES_IPSEC)
return NOTIFY_DONE;
work = kmalloc(sizeof(*work), GFP_ATOMIC);
if (!work)
return NOTIFY_DONE;
INIT_WORK(&work->work, mlx5e_ipsec_handle_event);
work->ipsec = ipsec;
work->id = be32_to_cpu(object->obj_id);
queue_work(ipsec->wq, &work->work);
return NOTIFY_OK;
}
int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec)
{
struct mlx5_core_dev *mdev = ipsec->mdev;
struct mlx5e_ipsec_aso *aso;
struct mlx5e_hw_objs *res;
struct device *pdev;
int err;
aso = kzalloc(sizeof(*ipsec->aso), GFP_KERNEL);
if (!aso)
return -ENOMEM;
res = &mdev->mlx5e_res.hw_objs;
pdev = mlx5_core_dma_dev(mdev);
aso->dma_addr = dma_map_single(pdev, aso->ctx, sizeof(aso->ctx),
DMA_BIDIRECTIONAL);
err = dma_mapping_error(pdev, aso->dma_addr);
if (err)
goto err_dma;
aso->aso = mlx5_aso_create(mdev, res->pdn);
if (IS_ERR(aso->aso)) {
err = PTR_ERR(aso->aso);
goto err_aso_create;
}
ipsec->nb.notifier_call = mlx5e_ipsec_event;
mlx5_notifier_register(mdev, &ipsec->nb);
ipsec->aso = aso;
return 0;
err_aso_create:
dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx),
DMA_BIDIRECTIONAL);
err_dma:
kfree(aso);
return err;
}
void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec)
{
struct mlx5_core_dev *mdev = ipsec->mdev;
struct mlx5e_ipsec_aso *aso;
struct device *pdev;
aso = ipsec->aso;
pdev = mlx5_core_dma_dev(mdev);
mlx5_notifier_unregister(mdev, &ipsec->nb);
mlx5_aso_destroy(aso->aso);
dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx),
DMA_BIDIRECTIONAL);
kfree(aso);
}
static void mlx5e_ipsec_aso_copy(struct mlx5_wqe_aso_ctrl_seg *ctrl,
struct mlx5_wqe_aso_ctrl_seg *data)
{
if (!data)
return;
ctrl->data_mask_mode = data->data_mask_mode;
ctrl->condition_1_0_operand = data->condition_1_0_operand;
ctrl->condition_1_0_offset = data->condition_1_0_offset;
ctrl->data_offset_condition_operand = data->data_offset_condition_operand;
ctrl->condition_0_data = data->condition_0_data;
ctrl->condition_0_mask = data->condition_0_mask;
ctrl->condition_1_data = data->condition_1_data;
ctrl->condition_1_mask = data->condition_1_mask;
ctrl->bitwise_data = data->bitwise_data;
ctrl->data_mask = data->data_mask;
}
int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5_wqe_aso_ctrl_seg *data)
{
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
struct mlx5e_ipsec_aso *aso = ipsec->aso;
struct mlx5_core_dev *mdev = ipsec->mdev;
struct mlx5_wqe_aso_ctrl_seg *ctrl;
struct mlx5e_hw_objs *res;
struct mlx5_aso_wqe *wqe;
u8 ds_cnt;
lockdep_assert_held(&sa_entry->x->lock);
if (aso->use_cache)
return 0;
res = &mdev->mlx5e_res.hw_objs;
memset(aso->ctx, 0, sizeof(aso->ctx));
wqe = mlx5_aso_get_wqe(aso->aso);
ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
mlx5_aso_build_wqe(aso->aso, ds_cnt, wqe, sa_entry->ipsec_obj_id,
MLX5_ACCESS_ASO_OPC_MOD_IPSEC);
ctrl = &wqe->aso_ctrl;
ctrl->va_l =
cpu_to_be32(lower_32_bits(aso->dma_addr) | ASO_CTRL_READ_EN);
ctrl->va_h = cpu_to_be32(upper_32_bits(aso->dma_addr));
ctrl->l_key = cpu_to_be32(res->mkey);
mlx5e_ipsec_aso_copy(ctrl, data);
mlx5_aso_post_wqe(aso->aso, false, &wqe->ctrl);
return mlx5_aso_poll_cq(aso->aso, false);
}
void mlx5e_ipsec_aso_update_curlft(struct mlx5e_ipsec_sa_entry *sa_entry,
u64 *packets)
{
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
struct mlx5e_ipsec_aso *aso = ipsec->aso;
u64 hard_cnt;
hard_cnt = MLX5_GET(ipsec_aso, aso->ctx, remove_flow_pkt_cnt);
/* HW decreases the limit till it reaches zero to fire an event.
 * We need to fix the calculations, so the returned count is the total
 * number of passed packets and not how many are left.
*/
*packets = sa_entry->attrs.hard_packet_limit - hard_cnt;
}


@@ -312,27 +312,31 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
struct mlx5_cqe64 *cqe)
{
u32 ipsec_meta_data = be32_to_cpu(cqe->ft_metadata);
struct mlx5e_priv *priv;
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_ipsec *ipsec = priv->ipsec;
struct mlx5e_ipsec_sa_entry *sa_entry;
struct xfrm_offload *xo;
struct xfrm_state *xs;
struct sec_path *sp;
u32 sa_handle;
sa_handle = MLX5_IPSEC_METADATA_HANDLE(ipsec_meta_data);
priv = netdev_priv(netdev);
sp = secpath_set(skb);
if (unlikely(!sp)) {
atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
return;
}
xs = mlx5e_ipsec_sadb_rx_lookup(priv->ipsec, sa_handle);
if (unlikely(!xs)) {
atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
rcu_read_lock();
sa_entry = xa_load(&ipsec->sadb, sa_handle);
if (unlikely(!sa_entry)) {
rcu_read_unlock();
atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
return;
}
xfrm_state_hold(sa_entry->x);
rcu_read_unlock();
sp->xvec[sp->len++] = xs;
sp->xvec[sp->len++] = sa_entry->x;
sp->olen++;
xo = xfrm_offload(skb);
@@ -349,6 +353,6 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
xo->status = CRYPTO_INVALID_PACKET_SYNTAX;
break;
default:
atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome);
atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_syndrome);
}
}


@@ -37,6 +37,17 @@
#include "en.h"
#include "ipsec.h"
static const struct counter_desc mlx5e_ipsec_hw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_pkts) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_drop_pkts) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_drop_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_pkts) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_drop_pkts) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_drop_bytes) },
};
static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sp_alloc) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sadb_miss) },
@@ -50,8 +61,48 @@ static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
#define MLX5E_READ_CTR_ATOMIC64(ptr, dsc, i) \
atomic64_read((atomic64_t *)((char *)(ptr) + (dsc)[i].offset))
#define NUM_IPSEC_HW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_hw_stats_desc)
#define NUM_IPSEC_SW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_sw_stats_desc)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_hw)
{
if (!priv->ipsec)
return 0;
return NUM_IPSEC_HW_COUNTERS;
}
static inline MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_hw) {}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec_hw)
{
unsigned int i;
if (!priv->ipsec)
return idx;
for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
mlx5e_ipsec_hw_stats_desc[i].format);
return idx;
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_hw)
{
int i;
if (!priv->ipsec)
return idx;
mlx5e_accel_ipsec_fs_read_stats(priv, &priv->ipsec->hw_stats);
for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
data[idx++] = MLX5E_READ_CTR_ATOMIC64(&priv->ipsec->hw_stats,
mlx5e_ipsec_hw_stats_desc, i);
return idx;
}
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_sw)
{
return priv->ipsec ? NUM_IPSEC_SW_COUNTERS : 0;
@@ -81,4 +132,5 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_sw)
return idx;
}
MLX5E_DEFINE_STATS_GRP(ipsec_hw, 0);
MLX5E_DEFINE_STATS_GRP(ipsec_sw, 0);


@@ -2480,6 +2480,7 @@ mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
&MLX5E_STATS_GRP(per_prio),
&MLX5E_STATS_GRP(pme),
#ifdef CONFIG_MLX5_EN_IPSEC
&MLX5E_STATS_GRP(ipsec_hw),
&MLX5E_STATS_GRP(ipsec_sw),
#endif
&MLX5E_STATS_GRP(tls),


@@ -506,6 +506,7 @@ extern MLX5E_DECLARE_STATS_GRP(per_prio);
extern MLX5E_DECLARE_STATS_GRP(pme);
extern MLX5E_DECLARE_STATS_GRP(channels);
extern MLX5E_DECLARE_STATS_GRP(per_port_buff_congest);
extern MLX5E_DECLARE_STATS_GRP(ipsec_hw);
extern MLX5E_DECLARE_STATS_GRP(ipsec_sw);
extern MLX5E_DECLARE_STATS_GRP(ptp);
extern MLX5E_DECLARE_STATS_GRP(macsec_hw);


@@ -19,6 +19,7 @@
#include "diag/fw_tracer.h"
#include "mlx5_irq.h"
#include "devlink.h"
#include "en_accel/ipsec.h"
enum {
MLX5_EQE_OWNER_INIT_VAL = 0x1,
@@ -578,6 +579,10 @@ static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4])
if (MLX5_CAP_MACSEC(dev, log_max_macsec_offload))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_OBJECT_CHANGE);
if (mlx5_ipsec_device_caps(dev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
async_event_mask |=
(1ull << MLX5_EVENT_TYPE_OBJECT_CHANGE);
mask[0] = async_event_mask;
if (MLX5_CAP_GEN(dev, event_cap))


@@ -111,8 +111,8 @@
#define ETHTOOL_PRIO_NUM_LEVELS 1
#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
/* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}} */
#define KERNEL_NIC_PRIO_NUM_LEVELS 7
/* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy */
#define KERNEL_NIC_PRIO_NUM_LEVELS 8
#define KERNEL_NIC_NUM_PRIOS 1
/* One more level for tc */
#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
@@ -133,7 +133,7 @@
#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + KERNEL_RX_MACSEC_MIN_LEVEL + 1)
#define KERNEL_TX_IPSEC_NUM_PRIOS 1
#define KERNEL_TX_IPSEC_NUM_LEVELS 1
#define KERNEL_TX_IPSEC_NUM_LEVELS 2
#define KERNEL_TX_IPSEC_MIN_LEVEL (KERNEL_TX_IPSEC_NUM_LEVELS)
#define KERNEL_TX_MACSEC_NUM_PRIOS 1


@@ -353,12 +353,15 @@ void mlx5_aso_build_wqe(struct mlx5_aso *aso, u8 ds_cnt,
cseg->general_id = cpu_to_be32(obj_id);
}
void *mlx5_aso_get_wqe(struct mlx5_aso *aso)
struct mlx5_aso_wqe *mlx5_aso_get_wqe(struct mlx5_aso *aso)
{
struct mlx5_aso_wqe *wqe;
u16 pi;
pi = mlx5_wq_cyc_ctr2ix(&aso->wq, aso->pc);
return mlx5_wq_cyc_get_wqe(&aso->wq, pi);
wqe = mlx5_wq_cyc_get_wqe(&aso->wq, pi);
memset(wqe, 0, sizeof(*wqe));
return wqe;
}
void mlx5_aso_post_wqe(struct mlx5_aso *aso, bool with_data,


@@ -15,6 +15,7 @@
#define MLX5_WQE_CTRL_WQE_OPC_MOD_SHIFT 24
#define MLX5_MACSEC_ASO_DS_CNT (DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe), MLX5_SEND_WQE_DS))
#define ASO_CTRL_READ_EN BIT(0)
struct mlx5_wqe_aso_ctrl_seg {
__be32 va_h;
__be32 va_l; /* include read_enable */
@@ -71,13 +72,14 @@ enum {
};
enum {
MLX5_ACCESS_ASO_OPC_MOD_IPSEC = 0x0,
MLX5_ACCESS_ASO_OPC_MOD_FLOW_METER = 0x2,
MLX5_ACCESS_ASO_OPC_MOD_MACSEC = 0x5,
};
struct mlx5_aso;
void *mlx5_aso_get_wqe(struct mlx5_aso *aso);
struct mlx5_aso_wqe *mlx5_aso_get_wqe(struct mlx5_aso *aso);
void mlx5_aso_build_wqe(struct mlx5_aso *aso, u8 ds_cnt,
struct mlx5_aso_wqe *aso_wqe,
u32 obj_id, u32 opc_mode);


@@ -302,6 +302,11 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x)
return -EINVAL;
}
if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
nn_err(nn, "Unsupported xfrm offload type\n");
return -EINVAL;
}
cfg->spi = ntohl(x->id.spi);
/* Hash/Authentication */


@@ -149,6 +149,11 @@ static int nsim_ipsec_add_sa(struct xfrm_state *xs)
return -EINVAL;
}
if (xs->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
netdev_err(dev, "Unsupported ipsec offload type\n");
return -EINVAL;
}
/* find the first unused index */
ret = nsim_ipsec_find_empty_idx(ipsec);
if (ret < 0) {


@@ -446,7 +446,10 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 max_modify_header_actions[0x8];
u8 max_ft_level[0x8];
u8 reserved_at_40[0x6];
u8 reformat_add_esp_trasport[0x1];
u8 reserved_at_41[0x2];
u8 reformat_del_esp_trasport[0x1];
u8 reserved_at_44[0x2];
u8 execute_aso[0x1];
u8 reserved_at_47[0x19];
@@ -639,8 +642,10 @@ struct mlx5_ifc_fte_match_set_misc2_bits {
u8 reserved_at_1a0[0x8];
u8 macsec_syndrome[0x8];
u8 ipsec_syndrome[0x8];
u8 reserved_at_1b8[0x8];
u8 reserved_at_1b0[0x50];
u8 reserved_at_1c0[0x40];
};
struct mlx5_ifc_fte_match_set_misc3_bits {
@@ -6452,6 +6457,9 @@ enum mlx5_reformat_ctx_type {
MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL = 0x2,
MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2 = 0x3,
MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x4,
MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4 = 0x5,
MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT = 0x8,
MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6 = 0xb,
MLX5_REFORMAT_TYPE_INSERT_HDR = 0xf,
MLX5_REFORMAT_TYPE_REMOVE_HDR = 0x10,
MLX5_REFORMAT_TYPE_ADD_MACSEC = 0x11,
@@ -11631,6 +11639,41 @@ enum {
MLX5_IPSEC_OBJECT_ICV_LEN_16B,
};
enum {
MLX5_IPSEC_ASO_REG_C_0_1 = 0x0,
MLX5_IPSEC_ASO_REG_C_2_3 = 0x1,
MLX5_IPSEC_ASO_REG_C_4_5 = 0x2,
MLX5_IPSEC_ASO_REG_C_6_7 = 0x3,
};
enum {
MLX5_IPSEC_ASO_MODE = 0x0,
MLX5_IPSEC_ASO_REPLAY_PROTECTION = 0x1,
MLX5_IPSEC_ASO_INC_SN = 0x2,
};
struct mlx5_ifc_ipsec_aso_bits {
u8 valid[0x1];
u8 reserved_at_201[0x1];
u8 mode[0x2];
u8 window_sz[0x2];
u8 soft_lft_arm[0x1];
u8 hard_lft_arm[0x1];
u8 remove_flow_enable[0x1];
u8 esn_event_arm[0x1];
u8 reserved_at_20a[0x16];
u8 remove_flow_pkt_cnt[0x20];
u8 remove_flow_soft_lft[0x20];
u8 reserved_at_260[0x80];
u8 mode_parameter[0x20];
u8 replay_protection_window[0x100];
};
struct mlx5_ifc_ipsec_obj_bits {
u8 modify_field_select[0x40];
u8 full_offload[0x1];
@@ -11652,7 +11695,11 @@ struct mlx5_ifc_ipsec_obj_bits {
u8 implicit_iv[0x40];
u8 reserved_at_100[0x700];
u8 reserved_at_100[0x8];
u8 ipsec_aso_access_pd[0x18];
u8 reserved_at_120[0xe0];
struct mlx5_ifc_ipsec_aso_bits ipsec_aso;
};
struct mlx5_ifc_create_ipsec_obj_in_bits {


@@ -1041,6 +1041,10 @@ struct xfrmdev_ops {
bool (*xdo_dev_offload_ok) (struct sk_buff *skb,
struct xfrm_state *x);
void (*xdo_dev_state_advance_esn) (struct xfrm_state *x);
void (*xdo_dev_state_update_curlft) (struct xfrm_state *x);
int (*xdo_dev_policy_add) (struct xfrm_policy *x);
void (*xdo_dev_policy_delete) (struct xfrm_policy *x);
void (*xdo_dev_policy_free) (struct xfrm_policy *x);
};
#endif


@@ -129,6 +129,13 @@ struct xfrm_state_walk {
enum {
XFRM_DEV_OFFLOAD_IN = 1,
XFRM_DEV_OFFLOAD_OUT,
XFRM_DEV_OFFLOAD_FWD,
};
enum {
XFRM_DEV_OFFLOAD_UNSPECIFIED,
XFRM_DEV_OFFLOAD_CRYPTO,
XFRM_DEV_OFFLOAD_PACKET,
};
struct xfrm_dev_offload {
@@ -137,6 +144,7 @@ struct xfrm_dev_offload {
struct net_device *real_dev;
unsigned long offload_handle;
u8 dir : 2;
u8 type : 2;
};
struct xfrm_mode {
@@ -534,6 +542,8 @@ struct xfrm_policy {
struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH];
struct hlist_node bydst_inexact_list;
struct rcu_head rcu;
struct xfrm_dev_offload xdo;
};
static inline struct net *xp_net(const struct xfrm_policy *xp)
@@ -1092,6 +1102,29 @@ xfrm_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, un
return !0;
}
#ifdef CONFIG_XFRM
static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
{
struct sec_path *sp = skb_sec_path(skb);
return sp->xvec[sp->len - 1];
}
#endif
static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
struct sec_path *sp = skb_sec_path(skb);
if (!sp || !sp->olen || sp->len != sp->olen)
return NULL;
return &sp->ovec[sp->olen - 1];
#else
return NULL;
#endif
}
#ifdef CONFIG_XFRM
int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb,
unsigned short family);
@@ -1123,10 +1156,19 @@ static inline int __xfrm_policy_check2(struct sock *sk, int dir,
{
struct net *net = dev_net(skb->dev);
int ndir = dir | (reverse ? XFRM_POLICY_MASK + 1 : 0);
struct xfrm_offload *xo = xfrm_offload(skb);
struct xfrm_state *x;
if (sk && sk->sk_policy[XFRM_POLICY_IN])
return __xfrm_policy_check(sk, ndir, skb, family);
if (xo) {
x = xfrm_input_state(skb);
if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
return (xo->flags & CRYPTO_DONE) &&
(xo->status & CRYPTO_SUCCESS);
}
return __xfrm_check_nopolicy(net, skb, dir) ||
__xfrm_check_dev_nopolicy(skb, dir, family) ||
__xfrm_policy_check(sk, ndir, skb, family);
@@ -1529,6 +1571,23 @@ struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
unsigned short family);
int xfrm_state_check_expire(struct xfrm_state *x);
#ifdef CONFIG_XFRM_OFFLOAD
static inline void xfrm_dev_state_update_curlft(struct xfrm_state *x)
{
struct xfrm_dev_offload *xdo = &x->xso;
struct net_device *dev = xdo->dev;
if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
return;
if (dev && dev->xfrmdev_ops &&
dev->xfrmdev_ops->xdo_dev_state_update_curlft)
dev->xfrmdev_ops->xdo_dev_state_update_curlft(x);
}
#else
static inline void xfrm_dev_state_update_curlft(struct xfrm_state *x) {}
#endif
void xfrm_state_insert(struct xfrm_state *x);
int xfrm_state_add(struct xfrm_state *x);
int xfrm_state_update(struct xfrm_state *x);
@@ -1578,6 +1637,8 @@ struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
int xfrm_state_delete(struct xfrm_state *x);
int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync);
int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid);
int xfrm_dev_policy_flush(struct net *net, struct net_device *dev,
bool task_valid);
void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
@@ -1860,29 +1921,6 @@ static inline void xfrm_states_delete(struct xfrm_state **states, int n)
}
#endif
#ifdef CONFIG_XFRM
static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
{
struct sec_path *sp = skb_sec_path(skb);
return sp->xvec[sp->len - 1];
}
#endif
static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
struct sec_path *sp = skb_sec_path(skb);
if (!sp || !sp->olen || sp->len != sp->olen)
return NULL;
return &sp->ovec[sp->olen - 1];
#else
return NULL;
#endif
}
void __init xfrm_dev_init(void);
#ifdef CONFIG_XFRM_OFFLOAD
@@ -1892,6 +1930,9 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
struct xfrm_user_offload *xuo,
struct netlink_ext_ack *extack);
int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
struct xfrm_user_offload *xuo, u8 dir,
struct netlink_ext_ack *extack);
bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
@@ -1940,6 +1981,28 @@ static inline void xfrm_dev_state_free(struct xfrm_state *x)
netdev_put(dev, &xso->dev_tracker);
}
}
static inline void xfrm_dev_policy_delete(struct xfrm_policy *x)
{
struct xfrm_dev_offload *xdo = &x->xdo;
struct net_device *dev = xdo->dev;
if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_policy_delete)
dev->xfrmdev_ops->xdo_dev_policy_delete(x);
}
static inline void xfrm_dev_policy_free(struct xfrm_policy *x)
{
struct xfrm_dev_offload *xdo = &x->xdo;
struct net_device *dev = xdo->dev;
if (dev && dev->xfrmdev_ops) {
if (dev->xfrmdev_ops->xdo_dev_policy_free)
dev->xfrmdev_ops->xdo_dev_policy_free(x);
xdo->dev = NULL;
netdev_put(dev, &xdo->dev_tracker);
}
}
#else
static inline void xfrm_dev_resume(struct sk_buff *skb)
{
@@ -1967,6 +2030,21 @@ static inline void xfrm_dev_state_free(struct xfrm_state *x)
{
}
static inline int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
struct xfrm_user_offload *xuo, u8 dir,
struct netlink_ext_ack *extack)
{
return 0;
}
static inline void xfrm_dev_policy_delete(struct xfrm_policy *x)
{
}
static inline void xfrm_dev_policy_free(struct xfrm_policy *x)
{
}
static inline bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
return false;


@@ -519,6 +519,12 @@ struct xfrm_user_offload {
*/
#define XFRM_OFFLOAD_IPV6 1
#define XFRM_OFFLOAD_INBOUND 2
/* Two bits above are relevant for state path only, while
* offload is used for both policy and state flows.
*
* In policy offload mode, they are free and can be safely reused.
*/
#define XFRM_OFFLOAD_PACKET 4
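/* Usage sketch (hypothetical, not part of this header): userspace asks
 * for packet offload by passing this flag in the xfrm_user_offload
 * attribute (XFRMA_OFFLOAD_DEV) when creating the state or policy:
 *
 *	struct xfrm_user_offload xuo = {
 *		.ifindex = ifindex,		/* offloading netdev */
 *		.flags = XFRM_OFFLOAD_PACKET,	/* packet offload mode */
 *	};
 *
 * This corresponds to "offload packet dev <dev> dir <dir>" in iproute2.
 */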
struct xfrm_userpolicy_default {
#define XFRM_USERPOLICY_UNSPEC 0


@@ -132,6 +132,16 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
if (xo->flags & XFRM_GRO || x->xso.dir == XFRM_DEV_OFFLOAD_IN)
return skb;
/* The packet was sent to HW IPsec packet offload engine,
* but to the wrong device. Drop the packet, so it won't skip
* the XFRM stack.
*/
if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET && x->xso.dev != dev) {
kfree_skb(skb);
dev_core_stats_tx_dropped_inc(dev);
return NULL;
}
/* This skb was already validated on the upper/virtual dev */
if ((x->xso.dev != dev) && (x->xso.real_dev == dev))
return skb;
@@ -229,6 +239,7 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
struct xfrm_dev_offload *xso = &x->xso;
xfrm_address_t *saddr;
xfrm_address_t *daddr;
bool is_packet_offload;
if (!x->type_offload) {
NL_SET_ERR_MSG(extack, "Type doesn't support offload");
@@ -241,11 +252,13 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
return -EINVAL;
}
if (xuo->flags & ~(XFRM_OFFLOAD_IPV6 | XFRM_OFFLOAD_INBOUND)) {
if (xuo->flags &
~(XFRM_OFFLOAD_IPV6 | XFRM_OFFLOAD_INBOUND | XFRM_OFFLOAD_PACKET)) {
NL_SET_ERR_MSG(extack, "Unrecognized flags in offload request");
return -EINVAL;
}
is_packet_offload = xuo->flags & XFRM_OFFLOAD_PACKET;
dev = dev_get_by_index(net, xuo->ifindex);
if (!dev) {
if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) {
@@ -260,7 +273,7 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
x->props.family,
xfrm_smark_get(0, x));
if (IS_ERR(dst))
return 0;
return (is_packet_offload) ? -EINVAL : 0;
dev = dst->dev;
@@ -271,7 +284,7 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
xso->dev = NULL;
dev_put(dev);
return 0;
return (is_packet_offload) ? -EINVAL : 0;
}
if (x->props.flags & XFRM_STATE_ESN &&
@@ -291,14 +304,28 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
else
xso->dir = XFRM_DEV_OFFLOAD_OUT;
if (is_packet_offload)
xso->type = XFRM_DEV_OFFLOAD_PACKET;
else
xso->type = XFRM_DEV_OFFLOAD_CRYPTO;
err = dev->xfrmdev_ops->xdo_dev_state_add(x);
if (err) {
xso->dev = NULL;
xso->dir = 0;
xso->real_dev = NULL;
netdev_put(dev, &xso->dev_tracker);
xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
if (err != -EOPNOTSUPP) {
/* The user explicitly requested packet offload mode and configured
 * a policy in addition to the XFRM state. So be civil to users,
 * and return an error instead of taking the fallback path.
 *
 * This WARN_ON() can be seen as documentation for driver
 * authors not to return -EOPNOTSUPP in packet offload mode.
*/
WARN_ON(err == -EOPNOTSUPP && is_packet_offload);
if (err != -EOPNOTSUPP || is_packet_offload) {
NL_SET_ERR_MSG(extack, "Device failed to offload this state");
return err;
}
@@ -308,6 +335,69 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
}
EXPORT_SYMBOL_GPL(xfrm_dev_state_add);
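
To honour the rule documented above, a driver's xdo_dev_state_add()
might be shaped like this (a sketch; the foo_* checks are hypothetical):

static int foo_xfrm_add_state(struct xfrm_state *x)
{
	if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET &&
	    !foo_hw_can_do_packet(x))
		return -EINVAL;		/* hard error, no SW fallback */

	if (x->xso.type == XFRM_DEV_OFFLOAD_CRYPTO &&
	    !foo_hw_can_do_crypto(x))
		return -EOPNOTSUPP;	/* stack falls back to SW crypto */

	return foo_program_sa(x);	/* hypothetical HW programming */
}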
int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
struct xfrm_user_offload *xuo, u8 dir,
struct netlink_ext_ack *extack)
{
struct xfrm_dev_offload *xdo = &xp->xdo;
struct net_device *dev;
int err;
if (!xuo->flags || xuo->flags & ~XFRM_OFFLOAD_PACKET) {
/* Only packet offload mode is supported, which means
 * the user must set the XFRM_OFFLOAD_PACKET bit.
*/
NL_SET_ERR_MSG(extack, "Unrecognized flags in offload request");
return -EINVAL;
}
dev = dev_get_by_index(net, xuo->ifindex);
if (!dev)
return -EINVAL;
if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_policy_add) {
xdo->dev = NULL;
dev_put(dev);
NL_SET_ERR_MSG(extack, "Policy offload is not supported");
return -EINVAL;
}
xdo->dev = dev;
netdev_tracker_alloc(dev, &xdo->dev_tracker, GFP_ATOMIC);
xdo->real_dev = dev;
xdo->type = XFRM_DEV_OFFLOAD_PACKET;
switch (dir) {
case XFRM_POLICY_IN:
xdo->dir = XFRM_DEV_OFFLOAD_IN;
break;
case XFRM_POLICY_OUT:
xdo->dir = XFRM_DEV_OFFLOAD_OUT;
break;
case XFRM_POLICY_FWD:
xdo->dir = XFRM_DEV_OFFLOAD_FWD;
break;
default:
xdo->dev = NULL;
dev_put(dev);
NL_SET_ERR_MSG(extack, "Unrecognized offload direction");
return -EINVAL;
}
err = dev->xfrmdev_ops->xdo_dev_policy_add(xp);
if (err) {
xdo->dev = NULL;
xdo->real_dev = NULL;
xdo->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
xdo->dir = 0;
netdev_put(dev, &xdo->dev_tracker);
NL_SET_ERR_MSG(extack, "Device failed to offload this policy");
return err;
}
return 0;
}
EXPORT_SYMBOL_GPL(xfrm_dev_policy_add);
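
On the driver side, the matching xdo_dev_policy_add() callback receives
the policy with xdo.dir already translated by the core. A sketch, with
foo_* hypothetical and FWD rejected as an example restriction:

static int foo_xfrm_add_policy(struct xfrm_policy *xp)
{
	switch (xp->xdo.dir) {
	case XFRM_DEV_OFFLOAD_IN:
	case XFRM_DEV_OFFLOAD_OUT:
		return foo_program_policy(xp);	/* hypothetical */
	default:
		/* e.g. this HW cannot offload forwarded traffic */
		return -EINVAL;
	}
}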
bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
int mtu;
@@ -318,8 +408,9 @@ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
if (!x->type_offload || x->encap)
return false;
if ((!dev || (dev == xfrm_dst_path(dst)->dev)) &&
(!xdst->child->xfrm)) {
if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET ||
((!dev || (dev == xfrm_dst_path(dst)->dev)) &&
!xdst->child->xfrm)) {
mtu = xfrm_state_mtu(x, xdst->child_mtu_cached);
if (skb->len <= mtu)
goto ok;
@@ -410,8 +501,10 @@ static int xfrm_api_check(struct net_device *dev)
static int xfrm_dev_down(struct net_device *dev)
{
if (dev->features & NETIF_F_HW_ESP)
if (dev->features & NETIF_F_HW_ESP) {
xfrm_dev_state_flush(dev_net(dev), dev, true);
xfrm_dev_policy_flush(dev_net(dev), dev, true);
}
return NOTIFY_DONE;
}


@@ -492,7 +492,7 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
struct xfrm_state *x = dst->xfrm;
struct net *net = xs_net(x);
if (err <= 0)
if (err <= 0 || x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
goto resume;
do {
@@ -717,6 +717,16 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb)
break;
}
if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET) {
if (!xfrm_dev_offload_ok(skb, x)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
kfree_skb(skb);
return -EHOSTUNREACH;
}
return xfrm_output_resume(sk, skb, 0);
}
secpath_reset(skb);
if (xfrm_dev_offload_ok(skb, x)) {


@@ -425,6 +425,7 @@ void xfrm_policy_destroy(struct xfrm_policy *policy)
if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
BUG();
xfrm_dev_policy_free(policy);
call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
}
EXPORT_SYMBOL(xfrm_policy_destroy);
@@ -535,7 +536,7 @@ redo:
__get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
pol->family, nhashmask, dbits, sbits);
if (!entry0) {
if (!entry0 || pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
hlist_del_rcu(&pol->bydst);
hlist_add_head_rcu(&pol->bydst, ndsttable + h);
h0 = h;
@@ -866,7 +867,7 @@ static void xfrm_policy_inexact_list_reinsert(struct net *net,
break;
}
if (newpos)
if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
hlist_add_behind_rcu(&policy->bydst, newpos);
else
hlist_add_head_rcu(&policy->bydst, &n->hhead);
@@ -1347,7 +1348,7 @@ static void xfrm_hash_rebuild(struct work_struct *work)
else
break;
}
if (newpos)
if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
hlist_add_behind_rcu(&policy->bydst, newpos);
else
hlist_add_head_rcu(&policy->bydst, chain);
@@ -1524,7 +1525,7 @@ static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
break;
}
if (newpos)
if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
hlist_add_behind_rcu(&policy->bydst_inexact_list, newpos);
else
hlist_add_head_rcu(&policy->bydst_inexact_list, chain);
@@ -1561,9 +1562,12 @@ static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain,
break;
}
if (newpos)
if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
hlist_add_behind_rcu(&policy->bydst, &newpos->bydst);
else
/* Packet offload policies are inserted at the head
 * to speed up lookups.
*/
hlist_add_head_rcu(&policy->bydst, chain);
return delpol;
@@ -1769,12 +1773,41 @@ xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
}
return err;
}
static inline int xfrm_dev_policy_flush_secctx_check(struct net *net,
struct net_device *dev,
bool task_valid)
{
struct xfrm_policy *pol;
int err = 0;
list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
if (pol->walk.dead ||
xfrm_policy_id2dir(pol->index) >= XFRM_POLICY_MAX ||
pol->xdo.dev != dev)
continue;
err = security_xfrm_policy_delete(pol->security);
if (err) {
xfrm_audit_policy_delete(pol, 0, task_valid);
return err;
}
}
return err;
}
#else
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
{
return 0;
}
static inline int xfrm_dev_policy_flush_secctx_check(struct net *net,
struct net_device *dev,
bool task_valid)
{
return 0;
}
#endif
int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
@@ -1814,6 +1847,44 @@ out:
}
EXPORT_SYMBOL(xfrm_policy_flush);
int xfrm_dev_policy_flush(struct net *net, struct net_device *dev,
bool task_valid)
{
int dir, err = 0, cnt = 0;
struct xfrm_policy *pol;
spin_lock_bh(&net->xfrm.xfrm_policy_lock);
err = xfrm_dev_policy_flush_secctx_check(net, dev, task_valid);
if (err)
goto out;
again:
list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
dir = xfrm_policy_id2dir(pol->index);
if (pol->walk.dead ||
dir >= XFRM_POLICY_MAX ||
pol->xdo.dev != dev)
continue;
__xfrm_policy_unlink(pol, dir);
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
cnt++;
xfrm_audit_policy_delete(pol, 1, task_valid);
xfrm_policy_kill(pol);
spin_lock_bh(&net->xfrm.xfrm_policy_lock);
goto again;
}
if (cnt)
__xfrm_policy_inexact_flush(net);
else
err = -ESRCH;
out:
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
return err;
}
EXPORT_SYMBOL(xfrm_dev_policy_flush);
int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
int (*func)(struct xfrm_policy *, int, int, void*),
void *data)
@@ -2113,6 +2184,9 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
break;
}
}
if (ret && ret->xdo.type == XFRM_DEV_OFFLOAD_PACKET)
goto skip_inexact;
bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
if (!bin || !xfrm_policy_find_inexact_candidates(&cand, bin, saddr,
daddr))
@@ -2245,6 +2319,7 @@ int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
pol = __xfrm_policy_unlink(pol, dir);
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
if (pol) {
xfrm_dev_policy_delete(pol);
xfrm_policy_kill(pol);
return 0;
}


@@ -84,6 +84,25 @@ static unsigned int xfrm_seq_hash(struct net *net, u32 seq)
return __xfrm_seq_hash(seq, net->xfrm.state_hmask);
}
#define XFRM_STATE_INSERT(by, _n, _h, _type) \
{ \
struct xfrm_state *_x = NULL; \
\
if (_type != XFRM_DEV_OFFLOAD_PACKET) { \
hlist_for_each_entry_rcu(_x, _h, by) { \
if (_x->xso.type == XFRM_DEV_OFFLOAD_PACKET) \
continue; \
break; \
} \
} \
\
if (!_x || _x->xso.type == XFRM_DEV_OFFLOAD_PACKET) \
/* SAD is empty or consists of HW SAs only */ \
hlist_add_head_rcu(_n, _h); \
else \
hlist_add_before_rcu(_n, &_x->by); \
}
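
The invariant this macro maintains, illustrated (not kernel code): HW
(packet offload) SAs are kept ahead of SW SAs in each hash chain, so a
HW-only lookup can stop at the first SW entry it meets.

	head -> [HW SA] -> [HW SA] -> [SW SA] -> [SW SA]

	/* HW SA: added at the head */
	/* SW SA: added just before the first existing SW SA */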
static void xfrm_hash_transfer(struct hlist_head *list,
struct hlist_head *ndsttable,
struct hlist_head *nsrctable,
@@ -100,23 +119,25 @@ static void xfrm_hash_transfer(struct hlist_head *list,
h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
x->props.reqid, x->props.family,
nhashmask);
hlist_add_head_rcu(&x->bydst, ndsttable + h);
XFRM_STATE_INSERT(bydst, &x->bydst, ndsttable + h, x->xso.type);
h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
x->props.family,
nhashmask);
hlist_add_head_rcu(&x->bysrc, nsrctable + h);
XFRM_STATE_INSERT(bysrc, &x->bysrc, nsrctable + h, x->xso.type);
if (x->id.spi) {
h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
x->id.proto, x->props.family,
nhashmask);
hlist_add_head_rcu(&x->byspi, nspitable + h);
XFRM_STATE_INSERT(byspi, &x->byspi, nspitable + h,
x->xso.type);
}
if (x->km.seq) {
h = __xfrm_seq_hash(x->km.seq, nhashmask);
hlist_add_head_rcu(&x->byseq, nseqtable + h);
XFRM_STATE_INSERT(byseq, &x->byseq, nseqtable + h,
x->xso.type);
}
}
}
@@ -549,6 +570,8 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
int err = 0;
spin_lock(&x->lock);
xfrm_dev_state_update_curlft(x);
if (x->km.state == XFRM_STATE_DEAD)
goto out;
if (x->km.state == XFRM_STATE_EXPIRED)
@@ -951,6 +974,49 @@ xfrm_init_tempstate(struct xfrm_state *x, const struct flowi *fl,
x->props.family = tmpl->encap_family;
}
static struct xfrm_state *__xfrm_state_lookup_all(struct net *net, u32 mark,
const xfrm_address_t *daddr,
__be32 spi, u8 proto,
unsigned short family,
struct xfrm_dev_offload *xdo)
{
unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family);
struct xfrm_state *x;
hlist_for_each_entry_rcu(x, net->xfrm.state_byspi + h, byspi) {
#ifdef CONFIG_XFRM_OFFLOAD
if (xdo->type == XFRM_DEV_OFFLOAD_PACKET) {
if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
/* HW states are at the head of the list, so there
 * is no need to iterate further.
*/
break;
/* Packet offload: both policy and SA should
 * have the same device.
*/
if (xdo->dev != x->xso.dev)
continue;
} else if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
/* Skip HW states in SW lookups */
continue;
#endif
if (x->props.family != family ||
x->id.spi != spi ||
x->id.proto != proto ||
!xfrm_addr_equal(&x->id.daddr, daddr, family))
continue;
if ((mark & x->mark.m) != x->mark.v)
continue;
if (!xfrm_state_hold_rcu(x))
continue;
return x;
}
return NULL;
}
static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark,
const xfrm_address_t *daddr,
__be32 spi, u8 proto,
@@ -1092,6 +1158,23 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
rcu_read_lock();
h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
hlist_for_each_entry_rcu(x, net->xfrm.state_bydst + h, bydst) {
#ifdef CONFIG_XFRM_OFFLOAD
if (pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
/* HW states are at the head of the list, so there
 * is no need to iterate further.
*/
break;
/* Packet offload: both policy and SA should
 * have the same device.
*/
if (pol->xdo.dev != x->xso.dev)
continue;
} else if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
/* Skip HW states in SW lookups */
continue;
#endif
if (x->props.family == encap_family &&
x->props.reqid == tmpl->reqid &&
(mark & x->mark.m) == x->mark.v &&
@@ -1109,6 +1192,23 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family);
hlist_for_each_entry_rcu(x, net->xfrm.state_bydst + h_wildcard, bydst) {
#ifdef CONFIG_XFRM_OFFLOAD
if (pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
/* HW states are at the head of the list, so there
 * is no need to iterate further.
*/
break;
/* Packet offload: both policy and SA should
 * have the same device.
*/
if (pol->xdo.dev != x->xso.dev)
continue;
} else if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
/* Skip HW states in SW lookups */
continue;
#endif
if (x->props.family == encap_family &&
x->props.reqid == tmpl->reqid &&
(mark & x->mark.m) == x->mark.v &&
@@ -1126,8 +1226,10 @@ found:
x = best;
if (!x && !error && !acquire_in_progress) {
if (tmpl->id.spi &&
(x0 = __xfrm_state_lookup(net, mark, daddr, tmpl->id.spi,
tmpl->id.proto, encap_family)) != NULL) {
(x0 = __xfrm_state_lookup_all(net, mark, daddr,
tmpl->id.spi, tmpl->id.proto,
encap_family,
&pol->xdo)) != NULL) {
to_put = x0;
error = -EEXIST;
goto out;
@@ -1161,21 +1263,53 @@ found:
x = NULL;
goto out;
}
#ifdef CONFIG_XFRM_OFFLOAD
if (pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
struct xfrm_dev_offload *xdo = &pol->xdo;
struct xfrm_dev_offload *xso = &x->xso;
xso->type = XFRM_DEV_OFFLOAD_PACKET;
xso->dir = xdo->dir;
xso->dev = xdo->dev;
xso->real_dev = xdo->real_dev;
netdev_tracker_alloc(xso->dev, &xso->dev_tracker,
GFP_ATOMIC);
error = xso->dev->xfrmdev_ops->xdo_dev_state_add(x);
if (error) {
xso->dir = 0;
netdev_put(xso->dev, &xso->dev_tracker);
xso->dev = NULL;
xso->real_dev = NULL;
xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
x->km.state = XFRM_STATE_DEAD;
to_put = x;
x = NULL;
goto out;
}
}
#endif
if (km_query(x, tmpl, pol) == 0) {
spin_lock_bh(&net->xfrm.xfrm_state_lock);
x->km.state = XFRM_STATE_ACQ;
list_add(&x->km.all, &net->xfrm.state_all);
hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
XFRM_STATE_INSERT(bydst, &x->bydst,
net->xfrm.state_bydst + h,
x->xso.type);
h = xfrm_src_hash(net, daddr, saddr, encap_family);
hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);
XFRM_STATE_INSERT(bysrc, &x->bysrc,
net->xfrm.state_bysrc + h,
x->xso.type);
if (x->id.spi) {
h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family);
hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
XFRM_STATE_INSERT(byspi, &x->byspi,
net->xfrm.state_byspi + h,
x->xso.type);
}
if (x->km.seq) {
h = xfrm_seq_hash(net, x->km.seq);
hlist_add_head_rcu(&x->byseq, net->xfrm.state_byseq + h);
XFRM_STATE_INSERT(byseq, &x->byseq,
net->xfrm.state_byseq + h,
x->xso.type);
}
x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
hrtimer_start(&x->mtimer,
@@ -1185,6 +1319,18 @@ found:
xfrm_hash_grow_check(net, x->bydst.next != NULL);
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
} else {
#ifdef CONFIG_XFRM_OFFLOAD
struct xfrm_dev_offload *xso = &x->xso;
if (xso->type == XFRM_DEV_OFFLOAD_PACKET) {
xso->dev->xfrmdev_ops->xdo_dev_state_delete(x);
xso->dir = 0;
netdev_put(xso->dev, &xso->dev_tracker);
xso->dev = NULL;
xso->real_dev = NULL;
xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
}
#endif
x->km.state = XFRM_STATE_DEAD;
to_put = x;
x = NULL;
@@ -1280,22 +1426,26 @@ static void __xfrm_state_insert(struct xfrm_state *x)
h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr,
x->props.reqid, x->props.family);
hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
XFRM_STATE_INSERT(bydst, &x->bydst, net->xfrm.state_bydst + h,
x->xso.type);
h = xfrm_src_hash(net, &x->id.daddr, &x->props.saddr, x->props.family);
hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);
XFRM_STATE_INSERT(bysrc, &x->bysrc, net->xfrm.state_bysrc + h,
x->xso.type);
if (x->id.spi) {
h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto,
x->props.family);
hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
XFRM_STATE_INSERT(byspi, &x->byspi, net->xfrm.state_byspi + h,
x->xso.type);
}
if (x->km.seq) {
h = xfrm_seq_hash(net, x->km.seq);
hlist_add_head_rcu(&x->byseq, net->xfrm.state_byseq + h);
XFRM_STATE_INSERT(byseq, &x->byseq, net->xfrm.state_byseq + h,
x->xso.type);
}
hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL_SOFT);
@@ -1409,9 +1559,11 @@ static struct xfrm_state *__find_acq_core(struct net *net,
ktime_set(net->xfrm.sysctl_acq_expires, 0),
HRTIMER_MODE_REL_SOFT);
list_add(&x->km.all, &net->xfrm.state_all);
hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
XFRM_STATE_INSERT(bydst, &x->bydst, net->xfrm.state_bydst + h,
x->xso.type);
h = xfrm_src_hash(net, daddr, saddr, family);
hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);
XFRM_STATE_INSERT(bysrc, &x->bysrc, net->xfrm.state_bysrc + h,
x->xso.type);
net->xfrm.state_num++;
@@ -1786,6 +1938,8 @@ EXPORT_SYMBOL(xfrm_state_update);
int xfrm_state_check_expire(struct xfrm_state *x)
{
xfrm_dev_state_update_curlft(x);
if (!x->curlft.use_time)
x->curlft.use_time = ktime_get_real_seconds();
@@ -2094,7 +2248,8 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high,
spin_lock_bh(&net->xfrm.xfrm_state_lock);
x->id.spi = newspi;
h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family);
hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
XFRM_STATE_INSERT(byspi, &x->byspi, net->xfrm.state_byspi + h,
x->xso.type);
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
err = 0;


@@ -956,6 +956,8 @@ static int copy_user_offload(struct xfrm_dev_offload *xso, struct sk_buff *skb)
xuo->ifindex = xso->dev->ifindex;
if (xso->dir == XFRM_DEV_OFFLOAD_IN)
xuo->flags = XFRM_OFFLOAD_INBOUND;
if (xso->type == XFRM_DEV_OFFLOAD_PACKET)
xuo->flags |= XFRM_OFFLOAD_PACKET;
return 0;
}
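
On the dump side, a userspace consumer could tell the two offload modes
apart from the returned attribute; a sketch, with netlink attribute
parsing elided:

static const char *offload_mode(const struct xfrm_user_offload *xuo)
{
	return (xuo->flags & XFRM_OFFLOAD_PACKET) ? "packet" : "crypto";
}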
@@ -1890,6 +1892,15 @@ static struct xfrm_policy *xfrm_policy_construct(struct net *net,
if (attrs[XFRMA_IF_ID])
xp->if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
/* configure the hardware if offload is requested */
if (attrs[XFRMA_OFFLOAD_DEV]) {
err = xfrm_dev_policy_add(net, xp,
nla_data(attrs[XFRMA_OFFLOAD_DEV]),
p->dir, extack);
if (err)
goto error;
}
return xp;
error:
*errp = err;
@@ -1929,6 +1940,7 @@ static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
xfrm_audit_policy_add(xp, err ? 0 : 1, true);
if (err) {
xfrm_dev_policy_delete(xp);
security_xfrm_policy_free(xp->security);
kfree(xp);
return err;
@@ -2041,6 +2053,8 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr
err = xfrm_mark_put(skb, &xp->mark);
if (!err)
err = xfrm_if_id_put(skb, xp->if_id);
if (!err && xp->xdo.dev)
err = copy_user_offload(&xp->xdo, skb);
if (err) {
nlmsg_cancel(skb, nlh);
return err;
@@ -3379,6 +3393,8 @@ static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
err = xfrm_mark_put(skb, &xp->mark);
if (!err)
err = xfrm_if_id_put(skb, xp->if_id);
if (!err && xp->xdo.dev)
err = copy_user_offload(&xp->xdo, skb);
if (err) {
nlmsg_cancel(skb, nlh);
return err;
@@ -3497,6 +3513,8 @@ static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
err = xfrm_mark_put(skb, &xp->mark);
if (!err)
err = xfrm_if_id_put(skb, xp->if_id);
if (!err && xp->xdo.dev)
err = copy_user_offload(&xp->xdo, skb);
if (err) {
nlmsg_cancel(skb, nlh);
return err;
@@ -3580,6 +3598,8 @@ static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_e
err = xfrm_mark_put(skb, &xp->mark);
if (!err)
err = xfrm_if_id_put(skb, xp->if_id);
if (!err && xp->xdo.dev)
err = copy_user_offload(&xp->xdo, skb);
if (err)
goto out_free_skb;