8b20137012
This change introduces a parallel path in the kernel for retrieving the local id, flags, if_index for an addr entry in the context of an MPTCP connection that's being managed by a userspace PM. The userspace and in-kernel PM modes deviate in their procedures for obtaining this information. Acked-by: Paolo Abeni <pabeni@redhat.com> Signed-off-by: Kishen Maloor <kishen.maloor@intel.com> Signed-off-by: Mat Martineau <mathew.j.martineau@linux.intel.com> Signed-off-by: David S. Miller <davem@davemloft.net>
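For context, a minimal sketch of how a generic PM helper could dispatch between the in-kernel path and the userspace-PM path added by this change; the wrapper and the in-kernel helper name mptcp_pm_nl_get_flags_and_ifindex_by_id() are illustrative assumptions and are not part of this file.

/* Illustrative only: select the userspace-PM lookup defined below when
 * the connection is managed by a userspace PM, otherwise fall back to
 * the in-kernel PM helper (name assumed for this sketch).
 */
int mptcp_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk,
					 unsigned int id,
					 u8 *flags, int *ifindex)
{
	*flags = 0;
	*ifindex = 0;

	if (mptcp_pm_is_userspace(msk))
		return mptcp_userspace_pm_get_flags_and_ifindex_by_id(msk, id,
								      flags,
								      ifindex);

	return mptcp_pm_nl_get_flags_and_ifindex_by_id(msk, id, flags, ifindex);
}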
122 lines
2.9 KiB
C
// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2022, Intel Corporation.
 */

#include "protocol.h"

void mptcp_free_local_addr_list(struct mptcp_sock *msk)
{
	struct mptcp_pm_addr_entry *entry, *tmp;
	struct sock *sk = (struct sock *)msk;
	LIST_HEAD(free_list);

	if (!mptcp_pm_is_userspace(msk))
		return;

	spin_lock_bh(&msk->pm.lock);
	list_splice_init(&msk->pm.userspace_pm_local_addr_list, &free_list);
	spin_unlock_bh(&msk->pm.lock);

	list_for_each_entry_safe(entry, tmp, &free_list, list) {
		sock_kfree_s(sk, entry, sizeof(*entry));
	}
}

int mptcp_userspace_pm_append_new_local_addr(struct mptcp_sock *msk,
					     struct mptcp_pm_addr_entry *entry)
{
	DECLARE_BITMAP(id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
	struct mptcp_pm_addr_entry *match = NULL;
	struct sock *sk = (struct sock *)msk;
	struct mptcp_pm_addr_entry *e;
	bool addr_match = false;
	bool id_match = false;
	int ret = -EINVAL;

	bitmap_zero(id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);

	spin_lock_bh(&msk->pm.lock);
	list_for_each_entry(e, &msk->pm.userspace_pm_local_addr_list, list) {
		addr_match = mptcp_addresses_equal(&e->addr, &entry->addr, true);
		if (addr_match && entry->addr.id == 0)
			entry->addr.id = e->addr.id;
		id_match = (e->addr.id == entry->addr.id);
		if (addr_match && id_match) {
			match = e;
			break;
		} else if (addr_match || id_match) {
			break;
		}
		__set_bit(e->addr.id, id_bitmap);
	}

	if (!match && !addr_match && !id_match) {
		/* Memory for the entry is allocated from the
		 * sock option buffer.
		 */
		e = sock_kmalloc(sk, sizeof(*e), GFP_ATOMIC);
		if (!e) {
			spin_unlock_bh(&msk->pm.lock);
			return -ENOMEM;
		}

		*e = *entry;
		if (!e->addr.id)
			e->addr.id = find_next_zero_bit(id_bitmap,
							MPTCP_PM_MAX_ADDR_ID + 1,
							1);
		list_add_tail_rcu(&e->list, &msk->pm.userspace_pm_local_addr_list);
		ret = e->addr.id;
	} else if (match) {
		ret = entry->addr.id;
	}

	spin_unlock_bh(&msk->pm.lock);
	return ret;
}

int mptcp_userspace_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk,
						   unsigned int id,
						   u8 *flags, int *ifindex)
{
	struct mptcp_pm_addr_entry *entry, *match = NULL;

	*flags = 0;
	*ifindex = 0;

	spin_lock_bh(&msk->pm.lock);
	list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list) {
		if (id == entry->addr.id) {
			match = entry;
			break;
		}
	}
	spin_unlock_bh(&msk->pm.lock);
	if (match) {
		*flags = match->flags;
		*ifindex = match->ifindex;
	}

	return 0;
}

int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk,
				    struct mptcp_addr_info *skc)
{
	struct mptcp_pm_addr_entry new_entry;
	__be16 msk_sport = ((struct inet_sock *)
			    inet_sk((struct sock *)msk))->inet_sport;

	memset(&new_entry, 0, sizeof(struct mptcp_pm_addr_entry));
	new_entry.addr = *skc;
	new_entry.addr.id = 0;
	new_entry.flags = MPTCP_PM_ADDR_FLAG_IMPLICIT;

	if (new_entry.addr.port == msk_sport)
		new_entry.addr.port = 0;

	return mptcp_userspace_pm_append_new_local_addr(msk, &new_entry);
}