Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Lots of conflicts, but happily all cases of overlapping
changes, parallel adds, things of that nature.

Thanks to Stephen Rothwell, Saeed Mahameed, and others
for their guidance in these resolutions.

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
David S. Miller
2018-12-20 10:53:28 -08:00
314 changed files with 3478 additions and 1739 deletions

View File

@@ -56,7 +56,7 @@ enum {
static struct proto *saved_tcpv6_prot;
static DEFINE_MUTEX(tcpv6_prot_mutex);
static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_mutex);
static DEFINE_SPINLOCK(device_spinlock);
static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
static struct proto_ops tls_sw_proto_ops;
@@ -538,11 +538,14 @@ static struct tls_context *create_ctx(struct sock *sk)
struct inet_connection_sock *icsk = inet_csk(sk);
struct tls_context *ctx;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
if (!ctx)
return NULL;
icsk->icsk_ulp_data = ctx;
ctx->setsockopt = sk->sk_prot->setsockopt;
ctx->getsockopt = sk->sk_prot->getsockopt;
ctx->sk_proto_close = sk->sk_prot->close;
return ctx;
}
@@ -552,7 +555,7 @@ static int tls_hw_prot(struct sock *sk)
struct tls_device *dev;
int rc = 0;
mutex_lock(&device_mutex);
spin_lock_bh(&device_spinlock);
list_for_each_entry(dev, &device_list, dev_list) {
if (dev->feature && dev->feature(dev)) {
ctx = create_ctx(sk);
@@ -570,7 +573,7 @@ static int tls_hw_prot(struct sock *sk)
}
}
out:
mutex_unlock(&device_mutex);
spin_unlock_bh(&device_spinlock);
return rc;
}
@@ -579,12 +582,17 @@ static void tls_hw_unhash(struct sock *sk)
struct tls_context *ctx = tls_get_ctx(sk);
struct tls_device *dev;
mutex_lock(&device_mutex);
spin_lock_bh(&device_spinlock);
list_for_each_entry(dev, &device_list, dev_list) {
if (dev->unhash)
if (dev->unhash) {
kref_get(&dev->kref);
spin_unlock_bh(&device_spinlock);
dev->unhash(dev, sk);
kref_put(&dev->kref, dev->release);
spin_lock_bh(&device_spinlock);
}
}
mutex_unlock(&device_mutex);
spin_unlock_bh(&device_spinlock);
ctx->unhash(sk);
}
@@ -595,12 +603,17 @@ static int tls_hw_hash(struct sock *sk)
int err;
err = ctx->hash(sk);
mutex_lock(&device_mutex);
spin_lock_bh(&device_spinlock);
list_for_each_entry(dev, &device_list, dev_list) {
if (dev->hash)
if (dev->hash) {
kref_get(&dev->kref);
spin_unlock_bh(&device_spinlock);
err |= dev->hash(dev, sk);
kref_put(&dev->kref, dev->release);
spin_lock_bh(&device_spinlock);
}
}
mutex_unlock(&device_mutex);
spin_unlock_bh(&device_spinlock);
if (err)
tls_hw_unhash(sk);
@@ -675,9 +688,6 @@ static int tls_init(struct sock *sk)
rc = -ENOMEM;
goto out;
}
ctx->setsockopt = sk->sk_prot->setsockopt;
ctx->getsockopt = sk->sk_prot->getsockopt;
ctx->sk_proto_close = sk->sk_prot->close;
/* Build IPv6 TLS whenever the address of tcpv6 _prot changes */
if (ip_ver == TLSV6 &&
@@ -699,17 +709,17 @@ out:
/*
 * tls_register_device - add a TLS offload device to the global device list.
 * @device: driver-provided device descriptor; caller retains ownership.
 *
 * NOTE(review): the diff rendering left both the old mutex_lock(&device_mutex)
 * and new spin_lock_bh(&device_spinlock) lines in place; this is the resolved
 * post-commit form. A BH-disabling spinlock guards the list, consistent with
 * the walkers in tls_hw_prot()/tls_hw_hash()/tls_hw_unhash() in this commit.
 */
void tls_register_device(struct tls_device *device)
{
	spin_lock_bh(&device_spinlock);
	list_add_tail(&device->dev_list, &device_list);
	spin_unlock_bh(&device_spinlock);
}
EXPORT_SYMBOL(tls_register_device);
/*
 * tls_unregister_device - remove a TLS offload device from the global list.
 * @device: device previously added via tls_register_device().
 *
 * NOTE(review): the diff rendering left both the old mutex_lock(&device_mutex)
 * and new spin_lock_bh(&device_spinlock) lines in place; this is the resolved
 * post-commit form using the spinlock that protects device_list.
 */
void tls_unregister_device(struct tls_device *device)
{
	spin_lock_bh(&device_spinlock);
	list_del(&device->dev_list);
	spin_unlock_bh(&device_spinlock);
}
EXPORT_SYMBOL(tls_unregister_device);