Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

The UDP offload conflict is dealt with by simply taking what is
in net-next, where we have removed all of the UFO handling code
entirely.

The TCP conflict was a case of local variables in a function
being removed from both net and net-next.

In netvsc we had an assignment right next to where a missing
set of u64 stats sync object inits was added.
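
For reference, the u64 stats sync pattern at issue looks roughly like this
(a minimal sketch with illustrative type and field names, not the netvsc
code itself):

    #include <linux/u64_stats_sync.h>

    struct ring_stats {
        u64 rx_packets;
        struct u64_stats_sync syncp;   /* guards 64-bit counters on 32-bit SMP */
    };

    static void stats_setup(struct ring_stats *stats)
    {
        u64_stats_init(&stats->syncp);          /* the init that was missing */
    }

    static void stats_inc(struct ring_stats *stats)
    {
        u64_stats_update_begin(&stats->syncp);  /* writer side */
        stats->rx_packets++;
        u64_stats_update_end(&stats->syncp);
    }

    static u64 stats_read(struct ring_stats *stats)
    {
        unsigned int start;
        u64 packets;

        do {                                    /* reader retries on race */
            start = u64_stats_fetch_begin(&stats->syncp);
            packets = stats->rx_packets;
        } while (u64_stats_fetch_retry(&stats->syncp, start));
        return packets;
    }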

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 3118e6e19d
Author: David S. Miller
Date:   2017-08-09 16:28:45 -07:00
103 changed files with 2989 additions and 618 deletions

View File

@@ -1161,7 +1161,7 @@ M: Brendan Higgins <brendanhiggins@google.com>
 R: Benjamin Herrenschmidt <benh@kernel.crashing.org>
 R: Joel Stanley <joel@jms.id.au>
 L: linux-i2c@vger.kernel.org
-L: openbmc@lists.ozlabs.org
+L: openbmc@lists.ozlabs.org (moderated for non-subscribers)
 S: Maintained
 F: drivers/irqchip/irq-aspeed-i2c-ic.c
 F: drivers/i2c/busses/i2c-aspeed.c
@@ -5835,7 +5835,7 @@ F: drivers/staging/greybus/spi.c
 F: drivers/staging/greybus/spilib.c
 F: drivers/staging/greybus/spilib.h

-GREYBUS LOOBACK/TIME PROTOCOLS DRIVERS
+GREYBUS LOOPBACK/TIME PROTOCOLS DRIVERS
 M: Bryan O'Donoghue <pure.logic@nexus-software.ie>
 S: Maintained
 F: drivers/staging/greybus/loopback.c

arch/mips/net/ebpf_jit.c: new file, 1950 lines (diff suppressed because it is too large)

View File

@@ -1253,7 +1253,8 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp)
 		insn_count = bpf_jit_insn(jit, fp, i);
 		if (insn_count < 0)
 			return -1;
-		jit->addrs[i + 1] = jit->prg; /* Next instruction address */
+		/* Next instruction address */
+		jit->addrs[i + insn_count] = jit->prg;
 	}
 	bpf_jit_epilogue(jit);
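
The index fix above matters because some eBPF instructions are double length:
BPF_LD | BPF_DW | BPF_IMM, the 64-bit load-immediate, occupies two 8-byte
instruction slots, so bpf_jit_insn() returns insn_count == 2 for it. Recording
the JITed offset at addrs[i + 1] would then tag the second half of such an
instruction instead of the next real instruction, corrupting branch offsets
computed from that table. A hedged illustration (not the s390 code itself):

    /* A 64-bit immediate load spans eBPF insn slots i and i + 1. */
    struct bpf_insn ld_imm64[2] = {
        { .code = BPF_LD | BPF_DW | BPF_IMM,
          .dst_reg = BPF_REG_0, .imm = 0x12345678 }, /* slot i: low 32 bits */
        { .imm = 0x0badcafe },                       /* slot i+1: high bits */
    };
    /* The next instruction therefore starts at index i + insn_count (== 2). */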

View File

@@ -1,5 +1,6 @@
 generic-y += bug.h
 generic-y += clkdev.h
+generic-y += device.h
 generic-y += div64.h
 generic-y += dma-contiguous.h
 generic-y += emergency-restart.h
@@ -17,6 +18,7 @@ generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
+generic-y += param.h
 generic-y += percpu.h
 generic-y += preempt.h
 generic-y += rwsem.h

View File

@ -1,15 +0,0 @@
/*
* Arch specific extensions to struct device
*
* This file is released under the GPLv2
*/
#ifndef _ASM_XTENSA_DEVICE_H
#define _ASM_XTENSA_DEVICE_H
struct dev_archdata {
};
struct pdev_archdata {
};
#endif /* _ASM_XTENSA_DEVICE_H */

View File

@ -1,18 +0,0 @@
/*
* include/asm-xtensa/param.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_PARAM_H
#define _XTENSA_PARAM_H
#include <uapi/asm/param.h>
# define HZ CONFIG_HZ /* internal timer frequency */
# define USER_HZ 100 /* for user interfaces in "ticks" */
# define CLOCKS_PER_SEC (USER_HZ) /* frequnzy at which times() counts */
#endif /* _XTENSA_PARAM_H */

View File

@@ -94,13 +94,11 @@ unsigned long __sync_fetch_and_or_4(unsigned long *p, unsigned long v)
 }
 EXPORT_SYMBOL(__sync_fetch_and_or_4);

-#ifdef CONFIG_NET
 /*
  * Networking support
  */
 EXPORT_SYMBOL(csum_partial);
 EXPORT_SYMBOL(csum_partial_copy_generic);
-#endif /* CONFIG_NET */

 /*
  * Architecture-specific symbols

View File

@@ -103,6 +103,7 @@ void clear_user_highpage(struct page *page, unsigned long vaddr)
 	clear_page_alias(kvaddr, paddr);
 	preempt_enable();
 }
+EXPORT_SYMBOL(clear_user_highpage);

 void copy_user_highpage(struct page *dst, struct page *src,
 			unsigned long vaddr, struct vm_area_struct *vma)
@@ -119,10 +120,7 @@ void copy_user_highpage(struct page *dst, struct page *src,
 	copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr);
 	preempt_enable();
 }
+EXPORT_SYMBOL(copy_user_highpage);
-
-#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
-
-#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK

 /*
  * Any time the kernel writes to a user page cache page, or it is about to
@@ -176,7 +174,7 @@ void flush_dcache_page(struct page *page)
 	/* There shouldn't be an entry in the cache for this page anymore. */
 }
+EXPORT_SYMBOL(flush_dcache_page);

 /*
  * For now, flush the whole cache. FIXME??
@@ -188,6 +186,7 @@ void local_flush_cache_range(struct vm_area_struct *vma,
 	__flush_invalidate_dcache_all();
 	__invalidate_icache_all();
 }
+EXPORT_SYMBOL(local_flush_cache_range);

 /*
  * Remove any entry in the cache for this page.
@@ -207,8 +206,9 @@ void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
 	__flush_invalidate_dcache_page_alias(virt, phys);
 	__invalidate_icache_page_alias(virt, phys);
 }
+EXPORT_SYMBOL(local_flush_cache_page);

-#endif
+#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */

 void
 update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
@@ -225,7 +225,7 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
 	flush_tlb_page(vma, addr);

-#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
 	if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
 		unsigned long phys = page_to_phys(page);
@@ -256,7 +256,7 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
  * flush_dcache_page() on the page.
  */
-#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 		       unsigned long vaddr, void *dst, const void *src,

View File

@@ -71,17 +71,29 @@ struct bfq_service_tree {
  *
  * bfq_sched_data is the basic scheduler queue. It supports three
  * ioprio_classes, and can be used either as a toplevel queue or as an
- * intermediate queue on a hierarchical setup. @next_in_service
- * points to the active entity of the sched_data service trees that
- * will be scheduled next. It is used to reduce the number of steps
- * needed for each hierarchical-schedule update.
+ * intermediate queue in a hierarchical setup.
  *
  * The supported ioprio_classes are the same as in CFQ, in descending
  * priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE.
  * Requests from higher priority queues are served before all the
  * requests from lower priority queues; among requests of the same
  * queue requests are served according to B-WF2Q+.
- * All the fields are protected by the queue lock of the containing bfqd.
+ *
+ * The schedule is implemented by the service trees, plus the field
+ * @next_in_service, which points to the entity on the active trees
+ * that will be served next, if 1) no changes in the schedule occurs
+ * before the current in-service entity is expired, 2) the in-service
+ * queue becomes idle when it expires, and 3) if the entity pointed by
+ * in_service_entity is not a queue, then the in-service child entity
+ * of the entity pointed by in_service_entity becomes idle on
+ * expiration. This peculiar definition allows for the following
+ * optimization, not yet exploited: while a given entity is still in
+ * service, we already know which is the best candidate for next
+ * service among the other active entitities in the same parent
+ * entity. We can then quickly compare the timestamps of the
+ * in-service entity with those of such best candidate.
+ *
+ * All fields are protected by the lock of the containing bfqd.
  */
 struct bfq_sched_data {
 	/* entity in service */

View File

@@ -188,21 +188,23 @@ static bool bfq_update_parent_budget(struct bfq_entity *next_in_service)
 /*
  * This function tells whether entity stops being a candidate for next
- * service, according to the following logic.
+ * service, according to the restrictive definition of the field
+ * next_in_service. In particular, this function is invoked for an
+ * entity that is about to be set in service.
  *
- * This function is invoked for an entity that is about to be set in
- * service. If such an entity is a queue, then the entity is no longer
- * a candidate for next service (i.e, a candidate entity to serve
- * after the in-service entity is expired). The function then returns
- * true.
+ * If entity is a queue, then the entity is no longer a candidate for
+ * next service according to the that definition, because entity is
+ * about to become the in-service queue. This function then returns
+ * true if entity is a queue.
  *
- * In contrast, the entity could stil be a candidate for next service
- * if it is not a queue, and has more than one child. In fact, even if
- * one of its children is about to be set in service, other children
- * may still be the next to serve. As a consequence, a non-queue
- * entity is not a candidate for next-service only if it has only one
- * child. And only if this condition holds, then the function returns
- * true for a non-queue entity.
+ * In contrast, entity could still be a candidate for next service if
+ * it is not a queue, and has more than one active child. In fact,
+ * even if one of its children is about to be set in service, other
+ * active children may still be the next to serve, for the parent
+ * entity, even according to the above definition. As a consequence, a
+ * non-queue entity is not a candidate for next-service only if it has
+ * only one active child. And only if this condition holds, then this
+ * function returns true for a non-queue entity.
  */
 static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
 {
@@ -213,6 +215,18 @@ static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
 	bfqg = container_of(entity, struct bfq_group, entity);

+	/*
+	 * The field active_entities does not always contain the
+	 * actual number of active children entities: it happens to
+	 * not account for the in-service entity in case the latter is
+	 * removed from its active tree (which may get done after
+	 * invoking the function bfq_no_longer_next_in_service in
+	 * bfq_get_next_queue). Fortunately, here, i.e., while
+	 * bfq_no_longer_next_in_service is not yet completed in
+	 * bfq_get_next_queue, bfq_active_extract has not yet been
+	 * invoked, and thus active_entities still coincides with the
+	 * actual number of active entities.
+	 */
 	if (bfqg->active_entities == 1)
 		return true;
@@ -954,7 +968,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
  * one of its children receives a new request.
  *
  * Basically, this function updates the timestamps of entity and
- * inserts entity into its active tree, ater possible extracting it
+ * inserts entity into its active tree, ater possibly extracting it
  * from its idle tree.
  */
 static void __bfq_activate_entity(struct bfq_entity *entity,
@@ -1048,7 +1062,7 @@ static void __bfq_requeue_entity(struct bfq_entity *entity)
 		entity->start = entity->finish;
 		/*
 		 * In addition, if the entity had more than one child
-		 * when set in service, then was not extracted from
+		 * when set in service, then it was not extracted from
 		 * the active tree. This implies that the position of
 		 * the entity in the active tree may need to be
 		 * changed now, because we have just updated the start
@@ -1056,9 +1070,8 @@ static void __bfq_requeue_entity(struct bfq_entity *entity)
 		 * time in a moment (the requeueing is then, more
 		 * precisely, a repositioning in this case). To
 		 * implement this repositioning, we: 1) dequeue the
-		 * entity here, 2) update the finish time and
-		 * requeue the entity according to the new
-		 * timestamps below.
+		 * entity here, 2) update the finish time and requeue
+		 * the entity according to the new timestamps below.
 		 */
 		if (entity->tree)
 			bfq_active_extract(st, entity);
@@ -1105,9 +1118,10 @@ static void __bfq_activate_requeue_entity(struct bfq_entity *entity,
 /**
- * bfq_activate_entity - activate or requeue an entity representing a bfq_queue,
- *			 and activate, requeue or reposition all ancestors
- *			 for which such an update becomes necessary.
+ * bfq_activate_requeue_entity - activate or requeue an entity representing a
+ *				 bfq_queue, and activate, requeue or reposition
+ *				 all ancestors for which such an update becomes
+ *				 necessary.
  * @entity: the entity to activate.
  * @non_blocking_wait_rq: true if this entity was waiting for a request
  * @requeue: true if this is a requeue, which implies that bfqq is
@@ -1135,9 +1149,9 @@ static void bfq_activate_requeue_entity(struct bfq_entity *entity,
  * @ins_into_idle_tree: if false, the entity will not be put into the
  *			idle tree.
  *
- * Deactivates an entity, independently from its previous state. Must
+ * Deactivates an entity, independently of its previous state. Must
  * be invoked only if entity is on a service tree. Extracts the entity
- * from that tree, and if necessary and allowed, puts it on the idle
+ * from that tree, and if necessary and allowed, puts it into the idle
  * tree.
  */
 bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
@@ -1158,8 +1172,10 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
 	st = bfq_entity_service_tree(entity);
 	is_in_service = entity == sd->in_service_entity;

-	if (is_in_service)
+	if (is_in_service) {
 		bfq_calc_finish(entity, entity->service);
+		sd->in_service_entity = NULL;
+	}

 	if (entity->tree == &st->active)
 		bfq_active_extract(st, entity);
@@ -1177,7 +1193,7 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
 /**
  * bfq_deactivate_entity - deactivate an entity representing a bfq_queue.
  * @entity: the entity to deactivate.
- * @ins_into_idle_tree: true if the entity can be put on the idle tree
+ * @ins_into_idle_tree: true if the entity can be put into the idle tree
  */
 static void bfq_deactivate_entity(struct bfq_entity *entity,
 				  bool ins_into_idle_tree,
@@ -1208,16 +1224,29 @@ static void bfq_deactivate_entity(struct bfq_entity *entity,
 		 */
 		bfq_update_next_in_service(sd, NULL);

-		if (sd->next_in_service)
+		if (sd->next_in_service || sd->in_service_entity) {
 			/*
-			 * The parent entity is still backlogged,
-			 * because next_in_service is not NULL. So, no
-			 * further upwards deactivation must be
-			 * performed. Yet, next_in_service has
-			 * changed. Then the schedule does need to be
-			 * updated upwards.
+			 * The parent entity is still active, because
+			 * either next_in_service or in_service_entity
+			 * is not NULL. So, no further upwards
+			 * deactivation must be performed. Yet,
+			 * next_in_service has changed. Then the
+			 * schedule does need to be updated upwards.
+			 *
+			 * NOTE If in_service_entity is not NULL, then
+			 * next_in_service may happen to be NULL,
+			 * although the parent entity is evidently
+			 * active. This happens if 1) the entity
+			 * pointed by in_service_entity is the only
+			 * active entity in the parent entity, and 2)
+			 * according to the definition of
+			 * next_in_service, the in_service_entity
+			 * cannot be considered as
+			 * next_in_service. See the comments on the
+			 * definition of next_in_service for details.
 			 */
 			break;
+		}

 		/*
 		 * If we get here, then the parent is no more
@@ -1494,47 +1523,34 @@ struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
 		/*
 		 * If entity is no longer a candidate for next
-		 * service, then we extract it from its active tree,
-		 * for the following reason. To further boost the
-		 * throughput in some special case, BFQ needs to know
-		 * which is the next candidate entity to serve, while
-		 * there is already an entity in service. In this
-		 * respect, to make it easy to compute/update the next
-		 * candidate entity to serve after the current
-		 * candidate has been set in service, there is a case
-		 * where it is necessary to extract the current
-		 * candidate from its service tree. Such a case is
-		 * when the entity just set in service cannot be also
-		 * a candidate for next service. Details about when
-		 * this conditions holds are reported in the comments
-		 * on the function bfq_no_longer_next_in_service()
-		 * invoked below.
+		 * service, then it must be extracted from its active
+		 * tree, so as to make sure that it won't be
+		 * considered when computing next_in_service. See the
+		 * comments on the function
+		 * bfq_no_longer_next_in_service() for details.
 		 */
 		if (bfq_no_longer_next_in_service(entity))
 			bfq_active_extract(bfq_entity_service_tree(entity),
 					   entity);

 		/*
-		 * For the same reason why we may have just extracted
-		 * entity from its active tree, we may need to update
-		 * next_in_service for the sched_data of entity too,
-		 * regardless of whether entity has been extracted.
-		 * In fact, even if entity has not been extracted, a
-		 * descendant entity may get extracted. Such an event
-		 * would cause a change in next_in_service for the
-		 * level of the descendant entity, and thus possibly
-		 * back to upper levels.
+		 * Even if entity is not to be extracted according to
+		 * the above check, a descendant entity may get
+		 * extracted in one of the next iterations of this
+		 * loop. Such an event could cause a change in
+		 * next_in_service for the level of the descendant
+		 * entity, and thus possibly back to this level.
 		 *
-		 * We cannot perform the resulting needed update
-		 * before the end of this loop, because, to know which
-		 * is the correct next-to-serve candidate entity for
-		 * each level, we need first to find the leaf entity
-		 * to set in service. In fact, only after we know
-		 * which is the next-to-serve leaf entity, we can
-		 * discover whether the parent entity of the leaf
-		 * entity becomes the next-to-serve, and so on.
+		 * However, we cannot perform the resulting needed
+		 * update of next_in_service for this level before the
+		 * end of the whole loop, because, to know which is
+		 * the correct next-to-serve candidate entity for each
+		 * level, we need first to find the leaf entity to set
+		 * in service. In fact, only after we know which is
+		 * the next-to-serve leaf entity, we can discover
+		 * whether the parent entity of the leaf entity
+		 * becomes the next-to-serve, and so on.
 		 */
 	}

 	bfqq = bfq_entity_to_bfqq(entity);

View File

@@ -301,11 +301,12 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 	struct elevator_queue *e = q->elevator;
 	struct request *rq;
 	unsigned int tag;
+	struct blk_mq_ctx *local_ctx = NULL;

 	blk_queue_enter_live(q);
 	data->q = q;
 	if (likely(!data->ctx))
-		data->ctx = blk_mq_get_ctx(q);
+		data->ctx = local_ctx = blk_mq_get_ctx(q);
 	if (likely(!data->hctx))
 		data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
 	if (op & REQ_NOWAIT)
@@ -324,6 +325,10 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 	tag = blk_mq_get_tag(data);
 	if (tag == BLK_MQ_TAG_FAIL) {
+		if (local_ctx) {
+			blk_mq_put_ctx(local_ctx);
+			data->ctx = NULL;
+		}
 		blk_queue_exit(q);
 		return NULL;
 	}
@@ -356,12 +361,12 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
 	rq = blk_mq_get_request(q, NULL, op, &alloc_data);

-	blk_mq_put_ctx(alloc_data.ctx);
-	blk_queue_exit(q);
-
 	if (!rq)
 		return ERR_PTR(-EWOULDBLOCK);

+	blk_mq_put_ctx(alloc_data.ctx);
+	blk_queue_exit(q);
+
 	rq->__data_len = 0;
 	rq->__sector = (sector_t) -1;
 	rq->bio = rq->biotail = NULL;
@@ -407,11 +412,11 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 	rq = blk_mq_get_request(q, NULL, op, &alloc_data);

-	blk_queue_exit(q);
 	if (!rq)
 		return ERR_PTR(-EWOULDBLOCK);
+	blk_queue_exit(q);

 	return rq;
 }
 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
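
The shape of this fix is the usual "release only what you acquired" error
path: blk_mq_get_request() now remembers whether it looked up the ctx itself
(local_ctx) and drops exactly that reference when tag allocation fails, while
the callers put the ctx only after a successful allocation. A generic sketch
of the pattern, with hypothetical names:

    int prepare(struct alloc_data *data)
    {
        struct ctx *local_ctx = NULL;

        if (!data->ctx)
            data->ctx = local_ctx = acquire_ctx();   /* reference we own */

        if (allocate_tag(data) < 0) {
            if (local_ctx) {                /* release only our own ref */
                release_ctx(local_ctx);
                data->ctx = NULL;           /* avoid a double put by caller */
            }
            return -1;
        }
        return 0;                   /* caller releases data->ctx when done */
    }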

View File

@@ -1492,7 +1492,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
 #ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
 	print_once = true;
 #endif
-	pr_notice("random: %s called from %pF with crng_init=%d\n",
+	pr_notice("random: %s called from %pS with crng_init=%d\n",
 		  func_name, caller, crng_init);
 }

View File

@@ -883,10 +883,7 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
 	if (ret)
 		return ret;

-	memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE);
-	memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE);
-
-	for (i = 0; i < ARRAY_SIZE(istate.state); i++) {
+	for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
 		if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
 		    ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
 			ctx->base.needs_inv = true;
@@ -894,6 +891,9 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
 		}
 	}

+	memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE);
+	memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE);
+
 	return 0;
 }

View File

@@ -983,7 +983,7 @@ config I2C_UNIPHIER_F
 config I2C_VERSATILE
 	tristate "ARM Versatile/Realview I2C bus support"
-	depends on ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || COMPILE_TEST
+	depends on ARCH_MPS2 || ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || COMPILE_TEST
 	select I2C_ALGOBIT
 	help
 	  Say yes if you want to support the I2C serial bus on ARMs Versatile

View File

@@ -298,6 +298,9 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
 	}

 	acpi_speed = i2c_acpi_find_bus_speed(&pdev->dev);
+	/* Some broken DSTDs use 1MiHz instead of 1MHz */
+	if (acpi_speed == 1048576)
+		acpi_speed = 1000000;
 	/*
 	 * Find bus speed from the "clock-frequency" device property, ACPI
 	 * or by using fast mode if neither is set.
@@ -319,7 +322,8 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
 	if (dev->clk_freq != 100000 && dev->clk_freq != 400000
 	    && dev->clk_freq != 1000000 && dev->clk_freq != 3400000) {
 		dev_err(&pdev->dev,
-			"Only 100kHz, 400kHz, 1MHz and 3.4MHz supported");
+			"%d Hz is unsupported, only 100kHz, 400kHz, 1MHz and 3.4MHz are supported\n",
+			dev->clk_freq);
 		ret = -EINVAL;
 		goto exit_reset;
 	}

View File

@@ -230,6 +230,16 @@ void i2c_acpi_register_devices(struct i2c_adapter *adap)
 		dev_warn(&adap->dev, "failed to enumerate I2C slaves\n");
 }

+const struct acpi_device_id *
+i2c_acpi_match_device(const struct acpi_device_id *matches,
+		      struct i2c_client *client)
+{
+	if (!(client && matches))
+		return NULL;
+
+	return acpi_match_device(matches, &client->dev);
+}
+
 static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level,
 					 void *data, void **return_value)
 {
@@ -289,7 +299,7 @@ u32 i2c_acpi_find_bus_speed(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(i2c_acpi_find_bus_speed);

-static int i2c_acpi_match_adapter(struct device *dev, void *data)
+static int i2c_acpi_find_match_adapter(struct device *dev, void *data)
 {
 	struct i2c_adapter *adapter = i2c_verify_adapter(dev);

@@ -299,7 +309,7 @@ static int i2c_acpi_match_adapter(struct device *dev, void *data)
 	return ACPI_HANDLE(dev) == (acpi_handle)data;
 }

-static int i2c_acpi_match_device(struct device *dev, void *data)
+static int i2c_acpi_find_match_device(struct device *dev, void *data)
 {
 	return ACPI_COMPANION(dev) == data;
 }
@@ -309,7 +319,7 @@ static struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle)
 	struct device *dev;

 	dev = bus_find_device(&i2c_bus_type, NULL, handle,
-			      i2c_acpi_match_adapter);
+			      i2c_acpi_find_match_adapter);
 	return dev ? i2c_verify_adapter(dev) : NULL;
 }

@@ -317,7 +327,8 @@ static struct i2c_client *i2c_acpi_find_client_by_adev(struct acpi_device *adev)
 {
 	struct device *dev;

-	dev = bus_find_device(&i2c_bus_type, NULL, adev, i2c_acpi_match_device);
+	dev = bus_find_device(&i2c_bus_type, NULL, adev,
+			      i2c_acpi_find_match_device);
 	return dev ? i2c_verify_client(dev) : NULL;
 }

View File

@@ -357,6 +357,7 @@ static int i2c_device_probe(struct device *dev)
 	 * Tree match table entry is supplied for the probing device.
 	 */
 	if (!driver->id_table &&
+	    !i2c_acpi_match_device(dev->driver->acpi_match_table, client) &&
 	    !i2c_of_match_device(dev->driver->of_match_table, client))
 		return -ENODEV;

View File

@@ -31,9 +31,18 @@ int i2c_check_addr_validity(unsigned addr, unsigned short flags);
 int i2c_check_7bit_addr_validity_strict(unsigned short addr);

 #ifdef CONFIG_ACPI
+const struct acpi_device_id *
+i2c_acpi_match_device(const struct acpi_device_id *matches,
+		      struct i2c_client *client);
 void i2c_acpi_register_devices(struct i2c_adapter *adap);
 #else /* CONFIG_ACPI */
 static inline void i2c_acpi_register_devices(struct i2c_adapter *adap) { }
+static inline const struct acpi_device_id *
+i2c_acpi_match_device(const struct acpi_device_id *matches,
+		      struct i2c_client *client)
+{
+	return NULL;
+}
 #endif /* CONFIG_ACPI */
 extern struct notifier_block i2c_acpi_notifier;

View File

@@ -83,7 +83,7 @@ config I2C_MUX_PINCTRL
 	  different sets of pins at run-time.

 	  This driver can also be built as a module. If so, the module will be
-	  called pinctrl-i2cmux.
+	  called i2c-mux-pinctrl.

 config I2C_MUX_REG
 	tristate "Register-based I2C multiplexer"

View File

@@ -61,6 +61,7 @@ struct addr_req {
 	void (*callback)(int status, struct sockaddr *src_addr,
 			 struct rdma_dev_addr *addr, void *context);
 	unsigned long timeout;
+	struct delayed_work work;
 	int status;
 	u32 seq;
 };
@@ -295,7 +296,7 @@ int rdma_translate_ip(const struct sockaddr *addr,
 }
 EXPORT_SYMBOL(rdma_translate_ip);

-static void set_timeout(unsigned long time)
+static void set_timeout(struct delayed_work *delayed_work, unsigned long time)
 {
 	unsigned long delay;
@@ -303,7 +304,7 @@ static void set_timeout(unsigned long time)
 	if ((long)delay < 0)
 		delay = 0;

-	mod_delayed_work(addr_wq, &work, delay);
+	mod_delayed_work(addr_wq, delayed_work, delay);
 }

 static void queue_req(struct addr_req *req)
@@ -318,8 +319,7 @@ static void queue_req(struct addr_req *req)
 		list_add(&req->list, &temp_req->list);

-	if (req_list.next == &req->list)
-		set_timeout(req->timeout);
+	set_timeout(&req->work, req->timeout);
 	mutex_unlock(&lock);
 }
@@ -574,6 +574,37 @@ static int addr_resolve(struct sockaddr *src_in,
 	return ret;
 }

+static void process_one_req(struct work_struct *_work)
+{
+	struct addr_req *req;
+	struct sockaddr *src_in, *dst_in;
+
+	mutex_lock(&lock);
+	req = container_of(_work, struct addr_req, work.work);
+
+	if (req->status == -ENODATA) {
+		src_in = (struct sockaddr *)&req->src_addr;
+		dst_in = (struct sockaddr *)&req->dst_addr;
+		req->status = addr_resolve(src_in, dst_in, req->addr,
+					   true, req->seq);
+		if (req->status && time_after_eq(jiffies, req->timeout)) {
+			req->status = -ETIMEDOUT;
+		} else if (req->status == -ENODATA) {
+			/* requeue the work for retrying again */
+			set_timeout(&req->work, req->timeout);
+			mutex_unlock(&lock);
+			return;
+		}
+	}
+	list_del(&req->list);
+	mutex_unlock(&lock);
+
+	req->callback(req->status, (struct sockaddr *)&req->src_addr,
+		      req->addr, req->context);
+	put_client(req->client);
+	kfree(req);
+}
+
 static void process_req(struct work_struct *work)
 {
 	struct addr_req *req, *temp_req;
@@ -591,20 +622,23 @@ static void process_req(struct work_struct *work)
 					     true, req->seq);
 			if (req->status && time_after_eq(jiffies, req->timeout))
 				req->status = -ETIMEDOUT;
-			else if (req->status == -ENODATA)
+			else if (req->status == -ENODATA) {
+				set_timeout(&req->work, req->timeout);
 				continue;
+			}
 		}
 		list_move_tail(&req->list, &done_list);
 	}

-	if (!list_empty(&req_list)) {
-		req = list_entry(req_list.next, struct addr_req, list);
-		set_timeout(req->timeout);
-	}
 	mutex_unlock(&lock);

 	list_for_each_entry_safe(req, temp_req, &done_list, list) {
 		list_del(&req->list);
+		/* It is safe to cancel other work items from this work item
+		 * because at a time there can be only one work item running
+		 * with this single threaded work queue.
+		 */
+		cancel_delayed_work(&req->work);
 		req->callback(req->status, (struct sockaddr *) &req->src_addr,
 			      req->addr, req->context);
 		put_client(req->client);
@@ -647,6 +681,7 @@ int rdma_resolve_ip(struct rdma_addr_client *client,
 	req->context = context;
 	req->client = client;
 	atomic_inc(&client->refcount);
+	INIT_DELAYED_WORK(&req->work, process_one_req);
 	req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq);

 	req->status = addr_resolve(src_in, dst_in, addr, true, req->seq);
@@ -701,7 +736,7 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr)
 			req->status = -ECANCELED;
 			req->timeout = jiffies;
 			list_move(&req->list, &req_list);
-			set_timeout(req->timeout);
+			set_timeout(&req->work, req->timeout);
 			break;
 		}
 	}
@@ -807,9 +842,8 @@ static int netevent_callback(struct notifier_block *self, unsigned long event,
 	if (event == NETEVENT_NEIGH_UPDATE) {
 		struct neighbour *neigh = ctx;

-		if (neigh->nud_state & NUD_VALID) {
-			set_timeout(jiffies);
-		}
+		if (neigh->nud_state & NUD_VALID)
+			set_timeout(&work, jiffies);
 	}
 	return 0;
 }
@@ -820,7 +854,7 @@ static struct notifier_block nb = {
 int addr_init(void)
 {
-	addr_wq = alloc_workqueue("ib_addr", WQ_MEM_RECLAIM, 0);
+	addr_wq = alloc_ordered_workqueue("ib_addr", WQ_MEM_RECLAIM);
 	if (!addr_wq)
 		return -ENOMEM;
View File

@@ -1153,7 +1153,7 @@ ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
 			    int out_len)
 {
 	struct ib_uverbs_resize_cq	cmd;
-	struct ib_uverbs_resize_cq_resp	resp;
+	struct ib_uverbs_resize_cq_resp	resp = {};
 	struct ib_udata                 udata;
 	struct ib_cq			*cq;
 	int				ret = -EINVAL;

View File

@@ -250,6 +250,7 @@ void ib_uverbs_release_file(struct kref *ref)
 	if (atomic_dec_and_test(&file->device->refcount))
 		ib_uverbs_comp_dev(file->device);

+	kobject_put(&file->device->kobj);
 	kfree(file);
 }

@@ -917,7 +918,6 @@ err:
 static int ib_uverbs_close(struct inode *inode, struct file *filp)
 {
 	struct ib_uverbs_file *file = filp->private_data;
-	struct ib_uverbs_device *dev = file->device;

 	mutex_lock(&file->cleanup_mutex);
 	if (file->ucontext) {
@@ -939,7 +939,6 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp)
 			 ib_uverbs_release_async_event_file);
 	kref_put(&file->ref, ib_uverbs_release_file);
-	kobject_put(&dev->kobj);

 	return 0;
 }

View File

@@ -895,7 +895,6 @@ static const struct {
 } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
 	[IB_QPS_RESET] = {
 		[IB_QPS_RESET] = { .valid = 1 },
-		[IB_QPS_ERR] = { .valid = 1 },
 		[IB_QPS_INIT] = {
 			.valid = 1,
 			.req_param = {

View File

@@ -733,7 +733,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
 			continue;

 		free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd);
-		if (IS_ERR(free_mr->mr_free_qp[i])) {
+		if (!free_mr->mr_free_qp[i]) {
 			dev_err(dev, "Create loop qp failed!\n");
 			goto create_lp_qp_failed;
 		}

View File

@@ -939,7 +939,7 @@ static int mlx5_ib_mr_initiator_pfault_handler(
 	if (qp->ibqp.qp_type != IB_QPT_RC) {
 		av = *wqe;
-		if (av->dqp_dct & be32_to_cpu(MLX5_WQE_AV_EXT))
+		if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV))
 			*wqe += sizeof(struct mlx5_av);
 		else
 			*wqe += sizeof(struct mlx5_base_av);

View File

@@ -336,6 +336,7 @@ struct ipoib_dev_priv {
 	unsigned long flags;

 	struct rw_semaphore vlan_rwsem;
+	struct mutex mcast_mutex;

 	struct rb_root path_tree;
 	struct list_head path_list;

View File

@@ -511,7 +511,6 @@ static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
 	case IB_CM_REQ_RECEIVED:
 		return ipoib_cm_req_handler(cm_id, event);
 	case IB_CM_DREQ_RECEIVED:
-		p = cm_id->context;
 		ib_send_cm_drep(cm_id, NULL, 0);
 		/* Fall through */
 	case IB_CM_REJ_RECEIVED:

View File

@@ -52,7 +52,8 @@ static const struct ipoib_stats ipoib_gstrings_stats[] = {
 	IPOIB_NETDEV_STAT(tx_bytes),
 	IPOIB_NETDEV_STAT(tx_errors),
 	IPOIB_NETDEV_STAT(rx_dropped),
-	IPOIB_NETDEV_STAT(tx_dropped)
+	IPOIB_NETDEV_STAT(tx_dropped),
+	IPOIB_NETDEV_STAT(multicast),
 };

 #define IPOIB_GLOBAL_STATS_LEN ARRAY_SIZE(ipoib_gstrings_stats)

View File

@@ -256,6 +256,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 	++dev->stats.rx_packets;
 	dev->stats.rx_bytes += skb->len;
+	if (skb->pkt_type == PACKET_MULTICAST)
+		dev->stats.multicast++;

 	skb->dev = dev;
 	if ((dev->features & NETIF_F_RXCSUM) &&
@@ -709,6 +711,27 @@ static int recvs_pending(struct net_device *dev)
 	return pending;
 }

+static void check_qp_movement_and_print(struct ipoib_dev_priv *priv,
+					struct ib_qp *qp,
+					enum ib_qp_state new_state)
+{
+	struct ib_qp_attr qp_attr;
+	struct ib_qp_init_attr query_init_attr;
+	int ret;
+
+	ret = ib_query_qp(qp, &qp_attr, IB_QP_STATE, &query_init_attr);
+	if (ret) {
+		ipoib_warn(priv, "%s: Failed to query QP\n", __func__);
+		return;
+	}
+	/* print according to the new-state and the previous state.*/
+	if (new_state == IB_QPS_ERR && qp_attr.qp_state == IB_QPS_RESET)
+		ipoib_dbg(priv, "Failed modify QP, IB_QPS_RESET to IB_QPS_ERR, acceptable\n");
+	else
+		ipoib_warn(priv, "Failed to modify QP to state: %d from state: %d\n",
+			   new_state, qp_attr.qp_state);
+}
+
 int ipoib_ib_dev_stop_default(struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
@@ -728,7 +751,7 @@ int ipoib_ib_dev_stop_default(struct net_device *dev)
 	 */
 	qp_attr.qp_state = IB_QPS_ERR;
 	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
-		ipoib_warn(priv, "Failed to modify QP to ERROR state\n");
+		check_qp_movement_and_print(priv, priv->qp, IB_QPS_ERR);

 	/* Wait for all sends and receives to complete */
 	begin = jiffies;
View File

@@ -1560,6 +1560,7 @@ static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
 	int i, wait_flushed = 0;

 	init_completion(&priv->ntbl.flushed);
+	set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);

 	spin_lock_irqsave(&priv->lock, flags);
@@ -1604,7 +1605,6 @@ static void ipoib_neigh_hash_uninit(struct net_device *dev)
 	ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n");
 	init_completion(&priv->ntbl.deleted);
-	set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);

 	/* Stop GC if called at init fail need to cancel work */
 	stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
@@ -1847,6 +1847,7 @@ static const struct net_device_ops ipoib_netdev_ops_vf = {
 	.ndo_tx_timeout		 = ipoib_timeout,
 	.ndo_set_rx_mode	 = ipoib_set_mcast_list,
 	.ndo_get_iflink		 = ipoib_get_iflink,
+	.ndo_get_stats64	 = ipoib_get_stats,
 };

 void ipoib_setup_common(struct net_device *dev)
@@ -1877,6 +1878,7 @@ static void ipoib_build_priv(struct net_device *dev)
 	priv->dev = dev;
 	spin_lock_init(&priv->lock);
 	init_rwsem(&priv->vlan_rwsem);
+	mutex_init(&priv->mcast_mutex);

 	INIT_LIST_HEAD(&priv->path_list);
 	INIT_LIST_HEAD(&priv->child_intfs);
@@ -2173,14 +2175,14 @@ static struct net_device *ipoib_add_port(const char *format,
 	priv->dev->dev_id = port - 1;

 	result = ib_query_port(hca, port, &attr);
-	if (!result)
-		priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
-	else {
+	if (result) {
 		printk(KERN_WARNING "%s: ib_query_port %d failed\n",
 		       hca->name, port);
 		goto device_init_failed;
 	}

+	priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
+
 	/* MTU will be reset when mcast join happens */
 	priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
 	priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
@@ -2211,12 +2213,14 @@ static struct net_device *ipoib_add_port(const char *format,
 		printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
 		       hca->name, port, result);
 		goto device_init_failed;
-	} else
-		memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));
+	}
+
+	memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw,
+	       sizeof(union ib_gid));
 	set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);

 	result = ipoib_dev_init(priv->dev, hca, port);
-	if (result < 0) {
+	if (result) {
 		printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
 		       hca->name, port, result);
 		goto device_init_failed;
@@ -2365,6 +2369,7 @@ static int __init ipoib_init_module(void)
 	ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE);
 #ifdef CONFIG_INFINIBAND_IPOIB_CM
 	ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
+	ipoib_max_conn_qp = max(ipoib_max_conn_qp, 0);
 #endif

 	/*

View File

@@ -684,15 +684,10 @@ void ipoib_mcast_start_thread(struct net_device *dev)
 int ipoib_mcast_stop_thread(struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
-	unsigned long flags;

 	ipoib_dbg_mcast(priv, "stopping multicast thread\n");

-	spin_lock_irqsave(&priv->lock, flags);
-	cancel_delayed_work(&priv->mcast_task);
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	flush_workqueue(priv->wq);
+	cancel_delayed_work_sync(&priv->mcast_task);

 	return 0;
 }
@@ -748,6 +743,14 @@ void ipoib_mcast_remove_list(struct list_head *remove_list)
 {
 	struct ipoib_mcast *mcast, *tmcast;

+	/*
+	 * make sure the in-flight joins have finished before we attempt
+	 * to leave
+	 */
+	list_for_each_entry_safe(mcast, tmcast, remove_list, list)
+		if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
+			wait_for_completion(&mcast->done);
+
 	list_for_each_entry_safe(mcast, tmcast, remove_list, list) {
 		ipoib_mcast_leave(mcast->dev, mcast);
 		ipoib_mcast_free(mcast);
@@ -838,6 +841,7 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
 	struct ipoib_mcast *mcast, *tmcast;
 	unsigned long flags;

+	mutex_lock(&priv->mcast_mutex);
 	ipoib_dbg_mcast(priv, "flushing multicast list\n");

 	spin_lock_irqsave(&priv->lock, flags);
@@ -856,15 +860,8 @@ void ipoib_mcast_dev_flush(struct net_device *dev)

 	spin_unlock_irqrestore(&priv->lock, flags);

-	/*
-	 * make sure the in-flight joins have finished before we attempt
-	 * to leave
-	 */
-	list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
-		if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
-			wait_for_completion(&mcast->done);
-
 	ipoib_mcast_remove_list(&remove_list);
+	mutex_unlock(&priv->mcast_mutex);
 }

 static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast)
@@ -982,14 +979,6 @@ void ipoib_mcast_restart_task(struct work_struct *work)
 	netif_addr_unlock(dev);
 	local_irq_restore(flags);

-	/*
-	 * make sure the in-flight joins have finished before we attempt
-	 * to leave
-	 */
-	list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
-		if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
-			wait_for_completion(&mcast->done);
-
 	ipoib_mcast_remove_list(&remove_list);

 	/*

View File

@@ -44,7 +44,6 @@ struct procdata {
 	char log_name[15];	/* log filename */
 	struct log_data *log_head, *log_tail;	/* head and tail for queue */
 	int if_used;		/* open count for interface */
-	int volatile del_lock;	/* lock for delete operations */
 	unsigned char logtmp[LOG_MAX_LINELEN];
 	wait_queue_head_t rd_queue;
 };
@@ -102,7 +101,6 @@ put_log_buffer(hysdn_card *card, char *cp)
 {
 	struct log_data *ib;
 	struct procdata *pd = card->proclog;
-	int i;
 	unsigned long flags;

 	if (!pd)
@@ -126,21 +124,21 @@ put_log_buffer(hysdn_card *card, char *cp)
 	else
 		pd->log_tail->next = ib;	/* follows existing messages */
 	pd->log_tail = ib;	/* new tail */
-	i = pd->del_lock++;	/* get lock state */
-	spin_unlock_irqrestore(&card->hysdn_lock, flags);

 	/* delete old entrys */
-	if (!i)
-		while (pd->log_head->next) {
-			if ((pd->log_head->usage_cnt <= 0) &&
-			    (pd->log_head->next->usage_cnt <= 0)) {
-				ib = pd->log_head;
-				pd->log_head = pd->log_head->next;
-				kfree(ib);
-			} else
-				break;
-		}		/* pd->log_head->next */
-	pd->del_lock--;	/* release lock level */
+	while (pd->log_head->next) {
+		if ((pd->log_head->usage_cnt <= 0) &&
+		    (pd->log_head->next->usage_cnt <= 0)) {
+			ib = pd->log_head;
+			pd->log_head = pd->log_head->next;
+			kfree(ib);
+		} else {
+			break;
+		}
+	}		/* pd->log_head->next */
+	spin_unlock_irqrestore(&card->hysdn_lock, flags);
 	wake_up_interruptible(&(pd->rd_queue));		/* announce new entry */
 }				/* put_log_buffer */

View File

@@ -1201,7 +1201,7 @@ static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand,
 	 * tRC < 30ns implies EDO mode. This controller does not support this
 	 * mode.
 	 */
-	if (conf->timings.sdr.tRC_min < 30)
+	if (conf->timings.sdr.tRC_min < 30000)
 		return -ENOTSUPP;

 	atmel_smc_cs_conf_init(smcconf);

View File

@@ -945,6 +945,7 @@ struct atmel_pmecc *devm_atmel_pmecc_get(struct device *userdev)
 	 */
 	struct platform_device *pdev = to_platform_device(userdev);
 	const struct atmel_pmecc_caps *caps;
+	const struct of_device_id *match;

 	/* No PMECC engine available. */
 	if (!of_property_read_bool(userdev->of_node,
@@ -953,21 +954,11 @@ struct atmel_pmecc *devm_atmel_pmecc_get(struct device *userdev)
 	caps = &at91sam9g45_caps;

-	/*
-	 * Try to find the NFC subnode and extract the associated caps
-	 * from there.
-	 */
-	np = of_find_compatible_node(userdev->of_node, NULL,
-				     "atmel,sama5d3-nfc");
-	if (np) {
-		const struct of_device_id *match;
-
-		match = of_match_node(atmel_pmecc_legacy_match, np);
-		if (match && match->data)
-			caps = match->data;
-
-		of_node_put(np);
-	}
+	/* Find the caps associated to the NAND dev node. */
+	match = of_match_node(atmel_pmecc_legacy_match,
+			      userdev->of_node);
+	if (match && match->data)
+		caps = match->data;

 	pmecc = atmel_pmecc_create(pdev, caps, 1, 2);
 }

View File

@@ -65,8 +65,14 @@ static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
 	if (!section) {
 		oobregion->offset = 0;
-		oobregion->length = 4;
+		if (mtd->oobsize == 16)
+			oobregion->length = 4;
+		else
+			oobregion->length = 3;
 	} else {
+		if (mtd->oobsize == 8)
+			return -ERANGE;
+
 		oobregion->offset = 6;
 		oobregion->length = ecc->total - 4;
 	}
@@ -1125,7 +1131,9 @@ static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
 	 * Ensure the timing mode has been changed on the chip side
 	 * before changing timings on the controller side.
 	 */
-	if (chip->onfi_version) {
+	if (chip->onfi_version &&
+	    (le16_to_cpu(chip->onfi_params.opt_cmd) &
+	     ONFI_OPT_CMD_SET_GET_FEATURES)) {
 		u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
 			chip->onfi_timing_mode_default,
 		};
@@ -2741,7 +2749,6 @@ static int nand_write_page_syndrome(struct mtd_info *mtd,
  * @buf: the data to write
  * @oob_required: must write chip->oob_poi to OOB
  * @page: page number to write
- * @cached: cached programming
  * @raw: use _raw version of write_page
  */
 static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,

View File

@@ -311,9 +311,9 @@ int onfi_init_data_interface(struct nand_chip *chip,
 		struct nand_sdr_timings *timings = &iface->timings.sdr;

 		/* microseconds -> picoseconds */
-		timings->tPROG_max = 1000000UL * le16_to_cpu(params->t_prog);
-		timings->tBERS_max = 1000000UL * le16_to_cpu(params->t_bers);
-		timings->tR_max = 1000000UL * le16_to_cpu(params->t_r);
+		timings->tPROG_max = 1000000ULL * le16_to_cpu(params->t_prog);
+		timings->tBERS_max = 1000000ULL * le16_to_cpu(params->t_bers);
+		timings->tR_max = 1000000ULL * le16_to_cpu(params->t_r);

 		/* nanoseconds -> picoseconds */
 		timings->tCCS_min = 1000UL * le16_to_cpu(params->t_ccs);
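
The UL to ULL suffix change is an overflow fix, not a style tweak: t_prog,
t_bers and t_r are 16-bit values, so the product can reach
1,000,000 * 65,535 = 65,535,000,000, well above the 4,294,967,295 maximum of a
32-bit unsigned long. On a 32-bit machine the UL arithmetic silently wraps:

    /* with a 32-bit unsigned long: */
    unsigned long      wrapped = 1000000UL  * 65535; /* 65535000000 % 2^32 = 1110490560 */
    unsigned long long exact   = 1000000ULL * 65535; /* 65535000000, as intended */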

View File

@@ -1728,6 +1728,10 @@ static int sunxi_nfc_setup_data_interface(struct mtd_info *mtd, int csline,
 	 */
 	chip->clk_rate = NSEC_PER_SEC / min_clk_period;
 	real_clk_rate = clk_round_rate(nfc->mod_clk, chip->clk_rate);
+	if (real_clk_rate <= 0) {
+		dev_err(nfc->dev, "Unable to round clk %lu\n", chip->clk_rate);
+		return -EINVAL;
+	}

 	/*
 	 * ONFI specification 3.1, paragraph 4.15.2 dictates that EDO data

View File

@@ -625,6 +625,44 @@ static void mt7530_adjust_link(struct dsa_switch *ds, int port,
 		 * all finished.
 		 */
 		mt7623_pad_clk_setup(ds);
+	} else {
+		u16 lcl_adv = 0, rmt_adv = 0;
+		u8 flowctrl;
+		u32 mcr = PMCR_USERP_LINK | PMCR_FORCE_MODE;
+
+		switch (phydev->speed) {
+		case SPEED_1000:
+			mcr |= PMCR_FORCE_SPEED_1000;
+			break;
+		case SPEED_100:
+			mcr |= PMCR_FORCE_SPEED_100;
+			break;
+		};
+
+		if (phydev->link)
+			mcr |= PMCR_FORCE_LNK;
+
+		if (phydev->duplex) {
+			mcr |= PMCR_FORCE_FDX;
+
+			if (phydev->pause)
+				rmt_adv = LPA_PAUSE_CAP;
+			if (phydev->asym_pause)
+				rmt_adv |= LPA_PAUSE_ASYM;
+
+			if (phydev->advertising & ADVERTISED_Pause)
+				lcl_adv |= ADVERTISE_PAUSE_CAP;
+			if (phydev->advertising & ADVERTISED_Asym_Pause)
+				lcl_adv |= ADVERTISE_PAUSE_ASYM;
+
+			flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+
+			if (flowctrl & FLOW_CTRL_TX)
+				mcr |= PMCR_TX_FC_EN;
+			if (flowctrl & FLOW_CTRL_RX)
+				mcr |= PMCR_RX_FC_EN;
+		}
+
+		mt7530_write(priv, MT7530_PMCR_P(port), mcr);
 	}
 }
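
In the new else branch the port MAC is forced from the PHY state: the
negotiated speed, duplex and link map onto the PMCR_FORCE_* bits, and pause
frames are resolved from the local and remote pause advertisements with the
standard MII helper. A hedged sketch of just that resolution step (the
wrapper function is illustrative, not part of the driver):

    #include <linux/mii.h>

    static void resolve_pause(u16 lcl_adv, u16 rmt_adv,
                              bool *tx_pause, bool *rx_pause)
    {
        u8 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);

        *tx_pause = !!(flowctrl & FLOW_CTRL_TX);
        *rx_pause = !!(flowctrl & FLOW_CTRL_RX);
    }

    /* e.g. lcl_adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM with
     * rmt_adv = LPA_PAUSE_CAP resolves to both TX and RX pause enabled,
     * since both ends advertise symmetric pause. */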

View File

@@ -151,6 +151,7 @@ enum mt7530_stp_state {
 #define  PMCR_TX_FC_EN			BIT(5)
 #define  PMCR_RX_FC_EN			BIT(4)
 #define  PMCR_FORCE_SPEED_1000		BIT(3)
+#define  PMCR_FORCE_SPEED_100		BIT(2)
 #define  PMCR_FORCE_FDX			BIT(1)
 #define  PMCR_FORCE_LNK			BIT(0)
 #define  PMCR_COMMON_LINK		(PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \

View File

@ -1785,9 +1785,9 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
xgene_enet_gpiod_get(pdata); xgene_enet_gpiod_get(pdata);
if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) { pdata->clk = devm_clk_get(&pdev->dev, NULL);
pdata->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(pdata->clk)) {
if (IS_ERR(pdata->clk)) { if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) {
/* Abort if the clock is defined but couldn't be /* Abort if the clock is defined but couldn't be
* retrieved. Always abort if the clock is missing on * retrieved. Always abort if the clock is missing on
* DT system as the driver can't cope with this case. * DT system as the driver can't cope with this case.

View File

@ -2368,6 +2368,7 @@ static int b44_init_one(struct ssb_device *sdev,
bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE); bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
spin_lock_init(&bp->lock); spin_lock_init(&bp->lock);
u64_stats_init(&bp->hw_stats.syncp);
bp->rx_pending = B44_DEF_RX_RING_PENDING; bp->rx_pending = B44_DEF_RX_RING_PENDING;
bp->tx_pending = B44_DEF_TX_RING_PENDING; bp->tx_pending = B44_DEF_TX_RING_PENDING;

View File

@ -111,6 +111,7 @@ static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static void send_request_unmap(struct ibmvnic_adapter *, u8); static void send_request_unmap(struct ibmvnic_adapter *, u8);
static void send_login(struct ibmvnic_adapter *adapter); static void send_login(struct ibmvnic_adapter *adapter);
static void send_cap_queries(struct ibmvnic_adapter *adapter); static void send_cap_queries(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter); static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_init(struct ibmvnic_adapter *); static int ibmvnic_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *); static void release_crq_queue(struct ibmvnic_adapter *);
@ -676,6 +677,7 @@ static int ibmvnic_login(struct net_device *netdev)
struct ibmvnic_adapter *adapter = netdev_priv(netdev); struct ibmvnic_adapter *adapter = netdev_priv(netdev);
unsigned long timeout = msecs_to_jiffies(30000); unsigned long timeout = msecs_to_jiffies(30000);
struct device *dev = &adapter->vdev->dev; struct device *dev = &adapter->vdev->dev;
int rc;
do { do {
if (adapter->renegotiate) { if (adapter->renegotiate) {
@ -689,6 +691,18 @@ static int ibmvnic_login(struct net_device *netdev)
dev_err(dev, "Capabilities query timeout\n"); dev_err(dev, "Capabilities query timeout\n");
return -1; return -1;
} }
rc = init_sub_crqs(adapter);
if (rc) {
dev_err(dev,
"Initialization of SCRQ's failed\n");
return -1;
}
rc = init_sub_crq_irqs(adapter);
if (rc) {
dev_err(dev,
"Initialization of SCRQ's irqs failed\n");
return -1;
}
} }
reinit_completion(&adapter->init_done); reinit_completion(&adapter->init_done);
@ -3106,7 +3120,6 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
*req_value, *req_value,
(long int)be64_to_cpu(crq->request_capability_rsp. (long int)be64_to_cpu(crq->request_capability_rsp.
number), name); number), name);
release_sub_crqs(adapter);
*req_value = be64_to_cpu(crq->request_capability_rsp.number); *req_value = be64_to_cpu(crq->request_capability_rsp.number);
ibmvnic_send_req_caps(adapter, 1); ibmvnic_send_req_caps(adapter, 1);
return; return;

View File

@ -1113,6 +1113,8 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
if (!tx_ring->tx_bi) if (!tx_ring->tx_bi)
goto err; goto err;
u64_stats_init(&tx_ring->syncp);
/* round up to nearest 4K */ /* round up to nearest 4K */
tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
/* add u32 for head writeback, align after this takes care of /* add u32 for head writeback, align after this takes care of

View File

@ -2988,6 +2988,8 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
if (!tx_ring->tx_buffer_info) if (!tx_ring->tx_buffer_info)
goto err; goto err;
u64_stats_init(&tx_ring->syncp);
/* round up to nearest 4K */ /* round up to nearest 4K */
tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096); tx_ring->size = ALIGN(tx_ring->size, 4096);
@ -3046,6 +3048,8 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
if (!rx_ring->rx_buffer_info) if (!rx_ring->rx_buffer_info)
goto err; goto err;
u64_stats_init(&rx_ring->syncp);
/* Round up to nearest 4K */ /* Round up to nearest 4K */
rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096); rx_ring->size = ALIGN(rx_ring->size, 4096);
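Several hunks in this merge (b44, i40e, ixgbevf, nfp, netvsc) add the same missing `u64_stats_init()` call. On 32-bit SMP builds the sync object is a real seqcount, and lockdep warns if it is used before being initialized. A kernel-context sketch of the intended pattern; `ring_stats` is a hypothetical struct, not from any of these drivers:

```c
#include <linux/u64_stats_sync.h>

struct ring_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;	/* needs u64_stats_init() */
};

static void ring_stats_setup(struct ring_stats *rs)
{
	rs->packets = 0;
	rs->bytes = 0;
	u64_stats_init(&rs->syncp);	/* before any update/fetch */
}

static void ring_stats_add(struct ring_stats *rs, unsigned int len)
{
	u64_stats_update_begin(&rs->syncp);	/* writer side */
	rs->packets++;
	rs->bytes += len;
	u64_stats_update_end(&rs->syncp);
}
```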

View File

@ -223,6 +223,7 @@ static void mlx4_en_get_wol(struct net_device *netdev,
struct ethtool_wolinfo *wol) struct ethtool_wolinfo *wol)
{ {
struct mlx4_en_priv *priv = netdev_priv(netdev); struct mlx4_en_priv *priv = netdev_priv(netdev);
struct mlx4_caps *caps = &priv->mdev->dev->caps;
int err = 0; int err = 0;
u64 config = 0; u64 config = 0;
u64 mask; u64 mask;
@ -235,24 +236,24 @@ static void mlx4_en_get_wol(struct net_device *netdev,
mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 : mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
MLX4_DEV_CAP_FLAG_WOL_PORT2; MLX4_DEV_CAP_FLAG_WOL_PORT2;
if (!(priv->mdev->dev->caps.flags & mask)) { if (!(caps->flags & mask)) {
wol->supported = 0; wol->supported = 0;
wol->wolopts = 0; wol->wolopts = 0;
return; return;
} }
if (caps->wol_port[priv->port])
wol->supported = WAKE_MAGIC;
else
wol->supported = 0;
err = mlx4_wol_read(priv->mdev->dev, &config, priv->port); err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
if (err) { if (err) {
en_err(priv, "Failed to get WoL information\n"); en_err(priv, "Failed to get WoL information\n");
return; return;
} }
if (config & MLX4_EN_WOL_MAGIC) if ((config & MLX4_EN_WOL_ENABLED) && (config & MLX4_EN_WOL_MAGIC))
wol->supported = WAKE_MAGIC;
else
wol->supported = 0;
if (config & MLX4_EN_WOL_ENABLED)
wol->wolopts = WAKE_MAGIC; wol->wolopts = WAKE_MAGIC;
else else
wol->wolopts = 0; wol->wolopts = 0;

View File

@ -574,16 +574,21 @@ static inline __wsum get_fixed_vlan_csum(__wsum hw_checksum,
* header, the HW adds it. To address that, we are subtracting the pseudo * header, the HW adds it. To address that, we are subtracting the pseudo
* header checksum from the checksum value provided by the HW. * header checksum from the checksum value provided by the HW.
*/ */
static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb, static int get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
struct iphdr *iph) struct iphdr *iph)
{ {
__u16 length_for_csum = 0; __u16 length_for_csum = 0;
__wsum csum_pseudo_header = 0; __wsum csum_pseudo_header = 0;
__u8 ipproto = iph->protocol;
if (unlikely(ipproto == IPPROTO_SCTP))
return -1;
length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2)); length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2));
csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr, csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr,
length_for_csum, iph->protocol, 0); length_for_csum, ipproto, 0);
skb->csum = csum_sub(hw_checksum, csum_pseudo_header); skb->csum = csum_sub(hw_checksum, csum_pseudo_header);
return 0;
} }
#if IS_ENABLED(CONFIG_IPV6) #if IS_ENABLED(CONFIG_IPV6)
@ -594,17 +599,20 @@ static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb, static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
struct ipv6hdr *ipv6h) struct ipv6hdr *ipv6h)
{ {
__u8 nexthdr = ipv6h->nexthdr;
__wsum csum_pseudo_hdr = 0; __wsum csum_pseudo_hdr = 0;
if (unlikely(ipv6h->nexthdr == IPPROTO_FRAGMENT || if (unlikely(nexthdr == IPPROTO_FRAGMENT ||
ipv6h->nexthdr == IPPROTO_HOPOPTS)) nexthdr == IPPROTO_HOPOPTS ||
nexthdr == IPPROTO_SCTP))
return -1; return -1;
hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr)); hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(nexthdr));
csum_pseudo_hdr = csum_partial(&ipv6h->saddr, csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0); sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len); csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len);
csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ntohs(ipv6h->nexthdr)); csum_pseudo_hdr = csum_add(csum_pseudo_hdr,
(__force __wsum)htons(nexthdr));
skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr); skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr);
skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0)); skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0));
@ -627,11 +635,10 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
} }
if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4)) if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4))
get_fixed_ipv4_csum(hw_checksum, skb, hdr); return get_fixed_ipv4_csum(hw_checksum, skb, hdr);
#if IS_ENABLED(CONFIG_IPV6) #if IS_ENABLED(CONFIG_IPV6)
else if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6)) if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
if (unlikely(get_fixed_ipv6_csum(hw_checksum, skb, hdr))) return get_fixed_ipv6_csum(hw_checksum, skb, hdr);
return -1;
#endif #endif
return 0; return 0;
} }
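The reworked helpers punt SCTP back to software because the hardware sum is a 16-bit one's-complement value while SCTP uses CRC32c; for TCP/UDP the driver only has to strip the pseudo-header contribution back out of the hardware result. A small userspace sketch of that subtraction, using toy values and 16-bit folded sums rather than the kernel's `__wsum`:

```c
#include <stdint.h>
#include <stdio.h>

/* 16-bit one's-complement add, as used by IP/TCP/UDP checksums. */
static uint16_t csum_add16(uint16_t a, uint16_t b)
{
	uint32_t s = (uint32_t)a + b;
	return (uint16_t)((s & 0xffff) + (s >> 16));
}

/* Subtracting is adding the one's complement. */
static uint16_t csum_sub16(uint16_t a, uint16_t b)
{
	return csum_add16(a, (uint16_t)~b);
}

int main(void)
{
	uint16_t payload = 0x1234;	/* sum over the payload only  */
	uint16_t pseudo  = 0x0fed;	/* pseudo-header contribution */
	uint16_t hw = csum_add16(payload, pseudo);	/* HW reports this */

	/* The driver recovers the payload-only sum: */
	printf("recovered=%#x expected=%#x\n",
	       csum_sub16(hw, pseudo), payload);
	return 0;
}
```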

View File

@ -159,8 +159,9 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
[32] = "Loopback source checks support", [32] = "Loopback source checks support",
[33] = "RoCEv2 support", [33] = "RoCEv2 support",
[34] = "DMFS Sniffer support (UC & MC)", [34] = "DMFS Sniffer support (UC & MC)",
[35] = "QinQ VST mode support", [35] = "Diag counters per port",
[36] = "sl to vl mapping table change event support" [36] = "QinQ VST mode support",
[37] = "sl to vl mapping table change event support",
}; };
int i; int i;
@ -764,6 +765,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET 0x3e #define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET 0x3e
#define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f #define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f
#define QUERY_DEV_CAP_EXT_FLAGS_OFFSET 0x40 #define QUERY_DEV_CAP_EXT_FLAGS_OFFSET 0x40
#define QUERY_DEV_CAP_WOL_OFFSET 0x43
#define QUERY_DEV_CAP_FLAGS_OFFSET 0x44 #define QUERY_DEV_CAP_FLAGS_OFFSET 0x44
#define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48 #define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48
#define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49 #define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49
@ -920,6 +922,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET); MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
dev_cap->flags = flags | (u64)ext_flags << 32; dev_cap->flags = flags | (u64)ext_flags << 32;
MLX4_GET(field, outbox, QUERY_DEV_CAP_WOL_OFFSET);
dev_cap->wol_port[1] = !!(field & 0x20);
dev_cap->wol_port[2] = !!(field & 0x40);
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET); MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
dev_cap->reserved_uars = field >> 4; dev_cap->reserved_uars = field >> 4;
MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET); MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);

View File

@ -129,6 +129,7 @@ struct mlx4_dev_cap {
u32 dmfs_high_rate_qpn_range; u32 dmfs_high_rate_qpn_range;
struct mlx4_rate_limit_caps rl_caps; struct mlx4_rate_limit_caps rl_caps;
struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1]; struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1];
bool wol_port[MLX4_MAX_PORTS + 1];
}; };
struct mlx4_func_cap { struct mlx4_func_cap {

View File

@ -424,6 +424,8 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.stat_rate_support = dev_cap->stat_rate_support; dev->caps.stat_rate_support = dev_cap->stat_rate_support;
dev->caps.max_gso_sz = dev_cap->max_gso_sz; dev->caps.max_gso_sz = dev_cap->max_gso_sz;
dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz; dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
dev->caps.wol_port[1] = dev_cap->wol_port[1];
dev->caps.wol_port[2] = dev_cap->wol_port[2];
/* Save uar page shift */ /* Save uar page shift */
if (!mlx4_is_slave(dev)) { if (!mlx4_is_slave(dev)) {

View File

@ -626,8 +626,8 @@ static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
orig_dev); orig_dev);
if (WARN_ON(!bridge_port)) if (!bridge_port)
return -EINVAL; return 0;
err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port, err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
MLXSW_SP_FLOOD_TYPE_UC, MLXSW_SP_FLOOD_TYPE_UC,
@ -711,8 +711,8 @@ static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port,
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
orig_dev); orig_dev);
if (WARN_ON(!bridge_port)) if (!bridge_port)
return -EINVAL; return 0;
if (!bridge_port->bridge_device->multicast_enabled) if (!bridge_port->bridge_device->multicast_enabled)
return 0; return 0;
@ -1283,15 +1283,15 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
return 0; return 0;
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
if (WARN_ON(!bridge_port)) if (!bridge_port)
return -EINVAL; return 0;
bridge_device = bridge_port->bridge_device; bridge_device = bridge_port->bridge_device;
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port, mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
bridge_device, bridge_device,
mdb->vid); mdb->vid);
if (WARN_ON(!mlxsw_sp_port_vlan)) if (!mlxsw_sp_port_vlan)
return -EINVAL; return 0;
fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid); fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
@ -1407,15 +1407,15 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
int err = 0; int err = 0;
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
if (WARN_ON(!bridge_port)) if (!bridge_port)
return -EINVAL; return 0;
bridge_device = bridge_port->bridge_device; bridge_device = bridge_port->bridge_device;
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port, mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
bridge_device, bridge_device,
mdb->vid); mdb->vid);
if (WARN_ON(!mlxsw_sp_port_vlan)) if (!mlxsw_sp_port_vlan)
return -EINVAL; return 0;
fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid); fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
@ -1974,6 +1974,17 @@ static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
} }
static void mlxsw_sp_mids_fini(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_mid *mid, *tmp;
list_for_each_entry_safe(mid, tmp, &mlxsw_sp->bridge->mids_list, list) {
list_del(&mid->list);
clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
kfree(mid);
}
}
int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp) int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{ {
struct mlxsw_sp_bridge *bridge; struct mlxsw_sp_bridge *bridge;
@ -1996,7 +2007,7 @@ int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp) void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{ {
mlxsw_sp_fdb_fini(mlxsw_sp); mlxsw_sp_fdb_fini(mlxsw_sp);
WARN_ON(!list_empty(&mlxsw_sp->bridge->mids_list)); mlxsw_sp_mids_fini(mlxsw_sp);
WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list)); WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
kfree(mlxsw_sp->bridge); kfree(mlxsw_sp->bridge);
} }

View File

@ -513,6 +513,7 @@ nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring,
tx_ring->idx = idx; tx_ring->idx = idx;
tx_ring->r_vec = r_vec; tx_ring->r_vec = r_vec;
tx_ring->is_xdp = is_xdp; tx_ring->is_xdp = is_xdp;
u64_stats_init(&tx_ring->r_vec->tx_sync);
tx_ring->qcidx = tx_ring->idx * nn->stride_tx; tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx); tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
@ -532,6 +533,7 @@ nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
rx_ring->idx = idx; rx_ring->idx = idx;
rx_ring->r_vec = r_vec; rx_ring->r_vec = r_vec;
u64_stats_init(&rx_ring->r_vec->rx_sync);
rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx; rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx); rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);

View File

@ -253,7 +253,7 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32); size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL); p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL); p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr) if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
goto err; goto err;
return 0; return 0;

View File

@ -31,9 +31,18 @@
#include "cpts.h" #include "cpts.h"
#define CPTS_SKB_TX_WORK_TIMEOUT 1 /* jiffies */
struct cpts_skb_cb_data {
unsigned long tmo;
};
#define cpts_read32(c, r) readl_relaxed(&c->reg->r) #define cpts_read32(c, r) readl_relaxed(&c->reg->r)
#define cpts_write32(c, v, r) writel_relaxed(v, &c->reg->r) #define cpts_write32(c, v, r) writel_relaxed(v, &c->reg->r)
static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
u16 ts_seqid, u8 ts_msgtype);
static int event_expired(struct cpts_event *event) static int event_expired(struct cpts_event *event)
{ {
return time_after(jiffies, event->tmo); return time_after(jiffies, event->tmo);
@ -77,6 +86,47 @@ static int cpts_purge_events(struct cpts *cpts)
return removed ? 0 : -1; return removed ? 0 : -1;
} }
static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
{
struct sk_buff *skb, *tmp;
u16 seqid;
u8 mtype;
bool found = false;
mtype = (event->high >> MESSAGE_TYPE_SHIFT) & MESSAGE_TYPE_MASK;
seqid = (event->high >> SEQUENCE_ID_SHIFT) & SEQUENCE_ID_MASK;
/* no need to grab txq.lock as access is always done under cpts->lock */
skb_queue_walk_safe(&cpts->txq, skb, tmp) {
struct skb_shared_hwtstamps ssh;
unsigned int class = ptp_classify_raw(skb);
struct cpts_skb_cb_data *skb_cb =
(struct cpts_skb_cb_data *)skb->cb;
if (cpts_match(skb, class, seqid, mtype)) {
u64 ns = timecounter_cyc2time(&cpts->tc, event->low);
memset(&ssh, 0, sizeof(ssh));
ssh.hwtstamp = ns_to_ktime(ns);
skb_tstamp_tx(skb, &ssh);
found = true;
__skb_unlink(skb, &cpts->txq);
dev_consume_skb_any(skb);
dev_dbg(cpts->dev, "match tx timestamp mtype %u seqid %04x\n",
mtype, seqid);
} else if (time_after(jiffies, skb_cb->tmo)) {
/* timeout any expired skbs over 1s */
dev_dbg(cpts->dev,
"expiring tx timestamp mtype %u seqid %04x\n",
mtype, seqid);
__skb_unlink(skb, &cpts->txq);
dev_consume_skb_any(skb);
}
}
return found;
}
/* /*
* Returns zero if matching event type was found. * Returns zero if matching event type was found.
*/ */
@ -101,9 +151,15 @@ static int cpts_fifo_read(struct cpts *cpts, int match)
event->low = lo; event->low = lo;
type = event_type(event); type = event_type(event);
switch (type) { switch (type) {
case CPTS_EV_TX:
if (cpts_match_tx_ts(cpts, event)) {
/* if the new event matches an existing skb,
* then don't queue it
*/
break;
}
case CPTS_EV_PUSH: case CPTS_EV_PUSH:
case CPTS_EV_RX: case CPTS_EV_RX:
case CPTS_EV_TX:
list_del_init(&event->list); list_del_init(&event->list);
list_add_tail(&event->list, &cpts->events); list_add_tail(&event->list, &cpts->events);
break; break;
@ -224,6 +280,24 @@ static int cpts_ptp_enable(struct ptp_clock_info *ptp,
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
static long cpts_overflow_check(struct ptp_clock_info *ptp)
{
struct cpts *cpts = container_of(ptp, struct cpts, info);
unsigned long delay = cpts->ov_check_period;
struct timespec64 ts;
unsigned long flags;
spin_lock_irqsave(&cpts->lock, flags);
ts = ns_to_timespec64(timecounter_read(&cpts->tc));
if (!skb_queue_empty(&cpts->txq))
delay = CPTS_SKB_TX_WORK_TIMEOUT;
spin_unlock_irqrestore(&cpts->lock, flags);
pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec);
return (long)delay;
}
static struct ptp_clock_info cpts_info = { static struct ptp_clock_info cpts_info = {
.owner = THIS_MODULE, .owner = THIS_MODULE,
.name = "CTPS timer", .name = "CTPS timer",
@ -236,18 +310,9 @@ static struct ptp_clock_info cpts_info = {
.gettime64 = cpts_ptp_gettime, .gettime64 = cpts_ptp_gettime,
.settime64 = cpts_ptp_settime, .settime64 = cpts_ptp_settime,
.enable = cpts_ptp_enable, .enable = cpts_ptp_enable,
.do_aux_work = cpts_overflow_check,
}; };
static void cpts_overflow_check(struct work_struct *work)
{
struct timespec64 ts;
struct cpts *cpts = container_of(work, struct cpts, overflow_work.work);
cpts_ptp_gettime(&cpts->info, &ts);
pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec);
schedule_delayed_work(&cpts->overflow_work, cpts->ov_check_period);
}
static int cpts_match(struct sk_buff *skb, unsigned int ptp_class, static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
u16 ts_seqid, u8 ts_msgtype) u16 ts_seqid, u8 ts_msgtype)
{ {
@ -299,7 +364,7 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
return 0; return 0;
spin_lock_irqsave(&cpts->lock, flags); spin_lock_irqsave(&cpts->lock, flags);
cpts_fifo_read(cpts, CPTS_EV_PUSH); cpts_fifo_read(cpts, -1);
list_for_each_safe(this, next, &cpts->events) { list_for_each_safe(this, next, &cpts->events) {
event = list_entry(this, struct cpts_event, list); event = list_entry(this, struct cpts_event, list);
if (event_expired(event)) { if (event_expired(event)) {
@ -317,6 +382,19 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
break; break;
} }
} }
if (ev_type == CPTS_EV_TX && !ns) {
struct cpts_skb_cb_data *skb_cb =
(struct cpts_skb_cb_data *)skb->cb;
/* Not found, add frame to queue for processing later.
* The periodic FIFO check will handle this.
*/
skb_get(skb);
/* get the timestamp for timeouts */
skb_cb->tmo = jiffies + msecs_to_jiffies(100);
__skb_queue_tail(&cpts->txq, skb);
ptp_schedule_worker(cpts->clock, 0);
}
spin_unlock_irqrestore(&cpts->lock, flags); spin_unlock_irqrestore(&cpts->lock, flags);
return ns; return ns;
@ -358,6 +436,7 @@ int cpts_register(struct cpts *cpts)
{ {
int err, i; int err, i;
skb_queue_head_init(&cpts->txq);
INIT_LIST_HEAD(&cpts->events); INIT_LIST_HEAD(&cpts->events);
INIT_LIST_HEAD(&cpts->pool); INIT_LIST_HEAD(&cpts->pool);
for (i = 0; i < CPTS_MAX_EVENTS; i++) for (i = 0; i < CPTS_MAX_EVENTS; i++)
@ -378,7 +457,7 @@ int cpts_register(struct cpts *cpts)
} }
cpts->phc_index = ptp_clock_index(cpts->clock); cpts->phc_index = ptp_clock_index(cpts->clock);
schedule_delayed_work(&cpts->overflow_work, cpts->ov_check_period); ptp_schedule_worker(cpts->clock, cpts->ov_check_period);
return 0; return 0;
err_ptp: err_ptp:
@ -392,14 +471,15 @@ void cpts_unregister(struct cpts *cpts)
if (WARN_ON(!cpts->clock)) if (WARN_ON(!cpts->clock))
return; return;
cancel_delayed_work_sync(&cpts->overflow_work);
ptp_clock_unregister(cpts->clock); ptp_clock_unregister(cpts->clock);
cpts->clock = NULL; cpts->clock = NULL;
cpts_write32(cpts, 0, int_enable); cpts_write32(cpts, 0, int_enable);
cpts_write32(cpts, 0, control); cpts_write32(cpts, 0, control);
/* Drop all queued packets */
skb_queue_purge(&cpts->txq);
clk_disable(cpts->refclk); clk_disable(cpts->refclk);
} }
EXPORT_SYMBOL_GPL(cpts_unregister); EXPORT_SYMBOL_GPL(cpts_unregister);
@ -476,7 +556,6 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs,
cpts->dev = dev; cpts->dev = dev;
cpts->reg = (struct cpsw_cpts __iomem *)regs; cpts->reg = (struct cpsw_cpts __iomem *)regs;
spin_lock_init(&cpts->lock); spin_lock_init(&cpts->lock);
INIT_DELAYED_WORK(&cpts->overflow_work, cpts_overflow_check);
ret = cpts_of_parse(cpts, node); ret = cpts_of_parse(cpts, node);
if (ret) if (ret)

View File

@ -119,13 +119,13 @@ struct cpts {
u32 cc_mult; /* for the nominal frequency */ u32 cc_mult; /* for the nominal frequency */
struct cyclecounter cc; struct cyclecounter cc;
struct timecounter tc; struct timecounter tc;
struct delayed_work overflow_work;
int phc_index; int phc_index;
struct clk *refclk; struct clk *refclk;
struct list_head events; struct list_head events;
struct list_head pool; struct list_head pool;
struct cpts_event pool_data[CPTS_MAX_EVENTS]; struct cpts_event pool_data[CPTS_MAX_EVENTS];
unsigned long ov_check_period; unsigned long ov_check_period;
struct sk_buff_head txq;
}; };
void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb); void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);
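The cpts change replaces the free-running overflow workqueue with the new PTP aux worker and adds a holding queue for TX skbs whose timestamp event has not yet popped out of the FIFO. Note also that the new `case CPTS_EV_TX:` intentionally falls through to the queueing code when no waiting skb matched, so unmatched events are still kept on `cpts->events`. The deferral itself is small; a kernel-context sketch, with the `tmo` deadline living in `skb->cb` exactly as `cpts_skb_cb_data` does above:

```c
/* Sketch, assuming kernel context and that the caller holds cpts->lock. */
static void defer_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
	struct cpts_skb_cb_data *skb_cb = (struct cpts_skb_cb_data *)skb->cb;

	skb_get(skb);					/* keep the skb alive */
	skb_cb->tmo = jiffies + msecs_to_jiffies(100);	/* expiry budget */
	__skb_queue_tail(&cpts->txq, skb);
	ptp_schedule_worker(cpts->clock, 0);		/* poll the FIFO soon */
}
```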

View File

@ -364,7 +364,7 @@ static int gtp_dev_init(struct net_device *dev)
gtp->dev = dev; gtp->dev = dev;
dev->tstats = alloc_percpu(struct pcpu_sw_netstats); dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats) if (!dev->tstats)
return -ENOMEM; return -ENOMEM;

View File

@ -776,7 +776,8 @@ struct netvsc_device {
u32 max_chn; u32 max_chn;
u32 num_chn; u32 num_chn;
refcount_t sc_offered; atomic_t open_chn;
wait_queue_head_t subchan_open;
struct rndis_device *extension; struct rndis_device *extension;

View File

@ -76,6 +76,7 @@ static struct netvsc_device *alloc_net_device(void)
net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
init_completion(&net_device->channel_init_wait); init_completion(&net_device->channel_init_wait);
init_waitqueue_head(&net_device->subchan_open);
return net_device; return net_device;
} }
@ -1268,6 +1269,8 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
nvchan->channel = device->channel; nvchan->channel = device->channel;
nvchan->net_device = net_device; nvchan->net_device = net_device;
u64_stats_init(&nvchan->tx_stats.syncp);
u64_stats_init(&nvchan->rx_stats.syncp);
} }
/* Enable NAPI handler before init callbacks */ /* Enable NAPI handler before init callbacks */

View File

@ -1050,8 +1050,8 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
else else
netif_napi_del(&nvchan->napi); netif_napi_del(&nvchan->napi);
if (refcount_dec_and_test(&nvscdev->sc_offered)) atomic_inc(&nvscdev->open_chn);
complete(&nvscdev->channel_init_wait); wake_up(&nvscdev->subchan_open);
} }
struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
@ -1091,8 +1091,6 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
net_device->max_chn = 1; net_device->max_chn = 1;
net_device->num_chn = 1; net_device->num_chn = 1;
refcount_set(&net_device->sc_offered, 0);
net_device->extension = rndis_device; net_device->extension = rndis_device;
rndis_device->ndev = net; rndis_device->ndev = net;
@ -1216,6 +1214,7 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
rndis_device->ind_table[i] = ethtool_rxfh_indir_default(i, rndis_device->ind_table[i] = ethtool_rxfh_indir_default(i,
net_device->num_chn); net_device->num_chn);
atomic_set(&net_device->open_chn, 1);
num_rss_qs = net_device->num_chn - 1; num_rss_qs = net_device->num_chn - 1;
if (num_rss_qs == 0) if (num_rss_qs == 0)
return net_device; return net_device;
@ -1229,7 +1228,6 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
} }
} }
refcount_set(&net_device->sc_offered, num_rss_qs);
vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open); vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
init_packet = &net_device->channel_init_pkt; init_packet = &net_device->channel_init_pkt;
@ -1246,15 +1244,19 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
if (ret) if (ret)
goto out; goto out;
wait_for_completion(&net_device->channel_init_wait);
if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) { if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
ret = -ENODEV; ret = -ENODEV;
goto out; goto out;
} }
wait_for_completion(&net_device->channel_init_wait);
net_device->num_chn = 1 + net_device->num_chn = 1 +
init_packet->msg.v5_msg.subchn_comp.num_subchannels; init_packet->msg.v5_msg.subchn_comp.num_subchannels;
/* wait for all sub channels to open */
wait_event(net_device->subchan_open,
atomic_read(&net_device->open_chn) == net_device->num_chn);
/* ignore failures from setting rss parameters, still have channels */ /* ignore failures from setting rss parameters, still have channels */
rndis_filter_set_rss_param(rndis_device, netvsc_hash_key, rndis_filter_set_rss_param(rndis_device, netvsc_hash_key,
net_device->num_chn); net_device->num_chn);
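The subchannel handshake now counts channels as they actually open instead of counting offers, and the add path sleeps until the count reaches `num_chn`. The pattern reduces to a classic atomic counter plus waitqueue pair; a kernel-context sketch with hypothetical function names:

```c
/* Sketch, assuming kernel context: per-subchannel open callback. */
static void channel_opened(struct netvsc_device *nvdev)
{
	atomic_inc(&nvdev->open_chn);
	wake_up(&nvdev->subchan_open);
}

/* Device-add path: block until every expected channel is open. */
static void wait_subchannels(struct netvsc_device *nvdev)
{
	wait_event(nvdev->subchan_open,
		   atomic_read(&nvdev->open_chn) == nvdev->num_chn);
}
```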

View File

@ -192,7 +192,7 @@ static int ipvlan_init(struct net_device *dev)
netdev_lockdep_set_classes(dev); netdev_lockdep_set_classes(dev);
ipvlan->pcpu_stats = alloc_percpu(struct ipvl_pcpu_stats); ipvlan->pcpu_stats = netdev_alloc_pcpu_stats(struct ipvl_pcpu_stats);
if (!ipvlan->pcpu_stats) if (!ipvlan->pcpu_stats)
return -ENOMEM; return -ENOMEM;

View File

@ -1915,21 +1915,23 @@ static void __ppp_channel_push(struct channel *pch)
spin_unlock(&pch->downl); spin_unlock(&pch->downl);
/* see if there is anything from the attached unit to be sent */ /* see if there is anything from the attached unit to be sent */
if (skb_queue_empty(&pch->file.xq)) { if (skb_queue_empty(&pch->file.xq)) {
read_lock(&pch->upl);
ppp = pch->ppp; ppp = pch->ppp;
if (ppp) if (ppp)
ppp_xmit_process(ppp); __ppp_xmit_process(ppp);
read_unlock(&pch->upl);
} }
} }
static void ppp_channel_push(struct channel *pch) static void ppp_channel_push(struct channel *pch)
{ {
local_bh_disable(); read_lock_bh(&pch->upl);
if (pch->ppp) {
__ppp_channel_push(pch); (*this_cpu_ptr(pch->ppp->xmit_recursion))++;
__ppp_channel_push(pch);
local_bh_enable(); (*this_cpu_ptr(pch->ppp->xmit_recursion))--;
} else {
__ppp_channel_push(pch);
}
read_unlock_bh(&pch->upl);
} }
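Bumping `xmit_recursion` around `__ppp_channel_push()` makes the transmit path re-entrancy safe: if the channel's lower layer loops back into PPP on the same CPU, the consumer side sees a non-zero counter and defers instead of recursing into a deadlock. A sketch of that consumer check, assuming kernel context; this paraphrases how `ppp_xmit_process()` guards itself rather than quoting the function verbatim:

```c
static void xmit_process_guarded(struct ppp *ppp)
{
	local_bh_disable();
	if (*this_cpu_ptr(ppp->xmit_recursion)) {
		/* re-entered on this CPU: bail out instead of recursing */
		local_bh_enable();
		return;
	}
	(*this_cpu_ptr(ppp->xmit_recursion))++;
	__ppp_xmit_process(ppp);
	(*this_cpu_ptr(ppp->xmit_recursion))--;
	local_bh_enable();
}
```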
/* /*

View File

@ -209,6 +209,7 @@ void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value,
int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb, int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
struct asix_rx_fixup_info *rx); struct asix_rx_fixup_info *rx);
int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb); int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb);
void asix_rx_fixup_common_free(struct asix_common_private *dp);
struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
gfp_t flags); gfp_t flags);

View File

@ -75,6 +75,27 @@ void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
value, index, data, size); value, index, data, size);
} }
static void reset_asix_rx_fixup_info(struct asix_rx_fixup_info *rx)
{
/* Reset the variables that have a lifetime outside of
* asix_rx_fixup_internal() so that future processing starts from a
* known set of initial conditions.
*/
if (rx->ax_skb) {
/* Discard any incomplete Ethernet frame in the netdev buffer */
kfree_skb(rx->ax_skb);
rx->ax_skb = NULL;
}
/* Assume the Data header 32-bit word is at the start of the current
* or next URB socket buffer so reset all the state variables.
*/
rx->remaining = 0;
rx->split_head = false;
rx->header = 0;
}
int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb, int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
struct asix_rx_fixup_info *rx) struct asix_rx_fixup_info *rx)
{ {
@ -99,15 +120,7 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
if (size != ((~rx->header >> 16) & 0x7ff)) { if (size != ((~rx->header >> 16) & 0x7ff)) {
netdev_err(dev->net, "asix_rx_fixup() Data Header synchronisation was lost, remaining %d\n", netdev_err(dev->net, "asix_rx_fixup() Data Header synchronisation was lost, remaining %d\n",
rx->remaining); rx->remaining);
if (rx->ax_skb) { reset_asix_rx_fixup_info(rx);
kfree_skb(rx->ax_skb);
rx->ax_skb = NULL;
/* Discard the incomplete netdev Ethernet frame
* and assume the Data header is at the start of
* the current URB socket buffer.
*/
}
rx->remaining = 0;
} }
} }
@ -139,11 +152,13 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
if (size != ((~rx->header >> 16) & 0x7ff)) { if (size != ((~rx->header >> 16) & 0x7ff)) {
netdev_err(dev->net, "asix_rx_fixup() Bad Header Length 0x%x, offset %d\n", netdev_err(dev->net, "asix_rx_fixup() Bad Header Length 0x%x, offset %d\n",
rx->header, offset); rx->header, offset);
reset_asix_rx_fixup_info(rx);
return 0; return 0;
} }
if (size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) { if (size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) {
netdev_dbg(dev->net, "asix_rx_fixup() Bad RX Length %d\n", netdev_dbg(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
size); size);
reset_asix_rx_fixup_info(rx);
return 0; return 0;
} }
@ -168,8 +183,10 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
if (rx->ax_skb) { if (rx->ax_skb) {
skb_put_data(rx->ax_skb, skb->data + offset, skb_put_data(rx->ax_skb, skb->data + offset,
copy_length); copy_length);
if (!rx->remaining) if (!rx->remaining) {
usbnet_skb_return(dev, rx->ax_skb); usbnet_skb_return(dev, rx->ax_skb);
rx->ax_skb = NULL;
}
} }
offset += (copy_length + 1) & 0xfffe; offset += (copy_length + 1) & 0xfffe;
@ -178,6 +195,7 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
if (skb->len != offset) { if (skb->len != offset) {
netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d, %d\n", netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d, %d\n",
skb->len, offset); skb->len, offset);
reset_asix_rx_fixup_info(rx);
return 0; return 0;
} }
@ -192,6 +210,21 @@ int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb)
return asix_rx_fixup_internal(dev, skb, rx); return asix_rx_fixup_internal(dev, skb, rx);
} }
void asix_rx_fixup_common_free(struct asix_common_private *dp)
{
struct asix_rx_fixup_info *rx;
if (!dp)
return;
rx = &dp->rx_fixup_info;
if (rx->ax_skb) {
kfree_skb(rx->ax_skb);
rx->ax_skb = NULL;
}
}
struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
gfp_t flags) gfp_t flags)
{ {

View File

@ -764,6 +764,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf) static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf)
{ {
asix_rx_fixup_common_free(dev->driver_priv);
kfree(dev->driver_priv); kfree(dev->driver_priv);
} }
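The common thread in the asix fix is skb ownership: once `usbnet_skb_return()` hands the assembled frame to the stack, `rx->ax_skb` must be forgotten, and any reassembly state that outlives one URB has to be reset on every error path and freed at unbind, which is what the new `asix_rx_fixup_common_free()` call above does. A kernel-context sketch of the hand-off:

```c
/* Sketch, assuming kernel context: pass the frame up and drop our pointer. */
if (rx->ax_skb) {
	skb_put_data(rx->ax_skb, skb->data + offset, copy_length);
	if (!rx->remaining) {
		usbnet_skb_return(dev, rx->ax_skb);	/* stack owns it now */
		rx->ax_skb = NULL;	/* a later reset must not free it */
	}
}
```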

View File

@ -2367,9 +2367,6 @@ static int lan78xx_reset(struct lan78xx_net *dev)
/* Init LTM */ /* Init LTM */
lan78xx_init_ltm(dev); lan78xx_init_ltm(dev);
dev->net->hard_header_len += TX_OVERHEAD;
dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
if (dev->udev->speed == USB_SPEED_SUPER) { if (dev->udev->speed == USB_SPEED_SUPER) {
buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE; buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE; dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
@ -2855,16 +2852,19 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
return ret; return ret;
} }
dev->net->hard_header_len += TX_OVERHEAD;
dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
/* Init all registers */ /* Init all registers */
ret = lan78xx_reset(dev); ret = lan78xx_reset(dev);
lan78xx_mdio_init(dev); ret = lan78xx_mdio_init(dev);
dev->net->flags |= IFF_MULTICAST; dev->net->flags |= IFF_MULTICAST;
pdata->wol = WAKE_MAGIC; pdata->wol = WAKE_MAGIC;
return 0; return ret;
} }
static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf) static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
@ -3525,11 +3525,11 @@ static int lan78xx_probe(struct usb_interface *intf,
udev = interface_to_usbdev(intf); udev = interface_to_usbdev(intf);
udev = usb_get_dev(udev); udev = usb_get_dev(udev);
ret = -ENOMEM;
netdev = alloc_etherdev(sizeof(struct lan78xx_net)); netdev = alloc_etherdev(sizeof(struct lan78xx_net));
if (!netdev) { if (!netdev) {
dev_err(&intf->dev, "Error: OOM\n"); dev_err(&intf->dev, "Error: OOM\n");
goto out1; ret = -ENOMEM;
goto out1;
} }
/* netdev_printk() needs this */ /* netdev_printk() needs this */
@ -3610,7 +3610,7 @@ static int lan78xx_probe(struct usb_interface *intf,
ret = register_netdev(netdev); ret = register_netdev(netdev);
if (ret != 0) { if (ret != 0) {
netif_err(dev, probe, netdev, "couldn't register the device\n"); netif_err(dev, probe, netdev, "couldn't register the device\n");
goto out2; goto out3;
} }
usb_set_intfdata(intf, dev); usb_set_intfdata(intf, dev);

View File

@ -1175,6 +1175,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */ {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
{QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
{QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */ {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
{QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
{QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
{QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */

View File

@ -623,6 +623,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk,
out: out:
skb_gro_remcsum_cleanup(skb, &grc); skb_gro_remcsum_cleanup(skb, &grc);
skb->remcsum_offload = 0;
NAPI_GRO_CB(skb)->flush |= flush; NAPI_GRO_CB(skb)->flush |= flush;
return pp; return pp;

View File

@ -28,6 +28,7 @@
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/syscalls.h> #include <linux/syscalls.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <uapi/linux/sched/types.h>
#include "ptp_private.h" #include "ptp_private.h"
@ -184,6 +185,19 @@ static void delete_ptp_clock(struct posix_clock *pc)
kfree(ptp); kfree(ptp);
} }
static void ptp_aux_kworker(struct kthread_work *work)
{
struct ptp_clock *ptp = container_of(work, struct ptp_clock,
aux_work.work);
struct ptp_clock_info *info = ptp->info;
long delay;
delay = info->do_aux_work(info);
if (delay >= 0)
kthread_queue_delayed_work(ptp->kworker, &ptp->aux_work, delay);
}
/* public interface */ /* public interface */
struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
@ -217,6 +231,20 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
mutex_init(&ptp->pincfg_mux); mutex_init(&ptp->pincfg_mux);
init_waitqueue_head(&ptp->tsev_wq); init_waitqueue_head(&ptp->tsev_wq);
if (ptp->info->do_aux_work) {
char *worker_name = kasprintf(GFP_KERNEL, "ptp%d", ptp->index);
kthread_init_delayed_work(&ptp->aux_work, ptp_aux_kworker);
ptp->kworker = kthread_create_worker(0, worker_name ?
worker_name : info->name);
kfree(worker_name);
if (IS_ERR(ptp->kworker)) {
err = PTR_ERR(ptp->kworker);
pr_err("failed to create ptp aux_worker %d\n", err);
goto kworker_err;
}
}
err = ptp_populate_pin_groups(ptp); err = ptp_populate_pin_groups(ptp);
if (err) if (err)
goto no_pin_groups; goto no_pin_groups;
@ -259,6 +287,9 @@ no_pps:
no_device: no_device:
ptp_cleanup_pin_groups(ptp); ptp_cleanup_pin_groups(ptp);
no_pin_groups: no_pin_groups:
if (ptp->kworker)
kthread_destroy_worker(ptp->kworker);
kworker_err:
mutex_destroy(&ptp->tsevq_mux); mutex_destroy(&ptp->tsevq_mux);
mutex_destroy(&ptp->pincfg_mux); mutex_destroy(&ptp->pincfg_mux);
ida_simple_remove(&ptp_clocks_map, index); ida_simple_remove(&ptp_clocks_map, index);
@ -274,6 +305,11 @@ int ptp_clock_unregister(struct ptp_clock *ptp)
ptp->defunct = 1; ptp->defunct = 1;
wake_up_interruptible(&ptp->tsev_wq); wake_up_interruptible(&ptp->tsev_wq);
if (ptp->kworker) {
kthread_cancel_delayed_work_sync(&ptp->aux_work);
kthread_destroy_worker(ptp->kworker);
}
/* Release the clock's resources. */ /* Release the clock's resources. */
if (ptp->pps_source) if (ptp->pps_source)
pps_unregister_source(ptp->pps_source); pps_unregister_source(ptp->pps_source);
@ -339,6 +375,12 @@ int ptp_find_pin(struct ptp_clock *ptp,
} }
EXPORT_SYMBOL(ptp_find_pin); EXPORT_SYMBOL(ptp_find_pin);
int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay)
{
return kthread_mod_delayed_work(ptp->kworker, &ptp->aux_work, delay);
}
EXPORT_SYMBOL(ptp_schedule_worker);
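From a driver's point of view the new facility is just two hooks: fill in `.do_aux_work` and the PTP core runs it on a per-clock kthread, re-arming the delayed work whenever the callback returns a non-negative delay; `ptp_schedule_worker()` kicks it on demand. A kernel-context sketch with hypothetical names:

```c
/* Runs on the clock's dedicated kworker, not in atomic context. */
static long my_phc_aux_work(struct ptp_clock_info *info)
{
	/* ... poll hardware, drain the timestamp FIFO ... */
	return HZ;		/* run again in one second */
}

static struct ptp_clock_info my_phc_info = {
	.owner       = THIS_MODULE,
	.name        = "my-phc",
	.do_aux_work = my_phc_aux_work,
};

/* Elsewhere, to make the worker run immediately: */
/*	ptp_schedule_worker(clock, 0); */
```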
/* module operations */ /* module operations */
static void __exit ptp_exit(void) static void __exit ptp_exit(void)

View File

@ -22,6 +22,7 @@
#include <linux/cdev.h> #include <linux/cdev.h>
#include <linux/device.h> #include <linux/device.h>
#include <linux/kthread.h>
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/posix-clock.h> #include <linux/posix-clock.h>
#include <linux/ptp_clock.h> #include <linux/ptp_clock.h>
@ -56,6 +57,8 @@ struct ptp_clock {
struct attribute_group pin_attr_group; struct attribute_group pin_attr_group;
/* 1st entry is a pointer to the real group, 2nd is NULL terminator */ /* 1st entry is a pointer to the real group, 2nd is NULL terminator */
const struct attribute_group *pin_attr_groups[2]; const struct attribute_group *pin_attr_groups[2];
struct kthread_worker *kworker;
struct kthread_delayed_work aux_work;
}; };
/* /*

View File

@ -2512,7 +2512,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
struct rtable *rt = (struct rtable *) dst; struct rtable *rt = (struct rtable *) dst;
__be32 *pkey = &ip_hdr(skb)->daddr; __be32 *pkey = &ip_hdr(skb)->daddr;
if (rt->rt_gateway) if (rt && rt->rt_gateway)
pkey = &rt->rt_gateway; pkey = &rt->rt_gateway;
/* IPv4 */ /* IPv4 */
@ -2523,7 +2523,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
struct rt6_info *rt = (struct rt6_info *) dst; struct rt6_info *rt = (struct rt6_info *) dst;
struct in6_addr *pkey = &ipv6_hdr(skb)->daddr; struct in6_addr *pkey = &ipv6_hdr(skb)->daddr;
if (!ipv6_addr_any(&rt->rt6i_gateway)) if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
pkey = &rt->rt6i_gateway; pkey = &rt->rt6i_gateway;
/* IPv6 */ /* IPv6 */

View File

@ -3198,10 +3198,11 @@ static int query_disk(struct aac_dev *dev, void __user *arg)
return -EBUSY; return -EBUSY;
if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk))) if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk)))
return -EFAULT; return -EFAULT;
if (qd.cnum == -1) if (qd.cnum == -1) {
if (qd.id < 0 || qd.id >= dev->maximum_num_containers)
return -EINVAL;
qd.cnum = qd.id; qd.cnum = qd.id;
else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1)) } else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1)) {
{
if (qd.cnum < 0 || qd.cnum >= dev->maximum_num_containers) if (qd.cnum < 0 || qd.cnum >= dev->maximum_num_containers)
return -EINVAL; return -EINVAL;
qd.instance = dev->scsi_host_ptr->host_no; qd.instance = dev->scsi_host_ptr->host_no;

View File

@ -2624,12 +2624,11 @@ static struct fcoe_transport bnx2fc_transport = {
}; };
/** /**
* bnx2fc_percpu_thread_create - Create a receive thread for an * bnx2fc_cpu_online - Create a receive thread for an online CPU
* online CPU
* *
* @cpu: cpu index for the online cpu * @cpu: cpu index for the online cpu
*/ */
static void bnx2fc_percpu_thread_create(unsigned int cpu) static int bnx2fc_cpu_online(unsigned int cpu)
{ {
struct bnx2fc_percpu_s *p; struct bnx2fc_percpu_s *p;
struct task_struct *thread; struct task_struct *thread;
@ -2639,15 +2638,17 @@ static void bnx2fc_percpu_thread_create(unsigned int cpu)
thread = kthread_create_on_node(bnx2fc_percpu_io_thread, thread = kthread_create_on_node(bnx2fc_percpu_io_thread,
(void *)p, cpu_to_node(cpu), (void *)p, cpu_to_node(cpu),
"bnx2fc_thread/%d", cpu); "bnx2fc_thread/%d", cpu);
if (IS_ERR(thread))
return PTR_ERR(thread);
/* bind thread to the cpu */ /* bind thread to the cpu */
if (likely(!IS_ERR(thread))) { kthread_bind(thread, cpu);
kthread_bind(thread, cpu); p->iothread = thread;
p->iothread = thread; wake_up_process(thread);
wake_up_process(thread); return 0;
}
} }
static void bnx2fc_percpu_thread_destroy(unsigned int cpu) static int bnx2fc_cpu_offline(unsigned int cpu)
{ {
struct bnx2fc_percpu_s *p; struct bnx2fc_percpu_s *p;
struct task_struct *thread; struct task_struct *thread;
@ -2661,7 +2662,6 @@ static void bnx2fc_percpu_thread_destroy(unsigned int cpu)
thread = p->iothread; thread = p->iothread;
p->iothread = NULL; p->iothread = NULL;
/* Free all work in the list */ /* Free all work in the list */
list_for_each_entry_safe(work, tmp, &p->work_list, list) { list_for_each_entry_safe(work, tmp, &p->work_list, list) {
list_del_init(&work->list); list_del_init(&work->list);
@ -2673,20 +2673,6 @@ static void bnx2fc_percpu_thread_destroy(unsigned int cpu)
if (thread) if (thread)
kthread_stop(thread); kthread_stop(thread);
}
static int bnx2fc_cpu_online(unsigned int cpu)
{
printk(PFX "CPU %x online: Create Rx thread\n", cpu);
bnx2fc_percpu_thread_create(cpu);
return 0;
}
static int bnx2fc_cpu_dead(unsigned int cpu)
{
printk(PFX "CPU %x offline: Remove Rx thread\n", cpu);
bnx2fc_percpu_thread_destroy(cpu);
return 0; return 0;
} }
@ -2761,30 +2747,16 @@ static int __init bnx2fc_mod_init(void)
spin_lock_init(&p->fp_work_lock); spin_lock_init(&p->fp_work_lock);
} }
get_online_cpus(); rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/bnx2fc:online",
bnx2fc_cpu_online, bnx2fc_cpu_offline);
for_each_online_cpu(cpu)
bnx2fc_percpu_thread_create(cpu);
rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
"scsi/bnx2fc:online",
bnx2fc_cpu_online, NULL);
if (rc < 0) if (rc < 0)
goto stop_threads; goto stop_thread;
bnx2fc_online_state = rc; bnx2fc_online_state = rc;
cpuhp_setup_state_nocalls(CPUHP_SCSI_BNX2FC_DEAD, "scsi/bnx2fc:dead",
NULL, bnx2fc_cpu_dead);
put_online_cpus();
cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb); cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb);
return 0; return 0;
stop_threads: stop_thread:
for_each_online_cpu(cpu)
bnx2fc_percpu_thread_destroy(cpu);
put_online_cpus();
kthread_stop(l2_thread); kthread_stop(l2_thread);
free_wq: free_wq:
destroy_workqueue(bnx2fc_wq); destroy_workqueue(bnx2fc_wq);
@ -2803,7 +2775,6 @@ static void __exit bnx2fc_mod_exit(void)
struct fcoe_percpu_s *bg; struct fcoe_percpu_s *bg;
struct task_struct *l2_thread; struct task_struct *l2_thread;
struct sk_buff *skb; struct sk_buff *skb;
unsigned int cpu = 0;
/* /*
* NOTE: Since cnic calls register_driver routine rtnl_lock, * NOTE: Since cnic calls register_driver routine rtnl_lock,
@ -2844,16 +2815,7 @@ static void __exit bnx2fc_mod_exit(void)
if (l2_thread) if (l2_thread)
kthread_stop(l2_thread); kthread_stop(l2_thread);
get_online_cpus(); cpuhp_remove_state(bnx2fc_online_state);
/* Destroy per cpu threads */
for_each_online_cpu(cpu) {
bnx2fc_percpu_thread_destroy(cpu);
}
cpuhp_remove_state_nocalls(bnx2fc_online_state);
cpuhp_remove_state_nocalls(CPUHP_SCSI_BNX2FC_DEAD);
put_online_cpus();
destroy_workqueue(bnx2fc_wq); destroy_workqueue(bnx2fc_wq);
/* /*

View File

@ -1008,6 +1008,28 @@ static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
return work; return work;
} }
/* Pending work request completion */
static void bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe)
{
unsigned int cpu = wqe % num_possible_cpus();
struct bnx2fc_percpu_s *fps;
struct bnx2fc_work *work;
fps = &per_cpu(bnx2fc_percpu, cpu);
spin_lock_bh(&fps->fp_work_lock);
if (fps->iothread) {
work = bnx2fc_alloc_work(tgt, wqe);
if (work) {
list_add_tail(&work->list, &fps->work_list);
wake_up_process(fps->iothread);
spin_unlock_bh(&fps->fp_work_lock);
return;
}
}
spin_unlock_bh(&fps->fp_work_lock);
bnx2fc_process_cq_compl(tgt, wqe);
}
int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt) int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
{ {
struct fcoe_cqe *cq; struct fcoe_cqe *cq;
@ -1042,28 +1064,7 @@ int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
/* Unsolicited event notification */ /* Unsolicited event notification */
bnx2fc_process_unsol_compl(tgt, wqe); bnx2fc_process_unsol_compl(tgt, wqe);
} else { } else {
/* Pending work request completion */ bnx2fc_pending_work(tgt, wqe);
struct bnx2fc_work *work = NULL;
struct bnx2fc_percpu_s *fps = NULL;
unsigned int cpu = wqe % num_possible_cpus();
fps = &per_cpu(bnx2fc_percpu, cpu);
spin_lock_bh(&fps->fp_work_lock);
if (unlikely(!fps->iothread))
goto unlock;
work = bnx2fc_alloc_work(tgt, wqe);
if (work)
list_add_tail(&work->list,
&fps->work_list);
unlock:
spin_unlock_bh(&fps->fp_work_lock);
/* Pending work request completion */
if (fps->iothread && work)
wake_up_process(fps->iothread);
else
bnx2fc_process_cq_compl(tgt, wqe);
num_free_sqes++; num_free_sqes++;
} }
cqe++; cqe++;

View File

@ -404,12 +404,11 @@ int bnx2i_get_stats(void *handle)
/** /**
* bnx2i_percpu_thread_create - Create a receive thread for an * bnx2i_cpu_online - Create a receive thread for an online CPU
* online CPU
* *
* @cpu: cpu index for the online cpu * @cpu: cpu index for the online cpu
*/ */
static void bnx2i_percpu_thread_create(unsigned int cpu) static int bnx2i_cpu_online(unsigned int cpu)
{ {
struct bnx2i_percpu_s *p; struct bnx2i_percpu_s *p;
struct task_struct *thread; struct task_struct *thread;
@ -419,16 +418,17 @@ static void bnx2i_percpu_thread_create(unsigned int cpu)
thread = kthread_create_on_node(bnx2i_percpu_io_thread, (void *)p, thread = kthread_create_on_node(bnx2i_percpu_io_thread, (void *)p,
cpu_to_node(cpu), cpu_to_node(cpu),
"bnx2i_thread/%d", cpu); "bnx2i_thread/%d", cpu);
if (IS_ERR(thread))
return PTR_ERR(thread);
/* bind thread to the cpu */ /* bind thread to the cpu */
if (likely(!IS_ERR(thread))) { kthread_bind(thread, cpu);
kthread_bind(thread, cpu); p->iothread = thread;
p->iothread = thread; wake_up_process(thread);
wake_up_process(thread); return 0;
}
} }
static int bnx2i_cpu_offline(unsigned int cpu)
static void bnx2i_percpu_thread_destroy(unsigned int cpu)
{ {
struct bnx2i_percpu_s *p; struct bnx2i_percpu_s *p;
struct task_struct *thread; struct task_struct *thread;
@ -451,19 +451,6 @@ static void bnx2i_percpu_thread_destroy(unsigned int cpu)
spin_unlock_bh(&p->p_work_lock); spin_unlock_bh(&p->p_work_lock);
if (thread) if (thread)
kthread_stop(thread); kthread_stop(thread);
}
static int bnx2i_cpu_online(unsigned int cpu)
{
pr_info("bnx2i: CPU %x online: Create Rx thread\n", cpu);
bnx2i_percpu_thread_create(cpu);
return 0;
}
static int bnx2i_cpu_dead(unsigned int cpu)
{
pr_info("CPU %x offline: Remove Rx thread\n", cpu);
bnx2i_percpu_thread_destroy(cpu);
return 0; return 0;
} }
@ -511,27 +498,14 @@ static int __init bnx2i_mod_init(void)
p->iothread = NULL; p->iothread = NULL;
} }
get_online_cpus(); err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/bnx2i:online",
bnx2i_cpu_online, bnx2i_cpu_offline);
for_each_online_cpu(cpu)
bnx2i_percpu_thread_create(cpu);
err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
"scsi/bnx2i:online",
bnx2i_cpu_online, NULL);
if (err < 0) if (err < 0)
goto remove_threads; goto unreg_driver;
bnx2i_online_state = err; bnx2i_online_state = err;
cpuhp_setup_state_nocalls(CPUHP_SCSI_BNX2I_DEAD, "scsi/bnx2i:dead",
NULL, bnx2i_cpu_dead);
put_online_cpus();
return 0; return 0;
remove_threads: unreg_driver:
for_each_online_cpu(cpu)
bnx2i_percpu_thread_destroy(cpu);
put_online_cpus();
cnic_unregister_driver(CNIC_ULP_ISCSI); cnic_unregister_driver(CNIC_ULP_ISCSI);
unreg_xport: unreg_xport:
iscsi_unregister_transport(&bnx2i_iscsi_transport); iscsi_unregister_transport(&bnx2i_iscsi_transport);
@ -551,7 +525,6 @@ out:
static void __exit bnx2i_mod_exit(void) static void __exit bnx2i_mod_exit(void)
{ {
struct bnx2i_hba *hba; struct bnx2i_hba *hba;
unsigned cpu = 0;
mutex_lock(&bnx2i_dev_lock); mutex_lock(&bnx2i_dev_lock);
while (!list_empty(&adapter_list)) { while (!list_empty(&adapter_list)) {
@ -569,14 +542,7 @@ static void __exit bnx2i_mod_exit(void)
} }
mutex_unlock(&bnx2i_dev_lock); mutex_unlock(&bnx2i_dev_lock);
get_online_cpus(); cpuhp_remove_state(bnx2i_online_state);
for_each_online_cpu(cpu)
bnx2i_percpu_thread_destroy(cpu);
cpuhp_remove_state_nocalls(bnx2i_online_state);
cpuhp_remove_state_nocalls(CPUHP_SCSI_BNX2I_DEAD);
put_online_cpus();
iscsi_unregister_transport(&bnx2i_iscsi_transport); iscsi_unregister_transport(&bnx2i_iscsi_transport);
cnic_unregister_driver(CNIC_ULP_ISCSI); cnic_unregister_driver(CNIC_ULP_ISCSI);
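Both bnx2fc and bnx2i move from hand-rolled get_online_cpus() loops plus `_nocalls` hotplug states to plain `cpuhp_setup_state()`, which invokes the online callback for every CPU already up and pairs it with an offline callback for teardown, so the separate `CPUHP_SCSI_*_DEAD` states become unnecessary. A kernel-context sketch of the resulting shape, with hypothetical names:

```c
static enum cpuhp_state my_online_state;

static int my_cpu_online(unsigned int cpu)
{
	/* create and bind the per-CPU receive thread */
	return 0;
}

static int my_cpu_offline(unsigned int cpu)
{
	/* flush pending work and stop the thread */
	return 0;
}

static int __init my_mod_init(void)
{
	int ret;

	/* Runs my_cpu_online() for all currently online CPUs, too. */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/example:online",
				my_cpu_online, my_cpu_offline);
	if (ret < 0)
		return ret;
	my_online_state = ret;	/* dynamic state id, kept for removal */
	return 0;
}

static void __exit my_mod_exit(void)
{
	/* Runs my_cpu_offline() on every online CPU before removal. */
	cpuhp_remove_state(my_online_state);
}
```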

View File

@ -528,7 +528,8 @@ struct fip_vlan {
#define QEDF_WRITE (1 << 0) #define QEDF_WRITE (1 << 0)
#define MAX_FIBRE_LUNS 0xffffffff #define MAX_FIBRE_LUNS 0xffffffff
#define QEDF_MAX_NUM_CQS 8 #define MIN_NUM_CPUS_MSIX(x) min_t(u32, x->dev_info.num_cqs, \
num_online_cpus())
/* /*
* PCI function probe defines * PCI function probe defines

View File

@ -2760,11 +2760,9 @@ static int qedf_set_fcoe_pf_param(struct qedf_ctx *qedf)
* we allocate is the minimum of: * we allocate is the minimum of:
* *
* Number of CPUs * Number of CPUs
* Number of MSI-X vectors * Number allocated by qed for our PCI function
* Max number allocated in hardware (QEDF_MAX_NUM_CQS)
*/ */
qedf->num_queues = min((unsigned int)QEDF_MAX_NUM_CQS, qedf->num_queues = MIN_NUM_CPUS_MSIX(qedf);
num_online_cpus());
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n", QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n",
qedf->num_queues); qedf->num_queues);
@ -2962,6 +2960,13 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
goto err1; goto err1;
} }
/* Learn information crucial for qedf to progress */
rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
if (rc) {
QEDF_ERR(&(qedf->dbg_ctx), "Failed to dev info.\n");
goto err1;
}
/* queue allocation code should come here /* queue allocation code should come here
* order should be * order should be
* slowpath_start * slowpath_start
@ -2977,13 +2982,6 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
} }
qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params); qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);
/* Learn information crucial for qedf to progress */
rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
if (rc) {
QEDF_ERR(&(qedf->dbg_ctx), "Failed to dev info.\n");
goto err1;
}
/* Record BDQ producer doorbell addresses */ /* Record BDQ producer doorbell addresses */
qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr; qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr;
qedf->bdq_secondary_prod = qedf->dev_info.secondary_bdq_rq_addr; qedf->bdq_secondary_prod = qedf->dev_info.secondary_bdq_rq_addr;

View File

@ -751,35 +751,6 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
return count; return count;
} }
static bool sg_is_valid_dxfer(sg_io_hdr_t *hp)
{
switch (hp->dxfer_direction) {
case SG_DXFER_NONE:
if (hp->dxferp || hp->dxfer_len > 0)
return false;
return true;
case SG_DXFER_FROM_DEV:
/*
* for SG_DXFER_FROM_DEV we always set dxfer_len to > 0. dxferp
* can either be NULL or != NULL so there's no point in checking
* it either. So just return true.
*/
return true;
case SG_DXFER_TO_DEV:
case SG_DXFER_TO_FROM_DEV:
if (!hp->dxferp || hp->dxfer_len == 0)
return false;
return true;
case SG_DXFER_UNKNOWN:
if ((!hp->dxferp && hp->dxfer_len) ||
(hp->dxferp && hp->dxfer_len == 0))
return false;
return true;
default:
return false;
}
}
static int static int
sg_common_write(Sg_fd * sfp, Sg_request * srp, sg_common_write(Sg_fd * sfp, Sg_request * srp,
unsigned char *cmnd, int timeout, int blocking) unsigned char *cmnd, int timeout, int blocking)
@ -800,7 +771,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
"sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n", "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
(int) cmnd[0], (int) hp->cmd_len)); (int) cmnd[0], (int) hp->cmd_len));
if (!sg_is_valid_dxfer(hp)) if (hp->dxfer_len >= SZ_256M)
return -EINVAL; return -EINVAL;
k = sg_start_req(srp, cmnd); k = sg_start_req(srp, cmnd);

View File

@@ -874,7 +874,6 @@ xfs_ialloc(
	case S_IFREG:
	case S_IFDIR:
		if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
-			uint64_t	di_flags2 = 0;
			uint		di_flags = 0;
 
			if (S_ISDIR(mode)) {
@@ -911,20 +910,23 @@ xfs_ialloc(
				di_flags |= XFS_DIFLAG_NODEFRAG;
			if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
				di_flags |= XFS_DIFLAG_FILESTREAM;
-			if (pip->i_d.di_flags2 & XFS_DIFLAG2_DAX)
-				di_flags2 |= XFS_DIFLAG2_DAX;
 
			ip->i_d.di_flags |= di_flags;
-			ip->i_d.di_flags2 |= di_flags2;
		}
		if (pip &&
		    (pip->i_d.di_flags2 & XFS_DIFLAG2_ANY) &&
		    pip->i_d.di_version == 3 &&
		    ip->i_d.di_version == 3) {
+			uint64_t	di_flags2 = 0;
+
			if (pip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) {
-				ip->i_d.di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
+				di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
				ip->i_d.di_cowextsize = pip->i_d.di_cowextsize;
			}
+			if (pip->i_d.di_flags2 & XFS_DIFLAG2_DAX)
+				di_flags2 |= XFS_DIFLAG2_DAX;
+			ip->i_d.di_flags2 |= di_flags2;
		}
		/* FALLTHROUGH */
	case S_IFLNK:


@@ -539,6 +539,7 @@ xlog_discard_endio(
 
	INIT_WORK(&ctx->discard_endio_work, xlog_discard_endio_work);
	queue_work(xfs_discard_wq, &ctx->discard_endio_work);
+	bio_put(bio);
 }
 
 static void


@@ -39,8 +39,6 @@ enum cpuhp_state {
	CPUHP_PCI_XGENE_DEAD,
	CPUHP_IOMMU_INTEL_DEAD,
	CPUHP_LUSTRE_CFS_DEAD,
-	CPUHP_SCSI_BNX2FC_DEAD,
-	CPUHP_SCSI_BNX2I_DEAD,
	CPUHP_WORKQUEUE_PREP,
	CPUHP_POWER_NUMA_PREPARE,
	CPUHP_HRTIMERS_PREPARE,


@@ -689,7 +689,8 @@ i2c_unlock_adapter(struct i2c_adapter *adapter)
 #define I2C_CLASS_HWMON		(1<<0)	/* lm_sensors, ... */
 #define I2C_CLASS_DDC		(1<<3)	/* DDC bus on graphics adapters */
 #define I2C_CLASS_SPD		(1<<7)	/* Memory modules */
-#define I2C_CLASS_DEPRECATED	(1<<8)	/* Warn users that adapter will stop using classes */
+/* Warn users that the adapter doesn't support classes anymore */
+#define I2C_CLASS_DEPRECATED	(1<<8)
 
 /* Internal numbers to terminate lists */
 #define I2C_CLIENT_END		0xfffeU


@@ -620,6 +620,7 @@ struct mlx4_caps {
	u32			dmfs_high_rate_qpn_base;
	u32			dmfs_high_rate_qpn_range;
	u32			vf_caps;
+	bool			wol_port[MLX4_MAX_PORTS + 1];
	struct mlx4_rate_limit_caps rl_caps;
 };


@@ -212,7 +212,6 @@ struct mlx5_wqe_ctrl_seg {
 #define MLX5_WQE_CTRL_OPCODE_MASK 0xff
 #define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00
 #define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8
-#define MLX5_WQE_AV_EXT 0x80000000
 
 enum {
	MLX5_ETH_WQE_L3_INNER_CSUM = 1 << 4,


@@ -681,10 +681,10 @@ struct nand_buffers {
  * @tWW_min: WP# transition to WE# low
  */
 struct nand_sdr_timings {
-	u32 tBERS_max;
+	u64 tBERS_max;
	u32 tCCS_min;
-	u32 tPROG_max;
-	u32 tR_max;
+	u64 tPROG_max;
+	u64 tR_max;
	u32 tALH_min;
	u32 tADL_min;
	u32 tALS_min;
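The type changes above matter because the nand_sdr_timings fields are expressed in picoseconds, and worst-case erase/program/read timeouts run into the millisecond range, which overflows 32 bits. A minimal userspace sketch of the arithmetic (the 10 ms figure is illustrative, not a datasheet value):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 1 ms = 1e9 ps, so 10 ms is 1e10 ps */
	uint64_t tBERS_max_ps = 10ULL * 1000 * 1000 * 1000;

	printf("UINT32_MAX          = %u ps (~4.29 ms)\n", UINT32_MAX);
	printf("10 ms erase timeout = %llu ps\n",
	       (unsigned long long)tBERS_max_ps);
	printf("fits in u32? %s\n",
	       tBERS_max_ps <= UINT32_MAX ? "yes" : "no");
	return 0;
}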


@@ -99,6 +99,11 @@ struct system_device_crosststamp;
  *            parameter func: the desired function to use.
  *            parameter chan: the function channel index to use.
  *
+ * @do_work:  Request driver to perform auxiliary (periodic) operations
+ *            Driver should return delay of the next auxiliary work scheduling
+ *            time (>=0) or negative value in case further scheduling
+ *            is not required.
+ *
  * Drivers should embed their ptp_clock_info within a private
  * structure, obtaining a reference to it using container_of().
  *
@@ -126,6 +131,7 @@ struct ptp_clock_info {
			  struct ptp_clock_request *request, int on);
	int (*verify)(struct ptp_clock_info *ptp, unsigned int pin,
		      enum ptp_pin_function func, unsigned int chan);
+	long (*do_aux_work)(struct ptp_clock_info *ptp);
 };
 
 struct ptp_clock;
@@ -211,6 +217,16 @@ extern int ptp_clock_index(struct ptp_clock *ptp);
 int ptp_find_pin(struct ptp_clock *ptp,
		 enum ptp_pin_function func, unsigned int chan);
 
+/**
+ * ptp_schedule_worker() - schedule ptp auxiliary work
+ *
+ * @ptp:    The clock obtained from ptp_clock_register().
+ * @delay:  number of jiffies to wait before queuing
+ *          See kthread_queue_delayed_work() for more info.
+ */
+
+int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay);
+
 #else
 static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
						   struct device *parent)
@@ -225,6 +241,10 @@ static inline int ptp_clock_index(struct ptp_clock *ptp)
 static inline int ptp_find_pin(struct ptp_clock *ptp,
			       enum ptp_pin_function func, unsigned int chan)
 { return -1; }
+static inline int ptp_schedule_worker(struct ptp_clock *ptp,
+				      unsigned long delay)
+{ return -EOPNOTSUPP; }
+
 #endif
 
 #endif
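To illustrate the contract of the new do_aux_work callback and the ptp_schedule_worker() helper, here is a hypothetical driver sketch; struct foo_priv and foo_poll_timestamps() are invented names, and the private data is recovered via container_of() as the comment block above prescribes:

#include <linux/ptp_clock_kernel.h>

struct foo_priv {
	struct ptp_clock_info ptp_info;
	struct ptp_clock *ptp_clock;
};

static void foo_poll_timestamps(struct foo_priv *priv)
{
	/* placeholder for the periodic housekeeping a real driver would do */
}

static long foo_do_aux_work(struct ptp_clock_info *info)
{
	struct foo_priv *priv = container_of(info, struct foo_priv, ptp_info);

	foo_poll_timestamps(priv);
	return HZ;	/* non-negative: run again in ~1 s; negative would stop */
}

After ptp_clock_register() succeeds, such a driver would seed the worker once with ptp_schedule_worker(priv->ptp_clock, 0).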


@@ -1902,6 +1902,16 @@ extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
			     u64 xmit_time);
 extern void tcp_rack_reo_timeout(struct sock *sk);
 
+/* At how many usecs into the future should the RTO fire? */
+static inline s64 tcp_rto_delta_us(const struct sock *sk)
+{
+	const struct sk_buff *skb = tcp_write_queue_head(sk);
+	u32 rto = inet_csk(sk)->icsk_rto;
+	u64 rto_time_stamp_us = skb->skb_mstamp + jiffies_to_usecs(rto);
+
+	return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
+}
+
 /*
  * Save and compile IPv4 options, return a pointer to it
  */
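tcp_rto_delta_us() centralizes one computation: the RTO is due at (timestamp of the oldest unacked skb + RTO interval), so the remaining delay is that deadline minus the current TCP clock. Restated as a standalone sketch with all quantities already converted to microseconds:

#include <stdint.h>

/* a negative result means the RTO deadline has already passed */
static int64_t rto_delta_us(uint64_t head_skb_mstamp_us,
			    uint64_t rto_us, uint64_t now_us)
{
	return (int64_t)(head_skb_mstamp_us + rto_us - now_us);
}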


@@ -670,13 +670,14 @@ again:
		 * this reference was taken by ihold under the page lock
		 * pinning the inode in place so i_lock was unnecessary. The
		 * only way for this check to fail is if the inode was
-		 * truncated in parallel so warn for now if this happens.
+		 * truncated in parallel which is almost certainly an
+		 * application bug. In such a case, just retry.
		 *
		 * We are not calling into get_futex_key_refs() in file-backed
		 * cases, therefore a successful atomic_inc return below will
		 * guarantee that get_futex_key() will still imply smp_mb(); (B).
		 */
-		if (WARN_ON_ONCE(!atomic_inc_not_zero(&inode->i_count))) {
+		if (!atomic_inc_not_zero(&inode->i_count)) {
			rcu_read_unlock();
			put_page(page);
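The check that replaces the WARN is the usual take-a-reference-unless-already-zero idiom: if the refcount has hit zero, the object is being freed, so the lookup is retried instead of pinning a dying inode. A minimal userspace rendering with C11 atomics (ref_get_unless_zero is an invented name):

#include <stdatomic.h>
#include <stdbool.h>

static bool ref_get_unless_zero(atomic_int *refcount)
{
	int old = atomic_load(refcount);

	while (old != 0) {
		/* on failure, old is reloaded with the current value */
		if (atomic_compare_exchange_weak(refcount, &old, old + 1))
			return true;	/* reference taken */
	}
	return false;	/* object is going away; caller retries the lookup */
}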


@@ -1549,9 +1549,41 @@ batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
	return found;
 }
 
+/**
+ * batadv_tt_global_sync_flags - update TT sync flags
+ * @tt_global: the TT global entry to update sync flags in
+ *
+ * Updates the sync flag bits in the tt_global flag attribute with a logical
+ * OR of all sync flags from any of its TT orig entries.
+ */
+static void
+batadv_tt_global_sync_flags(struct batadv_tt_global_entry *tt_global)
+{
+	struct batadv_tt_orig_list_entry *orig_entry;
+	const struct hlist_head *head;
+	u16 flags = BATADV_NO_FLAGS;
+
+	rcu_read_lock();
+	head = &tt_global->orig_list;
+	hlist_for_each_entry_rcu(orig_entry, head, list)
+		flags |= orig_entry->flags;
+	rcu_read_unlock();
+
+	flags |= tt_global->common.flags & (~BATADV_TT_SYNC_MASK);
+	tt_global->common.flags = flags;
+}
+
+/**
+ * batadv_tt_global_orig_entry_add - add or update a TT orig entry
+ * @tt_global: the TT global entry to add an orig entry in
+ * @orig_node: the originator to add an orig entry for
+ * @ttvn: translation table version number of this changeset
+ * @flags: TT sync flags
+ */
 static void
 batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
-				struct batadv_orig_node *orig_node, int ttvn)
+				struct batadv_orig_node *orig_node, int ttvn,
+				u8 flags)
 {
	struct batadv_tt_orig_list_entry *orig_entry;
 
@@ -1561,7 +1593,8 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
		 * was added during a "temporary client detection"
		 */
		orig_entry->ttvn = ttvn;
-		goto out;
+		orig_entry->flags = flags;
+		goto sync_flags;
	}
 
	orig_entry = kmem_cache_zalloc(batadv_tt_orig_cache, GFP_ATOMIC);
@@ -1573,6 +1606,7 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
	batadv_tt_global_size_inc(orig_node, tt_global->common.vid);
	orig_entry->orig_node = orig_node;
	orig_entry->ttvn = ttvn;
+	orig_entry->flags = flags;
	kref_init(&orig_entry->refcount);
 
	spin_lock_bh(&tt_global->list_lock);
@@ -1582,6 +1616,8 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
	spin_unlock_bh(&tt_global->list_lock);
	atomic_inc(&tt_global->orig_list_count);
 
+sync_flags:
+	batadv_tt_global_sync_flags(tt_global);
 out:
	if (orig_entry)
		batadv_tt_orig_list_entry_put(orig_entry);
@@ -1703,10 +1739,10 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
		}
 
		/* the change can carry possible "attribute" flags like the
-		 * TT_CLIENT_WIFI, therefore they have to be copied in the
+		 * TT_CLIENT_TEMP, therefore they have to be copied in the
		 * client entry
		 */
-		common->flags |= flags;
+		common->flags |= flags & (~BATADV_TT_SYNC_MASK);
 
		/* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only
		 * one originator left in the list and we previously received a
@@ -1723,7 +1759,8 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
	}
 add_orig_entry:
	/* add the new orig_entry (if needed) or update it */
-	batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn);
+	batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn,
+					flags & BATADV_TT_SYNC_MASK);
 
	batadv_dbg(BATADV_DBG_TT, bat_priv,
		   "Creating new global tt entry: %pM (vid: %d, via %pM)\n",
@@ -1946,6 +1983,7 @@ batadv_tt_global_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
			       struct batadv_tt_orig_list_entry *orig,
			       bool best)
 {
+	u16 flags = (common->flags & (~BATADV_TT_SYNC_MASK)) | orig->flags;
	void *hdr;
	struct batadv_orig_node_vlan *vlan;
	u8 last_ttvn;
@@ -1975,7 +2013,7 @@ batadv_tt_global_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
	    nla_put_u8(msg, BATADV_ATTR_TT_LAST_TTVN, last_ttvn) ||
	    nla_put_u32(msg, BATADV_ATTR_TT_CRC32, crc) ||
	    nla_put_u16(msg, BATADV_ATTR_TT_VID, common->vid) ||
-	    nla_put_u32(msg, BATADV_ATTR_TT_FLAGS, common->flags))
+	    nla_put_u32(msg, BATADV_ATTR_TT_FLAGS, flags))
		goto nla_put_failure;
 
	if (best && nla_put_flag(msg, BATADV_ATTR_FLAG_BEST))
@@ -2589,6 +2627,7 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
				unsigned short vid)
 {
	struct batadv_hashtable *hash = bat_priv->tt.global_hash;
+	struct batadv_tt_orig_list_entry *tt_orig;
	struct batadv_tt_common_entry *tt_common;
	struct batadv_tt_global_entry *tt_global;
	struct hlist_head *head;
@@ -2627,8 +2666,9 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
			/* find out if this global entry is announced by this
			 * originator
			 */
-			if (!batadv_tt_global_entry_has_orig(tt_global,
-							     orig_node))
+			tt_orig = batadv_tt_global_orig_entry_find(tt_global,
								   orig_node);
+			if (!tt_orig)
				continue;
 
			/* use network order to read the VID: this ensures that
@@ -2640,10 +2680,12 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
			/* compute the CRC on flags that have to be kept in sync
			 * among nodes
			 */
-			flags = tt_common->flags & BATADV_TT_SYNC_MASK;
+			flags = tt_orig->flags;
			crc_tmp = crc32c(crc_tmp, &flags, sizeof(flags));
			crc ^= crc32c(crc_tmp, tt_common->addr, ETH_ALEN);
+
+			batadv_tt_orig_list_entry_put(tt_orig);
		}
		rcu_read_unlock();
	}
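The core of the batman-adv change is that sync-relevant flag bits now live per originator entry, and the global entry's view is the logical OR of all of them. A compact restatement, with a plain array standing in for the RCU-protected orig list and an illustrative mask value (the kernel's BATADV_TT_SYNC_MASK may differ):

#include <stdint.h>
#include <stddef.h>

#define TT_SYNC_MASK	0x00f0	/* illustrative, not the kernel constant */

static uint16_t tt_sync_flags(uint16_t entry_flags,
			      const uint8_t *orig_flags, size_t n)
{
	uint16_t flags = 0;

	for (size_t i = 0; i < n; i++)
		flags |= orig_flags[i];	/* OR of all per-orig sync flags */

	/* keep the entry's non-sync bits, replace only the sync bits */
	return (entry_flags & ~TT_SYNC_MASK) | (flags & TT_SYNC_MASK);
}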


@@ -1260,6 +1260,7 @@ struct batadv_tt_global_entry {
  * struct batadv_tt_orig_list_entry - orig node announcing a non-mesh client
  * @orig_node: pointer to orig node announcing this non-mesh client
  * @ttvn: translation table version number which added the non-mesh client
+ * @flags: per orig entry TT sync flags
  * @list: list node for batadv_tt_global_entry::orig_list
  * @refcount: number of contexts the object is used
  * @rcu: struct used for freeing in an RCU-safe manner
@@ -1267,6 +1268,7 @@ struct batadv_tt_global_entry {
 struct batadv_tt_orig_list_entry {
	struct batadv_orig_node *orig_node;
	u8 ttvn;
+	u8 flags;
	struct hlist_node list;
	struct kref refcount;
	struct rcu_head rcu;


@@ -2732,7 +2732,7 @@ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
 {
	if (tx_path)
		return skb->ip_summed != CHECKSUM_PARTIAL &&
-		       skb->ip_summed != CHECKSUM_NONE;
+		       skb->ip_summed != CHECKSUM_UNNECESSARY;
 
	return skb->ip_summed == CHECKSUM_NONE;
 }


@@ -1523,9 +1523,17 @@ unsigned char *cipso_v4_optptr(const struct sk_buff *skb)
	int taglen;
 
	for (optlen = iph->ihl*4 - sizeof(struct iphdr); optlen > 0; ) {
-		if (optptr[0] == IPOPT_CIPSO)
+		switch (optptr[0]) {
+		case IPOPT_CIPSO:
			return optptr;
-		taglen = optptr[1];
+		case IPOPT_END:
+			return NULL;
+		case IPOPT_NOOP:
+			taglen = 1;
+			break;
+		default:
+			taglen = optptr[1];
+		}
		optlen -= taglen;
		optptr += taglen;
	}
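The point of the switch is that IPOPT_END and IPOPT_NOOP are single-byte options with no length octet, so the old unconditional taglen = optptr[1] read a bogus length for them. A standalone sketch of the corrected walk (option values inlined; CIPSO is IPv4 option number 134):

#include <stddef.h>

#define IPOPT_END	0	/* single byte: end of option list */
#define IPOPT_NOOP	1	/* single byte: padding */
#define IPOPT_CIPSO	134	/* type-length-value option */

static const unsigned char *find_cipso(const unsigned char *opt, int optlen)
{
	while (optlen > 0) {
		int taglen;

		switch (opt[0]) {
		case IPOPT_CIPSO:
			return opt;
		case IPOPT_END:
			return NULL;	/* nothing follows an END octet */
		case IPOPT_NOOP:
			taglen = 1;	/* no length octet to read */
			break;
		default:
			taglen = opt[1];
		}
		optlen -= taglen;
		opt += taglen;
	}
	return NULL;
}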


@@ -450,6 +450,7 @@ out_unlock:
 out:
	NAPI_GRO_CB(skb)->flush |= flush;
	skb_gro_remcsum_cleanup(skb, &grc);
+	skb->remcsum_offload = 0;
 
	return pp;
 }


@@ -106,6 +106,7 @@ int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;
 #define FLAG_ORIG_SACK_ACKED	0x200 /* Never retransmitted data are (s)acked */
 #define FLAG_SND_UNA_ADVANCED	0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
 #define FLAG_DSACKING_ACK	0x800 /* SACK blocks contained D-SACK info */
+#define FLAG_SET_XMIT_TIMER	0x1000 /* Set TLP or RTO timer */
 #define FLAG_SACK_RENEGING	0x2000 /* snd_una advanced to a sacked seq */
 #define FLAG_UPDATE_TS_RECENT	0x4000 /* tcp_replace_ts_recent() */
 #define FLAG_NO_CHALLENGE_ACK	0x8000 /* do not call tcp_send_challenge_ack()	*/
@@ -2520,8 +2521,8 @@ static inline void tcp_end_cwnd_reduction(struct sock *sk)
		return;
 
	/* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */
-	if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR ||
-	    (tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) {
+	if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH &&
+	    (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) {
		tp->snd_cwnd = tp->snd_ssthresh;
		tp->snd_cwnd_stamp = tcp_jiffies32;
	}
@@ -3004,10 +3005,7 @@ void tcp_rearm_rto(struct sock *sk)
		/* Offset the time elapsed after installing regular RTO */
		if (icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
		    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
-			struct sk_buff *skb = tcp_write_queue_head(sk);
-			u64 rto_time_stamp = skb->skb_mstamp +
-					     jiffies_to_usecs(rto);
-			s64 delta_us = rto_time_stamp - tp->tcp_mstamp;
+			s64 delta_us = tcp_rto_delta_us(sk);
			/* delta_us may not be positive if the socket is locked
			 * when the retrans timer fires and is rescheduled.
			 */
@@ -3019,6 +3017,13 @@ void tcp_rearm_rto(struct sock *sk)
	}
 }
 
+/* Try to schedule a loss probe; if that doesn't work, then schedule an RTO. */
+static void tcp_set_xmit_timer(struct sock *sk)
+{
+	if (!tcp_schedule_loss_probe(sk))
+		tcp_rearm_rto(sk);
+}
+
 /* If we get here, the whole TSO packet has not been acked. */
 static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
 {
@@ -3180,7 +3185,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
					ca_rtt_us, sack->rate);
 
	if (flag & FLAG_ACKED) {
-		tcp_rearm_rto(sk);
+		flag |= FLAG_SET_XMIT_TIMER;  /* set TLP or RTO timer */
		if (unlikely(icsk->icsk_mtup.probe_size &&
			     !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) {
			tcp_mtup_probe_success(sk);
@@ -3208,7 +3213,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
		 * after when the head was last (re)transmitted. Otherwise the
		 * timeout may continue to extend in loss recovery.
		 */
-		tcp_rearm_rto(sk);
+		flag |= FLAG_SET_XMIT_TIMER;  /* set TLP or RTO timer */
	}
 
	if (icsk->icsk_ca_ops->pkts_acked) {
@@ -3575,9 +3580,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
	if (after(ack, tp->snd_nxt))
		goto invalid_ack;
 
-	if (icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
-		tcp_rearm_rto(sk);
-
	if (after(ack, prior_snd_una)) {
		flag |= FLAG_SND_UNA_ADVANCED;
		icsk->icsk_retransmits = 0;
@@ -3626,18 +3628,20 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
	flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, &acked,
				    &sack_state);
 
+	if (tp->tlp_high_seq)
+		tcp_process_tlp_ack(sk, ack, flag);
+	/* If needed, reset TLP/RTO timer; RACK may later override this. */
+	if (flag & FLAG_SET_XMIT_TIMER)
+		tcp_set_xmit_timer(sk);
+
	if (tcp_ack_is_dubious(sk, flag)) {
		is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
		tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit);
	}
-	if (tp->tlp_high_seq)
-		tcp_process_tlp_ack(sk, ack, flag);
 
	if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))
		sk_dst_confirm(sk);
 
-	if (icsk->icsk_pending == ICSK_TIME_RETRANS)
-		tcp_schedule_loss_probe(sk);
	delivered = tp->delivered - delivered;	/* freshly ACKed or SACKed */
	lost = tp->lost - lost;			/* freshly marked lost */
	tcp_rate_gen(sk, delivered, lost, sack_state.rate);
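Structurally, the FLAG_SET_XMIT_TIMER change converts several scattered timer re-arms into a deferred action: stages of ACK processing only record that the timer needs resetting, and a single decision point then prefers a loss probe over a plain RTO. A sketch of that pattern with invented names (handle_acked_data() and friends are not kernel functions):

struct conn;

int handle_acked_data(struct conn *c);
int handle_partial_ack(struct conn *c);
int schedule_loss_probe(struct conn *c);	/* returns 0 if no probe armed */
void rearm_rto(struct conn *c);

#define FLAG_SET_XMIT_TIMER	0x1000

static void ack_processing(struct conn *c)
{
	int flag = 0;

	if (handle_acked_data(c))
		flag |= FLAG_SET_XMIT_TIMER;
	if (handle_partial_ack(c))
		flag |= FLAG_SET_XMIT_TIMER;

	/* single decision point, mirroring tcp_set_xmit_timer() above */
	if (flag & FLAG_SET_XMIT_TIMER) {
		if (!schedule_loss_probe(c))
			rearm_rto(c);
	}
}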


@@ -2375,23 +2375,14 @@ bool tcp_schedule_loss_probe(struct sock *sk)
 {
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
-	u32 timeout, tlp_time_stamp, rto_time_stamp;
-
-	/* No consecutive loss probes. */
-	if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) {
-		tcp_rearm_rto(sk);
-		return false;
-	}
+	u32 timeout, rto_delta_us;
 
	/* Don't do any loss probe on a Fast Open connection before 3WHS
	 * finishes.
	 */
	if (tp->fastopen_rsk)
		return false;
 
-	/* TLP is only scheduled when next timer event is RTO. */
-	if (icsk->icsk_pending != ICSK_TIME_RETRANS)
-		return false;
-
	/* Schedule a loss probe in 2*RTT for SACK capable connections
	 * in Open state, that are either limited by cwnd or application.
	 */
@@ -2418,14 +2409,10 @@ bool tcp_schedule_loss_probe(struct sock *sk)
		timeout = TCP_TIMEOUT_INIT;
	}
 
-	/* If RTO is shorter, just schedule TLP in its place. */
-	tlp_time_stamp = tcp_jiffies32 + timeout;
-	rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout;
-	if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) {
-		s32 delta = rto_time_stamp - tcp_jiffies32;
-		if (delta > 0)
-			timeout = delta;
-	}
+	/* If the RTO formula yields an earlier time, then use that time. */
+	rto_delta_us = tcp_rto_delta_us(sk);  /* How far in future is RTO? */
+	if (rto_delta_us > 0)
+		timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us));
 
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
				  TCP_RTO_MAX);
@@ -3450,6 +3437,10 @@ int tcp_connect(struct sock *sk)
	int err;
 
	tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB);
+
+	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
+		return -EHOSTUNREACH; /* Routing failure or similar. */
+
	tcp_connect_init(sk);
 
	if (unlikely(tp->repair)) {
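The rewritten clamp says: take the 2*RTT-based probe timeout, but never let the probe fire after the pending RTO would have. A worked numeric example (values invented, in milliseconds rather than the kernel's usec/jiffies mix):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t timeout_ms = 120;	/* TLP timeout from the 2*RTT rule */
	int64_t rto_delta_ms = 90;	/* RTO deadline is 90 ms away */

	if (rto_delta_ms > 0 && (uint64_t)rto_delta_ms < timeout_ms)
		timeout_ms = (uint32_t)rto_delta_ms;

	printf("loss probe armed for %u ms\n", timeout_ms);	/* prints 90 */
	return 0;
}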


@@ -640,7 +640,8 @@ static void tcp_keepalive_timer (unsigned long data)
		goto death;
	}
 
-	if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE)
+	if (!sock_flag(sk, SOCK_KEEPOPEN) ||
+	    ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
		goto out;
 
	elapsed = keepalive_time_when(tp);
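The keepalive fix uses the TCPF_* idiom: each state's flag is 1 << state, so membership in a set of states is one mask test instead of a chain of comparisons. In miniature (state numbering mirrors the kernel's):

#include <stdio.h>

enum { TCP_ESTABLISHED = 1, TCP_SYN_SENT = 2, TCP_CLOSE = 7 };

#define TCPF_SYN_SENT	(1 << TCP_SYN_SENT)
#define TCPF_CLOSE	(1 << TCP_CLOSE)

int main(void)
{
	int state = TCP_SYN_SENT;

	if ((1 << state) & (TCPF_CLOSE | TCPF_SYN_SENT))
		printf("skip keepalive processing in state %d\n", state);
	return 0;
}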


@@ -2356,6 +2356,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
	if (on_link)
		nrt->rt6i_flags &= ~RTF_GATEWAY;
 
+	nrt->rt6i_protocol = RTPROT_REDIRECT;
	nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
 
	if (ip6_ins_rt(nrt))
@@ -2466,6 +2467,7 @@ static struct rt6_info *rt6_add_route_info(struct net *net,
		.fc_dst_len	= prefixlen,
		.fc_flags	= RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
				  RTF_UP | RTF_PREF(pref),
+		.fc_protocol = RTPROT_RA,
		.fc_nlinfo.portid = 0,
		.fc_nlinfo.nlh = NULL,
		.fc_nlinfo.nl_net = net,
@@ -2518,6 +2520,7 @@ struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
		.fc_ifindex	= dev->ifindex,
		.fc_flags	= RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
				  RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
+		.fc_protocol = RTPROT_RA,
		.fc_nlinfo.portid = 0,
		.fc_nlinfo.nlh = NULL,
		.fc_nlinfo.nl_net = dev_net(dev),
@@ -3432,14 +3435,6 @@ static int rt6_fill_node(struct net *net,
	rtm->rtm_flags = 0;
	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
	rtm->rtm_protocol = rt->rt6i_protocol;
-	if (rt->rt6i_flags & RTF_DYNAMIC)
-		rtm->rtm_protocol = RTPROT_REDIRECT;
-	else if (rt->rt6i_flags & RTF_ADDRCONF) {
-		if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ROUTEINFO))
-			rtm->rtm_protocol = RTPROT_RA;
-		else
-			rtm->rtm_protocol = RTPROT_KERNEL;
-	}
 
	if (rt->rt6i_flags & RTF_CACHE)
		rtm->rtm_flags |= RTM_F_CLONED;


@@ -1015,8 +1015,10 @@ void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic,
	if (rds_ib_ring_empty(&ic->i_recv_ring))
		rds_ib_stats_inc(s_ib_rx_ring_empty);
 
-	if (rds_ib_ring_low(&ic->i_recv_ring))
+	if (rds_ib_ring_low(&ic->i_recv_ring)) {
		rds_ib_recv_refill(conn, 0, GFP_NOWAIT);
+		rds_ib_stats_inc(s_ib_rx_refill_from_cq);
+	}
 }
 
 int rds_ib_recv_path(struct rds_conn_path *cp)
@@ -1029,6 +1031,7 @@ int rds_ib_recv_path(struct rds_conn_path *cp)
	if (rds_conn_up(conn)) {
		rds_ib_attempt_ack(ic);
		rds_ib_recv_refill(conn, 0, GFP_KERNEL);
+		rds_ib_stats_inc(s_ib_rx_refill_from_thread);
	}
 
	return ret;


@@ -36,8 +36,8 @@ static struct tc_action_ops act_ipt_ops;
 static unsigned int xt_net_id;
 static struct tc_action_ops act_xt_ops;
 
-static int ipt_init_target(struct xt_entry_target *t, char *table,
-			   unsigned int hook)
+static int ipt_init_target(struct net *net, struct xt_entry_target *t,
+			   char *table, unsigned int hook)
 {
	struct xt_tgchk_param par;
	struct xt_target *target;
@@ -49,6 +49,7 @@ static int ipt_init_target(struct xt_entry_target *t, char *table,
		return PTR_ERR(target);
 
	t->u.kernel.target = target;
+	par.net       = net;
	par.table     = table;
	par.entryinfo = NULL;
	par.target    = target;
@@ -91,10 +92,11 @@ static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = {
	[TCA_IPT_TARG]	= { .len = sizeof(struct xt_entry_target) },
 };
 
-static int __tcf_ipt_init(struct tc_action_net *tn, struct nlattr *nla,
+static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
			  struct nlattr *est, struct tc_action **a,
			  const struct tc_action_ops *ops, int ovr, int bind)
 {
+	struct tc_action_net *tn = net_generic(net, id);
	struct nlattr *tb[TCA_IPT_MAX + 1];
	struct tcf_ipt *ipt;
	struct xt_entry_target *td, *t;
@@ -159,7 +161,7 @@ static int __tcf_ipt_init(struct tc_action_net *tn, struct nlattr *nla,
	if (unlikely(!t))
		goto err2;
 
-	err = ipt_init_target(t, tname, hook);
+	err = ipt_init_target(net, t, tname, hook);
	if (err < 0)
		goto err3;
 
@@ -193,18 +195,16 @@ static int tcf_ipt_init(struct net *net, struct nlattr *nla,
			struct nlattr *est, struct tc_action **a, int ovr,
			int bind)
 {
-	struct tc_action_net *tn = net_generic(net, ipt_net_id);
-
-	return __tcf_ipt_init(tn, nla, est, a, &act_ipt_ops, ovr, bind);
+	return __tcf_ipt_init(net, ipt_net_id, nla, est, a, &act_ipt_ops, ovr,
+			      bind);
 }
 
 static int tcf_xt_init(struct net *net, struct nlattr *nla,
		       struct nlattr *est, struct tc_action **a, int ovr,
		       int bind)
 {
-	struct tc_action_net *tn = net_generic(net, xt_net_id);
-
-	return __tcf_ipt_init(tn, nla, est, a, &act_xt_ops, ovr, bind);
+	return __tcf_ipt_init(net, xt_net_id, nla, est, a, &act_xt_ops, ovr,
+			      bind);
 }
 
 static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a,


@@ -18,6 +18,7 @@ my $V = '0.26';
 
 use Getopt::Long qw(:config no_auto_abbrev);
 use Cwd;
+use File::Find;
 
 my $cur_path = fastgetcwd() . '/';
 my $lk_path = "./";
@@ -58,6 +59,7 @@ my $from_filename = 0;
 my $pattern_depth = 0;
 my $version = 0;
 my $help = 0;
+my $find_maintainer_files = 0;
 
 my $vcs_used = 0;
 
@@ -249,6 +251,7 @@ if (!GetOptions(
		'sections!' => \$sections,
		'fe|file-emails!' => \$file_emails,
		'f|file' => \$from_filename,
+		'find-maintainer-files' => \$find_maintainer_files,
		'v|version' => \$version,
		'h|help|usage' => \$help,
		)) {
@@ -307,36 +310,74 @@ if (!top_of_kernel_tree($lk_path)) {
 
 my @typevalue = ();
 my %keyword_hash;
+my @mfiles = ();
 
-open (my $maint, '<', "${lk_path}MAINTAINERS")
-    or die "$P: Can't open MAINTAINERS: $!\n";
-while (<$maint>) {
-    my $line = $_;
-
-    if ($line =~ m/^([A-Z]):\s*(.*)/) {
-	my $type = $1;
-	my $value = $2;
-
-	##Filename pattern matching
-	if ($type eq "F" || $type eq "X") {
-	    $value =~ s@\.@\\\.@g;       ##Convert . to \.
-	    $value =~ s/\*/\.\*/g;       ##Convert * to .*
-	    $value =~ s/\?/\./g;         ##Convert ? to .
-	    ##if pattern is a directory and it lacks a trailing slash, add one
-	    if ((-d $value)) {
-		$value =~ s@([^/])$@$1/@;
+sub read_maintainer_file {
+    my ($file) = @_;
+
+    open (my $maint, '<', "$file")
+	or die "$P: Can't open MAINTAINERS file '$file': $!\n";
+    while (<$maint>) {
+	my $line = $_;
+
+	if ($line =~ m/^([A-Z]):\s*(.*)/) {
+	    my $type = $1;
+	    my $value = $2;
+
+	    ##Filename pattern matching
+	    if ($type eq "F" || $type eq "X") {
+		$value =~ s@\.@\\\.@g;       ##Convert . to \.
+		$value =~ s/\*/\.\*/g;       ##Convert * to .*
+		$value =~ s/\?/\./g;         ##Convert ? to .
+		##if pattern is a directory and it lacks a trailing slash, add one
+		if ((-d $value)) {
+		    $value =~ s@([^/])$@$1/@;
+		}
+	    } elsif ($type eq "K") {
+		$keyword_hash{@typevalue} = $value;
	    }
-	} elsif ($type eq "K") {
-	    $keyword_hash{@typevalue} = $value;
+	    push(@typevalue, "$type:$value");
+	} elsif (!(/^\s*$/ || /^\s*\#/)) {
+	    $line =~ s/\n$//g;
+	    push(@typevalue, $line);
	}
-	push(@typevalue, "$type:$value");
-    } elsif (!/^(\s)*$/) {
-	$line =~ s/\n$//g;
-	push(@typevalue, $line);
+    }
+    close($maint);
+}
+
+sub find_is_maintainer_file {
+    my ($file) = $_;
+    return if ($file !~ m@/MAINTAINERS$@);
+    $file = $File::Find::name;
+    return if (! -f $file);
+    push(@mfiles, $file);
+}
+
+sub find_ignore_git {
+    return grep { $_ !~ /^\.git$/; } @_;
+}
+
+if (-d "${lk_path}MAINTAINERS") {
+    opendir(DIR, "${lk_path}MAINTAINERS") or die $!;
+    my @files = readdir(DIR);
+    closedir(DIR);
+    foreach my $file (@files) {
+	push(@mfiles, "${lk_path}MAINTAINERS/$file") if ($file !~ /^\./);
     }
 }
-close($maint);
+
+if ($find_maintainer_files) {
+    find( { wanted => \&find_is_maintainer_file,
+	    preprocess => \&find_ignore_git,
+	    no_chdir => 1,
+	}, "${lk_path}");
+} else {
+    push(@mfiles, "${lk_path}MAINTAINERS") if -f "${lk_path}MAINTAINERS";
+}
+
+foreach my $file (@mfiles) {
+    read_maintainer_file("$file");
+}
 
 #
 # Read mail address map
@@ -873,7 +914,7 @@ sub top_of_kernel_tree {
	if (   (-f "${lk_path}COPYING")
	    && (-f "${lk_path}CREDITS")
	    && (-f "${lk_path}Kbuild")
-	    && (-f "${lk_path}MAINTAINERS")
+	    && (-e "${lk_path}MAINTAINERS")
	    && (-f "${lk_path}Makefile")
	    && (-f "${lk_path}README")
	    && (-d "${lk_path}Documentation")


@@ -2,9 +2,9 @@
 
 use strict;
 
-my %map;
+my $P = $0;
 
-# sort comparison function
+# sort comparison functions
 sub by_category($$) {
     my ($a, $b) = @_;
 
@@ -15,20 +15,33 @@ sub by_category($$) {
     $a =~ s/THE REST/ZZZZZZ/g;
     $b =~ s/THE REST/ZZZZZZ/g;
 
-    $a cmp $b;
+    return $a cmp $b;
 }
 
-sub alpha_output {
-    my $key;
-    my $sort_method = \&by_category;
-    my $sep = "";
-
-    foreach $key (sort $sort_method keys %map) {
-	if ($key ne " ") {
-	    print $sep . $key . "\n";
-	    $sep = "\n";
-	}
-	print $map{$key};
+sub by_pattern($$) {
+    my ($a, $b) = @_;
+    my $preferred_order = 'MRPLSWTQBCFXNK';
+
+    my $a1 = uc(substr($a, 0, 1));
+    my $b1 = uc(substr($b, 0, 1));
+
+    my $a_index = index($preferred_order, $a1);
+    my $b_index = index($preferred_order, $b1);
+
+    $a_index = 1000 if ($a_index == -1);
+    $b_index = 1000 if ($b_index == -1);
+
+    if (($a1 =~ /^F$/ && $b1 =~ /^F$/) ||
+	($a1 =~ /^X$/ && $b1 =~ /^X$/)) {
+	return $a cmp $b;
+    }
+
+    if ($a_index < $b_index) {
+	return -1;
+    } elsif ($a_index == $b_index) {
+	return 0;
+    } else {
+	return 1;
     }
 }
 
@@ -39,39 +52,77 @@ sub trim {
     return $s;
 }
 
+sub alpha_output {
+    my ($hashref, $filename) = (@_);
+
+    open(my $file, '>', "$filename") or die "$P: $filename: open failed - $!\n";
+    foreach my $key (sort by_category keys %$hashref) {
+	if ($key eq " ") {
+	    chomp $$hashref{$key};
+	    print $file $$hashref{$key};
+	} else {
+	    print $file "\n" . $key . "\n";
+	    foreach my $pattern (sort by_pattern split('\n', %$hashref{$key})) {
+		print $file ($pattern . "\n");
+	    }
+	}
+    }
+    close($file);
+}
+
 sub file_input {
+    my ($hashref, $filename) = (@_);
+
     my $lastline = "";
     my $case = " ";
-    $map{$case} = "";
+    $$hashref{$case} = "";
 
-    while (<>) {
+    open(my $file, '<', "$filename") or die "$P: $filename: open failed - $!\n";
+
+    while (<$file>) {
	my $line = $_;
 
	# Pattern line?
	if ($line =~ m/^([A-Z]):\s*(.*)/) {
	    $line = $1 . ":\t" . trim($2) . "\n";
	    if ($lastline eq "") {
-		$map{$case} = $map{$case} . $line;
+		$$hashref{$case} = $$hashref{$case} . $line;
		next;
	    }
	    $case = trim($lastline);
-	    exists $map{$case} and die "Header '$case' already exists";
-	    $map{$case} = $line;
+	    exists $$hashref{$case} and die "Header '$case' already exists";
+	    $$hashref{$case} = $line;
	    $lastline = "";
	    next;
	}
 
	if ($case eq " ") {
-	    $map{$case} = $map{$case} . $lastline;
+	    $$hashref{$case} = $$hashref{$case} . $lastline;
	    $lastline = $line;
	    next;
	}
	trim($lastline) eq "" or die ("Odd non-pattern line '$lastline' for '$case'");
	$lastline = $line;
     }
-    $map{$case} = $map{$case} . $lastline;
+    $$hashref{$case} = $$hashref{$case} . $lastline;
+    close($file);
 }
 
-&file_input;
-&alpha_output;
+my %hash;
+my %new_hash;
+
+file_input(\%hash, "MAINTAINERS");
+
+foreach my $type (@ARGV) {
+    foreach my $key (keys %hash) {
+	if ($key =~ /$type/ || $hash{$key} =~ /$type/) {
+	    $new_hash{$key} = $hash{$key};
+	    delete $hash{$key};
+	}
+    }
+}
+
+alpha_output(\%hash, "MAINTAINERS.new");
+alpha_output(\%new_hash, "SECTION.new");
 
 exit(0);


@@ -11,6 +11,8 @@
 #  define __NR_bpf 280
 # elif defined(__sparc__)
 #  define __NR_bpf 349
+# elif defined(__s390__)
+#  define __NR_bpf 351
 # else
 #  error __NR_bpf not defined. libbpf does not support your arch.
 # endif
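The per-arch __NR_bpf values exist so the library can issue the bpf(2) syscall directly when libc offers no wrapper, presumably through a helper along these lines (sys_bpf is a local name used for this sketch):

#define _GNU_SOURCE
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static long sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size)
{
	/* cmd selects BPF_MAP_CREATE, BPF_PROG_LOAD, ...; attr carries args */
	return syscall(__NR_bpf, cmd, attr, size);
}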
