Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Three trivial overlapping conflicts.

Signed-off-by: David S. Miller <davem@davemloft.net>

commit ff24e4980a
@@ -370,11 +370,15 @@ autosuspend the interface's device.  When the usage counter is = 0
 then the interface is considered to be idle, and the kernel may
 autosuspend the device.
 
-Drivers need not be concerned about balancing changes to the usage
-counter; the USB core will undo any remaining "get"s when a driver
-is unbound from its interface.  As a corollary, drivers must not call
-any of the ``usb_autopm_*`` functions after their ``disconnect``
-routine has returned.
+Drivers must be careful to balance their overall changes to the usage
+counter.  Unbalanced "get"s will remain in effect when a driver is
+unbound from its interface, preventing the device from going into
+runtime suspend should the interface be bound to a driver again.  On
+the other hand, drivers are allowed to achieve this balance by calling
+the ``usb_autopm_*`` functions even after their ``disconnect`` routine
+has returned -- say from within a work-queue routine -- provided they
+retain an active reference to the interface (via ``usb_get_intf`` and
+``usb_put_intf``).
 
 Drivers using the async routines are responsible for their own
 synchronization and mutual exclusion.
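The balancing rule in the new wording can be illustrated with a minimal sketch of a driver that defers its final "put" to a work item. The driver structure and function names below are hypothetical; only the `usb_*` calls are real kernel API:

	#include <linux/usb.h>
	#include <linux/workqueue.h>

	/* Hypothetical driver state; not part of the patch above. */
	struct my_dev {
		struct usb_interface *intf;
		struct work_struct put_work;
	};

	/* May run after disconnect() has returned; that is allowed because
	 * we hold a reference on the interface via usb_get_intf(). */
	static void my_deferred_put(struct work_struct *work)
	{
		struct my_dev *dev = container_of(work, struct my_dev, put_work);

		usb_autopm_put_interface(dev->intf);	/* balance an earlier "get" */
		usb_put_intf(dev->intf);		/* drop the interface reference */
	}

	static void my_disconnect(struct usb_interface *intf)
	{
		struct my_dev *dev = usb_get_intfdata(intf);

		dev->intf = usb_get_intf(intf);		/* keep intf valid for the work */
		schedule_work(&dev->put_work);
	}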
@@ -1342,6 +1342,7 @@ tag - INTEGER
 	Default value is 0.
 
 xfrm4_gc_thresh - INTEGER
+	(Obsolete since linux-4.14)
 	The threshold at which we will start garbage collecting for IPv4
 	destination cache entries.  At twice this value the system will
 	refuse new allocations.
@@ -1950,6 +1951,7 @@ echo_ignore_anycast - BOOLEAN
 	Default: 0
 
 xfrm6_gc_thresh - INTEGER
+	(Obsolete since linux-4.14)
 	The threshold at which we will start garbage collecting for IPv6
 	destination cache entries.  At twice this value the system will
 	refuse new allocations.
@@ -132,7 +132,7 @@ version that should be applied. If there is any doubt, the maintainer
 will reply and ask what should be done.
 
 Q: I made changes to only a few patches in a patch series should I resend only those changed?
---------------------------------------------------------------------------------------------
+---------------------------------------------------------------------------------------------
 A: No, please resend the entire patch series and make sure you do number your
 patches such that it is clear this is the latest and greatest set of patches
 that can be applied.
@@ -866,14 +866,14 @@ The intent is that compaction has less work to do in the future and to
 increase the success rate of future high-order allocations such as SLUB
 allocations, THP and hugetlbfs pages.
 
-To make it sensible with respect to the watermark_scale_factor parameter,
-the unit is in fractions of 10,000. The default value of 15,000 means
-that up to 150% of the high watermark will be reclaimed in the event of
-a pageblock being mixed due to fragmentation. The level of reclaim is
-determined by the number of fragmentation events that occurred in the
-recent past. If this value is smaller than a pageblock then a pageblocks
-worth of pages will be reclaimed (e.g. 2MB on 64-bit x86). A boost factor
-of 0 will disable the feature.
+To make it sensible with respect to the watermark_scale_factor
+parameter, the unit is in fractions of 10,000. The default value of
+15,000 on !DISCONTIGMEM configurations means that up to 150% of the high
+watermark will be reclaimed in the event of a pageblock being mixed due
+to fragmentation. The level of reclaim is determined by the number of
+fragmentation events that occurred in the recent past. If this value is
+smaller than a pageblock then a pageblocks worth of pages will be reclaimed
+(e.g. 2MB on 64-bit x86). A boost factor of 0 will disable the feature.
 
 =============================================================
 
Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 1
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Shy Crocodile
 
 # *DOCUMENTATION*
@@ -679,6 +679,7 @@ KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)
 KBUILD_CFLAGS	+= $(call cc-disable-warning, format-truncation)
 KBUILD_CFLAGS	+= $(call cc-disable-warning, format-overflow)
 KBUILD_CFLAGS	+= $(call cc-disable-warning, int-in-bool-context)
+KBUILD_CFLAGS	+= $(call cc-disable-warning, address-of-packed-member)
 
 ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
 KBUILD_CFLAGS	+= -Os
@@ -720,7 +721,6 @@ ifdef CONFIG_CC_IS_CLANG
 KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
 KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
 KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
-KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
 # Quiet clang warning: comparison of unsigned expression < 0 is always false
 KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
 # CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the
@@ -18,8 +18,8 @@
 	model = "snps,hsdk";
 	compatible = "snps,hsdk";
 
-	#address-cells = <1>;
-	#size-cells = <1>;
+	#address-cells = <2>;
+	#size-cells = <2>;
 
 	chosen {
 		bootargs = "earlycon=uart8250,mmio32,0xf0005000,115200n8 console=ttyS0,115200n8 debug print-fatal-signals=1";
@@ -105,7 +105,7 @@
 		#size-cells = <1>;
 		interrupt-parent = <&idu_intc>;
 
-		ranges = <0x00000000 0xf0000000 0x10000000>;
+		ranges = <0x00000000 0x0 0xf0000000 0x10000000>;
 
 		cgu_rst: reset-controller@8a0 {
 			compatible = "snps,hsdk-reset";
@@ -269,9 +269,10 @@
 	};
 
 	memory@80000000 {
-		#address-cells = <1>;
-		#size-cells = <1>;
+		#address-cells = <2>;
+		#size-cells = <2>;
 		device_type = "memory";
-		reg = <0x80000000 0x40000000>;	/* 1 GiB */
+		reg = <0x0 0x80000000 0x0 0x40000000>;	/* 1 GB lowmem */
+		/* 0x1 0x00000000 0x0 0x40000000>;	1 GB highmem */
 	};
 };
@@ -30,10 +30,10 @@
 
 #else
 
-.macro PREALLOC_INSTR
+.macro PREALLOC_INSTR	reg, off
 .endm
 
-.macro PREFETCHW_INSTR
+.macro PREFETCHW_INSTR	reg, off
 .endm
 
 #endif
@@ -113,10 +113,24 @@ static void read_decode_cache_bcr_arcv2(int cpu)
 	}
 
 	READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
-	if (cbcr.c)
+	if (cbcr.c) {
 		ioc_exists = 1;
-	else
+
+		/*
+		 * As for today we don't support both IOC and ZONE_HIGHMEM enabled
+		 * simultaneously. This happens because as of today IOC aperture covers
+		 * only ZONE_NORMAL (low mem) and any dma transactions outside this
+		 * region won't be HW coherent.
+		 * If we want to use both IOC and ZONE_HIGHMEM we can use
+		 * bounce_buffer to handle dma transactions to HIGHMEM.
+		 * Also it is possible to modify dma_direct cache ops or increase IOC
+		 * aperture size if we are planning to use HIGHMEM without PAE.
+		 */
+		if (IS_ENABLED(CONFIG_HIGHMEM) || is_pae40_enabled())
			ioc_enable = 0;
+	} else {
+		ioc_enable = 0;
+	}
 
 	/* HS 2.0 didn't have AUX_VOL */
 	if (cpuinfo_arc700[cpu].core.family > 0x51) {
@@ -1158,19 +1172,6 @@ noinline void __init arc_ioc_setup(void)
 	if (!ioc_enable)
 		return;
 
-	/*
-	 * As for today we don't support both IOC and ZONE_HIGHMEM enabled
-	 * simultaneously. This happens because as of today IOC aperture covers
-	 * only ZONE_NORMAL (low mem) and any dma transactions outside this
-	 * region won't be HW coherent.
-	 * If we want to use both IOC and ZONE_HIGHMEM we can use
-	 * bounce_buffer to handle dma transactions to HIGHMEM.
-	 * Also it is possible to modify dma_direct cache ops or increase IOC
-	 * aperture size if we are planning to use HIGHMEM without PAE.
-	 */
-	if (IS_ENABLED(CONFIG_HIGHMEM))
-		panic("IOC and HIGHMEM can't be used simultaneously");
-
 	/* Flush + invalidate + disable L1 dcache */
 	__dc_disable();
 
@@ -73,7 +73,7 @@ config ARM
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU
 	select HAVE_EXIT_THREAD
 	select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
-	select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL
+	select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL && !CC_IS_CLANG
 	select HAVE_FUNCTION_TRACER if !XIP_KERNEL
 	select HAVE_GCC_PLUGINS
 	select HAVE_HW_BREAKPOINT if PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)
@@ -47,8 +47,8 @@ config DEBUG_WX
 
 choice
 	prompt "Choose kernel unwinder"
-	default UNWINDER_ARM if AEABI && !FUNCTION_GRAPH_TRACER
-	default UNWINDER_FRAME_POINTER if !AEABI || FUNCTION_GRAPH_TRACER
+	default UNWINDER_ARM if AEABI
+	default UNWINDER_FRAME_POINTER if !AEABI
 	help
 	  This determines which method will be used for unwinding kernel stack
 	  traces for panics, oopses, bugs, warnings, perf, /proc/<pid>/stack,
@@ -65,7 +65,7 @@ config UNWINDER_FRAME_POINTER
 
 config UNWINDER_ARM
 	bool "ARM EABI stack unwinder"
-	depends on AEABI
+	depends on AEABI && !FUNCTION_GRAPH_TRACER
 	select ARM_UNWIND
 	help
 	  This option enables stack unwinding support in the kernel
@@ -1438,7 +1438,21 @@ ENTRY(efi_stub_entry)
 
 		@ Preserve return value of efi_entry() in r4
 		mov	r4, r0
-		bl	cache_clean_flush
+
+		@ our cache maintenance code relies on CP15 barrier instructions
+		@ but since we arrived here with the MMU and caches configured
+		@ by UEFI, we must check that the CP15BEN bit is set in SCTLR.
+		@ Note that this bit is RAO/WI on v6 and earlier, so the ISB in
+		@ the enable path will be executed on v7+ only.
+		mrc	p15, 0, r1, c1, c0, 0	@ read SCTLR
+		tst	r1, #(1 << 5)		@ CP15BEN bit set?
+		bne	0f
+		orr	r1, r1, #(1 << 5)	@ CP15 barrier instructions
+		mcr	p15, 0, r1, c1, c0, 0	@ write SCTLR
+ ARM(		.inst	0xf57ff06f		@ v7+ isb	)
+ THUMB(		isb						)
+
+0:		bl	cache_clean_flush
 		bl	cache_off
 
 		@ Set parameters for booting zImage according to boot protocol
@@ -133,9 +133,9 @@ __secondary_data:
 */
	.text
__after_proc_init:
-#ifdef CONFIG_ARM_MPU
M_CLASS(movw	r12, #:lower16:BASEADDR_V7M_SCB)
M_CLASS(movt	r12, #:upper16:BASEADDR_V7M_SCB)
+#ifdef CONFIG_ARM_MPU
M_CLASS(ldr	r3, [r12, 0x50])
AR_CLASS(mrc	p15, 0, r3, c0, c1, 4)          @ Read ID_MMFR0
	and	r3, r3, #(MMFR0_PMSA)           @ PMSA field
@@ -103,10 +103,15 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 		 * to be revisited if support for multiple ftrace entry points
 		 * is added in the future, but for now, the pr_err() below
 		 * deals with a theoretical issue only.
+		 *
+		 * Note that PLTs are place relative, and plt_entries_equal()
+		 * checks whether they point to the same target. Here, we need
+		 * to check if the actual opcodes are in fact identical,
+		 * regardless of the offset in memory so use memcmp() instead.
 		 */
 		trampoline = get_plt_entry(addr, mod->arch.ftrace_trampoline);
-		if (!plt_entries_equal(mod->arch.ftrace_trampoline,
-				       &trampoline)) {
+		if (memcmp(mod->arch.ftrace_trampoline, &trampoline,
+			   sizeof(trampoline))) {
 			if (plt_entry_is_initialized(mod->arch.ftrace_trampoline)) {
 				pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
 				return -EINVAL;
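The distinction the new comment draws, same branch target versus identical opcodes, can be sketched in a few lines. `struct rel_entry` and both helpers below are illustrative only, not the arm64 `plt_entry` API:

	#include <linux/string.h>
	#include <linux/types.h>

	/* A place-relative entry encodes its target as an offset from its
	 * own address (hypothetical simplification of a PLT entry). */
	struct rel_entry {
		s32 offset;
	};

	/* "Same target": what a plt_entries_equal()-style check answers. */
	static bool same_target(const struct rel_entry *a, const struct rel_entry *b)
	{
		return (uintptr_t)a + a->offset == (uintptr_t)b + b->offset;
	}

	/* "Identical opcodes": what the fix switches to via memcmp(). Two
	 * entries at different addresses can agree on the target while
	 * holding different bytes, and vice versa. */
	static bool same_bytes(const struct rel_entry *a, const struct rel_entry *b)
	{
		return memcmp(a, b, sizeof(*a)) == 0;
	}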
@@ -363,7 +363,7 @@ void __init arm64_memblock_init(void)
 		 * Otherwise, this is a no-op
 		 */
 		u64 base = phys_initrd_start & PAGE_MASK;
-		u64 size = PAGE_ALIGN(phys_initrd_size);
+		u64 size = PAGE_ALIGN(phys_initrd_start + phys_initrd_size) - base;
 
 		/*
 		 * We can only add back the initrd memory if we don't end up
@@ -186,7 +186,8 @@ enum which_ebpf_reg {
 * separate frame pointer, so BPF_REG_10 relative accesses are
 * adjusted to be $sp relative.
 */
-int ebpf_to_mips_reg(struct jit_ctx *ctx, const struct bpf_insn *insn,
+static int ebpf_to_mips_reg(struct jit_ctx *ctx,
+			    const struct bpf_insn *insn,
 		     enum which_ebpf_reg w)
{
	int ebpf_reg = (w == src_reg || w == src_reg_no_fp) ?
@@ -266,6 +266,7 @@ CONFIG_UDF_FS=m
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
+CONFIG_HUGETLBFS=y
 # CONFIG_MISC_FILESYSTEMS is not set
 # CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_NLS=y
@@ -95,28 +95,15 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
 		unsigned long entries, unsigned long dev_hpa,
 		struct mm_iommu_table_group_mem_t **pmem)
 {
-	struct mm_iommu_table_group_mem_t *mem;
-	long i, ret, locked_entries = 0;
+	struct mm_iommu_table_group_mem_t *mem, *mem2;
+	long i, ret, locked_entries = 0, pinned = 0;
 	unsigned int pageshift;
-
-	mutex_lock(&mem_list_mutex);
-
-	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list,
-			next) {
-		/* Overlap? */
-		if ((mem->ua < (ua + (entries << PAGE_SHIFT))) &&
-				(ua < (mem->ua +
-				       (mem->entries << PAGE_SHIFT)))) {
-			ret = -EINVAL;
-			goto unlock_exit;
-		}
-
-	}
+	unsigned long entry, chunk;
 
 	if (dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) {
 		ret = mm_iommu_adjust_locked_vm(mm, entries, true);
 		if (ret)
-			goto unlock_exit;
+			return ret;
 
 		locked_entries = entries;
 	}
@@ -148,17 +135,27 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
 	}
 
 	down_read(&mm->mmap_sem);
-	ret = get_user_pages_longterm(ua, entries, FOLL_WRITE, mem->hpages, NULL);
-	up_read(&mm->mmap_sem);
-	if (ret != entries) {
-		/* free the reference taken */
-		for (i = 0; i < ret; i++)
-			put_page(mem->hpages[i]);
-
-		vfree(mem->hpas);
-		kfree(mem);
+	chunk = (1UL << (PAGE_SHIFT + MAX_ORDER - 1)) /
+			sizeof(struct vm_area_struct *);
+	chunk = min(chunk, entries);
+	for (entry = 0; entry < entries; entry += chunk) {
+		unsigned long n = min(entries - entry, chunk);
+
+		ret = get_user_pages_longterm(ua + (entry << PAGE_SHIFT), n,
+				FOLL_WRITE, mem->hpages + entry, NULL);
+		if (ret == n) {
+			pinned += n;
+			continue;
+		}
+		if (ret > 0)
+			pinned += ret;
+		break;
+	}
+	up_read(&mm->mmap_sem);
+	if (pinned != entries) {
+		if (!ret)
			ret = -EFAULT;
-		goto unlock_exit;
+		goto free_exit;
 	}
 
 	pageshift = PAGE_SHIFT;
@@ -183,21 +180,43 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
 	}
 
 good_exit:
-	ret = 0;
 	atomic64_set(&mem->mapped, 1);
 	mem->used = 1;
 	mem->ua = ua;
 	mem->entries = entries;
-	*pmem = mem;
+
+	mutex_lock(&mem_list_mutex);
+
+	list_for_each_entry_rcu(mem2, &mm->context.iommu_group_mem_list, next) {
+		/* Overlap? */
+		if ((mem2->ua < (ua + (entries << PAGE_SHIFT))) &&
+				(ua < (mem2->ua +
+				       (mem2->entries << PAGE_SHIFT)))) {
+			ret = -EINVAL;
+			mutex_unlock(&mem_list_mutex);
+			goto free_exit;
+		}
+	}
 
 	list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);
 
-unlock_exit:
-	if (locked_entries && ret)
-		mm_iommu_adjust_locked_vm(mm, locked_entries, false);
-
 	mutex_unlock(&mem_list_mutex);
 
+	*pmem = mem;
+
+	return 0;
+
+free_exit:
+	/* free the reference taken */
+	for (i = 0; i < pinned; i++)
+		put_page(mem->hpages[i]);
+
+	vfree(mem->hpas);
+	kfree(mem);
+
+unlock_exit:
+	mm_iommu_adjust_locked_vm(mm, locked_entries, false);
+
 	return ret;
 }
 
@@ -266,7 +285,7 @@ static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
 long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
 {
 	long ret = 0;
-	unsigned long entries, dev_hpa;
+	unsigned long unlock_entries = 0;
 
 	mutex_lock(&mem_list_mutex);
 
@@ -287,17 +306,17 @@ long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
 		goto unlock_exit;
 	}
 
-	/* @mapped became 0 so now mappings are disabled, release the region */
-	entries = mem->entries;
-	dev_hpa = mem->dev_hpa;
-	mm_iommu_release(mem);
+	if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
+		unlock_entries = mem->entries;
 
-	if (dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
-		mm_iommu_adjust_locked_vm(mm, entries, false);
+	/* @mapped became 0 so now mappings are disabled, release the region */
+	mm_iommu_release(mem);
 
 unlock_exit:
 	mutex_unlock(&mem_list_mutex);
 
+	mm_iommu_adjust_locked_vm(mm, unlock_entries, false);
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(mm_iommu_put);
@@ -324,7 +324,7 @@ config ARCH_ENABLE_SPLIT_PMD_PTLOCK
 
 config PPC_RADIX_MMU
 	bool "Radix MMU Support"
-	depends on PPC_BOOK3S_64
+	depends on PPC_BOOK3S_64 && HUGETLB_PAGE
 	select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
 	default y
 	help
@@ -352,7 +352,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
 		boot_params->hdr.loadflags &= ~KASLR_FLAG;
 
 	/* Save RSDP address for later use. */
-	boot_params->acpi_rsdp_addr = get_rsdp_addr();
+	/* boot_params->acpi_rsdp_addr = get_rsdp_addr(); */
 
 	sanitize_boot_params(boot_params);
 
@@ -29,12 +29,12 @@ extern int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz);
 extern time_t __vdso_time(time_t *t);
 
 #ifdef CONFIG_PARAVIRT_CLOCK
-extern u8 pvclock_page
+extern u8 pvclock_page[PAGE_SIZE]
 	__attribute__((visibility("hidden")));
 #endif
 
 #ifdef CONFIG_HYPERV_TSCPAGE
-extern u8 hvclock_page
+extern u8 hvclock_page[PAGE_SIZE]
 	__attribute__((visibility("hidden")));
 #endif
 
@@ -76,15 +76,15 @@
 *		Scope: Package (physical package)
 *	MSR_PKG_C8_RESIDENCY: Package C8 Residency Counter.
 *		perf code: 0x04
-*		Available model: HSW ULT,CNL
+*		Available model: HSW ULT,KBL,CNL
 *		Scope: Package (physical package)
 *	MSR_PKG_C9_RESIDENCY: Package C9 Residency Counter.
 *		perf code: 0x05
-*		Available model: HSW ULT,CNL
+*		Available model: HSW ULT,KBL,CNL
 *		Scope: Package (physical package)
 *	MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
 *		perf code: 0x06
-*		Available model: HSW ULT,GLM,CNL
+*		Available model: HSW ULT,KBL,GLM,CNL
 *		Scope: Package (physical package)
 *
 */
@@ -566,8 +566,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
 	X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates),
 	X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_X, snb_cstates),
 
-	X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE,  snb_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, snb_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE,  hswult_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, hswult_cstates),
 
 	X86_CSTATES_MODEL(INTEL_FAM6_CANNONLAKE_MOBILE, cnl_cstates),
 
@@ -46,7 +46,7 @@ void ptdump_walk_user_pgd_level_checkwx(void);
 */
 extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
 	__visible;
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+#define ZERO_PAGE(vaddr) ((void)(vaddr),virt_to_page(empty_zero_page))
 
 extern spinlock_t pgd_lock;
 extern struct list_head pgd_list;
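The `((void)(vaddr), ...)` change uses a C comma expression so the macro argument counts as used (silencing unused-variable warnings at call sites) without changing the result. A standalone sketch of the idiom, not kernel code:

	#include <stdio.h>

	static int the_only_page;

	/* Evaluate 'vaddr', discard it, then yield the constant result -
	 * mirroring the shape of the ZERO_PAGE() change above. */
	#define LOOKUP_PAGE(vaddr) ((void)(vaddr), &the_only_page)

	int main(void)
	{
		unsigned long addr = 0x1000;	/* only "used" through the macro */

		printf("%p\n", (void *)LOOKUP_PAGE(addr));
		return 0;
	}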
@@ -5,6 +5,7 @@
 #include <linux/memblock.h>
 #include <linux/swapfile.h>
 #include <linux/swapops.h>
+#include <linux/kmemleak.h>
 
 #include <asm/set_memory.h>
 #include <asm/e820/api.h>
@@ -766,6 +767,11 @@ void free_init_pages(const char *what, unsigned long begin, unsigned long end)
 	if (debug_pagealloc_enabled()) {
 		pr_info("debug: unmapping init [mem %#010lx-%#010lx]\n",
 			begin, end - 1);
+		/*
+		 * Inform kmemleak about the hole in the memory since the
+		 * corresponding pages will be unmapped.
+		 */
+		kmemleak_free_part((void *)begin, end - begin);
 		set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
 	} else {
 		/*
@@ -212,8 +212,12 @@ static void crypt_done(struct crypto_async_request *areq, int err)
 {
 	struct skcipher_request *req = areq->data;
 
-	if (!err)
+	if (!err) {
+		struct rctx *rctx = skcipher_request_ctx(req);
+
+		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 		err = xor_tweak_post(req);
+	}
 
 	skcipher_request_complete(req, err);
 }
@@ -137,8 +137,12 @@ static void crypt_done(struct crypto_async_request *areq, int err)
 {
 	struct skcipher_request *req = areq->data;
 
-	if (!err)
+	if (!err) {
+		struct rctx *rctx = skcipher_request_ctx(req);
+
+		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 		err = xor_tweak_post(req);
+	}
 
 	skcipher_request_complete(req, err);
 }
@@ -81,12 +81,8 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
 
 	ACPI_FUNCTION_TRACE(ev_enable_gpe);
 
-	/* Clear the GPE status */
-
-	status = acpi_hw_clear_gpe(gpe_event_info);
-	if (ACPI_FAILURE(status))
-		return_ACPI_STATUS(status);
-
 	/* Enable the requested GPE */
 
 	status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
 	return_ACPI_STATUS(status);
 }
@@ -774,18 +774,18 @@ struct zram_work {
 	struct zram *zram;
 	unsigned long entry;
 	struct bio *bio;
+	struct bio_vec bvec;
 };
 
 #if PAGE_SIZE != 4096
 static void zram_sync_read(struct work_struct *work)
 {
-	struct bio_vec bvec;
 	struct zram_work *zw = container_of(work, struct zram_work, work);
 	struct zram *zram = zw->zram;
 	unsigned long entry = zw->entry;
 	struct bio *bio = zw->bio;
 
-	read_from_bdev_async(zram, &bvec, entry, bio);
+	read_from_bdev_async(zram, &zw->bvec, entry, bio);
 }
 
 /*
@@ -798,6 +798,7 @@ static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
 {
 	struct zram_work work;
 
+	work.bvec = *bvec;
 	work.zram = zram;
 	work.entry = entry;
 	work.bio = bio;
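The zram fix above illustrates a general lifetime rule: a work item must not depend on pointers into another stack frame, because that frame may be gone by the time the work runs. A hedged sketch of the pattern, with hypothetical names throughout:

	#include <linux/workqueue.h>

	struct payload {
		int sector;
	};

	struct io_work {
		struct work_struct work;
		struct payload data;	/* owned copy, valid until the work runs */
	};

	static void consume(struct payload *p)
	{
		/* ... */
	}

	static void io_work_fn(struct work_struct *work)
	{
		struct io_work *iw = container_of(work, struct io_work, work);

		consume(&iw->data);	/* safe: the data lives inside the work item */
	}

	static void submit(struct io_work *iw, const struct payload *p)
	{
		iw->data = *p;		/* copy, don't keep the caller's pointer */
		INIT_WORK(&iw->work, io_work_fn);
		schedule_work(&iw->work);
	}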
@@ -671,7 +671,7 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_slave_sg(
 	d = bcm2835_dma_create_cb_chain(chan, direction, false,
 					info, extra,
 					frames, src, dst, 0, 0,
-					GFP_KERNEL);
+					GFP_NOWAIT);
 	if (!d)
 		return NULL;
 
@@ -253,7 +253,7 @@ static void mtk_cqdma_start(struct mtk_cqdma_pchan *pc,
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 	mtk_dma_set(pc, MTK_CQDMA_DST2, cvd->dest >> MTK_CQDMA_ADDR2_SHFIT);
 #else
-	mtk_dma_set(pc, MTK_CQDMA_SRC2, 0);
+	mtk_dma_set(pc, MTK_CQDMA_DST2, 0);
 #endif
 
 	/* setup the length */
@@ -1282,6 +1282,9 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
 	enum dma_status status;
 	unsigned int residue = 0;
 	unsigned int dptr = 0;
+	unsigned int chcrb;
+	unsigned int tcrb;
+	unsigned int i;
 
 	if (!desc)
 		return 0;
@@ -1329,6 +1332,24 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
 		return 0;
 	}
 
+	/*
+	 * We need to read two registers.
+	 * Make sure the control register does not skip to next chunk
+	 * while reading the counter.
+	 * Trying it 3 times should be enough: Initial read, retry, retry
+	 * for the paranoid.
+	 */
+	for (i = 0; i < 3; i++) {
+		chcrb = rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
+			RCAR_DMACHCRB_DPTR_MASK;
+		tcrb = rcar_dmac_chan_read(chan, RCAR_DMATCRB);
+		/* Still the same? */
+		if (chcrb == (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
+			      RCAR_DMACHCRB_DPTR_MASK))
+			break;
+	}
+	WARN_ONCE(i >= 3, "residue might be not continuous!");
+
 	/*
 	 * In descriptor mode the descriptor running pointer is not maintained
 	 * by the interrupt handler, find the running descriptor from the
@@ -1336,8 +1357,7 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
 	 * mode just use the running descriptor pointer.
 	 */
 	if (desc->hwdescs.use) {
-		dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
-			RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
+		dptr = chcrb >> RCAR_DMACHCRB_DPTR_SHIFT;
 		if (dptr == 0)
 			dptr = desc->nchunks;
 		dptr--;
@@ -1355,7 +1375,7 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
 	}
 
 	/* Add the residue for the current chunk. */
-	residue += rcar_dmac_chan_read(chan, RCAR_DMATCRB) << desc->xfer_shift;
+	residue += tcrb << desc->xfer_shift;
 
 	return residue;
 }
@@ -1368,6 +1388,7 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
 	enum dma_status status;
 	unsigned long flags;
 	unsigned int residue;
+	bool cyclic;
 
 	status = dma_cookie_status(chan, cookie, txstate);
 	if (status == DMA_COMPLETE || !txstate)
@@ -1375,10 +1396,11 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
 
 	spin_lock_irqsave(&rchan->lock, flags);
 	residue = rcar_dmac_chan_get_residue(rchan, cookie);
+	cyclic = rchan->desc.running ? rchan->desc.running->cyclic : false;
 	spin_unlock_irqrestore(&rchan->lock, flags);
 
 	/* if there's no residue, the cookie is complete */
-	if (!residue)
+	if (!residue && !cyclic)
 		return DMA_COMPLETE;
 
 	dma_set_residue(txstate, residue);
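The retry loop added above is an instance of a common MMIO technique: when two registers must be sampled as a coherent pair, re-read the first and retry if it moved. A generic sketch of the idea; `reg_read()` and the register names are hypothetical, not driver API:

	#include <linux/bug.h>
	#include <linux/types.h>

	static u32 reg_read(int reg);		/* hypothetical MMIO accessor */

	#define REG_CTRL	0
	#define REG_COUNT	1
	#define CTRL_PTR_MASK	0xff

	static void read_pair_consistently(u32 *ctrl, u32 *count)
	{
		unsigned int i;

		for (i = 0; i < 3; i++) {
			*ctrl = reg_read(REG_CTRL) & CTRL_PTR_MASK;
			*count = reg_read(REG_COUNT);
			/* If the control pointer did not advance meanwhile,
			 * the two samples describe the same chunk. */
			if (*ctrl == (reg_read(REG_CTRL) & CTRL_PTR_MASK))
				return;
		}
		WARN_ONCE(1, "register pair kept changing");
	}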
@@ -414,6 +414,7 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
 		irq_set_handler_locked(data, handle_edge_irq);
 		break;
 	case IRQ_TYPE_EDGE_BOTH:
+		sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 0);
 		sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 1);
 		irq_set_handler_locked(data, handle_edge_irq);
 		break;
@@ -1379,7 +1379,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
 
 	status = gpiochip_add_irqchip(chip, lock_key, request_key);
 	if (status)
-		goto err_remove_chip;
+		goto err_free_gpiochip_mask;
 
 	status = of_gpiochip_add(chip);
 	if (status)
@@ -1387,7 +1387,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
 
 	status = gpiochip_init_valid_mask(chip);
 	if (status)
-		goto err_remove_chip;
+		goto err_remove_of_chip;
 
 	for (i = 0; i < chip->ngpio; i++) {
 		struct gpio_desc *desc = &gdev->descs[i];
@@ -1415,14 +1415,18 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
 	if (gpiolib_initialized) {
 		status = gpiochip_setup_dev(gdev);
 		if (status)
-			goto err_remove_chip;
+			goto err_remove_acpi_chip;
 	}
 	return 0;
 
-err_remove_chip:
+err_remove_acpi_chip:
 	acpi_gpiochip_remove(chip);
+err_remove_of_chip:
 	gpiochip_free_hogs(chip);
 	of_gpiochip_remove(chip);
+err_remove_chip:
+	gpiochip_irqchip_remove(chip);
+err_free_gpiochip_mask:
 	gpiochip_free_valid_mask(chip);
 err_remove_irqchip_mask:
 	gpiochip_irqchip_free_valid_mask(chip);
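The relabelled gotos above restore the kernel's usual error-unwind ladder: each failure jumps to a label that tears down exactly what succeeded so far, in reverse order. A minimal sketch of the idiom, with all names hypothetical:

	struct dev_ctx;

	static int setup_a(struct dev_ctx *c);
	static int setup_b(struct dev_ctx *c);
	static int setup_c(struct dev_ctx *c);
	static void teardown_a(struct dev_ctx *c);
	static void teardown_b(struct dev_ctx *c);

	static int bring_up(struct dev_ctx *ctx)
	{
		int err;

		err = setup_a(ctx);
		if (err)
			return err;

		err = setup_b(ctx);
		if (err)
			goto err_teardown_a;

		err = setup_c(ctx);
		if (err)
			goto err_teardown_b;

		return 0;

	err_teardown_b:	/* undo only the steps that completed */
		teardown_b(ctx);
	err_teardown_a:
		teardown_a(ctx);
		return err;
	}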
@@ -1046,6 +1046,10 @@ static bool dw_hdmi_support_scdc(struct dw_hdmi *hdmi)
 	if (hdmi->version < 0x200a)
 		return false;
 
+	/* Disable if no DDC bus */
+	if (!hdmi->ddc)
+		return false;
+
 	/* Disable if SCDC is not supported, or if an HF-VSDB block is absent */
 	if (!display->hdmi.scdc.supported ||
 	    !display->hdmi.scdc.scrambling.supported)
@@ -1684,13 +1688,13 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
 			 * Source Devices compliant shall set the
 			 * Source Version = 1.
 			 */
-			drm_scdc_readb(&hdmi->i2c->adap, SCDC_SINK_VERSION,
+			drm_scdc_readb(hdmi->ddc, SCDC_SINK_VERSION,
 				       &bytes);
-			drm_scdc_writeb(&hdmi->i2c->adap, SCDC_SOURCE_VERSION,
+			drm_scdc_writeb(hdmi->ddc, SCDC_SOURCE_VERSION,
 				min_t(u8, bytes, SCDC_MIN_SOURCE_VERSION));
 
 			/* Enabled Scrambling in the Sink */
-			drm_scdc_set_scrambling(&hdmi->i2c->adap, 1);
+			drm_scdc_set_scrambling(hdmi->ddc, 1);
 
 			/*
 			 * To activate the scrambler feature, you must ensure
@@ -1706,7 +1710,7 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
 			hdmi_writeb(hdmi, 0, HDMI_FC_SCRAMBLER_CTRL);
 			hdmi_writeb(hdmi, (u8)~HDMI_MC_SWRSTZ_TMDSSWRST_REQ,
 				    HDMI_MC_SWRSTZ);
-			drm_scdc_set_scrambling(&hdmi->i2c->adap, 0);
+			drm_scdc_set_scrambling(hdmi->ddc, 0);
 		}
 	}
 
@@ -1800,6 +1804,8 @@ static void dw_hdmi_clear_overflow(struct dw_hdmi *hdmi)
 	 * iteration for others.
 	 * The Amlogic Meson GX SoCs (v2.01a) have been identified as needing
 	 * the workaround with a single iteration.
+	 * The Rockchip RK3288 SoC (v2.00a) and RK3328/RK3399 SoCs (v2.11a) have
+	 * been identified as needing the workaround with a single iteration.
 	 */
 
 	switch (hdmi->version) {
@@ -1808,7 +1814,9 @@ static void dw_hdmi_clear_overflow(struct dw_hdmi *hdmi)
 		break;
 	case 0x131a:
 	case 0x132a:
+	case 0x200a:
 	case 0x201a:
+	case 0x211a:
 	case 0x212a:
 		count = 1;
 		break;
@@ -3862,14 +3862,16 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
 		ret = intel_hdmi_compute_config(encoder, pipe_config, conn_state);
 	else
 		ret = intel_dp_compute_config(encoder, pipe_config, conn_state);
+	if (ret)
+		return ret;
 
-	if (IS_GEN9_LP(dev_priv) && ret)
+	if (IS_GEN9_LP(dev_priv))
 		pipe_config->lane_lat_optim_mask =
 			bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
 
 	intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
 
-	return ret;
+	return 0;
 
 }
 
@@ -1886,6 +1886,9 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
 	int pipe_bpp;
 	int ret;
 
+	pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
+		intel_dp_supports_fec(intel_dp, pipe_config);
+
 	if (!intel_dp_supports_dsc(intel_dp, pipe_config))
 		return -EINVAL;
 
@@ -2116,9 +2119,6 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
 		return -EINVAL;
 
-	pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
-		intel_dp_supports_fec(intel_dp, pipe_config);
-
 	ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
 	if (ret < 0)
 		return ret;
@@ -71,7 +71,7 @@ static void ipu_crtc_disable_planes(struct ipu_crtc *ipu_crtc,
 	if (disable_partial)
 		ipu_plane_disable(ipu_crtc->plane[1], true);
 	if (disable_full)
-		ipu_plane_disable(ipu_crtc->plane[0], false);
+		ipu_plane_disable(ipu_crtc->plane[0], true);
 }
 
 static void ipu_crtc_atomic_disable(struct drm_crtc *crtc,
@@ -366,10 +366,9 @@ void drm_sched_increase_karma(struct drm_sched_job *bad)
 EXPORT_SYMBOL(drm_sched_increase_karma);
 
 /**
- * drm_sched_hw_job_reset - stop the scheduler if it contains the bad job
+ * drm_sched_stop - stop the scheduler
 *
 * @sched: scheduler instance
- * @bad: bad scheduler job
 *
 */
 void drm_sched_stop(struct drm_gpu_scheduler *sched)
@@ -16,6 +16,7 @@
 #include <linux/of_reserved_mem.h>
 
 #include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
@@ -85,6 +86,8 @@ static int sun4i_drv_bind(struct device *dev)
 		ret = -ENOMEM;
 		goto free_drm;
 	}
+
+	dev_set_drvdata(dev, drm);
 	drm->dev_private = drv;
 	INIT_LIST_HEAD(&drv->frontend_list);
 	INIT_LIST_HEAD(&drv->engine_list);
@@ -144,8 +147,12 @@ static void sun4i_drv_unbind(struct device *dev)
 
 	drm_dev_unregister(drm);
 	drm_kms_helper_poll_fini(drm);
+	drm_atomic_helper_shutdown(drm);
 	drm_mode_config_cleanup(drm);
+
+	component_unbind_all(dev, NULL);
 	of_reserved_mem_device_release(dev);
+
 	drm_dev_put(drm);
 }
 
@@ -395,6 +402,8 @@ static int sun4i_drv_probe(struct platform_device *pdev)
 
 static int sun4i_drv_remove(struct platform_device *pdev)
 {
+	component_master_del(&pdev->dev, &sun4i_drv_master_ops);
+
 	return 0;
 }
 
@@ -49,9 +49,8 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj);
 * ttm_global_mutex - protecting the global BO state
 */
 DEFINE_MUTEX(ttm_global_mutex);
-struct ttm_bo_global ttm_bo_glob = {
-	.use_count = 0
-};
+unsigned ttm_bo_glob_use_count;
+struct ttm_bo_global ttm_bo_glob;
 
 static struct attribute ttm_bo_count = {
 	.name = "bo_count",
@@ -1531,12 +1530,13 @@ static void ttm_bo_global_release(void)
 	struct ttm_bo_global *glob = &ttm_bo_glob;
 
 	mutex_lock(&ttm_global_mutex);
-	if (--glob->use_count > 0)
+	if (--ttm_bo_glob_use_count > 0)
 		goto out;
 
 	kobject_del(&glob->kobj);
 	kobject_put(&glob->kobj);
 	ttm_mem_global_release(&ttm_mem_glob);
+	memset(glob, 0, sizeof(*glob));
 out:
 	mutex_unlock(&ttm_global_mutex);
 }
@@ -1548,7 +1548,7 @@ static int ttm_bo_global_init(void)
 	unsigned i;
 
 	mutex_lock(&ttm_global_mutex);
-	if (++glob->use_count > 1)
+	if (++ttm_bo_glob_use_count > 1)
 		goto out;
 
 	ret = ttm_mem_global_init(&ttm_mem_glob);
@@ -461,8 +461,8 @@ out_no_zone:
 
 void ttm_mem_global_release(struct ttm_mem_global *glob)
 {
-	unsigned int i;
 	struct ttm_mem_zone *zone;
+	unsigned int i;
 
 	/* let the page allocator first stop the shrink work. */
 	ttm_page_alloc_fini();
@@ -478,6 +478,7 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
 	}
 	kobject_del(&glob->kobj);
 	kobject_put(&glob->kobj);
+	memset(glob, 0, sizeof(*glob));
 }
 
 static void ttm_check_swapping(struct ttm_mem_global *glob)
@@ -1042,7 +1042,7 @@ static void
 vc4_crtc_reset(struct drm_crtc *crtc)
 {
 	if (crtc->state)
-		__drm_atomic_helper_crtc_destroy_state(crtc->state);
+		vc4_crtc_destroy_state(crtc, crtc->state);
 
 	crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL);
 	if (crtc->state)
@@ -545,30 +545,14 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv)
 	dev_priv->initial_height = height;
 }
 
-/**
- * vmw_assume_iommu - Figure out whether coherent dma-remapping might be
- * taking place.
- * @dev: Pointer to the struct drm_device.
- *
- * Return: true if iommu present, false otherwise.
- */
-static bool vmw_assume_iommu(struct drm_device *dev)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev->dev);
-
-	return !dma_is_direct(ops) && ops &&
-		ops->map_page != dma_direct_map_page;
-}
-
 /**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * system.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
- * This functions tries to determine the IOMMU setup and what actions
- * need to be taken by the driver to make system pages visible to the
- * device.
+ * This functions tries to determine what actions need to be taken by the
+ * driver to make system pages visible to the device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
@@ -578,23 +562,16 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
 	static const char *names[vmw_dma_map_max] = {
 		[vmw_dma_phys] = "Using physical TTM page addresses.",
 		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
-		[vmw_dma_map_populate] = "Keeping DMA mappings.",
+		[vmw_dma_map_populate] = "Caching DMA mappings.",
 		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
 
 	if (vmw_force_coherent)
 		dev_priv->map_mode = vmw_dma_alloc_coherent;
-	else if (vmw_assume_iommu(dev_priv->dev))
-		dev_priv->map_mode = vmw_dma_map_populate;
-	else if (!vmw_force_iommu)
-		dev_priv->map_mode = vmw_dma_phys;
-	else if (IS_ENABLED(CONFIG_SWIOTLB) && swiotlb_nr_tbl())
-		dev_priv->map_mode = vmw_dma_alloc_coherent;
+	else if (vmw_restrict_iommu)
+		dev_priv->map_mode = vmw_dma_map_bind;
 	else
 		dev_priv->map_mode = vmw_dma_map_populate;
 
-	if (dev_priv->map_mode == vmw_dma_map_populate && vmw_restrict_iommu)
-		dev_priv->map_mode = vmw_dma_map_bind;
-
 	/* No TTM coherent page pool? FIXME: Ask TTM instead! */
 	if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) &&
 	    (dev_priv->map_mode == vmw_dma_alloc_coherent))
@@ -195,7 +195,8 @@ int ipu_dp_setup_channel(struct ipu_dp *dp,
 		ipu_dp_csc_init(flow, flow->foreground.in_cs, flow->out_cs,
 				DP_COM_CONF_CSC_DEF_BOTH);
 	} else {
-		if (flow->foreground.in_cs == flow->out_cs)
+		if (flow->foreground.in_cs == IPUV3_COLORSPACE_UNKNOWN ||
+		    flow->foreground.in_cs == flow->out_cs)
 			/*
 			 * foreground identical to output, apply color
 			 * conversion on background
@@ -261,6 +262,8 @@ void ipu_dp_disable_channel(struct ipu_dp *dp, bool sync)
 	struct ipu_dp_priv *priv = flow->priv;
 	u32 reg, csc;
 
+	dp->in_cs = IPUV3_COLORSPACE_UNKNOWN;
+
 	if (!dp->foreground)
 		return;
 
@@ -268,8 +271,9 @@ void ipu_dp_disable_channel(struct ipu_dp *dp, bool sync)
 
 	reg = readl(flow->base + DP_COM_CONF);
 	csc = reg & DP_COM_CONF_CSC_DEF_MASK;
-	if (csc == DP_COM_CONF_CSC_DEF_FG)
-		reg &= ~DP_COM_CONF_CSC_DEF_MASK;
+	reg &= ~DP_COM_CONF_CSC_DEF_MASK;
+	if (csc == DP_COM_CONF_CSC_DEF_BOTH || csc == DP_COM_CONF_CSC_DEF_BG)
+		reg |= DP_COM_CONF_CSC_DEF_BG;
 
 	reg &= ~DP_COM_CONF_FG_EN;
 	writel(reg, flow->base + DP_COM_CONF);
@@ -347,6 +351,8 @@ int ipu_dp_init(struct ipu_soc *ipu, struct device *dev, unsigned long base)
 	mutex_init(&priv->mutex);
 
 	for (i = 0; i < IPUV3_NUM_FLOWS; i++) {
+		priv->flow[i].background.in_cs = IPUV3_COLORSPACE_UNKNOWN;
+		priv->flow[i].foreground.in_cs = IPUV3_COLORSPACE_UNKNOWN;
 		priv->flow[i].foreground.foreground = true;
 		priv->flow[i].base = priv->base + ipu_dp_flow_base[i];
 		priv->flow[i].priv = priv;
@ -185,7 +185,7 @@ static int i2c_generic_bus_free(struct i2c_adapter *adap)
|
|||||||
int i2c_generic_scl_recovery(struct i2c_adapter *adap)
|
int i2c_generic_scl_recovery(struct i2c_adapter *adap)
|
||||||
{
|
{
|
||||||
struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
|
struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
|
||||||
int i = 0, scl = 1, ret;
|
int i = 0, scl = 1, ret = 0;
|
||||||
|
|
||||||
if (bri->prepare_recovery)
|
if (bri->prepare_recovery)
|
||||||
bri->prepare_recovery(adap);
|
bri->prepare_recovery(adap);
|
||||||
|
@ -160,6 +160,7 @@ struct ib_uverbs_file {
|
|||||||
|
|
||||||
struct mutex umap_lock;
|
struct mutex umap_lock;
|
||||||
struct list_head umaps;
|
struct list_head umaps;
|
||||||
|
struct page *disassociate_page;
|
||||||
|
|
||||||
struct idr idr;
|
struct idr idr;
|
||||||
/* spinlock protects write access to idr */
|
/* spinlock protects write access to idr */
|
||||||
|
@ -208,6 +208,9 @@ void ib_uverbs_release_file(struct kref *ref)
|
|||||||
kref_put(&file->async_file->ref,
|
kref_put(&file->async_file->ref,
|
||||||
ib_uverbs_release_async_event_file);
|
ib_uverbs_release_async_event_file);
|
||||||
put_device(&file->device->dev);
|
put_device(&file->device->dev);
|
||||||
|
|
||||||
|
if (file->disassociate_page)
|
||||||
|
__free_pages(file->disassociate_page, 0);
|
||||||
kfree(file);
|
kfree(file);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -877,9 +880,50 @@ static void rdma_umap_close(struct vm_area_struct *vma)
|
|||||||
kfree(priv);
|
kfree(priv);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Once the zap_vma_ptes has been called touches to the VMA will come here and
|
||||||
|
* we return a dummy writable zero page for all the pfns.
|
||||||
|
*/
|
||||||
|
static vm_fault_t rdma_umap_fault(struct vm_fault *vmf)
|
||||||
|
{
|
||||||
|
struct ib_uverbs_file *ufile = vmf->vma->vm_file->private_data;
|
||||||
|
struct rdma_umap_priv *priv = vmf->vma->vm_private_data;
|
||||||
|
vm_fault_t ret = 0;
|
||||||
|
|
||||||
|
if (!priv)
|
||||||
|
return VM_FAULT_SIGBUS;
|
||||||
|
|
||||||
|
/* Read only pages can just use the system zero page. */
|
||||||
|
if (!(vmf->vma->vm_flags & (VM_WRITE | VM_MAYWRITE))) {
|
||||||
|
vmf->page = ZERO_PAGE(vmf->address);
|
||||||
|
get_page(vmf->page);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
mutex_lock(&ufile->umap_lock);
|
||||||
|
if (!ufile->disassociate_page)
|
||||||
|
ufile->disassociate_page =
|
||||||
|
alloc_pages(vmf->gfp_mask | __GFP_ZERO, 0);
|
||||||
|
|
||||||
|
if (ufile->disassociate_page) {
|
||||||
|
/*
|
||||||
|
* This VMA is forced to always be shared so this doesn't have
|
||||||
|
* to worry about COW.
|
||||||
|
*/
|
||||||
|
vmf->page = ufile->disassociate_page;
|
||||||
|
get_page(vmf->page);
|
||||||
|
} else {
|
||||||
|
ret = VM_FAULT_SIGBUS;
|
||||||
|
}
|
||||||
|
mutex_unlock(&ufile->umap_lock);
|
||||||
|
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
static const struct vm_operations_struct rdma_umap_ops = {
|
static const struct vm_operations_struct rdma_umap_ops = {
|
||||||
.open = rdma_umap_open,
|
.open = rdma_umap_open,
|
||||||
.close = rdma_umap_close,
|
.close = rdma_umap_close,
|
||||||
|
.fault = rdma_umap_fault,
|
||||||
};
|
};
|
||||||
|
|
||||||
static struct rdma_umap_priv *rdma_user_mmap_pre(struct ib_ucontext *ucontext,
|
static struct rdma_umap_priv *rdma_user_mmap_pre(struct ib_ucontext *ucontext,
|
||||||
@ -889,6 +933,9 @@ static struct rdma_umap_priv *rdma_user_mmap_pre(struct ib_ucontext *ucontext,
|
|||||||
struct ib_uverbs_file *ufile = ucontext->ufile;
|
struct ib_uverbs_file *ufile = ucontext->ufile;
|
||||||
struct rdma_umap_priv *priv;
|
struct rdma_umap_priv *priv;
|
||||||
|
|
||||||
|
if (!(vma->vm_flags & VM_SHARED))
|
||||||
|
return ERR_PTR(-EINVAL);
|
||||||
|
|
||||||
if (vma->vm_end - vma->vm_start != size)
|
if (vma->vm_end - vma->vm_start != size)
|
||||||
return ERR_PTR(-EINVAL);
|
return ERR_PTR(-EINVAL);
|
||||||
|
|
||||||
@ -992,7 +1039,7 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
|
|||||||
* at a time to get the lock ordering right. Typically there
|
* at a time to get the lock ordering right. Typically there
|
||||||
* will only be one mm, so no big deal.
|
* will only be one mm, so no big deal.
|
||||||
*/
|
*/
|
||||||
down_write(&mm->mmap_sem);
|
down_read(&mm->mmap_sem);
|
||||||
if (!mmget_still_valid(mm))
|
if (!mmget_still_valid(mm))
|
||||||
goto skip_mm;
|
goto skip_mm;
|
||||||
mutex_lock(&ufile->umap_lock);
|
mutex_lock(&ufile->umap_lock);
|
||||||
@ -1006,11 +1053,10 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
|
|||||||
|
|
||||||
zap_vma_ptes(vma, vma->vm_start,
|
zap_vma_ptes(vma, vma->vm_start,
|
||||||
vma->vm_end - vma->vm_start);
|
vma->vm_end - vma->vm_start);
|
||||||
vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
|
|
||||||
}
|
}
|
||||||
mutex_unlock(&ufile->umap_lock);
|
mutex_unlock(&ufile->umap_lock);
|
||||||
skip_mm:
|
skip_mm:
|
||||||
up_write(&mm->mmap_sem);
|
up_read(&mm->mmap_sem);
|
||||||
mmput(mm);
|
mmput(mm);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -533,7 +533,7 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
|
|||||||
|
|
||||||
static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
|
static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
|
||||||
{
|
{
|
||||||
if (attr->qp_type == IB_QPT_XRC_TGT)
|
if (attr->qp_type == IB_QPT_XRC_TGT || !attr->cap.max_send_wr)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
return 1;
|
return 1;
|
||||||
|
@ -1119,6 +1119,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
|
|||||||
if (MLX5_CAP_GEN(mdev, qp_packet_based))
|
if (MLX5_CAP_GEN(mdev, qp_packet_based))
|
||||||
resp.flags |=
|
resp.flags |=
|
||||||
MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE;
|
MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE;
|
||||||
|
|
||||||
|
resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (field_avail(typeof(resp), sw_parsing_caps,
|
if (field_avail(typeof(resp), sw_parsing_caps,
|
||||||
@ -2066,6 +2068,7 @@ static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
|
|||||||
|
|
||||||
if (vma->vm_flags & VM_WRITE)
|
if (vma->vm_flags & VM_WRITE)
|
||||||
return -EPERM;
|
return -EPERM;
|
||||||
|
vma->vm_flags &= ~VM_MAYWRITE;
|
||||||
|
|
||||||
if (!dev->mdev->clock_info_page)
|
if (!dev->mdev->clock_info_page)
|
||||||
return -EOPNOTSUPP;
|
return -EOPNOTSUPP;
|
||||||
@ -2231,19 +2234,18 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
|
|||||||
|
|
||||||
if (vma->vm_flags & VM_WRITE)
|
if (vma->vm_flags & VM_WRITE)
|
||||||
return -EPERM;
|
return -EPERM;
|
||||||
|
vma->vm_flags &= ~VM_MAYWRITE;
|
||||||
|
|
||||||
/* Don't expose to user-space information it shouldn't have */
|
/* Don't expose to user-space information it shouldn't have */
|
||||||
if (PAGE_SIZE > 4096)
|
if (PAGE_SIZE > 4096)
|
||||||
return -EOPNOTSUPP;
|
return -EOPNOTSUPP;
|
||||||
|
|
||||||
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
|
|
||||||
pfn = (dev->mdev->iseg_base +
|
pfn = (dev->mdev->iseg_base +
|
||||||
offsetof(struct mlx5_init_seg, internal_timer_h)) >>
|
offsetof(struct mlx5_init_seg, internal_timer_h)) >>
|
||||||
PAGE_SHIFT;
|
PAGE_SHIFT;
|
||||||
if (io_remap_pfn_range(vma, vma->vm_start, pfn,
|
return rdma_user_mmap_io(&context->ibucontext, vma, pfn,
|
||||||
PAGE_SIZE, vma->vm_page_prot))
|
PAGE_SIZE,
|
||||||
return -EAGAIN;
|
pgprot_noncached(vma->vm_page_prot));
|
||||||
break;
|
|
||||||
case MLX5_IB_MMAP_CLOCK_INFO:
|
case MLX5_IB_MMAP_CLOCK_INFO:
|
||||||
return mlx5_ib_mmap_clock_info_page(dev, vma, context);
|
return mlx5_ib_mmap_clock_info_page(dev, vma, context);
|
||||||
|
|
||||||
|
@ -1818,13 +1818,16 @@ static void configure_responder_scat_cqe(struct ib_qp_init_attr *init_attr,
|
|||||||
|
|
||||||
rcqe_sz = mlx5_ib_get_cqe_size(init_attr->recv_cq);
|
rcqe_sz = mlx5_ib_get_cqe_size(init_attr->recv_cq);
|
||||||
|
|
||||||
if (rcqe_sz == 128) {
|
if (init_attr->qp_type == MLX5_IB_QPT_DCT) {
|
||||||
MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
|
if (rcqe_sz == 128)
|
||||||
|
MLX5_SET(dctc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
|
||||||
|
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (init_attr->qp_type != MLX5_IB_QPT_DCT)
|
MLX5_SET(qpc, qpc, cs_res,
|
||||||
MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA32_CQE);
|
rcqe_sz == 128 ? MLX5_RES_SCAT_DATA64_CQE :
|
||||||
|
MLX5_RES_SCAT_DATA32_CQE);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev,
|
static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev,
|
||||||
|
@ -608,11 +608,6 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
|
|||||||
if (unlikely(mapped_segs == mr->mr.max_segs))
|
if (unlikely(mapped_segs == mr->mr.max_segs))
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
if (mr->mr.length == 0) {
|
|
||||||
mr->mr.user_base = addr;
|
|
||||||
mr->mr.iova = addr;
|
|
||||||
}
|
|
||||||
|
|
||||||
m = mapped_segs / RVT_SEGSZ;
|
m = mapped_segs / RVT_SEGSZ;
|
||||||
n = mapped_segs % RVT_SEGSZ;
|
n = mapped_segs % RVT_SEGSZ;
|
||||||
mr->mr.map[m]->segs[n].vaddr = (void *)addr;
|
mr->mr.map[m]->segs[n].vaddr = (void *)addr;
|
||||||
@ -630,17 +625,24 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
|
|||||||
* @sg_nents: number of entries in sg
|
* @sg_nents: number of entries in sg
|
||||||
* @sg_offset: offset in bytes into sg
|
* @sg_offset: offset in bytes into sg
|
||||||
*
|
*
|
||||||
|
* Overwrite rvt_mr length with mr length calculated by ib_sg_to_pages.
|
||||||
|
*
|
||||||
* Return: number of sg elements mapped to the memory region
|
* Return: number of sg elements mapped to the memory region
|
||||||
*/
|
*/
|
||||||
int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
|
int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
|
||||||
int sg_nents, unsigned int *sg_offset)
|
int sg_nents, unsigned int *sg_offset)
|
||||||
{
|
{
|
||||||
struct rvt_mr *mr = to_imr(ibmr);
|
struct rvt_mr *mr = to_imr(ibmr);
|
||||||
|
int ret;
|
||||||
|
|
||||||
mr->mr.length = 0;
|
mr->mr.length = 0;
|
||||||
mr->mr.page_shift = PAGE_SHIFT;
|
mr->mr.page_shift = PAGE_SHIFT;
|
||||||
return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
|
ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rvt_set_page);
|
||||||
rvt_set_page);
|
mr->mr.user_base = ibmr->iova;
|
||||||
|
mr->mr.iova = ibmr->iova;
|
||||||
|
mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr;
|
||||||
|
mr->mr.length = (size_t)ibmr->length;
|
||||||
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@ -671,6 +673,7 @@ int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key,
|
|||||||
ibmr->rkey = key;
|
ibmr->rkey = key;
|
||||||
mr->mr.lkey = key;
|
mr->mr.lkey = key;
|
||||||
mr->mr.access_flags = access;
|
mr->mr.access_flags = access;
|
||||||
|
mr->mr.iova = ibmr->iova;
|
||||||
atomic_set(&mr->mr.lkey_invalid, 0);
|
atomic_set(&mr->mr.lkey_invalid, 0);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -420,7 +420,7 @@ config KEYBOARD_MPR121
|
|||||||
|
|
||||||
config KEYBOARD_SNVS_PWRKEY
|
config KEYBOARD_SNVS_PWRKEY
|
||||||
tristate "IMX SNVS Power Key Driver"
|
tristate "IMX SNVS Power Key Driver"
|
||||||
depends on SOC_IMX6SX || SOC_IMX7D
|
depends on ARCH_MXC || COMPILE_TEST
|
||||||
depends on OF
|
depends on OF
|
||||||
help
|
help
|
||||||
This is the snvs powerkey driver for the Freescale i.MX application
|
This is the snvs powerkey driver for the Freescale i.MX application
|
||||||
|
@ -860,7 +860,7 @@ static int rmi_create_function(struct rmi_device *rmi_dev,
|
|||||||
|
|
||||||
error = rmi_register_function(fn);
|
error = rmi_register_function(fn);
|
||||||
if (error)
|
if (error)
|
||||||
goto err_put_fn;
|
return error;
|
||||||
|
|
||||||
if (pdt->function_number == 0x01)
|
if (pdt->function_number == 0x01)
|
||||||
data->f01_container = fn;
|
data->f01_container = fn;
|
||||||
@ -870,10 +870,6 @@ static int rmi_create_function(struct rmi_device *rmi_dev,
|
|||||||
list_add_tail(&fn->node, &data->function_list);
|
list_add_tail(&fn->node, &data->function_list);
|
||||||
|
|
||||||
return RMI_SCAN_CONTINUE;
|
return RMI_SCAN_CONTINUE;
|
||||||
|
|
||||||
err_put_fn:
|
|
||||||
put_device(&fn->dev);
|
|
||||||
return error;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake)
|
void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake)
|
||||||
|
@ -1230,7 +1230,7 @@ static int rmi_f11_initialize(struct rmi_function *fn)
|
|||||||
}
|
}
|
||||||
|
|
||||||
rc = f11_write_control_regs(fn, &f11->sens_query,
|
rc = f11_write_control_regs(fn, &f11->sens_query,
|
||||||
&f11->dev_controls, fn->fd.query_base_addr);
|
&f11->dev_controls, fn->fd.control_base_addr);
|
||||||
if (rc)
|
if (rc)
|
||||||
dev_warn(&fn->dev, "Failed to write control registers\n");
|
dev_warn(&fn->dev, "Failed to write control registers\n");
|
||||||
|
|
||||||
|
@ -722,12 +722,6 @@ static void marvell_nfc_select_target(struct nand_chip *chip,
|
|||||||
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
|
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
|
||||||
u32 ndcr_generic;
|
u32 ndcr_generic;
|
||||||
|
|
||||||
if (chip == nfc->selected_chip && die_nr == marvell_nand->selected_die)
|
|
||||||
return;
|
|
||||||
|
|
||||||
writel_relaxed(marvell_nand->ndtr0, nfc->regs + NDTR0);
|
|
||||||
writel_relaxed(marvell_nand->ndtr1, nfc->regs + NDTR1);
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Reset the NDCR register to a clean state for this particular chip,
|
* Reset the NDCR register to a clean state for this particular chip,
|
||||||
* also clear ND_RUN bit.
|
* also clear ND_RUN bit.
|
||||||
@ -739,6 +733,12 @@ static void marvell_nfc_select_target(struct nand_chip *chip,
|
|||||||
/* Also reset the interrupt status register */
|
/* Also reset the interrupt status register */
|
||||||
marvell_nfc_clear_int(nfc, NDCR_ALL_INT);
|
marvell_nfc_clear_int(nfc, NDCR_ALL_INT);
|
||||||
|
|
||||||
|
if (chip == nfc->selected_chip && die_nr == marvell_nand->selected_die)
|
||||||
|
return;
|
||||||
|
|
||||||
|
writel_relaxed(marvell_nand->ndtr0, nfc->regs + NDTR0);
|
||||||
|
writel_relaxed(marvell_nand->ndtr1, nfc->regs + NDTR1);
|
||||||
|
|
||||||
nfc->selected_chip = chip;
|
nfc->selected_chip = chip;
|
||||||
marvell_nand->selected_die = die_nr;
|
marvell_nand->selected_die = die_nr;
|
||||||
}
|
}
|
||||||
|
@ -886,6 +886,9 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
|
|||||||
fs->m_ext.data[1]))
|
fs->m_ext.data[1]))
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
|
if (fs->location != RX_CLS_LOC_ANY && fs->location >= CFP_NUM_RULES)
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
if (fs->location != RX_CLS_LOC_ANY &&
|
if (fs->location != RX_CLS_LOC_ANY &&
|
||||||
test_bit(fs->location, priv->cfp.used))
|
test_bit(fs->location, priv->cfp.used))
|
||||||
return -EBUSY;
|
return -EBUSY;
|
||||||
@ -974,6 +977,9 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, u32 loc)
|
|||||||
struct cfp_rule *rule;
|
struct cfp_rule *rule;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
|
if (loc >= CFP_NUM_RULES)
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
/* Refuse deleting unused rules, and those that are not unique since
|
/* Refuse deleting unused rules, and those that are not unique since
|
||||||
* that could leave IPv6 rules with one of the chained rule in the
|
* that could leave IPv6 rules with one of the chained rule in the
|
||||||
* table.
|
* table.
|
||||||
|
@ -1625,7 +1625,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
|
|||||||
netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
|
netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
|
||||||
bnxt_sched_reset(bp, rxr);
|
bnxt_sched_reset(bp, rxr);
|
||||||
}
|
}
|
||||||
goto next_rx;
|
goto next_rx_no_len;
|
||||||
}
|
}
|
||||||
|
|
||||||
len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
|
len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
|
||||||
@ -1706,12 +1706,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
|
|||||||
rc = 1;
|
rc = 1;
|
||||||
|
|
||||||
next_rx:
|
next_rx:
|
||||||
rxr->rx_prod = NEXT_RX(prod);
|
|
||||||
rxr->rx_next_cons = NEXT_RX(cons);
|
|
||||||
|
|
||||||
cpr->rx_packets += 1;
|
cpr->rx_packets += 1;
|
||||||
cpr->rx_bytes += len;
|
cpr->rx_bytes += len;
|
||||||
|
|
||||||
|
next_rx_no_len:
|
||||||
|
rxr->rx_prod = NEXT_RX(prod);
|
||||||
|
rxr->rx_next_cons = NEXT_RX(cons);
|
||||||
|
|
||||||
next_rx_no_prod_no_len:
|
next_rx_no_prod_no_len:
|
||||||
*raw_cons = tmp_raw_cons;
|
*raw_cons = tmp_raw_cons;
|
||||||
|
|
||||||
@ -5135,10 +5136,10 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
|
|||||||
for (i = 0; i < bp->tx_nr_rings; i++) {
|
for (i = 0; i < bp->tx_nr_rings; i++) {
|
||||||
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
|
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
|
||||||
struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
|
struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
|
||||||
u32 cmpl_ring_id;
|
|
||||||
|
|
||||||
cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
|
|
||||||
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
|
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
|
||||||
|
u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
|
||||||
|
|
||||||
hwrm_ring_free_send_msg(bp, ring,
|
hwrm_ring_free_send_msg(bp, ring,
|
||||||
RING_FREE_REQ_RING_TYPE_TX,
|
RING_FREE_REQ_RING_TYPE_TX,
|
||||||
close_path ? cmpl_ring_id :
|
close_path ? cmpl_ring_id :
|
||||||
@ -5151,10 +5152,10 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
|
|||||||
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
|
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
|
||||||
struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
|
struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
|
||||||
u32 grp_idx = rxr->bnapi->index;
|
u32 grp_idx = rxr->bnapi->index;
|
||||||
u32 cmpl_ring_id;
|
|
||||||
|
|
||||||
cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
|
|
||||||
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
|
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
|
||||||
|
u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
|
||||||
|
|
||||||
hwrm_ring_free_send_msg(bp, ring,
|
hwrm_ring_free_send_msg(bp, ring,
|
||||||
RING_FREE_REQ_RING_TYPE_RX,
|
RING_FREE_REQ_RING_TYPE_RX,
|
||||||
close_path ? cmpl_ring_id :
|
close_path ? cmpl_ring_id :
|
||||||
@ -5173,10 +5174,10 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
|
|||||||
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
|
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
|
||||||
struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
|
struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
|
||||||
u32 grp_idx = rxr->bnapi->index;
|
u32 grp_idx = rxr->bnapi->index;
|
||||||
u32 cmpl_ring_id;
|
|
||||||
|
|
||||||
cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
|
|
||||||
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
|
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
|
||||||
|
u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
|
||||||
|
|
||||||
hwrm_ring_free_send_msg(bp, ring, type,
|
hwrm_ring_free_send_msg(bp, ring, type,
|
||||||
close_path ? cmpl_ring_id :
|
close_path ? cmpl_ring_id :
|
||||||
INVALID_HW_RING_ID);
|
INVALID_HW_RING_ID);
|
||||||
@ -5315,17 +5316,16 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
|
|||||||
req->num_tx_rings = cpu_to_le16(tx_rings);
|
req->num_tx_rings = cpu_to_le16(tx_rings);
|
||||||
if (BNXT_NEW_RM(bp)) {
|
if (BNXT_NEW_RM(bp)) {
|
||||||
enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
|
enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
|
||||||
|
enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
|
||||||
if (bp->flags & BNXT_FLAG_CHIP_P5) {
|
if (bp->flags & BNXT_FLAG_CHIP_P5) {
|
||||||
enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
|
enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
|
||||||
enables |= tx_rings + ring_grps ?
|
enables |= tx_rings + ring_grps ?
|
||||||
FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
|
FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
|
||||||
FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
|
|
||||||
enables |= rx_rings ?
|
enables |= rx_rings ?
|
||||||
FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
|
FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
|
||||||
} else {
|
} else {
|
||||||
enables |= cp_rings ?
|
enables |= cp_rings ?
|
||||||
FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
|
FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
|
||||||
FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
|
|
||||||
enables |= ring_grps ?
|
enables |= ring_grps ?
|
||||||
FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
|
FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
|
||||||
FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
|
FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
|
||||||
@ -5365,14 +5365,13 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
|
|||||||
enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
|
enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
|
||||||
enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
|
enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
|
||||||
FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
|
FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
|
||||||
|
enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
|
||||||
if (bp->flags & BNXT_FLAG_CHIP_P5) {
|
if (bp->flags & BNXT_FLAG_CHIP_P5) {
|
||||||
enables |= tx_rings + ring_grps ?
|
enables |= tx_rings + ring_grps ?
|
||||||
FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
|
FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
|
||||||
FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
|
|
||||||
} else {
|
} else {
|
||||||
enables |= cp_rings ?
|
enables |= cp_rings ?
|
||||||
FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
|
FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
|
||||||
FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
|
|
||||||
enables |= ring_grps ?
|
enables |= ring_grps ?
|
||||||
FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
|
FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
|
||||||
}
|
}
|
||||||
@ -6753,6 +6752,7 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
|
|||||||
struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
|
struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
|
||||||
struct hwrm_port_qstats_ext_input req = {0};
|
struct hwrm_port_qstats_ext_input req = {0};
|
||||||
struct bnxt_pf_info *pf = &bp->pf;
|
struct bnxt_pf_info *pf = &bp->pf;
|
||||||
|
u32 tx_stat_size;
|
||||||
int rc;
|
int rc;
|
||||||
|
|
||||||
if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
|
if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
|
||||||
@ -6762,13 +6762,16 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
|
|||||||
req.port_id = cpu_to_le16(pf->port_id);
|
req.port_id = cpu_to_le16(pf->port_id);
|
||||||
req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
|
req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
|
||||||
req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
|
req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
|
||||||
req.tx_stat_size = cpu_to_le16(sizeof(struct tx_port_stats_ext));
|
tx_stat_size = bp->hw_tx_port_stats_ext ?
|
||||||
|
sizeof(*bp->hw_tx_port_stats_ext) : 0;
|
||||||
|
req.tx_stat_size = cpu_to_le16(tx_stat_size);
|
||||||
req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map);
|
req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map);
|
||||||
mutex_lock(&bp->hwrm_cmd_lock);
|
mutex_lock(&bp->hwrm_cmd_lock);
|
||||||
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
|
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
|
||||||
if (!rc) {
|
if (!rc) {
|
||||||
bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
|
bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
|
||||||
bp->fw_tx_stats_ext_size = le16_to_cpu(resp->tx_stat_size) / 8;
|
bp->fw_tx_stats_ext_size = tx_stat_size ?
|
||||||
|
le16_to_cpu(resp->tx_stat_size) / 8 : 0;
|
||||||
} else {
|
} else {
|
||||||
bp->fw_rx_stats_ext_size = 0;
|
bp->fw_rx_stats_ext_size = 0;
|
||||||
bp->fw_tx_stats_ext_size = 0;
|
bp->fw_tx_stats_ext_size = 0;
|
||||||
@ -8961,8 +8964,15 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
|
|||||||
|
|
||||||
skip_uc:
|
skip_uc:
|
||||||
rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
|
rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
|
||||||
|
if (rc && vnic->mc_list_count) {
|
||||||
|
netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
|
||||||
|
rc);
|
||||||
|
vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
|
||||||
|
vnic->mc_list_count = 0;
|
||||||
|
rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
|
||||||
|
}
|
||||||
if (rc)
|
if (rc)
|
||||||
netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
|
netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
|
||||||
rc);
|
rc);
|
||||||
|
|
||||||
return rc;
|
return rc;
|
||||||
@ -10699,6 +10709,7 @@ init_err_cleanup_tc:
|
|||||||
bnxt_clear_int_mode(bp);
|
bnxt_clear_int_mode(bp);
|
||||||
|
|
||||||
init_err_pci_clean:
|
init_err_pci_clean:
|
||||||
|
bnxt_free_hwrm_short_cmd_req(bp);
|
||||||
bnxt_free_hwrm_resources(bp);
|
bnxt_free_hwrm_resources(bp);
|
||||||
bnxt_free_ctx_mem(bp);
|
bnxt_free_ctx_mem(bp);
|
||||||
kfree(bp->ctx);
|
kfree(bp->ctx);
|
||||||
|
@ -333,6 +333,9 @@ static int stm32mp1_parse_data(struct stm32_dwmac *dwmac,
|
|||||||
*/
|
*/
|
||||||
dwmac->irq_pwr_wakeup = platform_get_irq_byname(pdev,
|
dwmac->irq_pwr_wakeup = platform_get_irq_byname(pdev,
|
||||||
"stm32_pwr_wakeup");
|
"stm32_pwr_wakeup");
|
||||||
|
if (dwmac->irq_pwr_wakeup == -EPROBE_DEFER)
|
||||||
|
return -EPROBE_DEFER;
|
||||||
|
|
||||||
if (!dwmac->clk_eth_ck && dwmac->irq_pwr_wakeup >= 0) {
|
if (!dwmac->clk_eth_ck && dwmac->irq_pwr_wakeup >= 0) {
|
||||||
err = device_init_wakeup(&pdev->dev, true);
|
err = device_init_wakeup(&pdev->dev, true);
|
||||||
if (err) {
|
if (err) {
|
||||||
|
@ -160,7 +160,7 @@ static const struct dmi_system_id quark_pci_dmi[] = {
|
|||||||
.driver_data = (void *)&galileo_stmmac_dmi_data,
|
.driver_data = (void *)&galileo_stmmac_dmi_data,
|
||||||
},
|
},
|
||||||
/*
|
/*
|
||||||
* There are 2 types of SIMATIC IOT2000: IOT20202 and IOT2040.
|
* There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040.
|
||||||
* The asset tag "6ES7647-0AA00-0YA2" is only for IOT2020 which
|
* The asset tag "6ES7647-0AA00-0YA2" is only for IOT2020 which
|
||||||
* has only one pci network device while other asset tags are
|
* has only one pci network device while other asset tags are
|
||||||
* for IOT2040 which has two.
|
* for IOT2040 which has two.
|
||||||
|
@ -533,6 +533,8 @@ mcr20a_start(struct ieee802154_hw *hw)
|
|||||||
dev_dbg(printdev(lp), "no slotted operation\n");
|
dev_dbg(printdev(lp), "no slotted operation\n");
|
||||||
ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
|
ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
|
||||||
DAR_PHY_CTRL1_SLOTTED, 0x0);
|
DAR_PHY_CTRL1_SLOTTED, 0x0);
|
||||||
|
if (ret < 0)
|
||||||
|
return ret;
|
||||||
|
|
||||||
/* enable irq */
|
/* enable irq */
|
||||||
enable_irq(lp->spi->irq);
|
enable_irq(lp->spi->irq);
|
||||||
@ -540,11 +542,15 @@ mcr20a_start(struct ieee802154_hw *hw)
|
|||||||
/* Unmask SEQ interrupt */
|
/* Unmask SEQ interrupt */
|
||||||
ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL2,
|
ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL2,
|
||||||
DAR_PHY_CTRL2_SEQMSK, 0x0);
|
DAR_PHY_CTRL2_SEQMSK, 0x0);
|
||||||
|
if (ret < 0)
|
||||||
|
return ret;
|
||||||
|
|
||||||
/* Start the RX sequence */
|
/* Start the RX sequence */
|
||||||
dev_dbg(printdev(lp), "start the RX sequence\n");
|
dev_dbg(printdev(lp), "start the RX sequence\n");
|
||||||
ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
|
ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
|
||||||
DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_RX);
|
DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_RX);
|
||||||
|
if (ret < 0)
|
||||||
|
return ret;
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -1597,9 +1597,10 @@ static int marvell_get_sset_count(struct phy_device *phydev)
|
|||||||
|
|
||||||
static void marvell_get_strings(struct phy_device *phydev, u8 *data)
|
static void marvell_get_strings(struct phy_device *phydev, u8 *data)
|
||||||
{
|
{
|
||||||
|
int count = marvell_get_sset_count(phydev);
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++) {
|
for (i = 0; i < count; i++) {
|
||||||
strlcpy(data + i * ETH_GSTRING_LEN,
|
strlcpy(data + i * ETH_GSTRING_LEN,
|
||||||
marvell_hw_stats[i].string, ETH_GSTRING_LEN);
|
marvell_hw_stats[i].string, ETH_GSTRING_LEN);
|
||||||
}
|
}
|
||||||
@ -1627,9 +1628,10 @@ static u64 marvell_get_stat(struct phy_device *phydev, int i)
|
|||||||
static void marvell_get_stats(struct phy_device *phydev,
|
static void marvell_get_stats(struct phy_device *phydev,
|
||||||
struct ethtool_stats *stats, u64 *data)
|
struct ethtool_stats *stats, u64 *data)
|
||||||
{
|
{
|
||||||
|
int count = marvell_get_sset_count(phydev);
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++)
|
for (i = 0; i < count; i++)
|
||||||
data[i] = marvell_get_stat(phydev, i);
|
data[i] = marvell_get_stat(phydev, i);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -153,7 +153,7 @@ out_fail:
|
|||||||
void
|
void
|
||||||
slhc_free(struct slcompress *comp)
|
slhc_free(struct slcompress *comp)
|
||||||
{
|
{
|
||||||
if ( comp == NULLSLCOMPR )
|
if ( IS_ERR_OR_NULL(comp) )
|
||||||
return;
|
return;
|
||||||
|
|
||||||
if ( comp->tstate != NULLSLSTATE )
|
if ( comp->tstate != NULLSLSTATE )
|
||||||
|
@ -1131,9 +1131,16 @@ static const struct usb_device_id products[] = {
|
|||||||
{QMI_FIXED_INTF(0x0846, 0x68d3, 8)}, /* Netgear Aircard 779S */
|
{QMI_FIXED_INTF(0x0846, 0x68d3, 8)}, /* Netgear Aircard 779S */
|
||||||
{QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
|
{QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
|
||||||
{QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
|
{QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
|
||||||
|
{QMI_FIXED_INTF(0x1435, 0x0918, 3)}, /* Wistron NeWeb D16Q1 */
|
||||||
|
{QMI_FIXED_INTF(0x1435, 0x0918, 4)}, /* Wistron NeWeb D16Q1 */
|
||||||
|
{QMI_FIXED_INTF(0x1435, 0x0918, 5)}, /* Wistron NeWeb D16Q1 */
|
||||||
|
{QMI_FIXED_INTF(0x1435, 0x3185, 4)}, /* Wistron NeWeb M18Q5 */
|
||||||
|
{QMI_FIXED_INTF(0x1435, 0xd111, 4)}, /* M9615A DM11-1 D51QC */
|
||||||
{QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */
|
{QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */
|
||||||
{QMI_FIXED_INTF(0x1435, 0xd181, 4)}, /* Wistron NeWeb D18Q1 */
|
{QMI_FIXED_INTF(0x1435, 0xd181, 4)}, /* Wistron NeWeb D18Q1 */
|
||||||
{QMI_FIXED_INTF(0x1435, 0xd181, 5)}, /* Wistron NeWeb D18Q1 */
|
{QMI_FIXED_INTF(0x1435, 0xd181, 5)}, /* Wistron NeWeb D18Q1 */
|
||||||
|
{QMI_FIXED_INTF(0x1435, 0xd182, 4)}, /* Wistron NeWeb D18 */
|
||||||
|
{QMI_FIXED_INTF(0x1435, 0xd182, 5)}, /* Wistron NeWeb D18 */
|
||||||
{QMI_FIXED_INTF(0x1435, 0xd191, 4)}, /* Wistron NeWeb D19Q1 */
|
{QMI_FIXED_INTF(0x1435, 0xd191, 4)}, /* Wistron NeWeb D19Q1 */
|
||||||
{QMI_QUIRK_SET_DTR(0x1508, 0x1001, 4)}, /* Fibocom NL668 series */
|
{QMI_QUIRK_SET_DTR(0x1508, 0x1001, 4)}, /* Fibocom NL668 series */
|
||||||
{QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
|
{QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
|
||||||
@ -1189,6 +1196,7 @@ static const struct usb_device_id products[] = {
|
|||||||
{QMI_FIXED_INTF(0x19d2, 0x0265, 4)}, /* ONDA MT8205 4G LTE */
|
{QMI_FIXED_INTF(0x19d2, 0x0265, 4)}, /* ONDA MT8205 4G LTE */
|
||||||
{QMI_FIXED_INTF(0x19d2, 0x0284, 4)}, /* ZTE MF880 */
|
{QMI_FIXED_INTF(0x19d2, 0x0284, 4)}, /* ZTE MF880 */
|
||||||
{QMI_FIXED_INTF(0x19d2, 0x0326, 4)}, /* ZTE MF821D */
|
{QMI_FIXED_INTF(0x19d2, 0x0326, 4)}, /* ZTE MF821D */
|
||||||
|
{QMI_FIXED_INTF(0x19d2, 0x0396, 3)}, /* ZTE ZM8620 */
|
||||||
{QMI_FIXED_INTF(0x19d2, 0x0412, 4)}, /* Telewell TW-LTE 4G */
|
{QMI_FIXED_INTF(0x19d2, 0x0412, 4)}, /* Telewell TW-LTE 4G */
|
||||||
{QMI_FIXED_INTF(0x19d2, 0x1008, 4)}, /* ZTE (Vodafone) K3570-Z */
|
{QMI_FIXED_INTF(0x19d2, 0x1008, 4)}, /* ZTE (Vodafone) K3570-Z */
|
||||||
{QMI_FIXED_INTF(0x19d2, 0x1010, 4)}, /* ZTE (Vodafone) K3571-Z */
|
{QMI_FIXED_INTF(0x19d2, 0x1010, 4)}, /* ZTE (Vodafone) K3571-Z */
|
||||||
@ -1209,7 +1217,9 @@ static const struct usb_device_id products[] = {
|
|||||||
{QMI_FIXED_INTF(0x19d2, 0x1425, 2)},
|
{QMI_FIXED_INTF(0x19d2, 0x1425, 2)},
|
||||||
{QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */
|
{QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */
|
||||||
{QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
|
{QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
|
||||||
|
{QMI_FIXED_INTF(0x19d2, 0x1432, 3)}, /* ZTE ME3620 */
|
||||||
{QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
|
{QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
|
||||||
|
{QMI_FIXED_INTF(0x2001, 0x7e16, 3)}, /* D-Link DWM-221 */
|
||||||
{QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
|
{QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
|
||||||
{QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
|
{QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
|
||||||
{QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
|
{QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
|
||||||
|
@ -1855,7 +1855,7 @@ void ath10k_ce_dump_registers(struct ath10k *ar,
|
|||||||
struct ath10k_ce_crash_data ce_data;
|
struct ath10k_ce_crash_data ce_data;
|
||||||
u32 addr, id;
|
u32 addr, id;
|
||||||
|
|
||||||
lockdep_assert_held(&ar->data_lock);
|
lockdep_assert_held(&ar->dump_mutex);
|
||||||
|
|
||||||
ath10k_err(ar, "Copy Engine register dump:\n");
|
ath10k_err(ar, "Copy Engine register dump:\n");
|
||||||
|
|
||||||
|
@ -3119,6 +3119,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
|
|||||||
goto err_free_wq;
|
goto err_free_wq;
|
||||||
|
|
||||||
mutex_init(&ar->conf_mutex);
|
mutex_init(&ar->conf_mutex);
|
||||||
|
mutex_init(&ar->dump_mutex);
|
||||||
spin_lock_init(&ar->data_lock);
|
spin_lock_init(&ar->data_lock);
|
||||||
|
|
||||||
INIT_LIST_HEAD(&ar->peers);
|
INIT_LIST_HEAD(&ar->peers);
|
||||||
|
@ -1063,6 +1063,9 @@ struct ath10k {
|
|||||||
/* prevents concurrent FW reconfiguration */
|
/* prevents concurrent FW reconfiguration */
|
||||||
struct mutex conf_mutex;
|
struct mutex conf_mutex;
|
||||||
|
|
||||||
|
/* protects coredump data */
|
||||||
|
struct mutex dump_mutex;
|
||||||
|
|
||||||
/* protects shared structure data */
|
/* protects shared structure data */
|
||||||
spinlock_t data_lock;
|
spinlock_t data_lock;
|
||||||
|
|
||||||
|
@ -1102,7 +1102,7 @@ struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar)
|
|||||||
{
|
{
|
||||||
struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data;
|
struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data;
|
||||||
|
|
||||||
lockdep_assert_held(&ar->data_lock);
|
lockdep_assert_held(&ar->dump_mutex);
|
||||||
|
|
||||||
if (ath10k_coredump_mask == 0)
|
if (ath10k_coredump_mask == 0)
|
||||||
/* coredump disabled */
|
/* coredump disabled */
|
||||||
@ -1146,7 +1146,7 @@ static struct ath10k_dump_file_data *ath10k_coredump_build(struct ath10k *ar)
|
|||||||
if (!buf)
|
if (!buf)
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
spin_lock_bh(&ar->data_lock);
|
mutex_lock(&ar->dump_mutex);
|
||||||
|
|
||||||
dump_data = (struct ath10k_dump_file_data *)(buf);
|
dump_data = (struct ath10k_dump_file_data *)(buf);
|
||||||
strlcpy(dump_data->df_magic, "ATH10K-FW-DUMP",
|
strlcpy(dump_data->df_magic, "ATH10K-FW-DUMP",
|
||||||
@ -1213,7 +1213,7 @@ static struct ath10k_dump_file_data *ath10k_coredump_build(struct ath10k *ar)
|
|||||||
sofar += sizeof(*dump_tlv) + crash_data->ramdump_buf_len;
|
sofar += sizeof(*dump_tlv) + crash_data->ramdump_buf_len;
|
||||||
}
|
}
|
||||||
|
|
||||||
spin_unlock_bh(&ar->data_lock);
|
mutex_unlock(&ar->dump_mutex);
|
||||||
|
|
||||||
return dump_data;
|
return dump_data;
|
||||||
}
|
}
|
||||||
|
@ -5774,7 +5774,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (changed & BSS_CHANGED_MCAST_RATE &&
|
if (changed & BSS_CHANGED_MCAST_RATE &&
|
||||||
!WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) {
|
!ath10k_mac_vif_chan(arvif->vif, &def)) {
|
||||||
band = def.chan->band;
|
band = def.chan->band;
|
||||||
rateidx = vif->bss_conf.mcast_rate[band] - 1;
|
rateidx = vif->bss_conf.mcast_rate[band] - 1;
|
||||||
|
|
||||||
@ -5812,7 +5812,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (changed & BSS_CHANGED_BASIC_RATES) {
|
if (changed & BSS_CHANGED_BASIC_RATES) {
|
||||||
if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) {
|
if (ath10k_mac_vif_chan(vif, &def)) {
|
||||||
mutex_unlock(&ar->conf_mutex);
|
mutex_unlock(&ar->conf_mutex);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
@ -1441,7 +1441,7 @@ static void ath10k_pci_dump_registers(struct ath10k *ar,
|
|||||||
__le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
|
__le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
|
||||||
int i, ret;
|
int i, ret;
|
||||||
|
|
||||||
lockdep_assert_held(&ar->data_lock);
|
lockdep_assert_held(&ar->dump_mutex);
|
||||||
|
|
||||||
ret = ath10k_pci_diag_read_hi(ar, ®_dump_values[0],
|
ret = ath10k_pci_diag_read_hi(ar, ®_dump_values[0],
|
||||||
hi_failure_state,
|
hi_failure_state,
|
||||||
@ -1656,7 +1656,7 @@ static void ath10k_pci_dump_memory(struct ath10k *ar,
|
|||||||
int ret, i;
|
int ret, i;
|
||||||
u8 *buf;
|
u8 *buf;
|
||||||
|
|
||||||
lockdep_assert_held(&ar->data_lock);
|
lockdep_assert_held(&ar->dump_mutex);
|
||||||
|
|
||||||
if (!crash_data)
|
if (!crash_data)
|
||||||
return;
|
return;
|
||||||
@ -1734,14 +1734,19 @@ static void ath10k_pci_dump_memory(struct ath10k *ar,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
|
static void ath10k_pci_fw_dump_work(struct work_struct *work)
|
||||||
{
|
{
|
||||||
|
struct ath10k_pci *ar_pci = container_of(work, struct ath10k_pci,
|
||||||
|
dump_work);
|
||||||
struct ath10k_fw_crash_data *crash_data;
|
struct ath10k_fw_crash_data *crash_data;
|
||||||
|
struct ath10k *ar = ar_pci->ar;
|
||||||
char guid[UUID_STRING_LEN + 1];
|
char guid[UUID_STRING_LEN + 1];
|
||||||
|
|
||||||
spin_lock_bh(&ar->data_lock);
|
mutex_lock(&ar->dump_mutex);
|
||||||
|
|
||||||
|
spin_lock_bh(&ar->data_lock);
|
||||||
ar->stats.fw_crash_counter++;
|
ar->stats.fw_crash_counter++;
|
||||||
|
spin_unlock_bh(&ar->data_lock);
|
||||||
|
|
||||||
crash_data = ath10k_coredump_new(ar);
|
crash_data = ath10k_coredump_new(ar);
|
||||||
|
|
||||||
@ -1756,11 +1761,18 @@ static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
|
|||||||
ath10k_ce_dump_registers(ar, crash_data);
|
ath10k_ce_dump_registers(ar, crash_data);
|
||||||
ath10k_pci_dump_memory(ar, crash_data);
|
ath10k_pci_dump_memory(ar, crash_data);
|
||||||
|
|
||||||
spin_unlock_bh(&ar->data_lock);
|
mutex_unlock(&ar->dump_mutex);
|
||||||
|
|
||||||
queue_work(ar->workqueue, &ar->restart_work);
|
queue_work(ar->workqueue, &ar->restart_work);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
|
||||||
|
{
|
||||||
|
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
|
||||||
|
|
||||||
|
queue_work(ar->workqueue, &ar_pci->dump_work);
|
||||||
|
}
|
||||||
|
|
||||||
void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
|
void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
|
||||||
int force)
|
int force)
|
||||||
{
|
{
|
||||||
@ -3442,6 +3454,8 @@ int ath10k_pci_setup_resource(struct ath10k *ar)
|
|||||||
spin_lock_init(&ar_pci->ps_lock);
|
spin_lock_init(&ar_pci->ps_lock);
|
||||||
mutex_init(&ar_pci->ce_diag_mutex);
|
mutex_init(&ar_pci->ce_diag_mutex);
|
||||||
|
|
||||||
|
INIT_WORK(&ar_pci->dump_work, ath10k_pci_fw_dump_work);
|
||||||
|
|
||||||
timer_setup(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 0);
|
timer_setup(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 0);
|
||||||
|
|
||||||
if (QCA_REV_6174(ar) || QCA_REV_9377(ar))
|
if (QCA_REV_6174(ar) || QCA_REV_9377(ar))
|
||||||
|
@ -121,6 +121,8 @@ struct ath10k_pci {
|
|||||||
/* For protecting ce_diag */
|
/* For protecting ce_diag */
|
||||||
struct mutex ce_diag_mutex;
|
struct mutex ce_diag_mutex;
|
||||||
|
|
||||||
|
struct work_struct dump_work;
|
||||||
|
|
||||||
struct ath10k_ce ce;
|
struct ath10k_ce ce;
|
||||||
struct timer_list rx_post_retry;
|
struct timer_list rx_post_retry;
|
||||||
|
|
||||||
|
@ -207,7 +207,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
|
|||||||
#define IWL_DEVICE_AX210 \
|
#define IWL_DEVICE_AX210 \
|
||||||
IWL_DEVICE_AX200_COMMON, \
|
IWL_DEVICE_AX200_COMMON, \
|
||||||
.device_family = IWL_DEVICE_FAMILY_AX210, \
|
.device_family = IWL_DEVICE_FAMILY_AX210, \
|
||||||
.base_params = &iwl_22000_base_params, \
|
.base_params = &iwl_22560_base_params, \
|
||||||
.csr = &iwl_csr_v1, \
|
.csr = &iwl_csr_v1, \
|
||||||
.min_txq_size = 128, \
|
.min_txq_size = 128, \
|
||||||
.gp2_reg_addr = 0xd02c68, \
|
.gp2_reg_addr = 0xd02c68, \
|
||||||
|
@ -1,7 +1,7 @@
|
|||||||
/******************************************************************************
|
/******************************************************************************
|
||||||
*
|
*
|
||||||
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
|
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
|
||||||
* Copyright(c) 2018 Intel Corporation
|
* Copyright(c) 2018 - 2019 Intel Corporation
|
||||||
*
|
*
|
||||||
* This program is free software; you can redistribute it and/or modify it
|
* This program is free software; you can redistribute it and/or modify it
|
||||||
* under the terms of version 2 of the GNU General Public License as
|
* under the terms of version 2 of the GNU General Public License as
|
||||||
@ -136,6 +136,7 @@ const struct iwl_cfg iwl5350_agn_cfg = {
|
|||||||
.ht_params = &iwl5000_ht_params,
|
.ht_params = &iwl5000_ht_params,
|
||||||
.led_mode = IWL_LED_BLINK,
|
.led_mode = IWL_LED_BLINK,
|
||||||
.internal_wimax_coex = true,
|
.internal_wimax_coex = true,
|
||||||
|
.csr = &iwl_csr_v1,
|
||||||
};
|
};
|
||||||
|
|
||||||
#define IWL_DEVICE_5150 \
|
#define IWL_DEVICE_5150 \
|
||||||
|
@ -93,7 +93,7 @@ struct iwl_ucode_header {
|
|||||||
} u;
|
} u;
|
||||||
};
|
};
|
||||||
|
|
||||||
#define IWL_UCODE_INI_TLV_GROUP BIT(24)
|
#define IWL_UCODE_INI_TLV_GROUP 0x1000000
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* new TLV uCode file layout
|
* new TLV uCode file layout
|
||||||
@ -148,11 +148,14 @@ enum iwl_ucode_tlv_type {
|
|||||||
IWL_UCODE_TLV_UMAC_DEBUG_ADDRS = 54,
|
IWL_UCODE_TLV_UMAC_DEBUG_ADDRS = 54,
|
||||||
IWL_UCODE_TLV_LMAC_DEBUG_ADDRS = 55,
|
IWL_UCODE_TLV_LMAC_DEBUG_ADDRS = 55,
|
||||||
IWL_UCODE_TLV_FW_RECOVERY_INFO = 57,
|
IWL_UCODE_TLV_FW_RECOVERY_INFO = 57,
|
||||||
IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION = IWL_UCODE_INI_TLV_GROUP | 0x1,
|
|
||||||
IWL_UCODE_TLV_TYPE_HCMD = IWL_UCODE_INI_TLV_GROUP | 0x2,
|
IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION = IWL_UCODE_INI_TLV_GROUP + 0x1,
|
||||||
IWL_UCODE_TLV_TYPE_REGIONS = IWL_UCODE_INI_TLV_GROUP | 0x3,
|
IWL_UCODE_TLV_DEBUG_BASE = IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION,
|
||||||
IWL_UCODE_TLV_TYPE_TRIGGERS = IWL_UCODE_INI_TLV_GROUP | 0x4,
|
IWL_UCODE_TLV_TYPE_HCMD = IWL_UCODE_INI_TLV_GROUP + 0x2,
|
||||||
IWL_UCODE_TLV_TYPE_DEBUG_FLOW = IWL_UCODE_INI_TLV_GROUP | 0x5,
|
IWL_UCODE_TLV_TYPE_REGIONS = IWL_UCODE_INI_TLV_GROUP + 0x3,
|
||||||
|
IWL_UCODE_TLV_TYPE_TRIGGERS = IWL_UCODE_INI_TLV_GROUP + 0x4,
|
||||||
|
IWL_UCODE_TLV_TYPE_DEBUG_FLOW = IWL_UCODE_INI_TLV_GROUP + 0x5,
|
||||||
|
IWL_UCODE_TLV_DEBUG_MAX = IWL_UCODE_TLV_TYPE_DEBUG_FLOW,
|
||||||
|
|
||||||
/* TLVs 0x1000-0x2000 are for internal driver usage */
|
/* TLVs 0x1000-0x2000 are for internal driver usage */
|
||||||
IWL_UCODE_TLV_FW_DBG_DUMP_LST = 0x1000,
|
IWL_UCODE_TLV_FW_DBG_DUMP_LST = 0x1000,
|
||||||
|
@ -129,7 +129,8 @@ void iwl_alloc_dbg_tlv(struct iwl_trans *trans, size_t len, const u8 *data,
|
|||||||
len -= ALIGN(tlv_len, 4);
|
len -= ALIGN(tlv_len, 4);
|
||||||
data += sizeof(*tlv) + ALIGN(tlv_len, 4);
|
data += sizeof(*tlv) + ALIGN(tlv_len, 4);
|
||||||
|
|
||||||
if (!(tlv_type & IWL_UCODE_INI_TLV_GROUP))
|
if (tlv_type < IWL_UCODE_TLV_DEBUG_BASE ||
|
||||||
|
tlv_type > IWL_UCODE_TLV_DEBUG_MAX)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
hdr = (void *)&tlv->data[0];
|
hdr = (void *)&tlv->data[0];
|
||||||
|
@ -773,6 +773,11 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
|||||||
return;
|
return;
|
||||||
|
|
||||||
mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir);
|
mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir);
|
||||||
|
if (IS_ERR_OR_NULL(mvmvif->dbgfs_dir)) {
|
||||||
|
IWL_ERR(mvm, "Failed to create debugfs directory under %pd\n",
|
||||||
|
dbgfs_dir);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
|
if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
|
||||||
((vif->type == NL80211_IFTYPE_STATION && !vif->p2p) ||
|
((vif->type == NL80211_IFTYPE_STATION && !vif->p2p) ||
|
||||||
|
@ -1121,7 +1121,9 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
|
|||||||
ret = iwl_mvm_load_rt_fw(mvm);
|
ret = iwl_mvm_load_rt_fw(mvm);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
|
IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
|
||||||
iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER);
|
if (ret != -ERFKILL)
|
||||||
|
iwl_fw_dbg_error_collect(&mvm->fwrt,
|
||||||
|
FW_DBG_TRIGGER_DRIVER);
|
||||||
goto error;
|
goto error;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -834,7 +834,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
|
|||||||
mutex_lock(&mvm->mutex);
|
mutex_lock(&mvm->mutex);
|
||||||
iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
|
iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
|
||||||
err = iwl_run_init_mvm_ucode(mvm, true);
|
err = iwl_run_init_mvm_ucode(mvm, true);
|
||||||
if (err)
|
if (err && err != -ERFKILL)
|
||||||
iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER);
|
iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER);
|
||||||
if (!iwlmvm_mod_params.init_dbg || !err)
|
if (!iwlmvm_mod_params.init_dbg || !err)
|
||||||
iwl_mvm_stop_device(mvm);
|
iwl_mvm_stop_device(mvm);
|
||||||
|
@ -169,8 +169,8 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* iwl_mvm_create_skb Adds the rxb to a new skb */
|
/* iwl_mvm_create_skb Adds the rxb to a new skb */
|
||||||
static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
|
static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
|
||||||
u16 len, u8 crypt_len,
|
struct ieee80211_hdr *hdr, u16 len, u8 crypt_len,
|
||||||
struct iwl_rx_cmd_buffer *rxb)
|
struct iwl_rx_cmd_buffer *rxb)
|
||||||
{
|
{
|
||||||
struct iwl_rx_packet *pkt = rxb_addr(rxb);
|
struct iwl_rx_packet *pkt = rxb_addr(rxb);
|
||||||
@ -204,6 +204,20 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
|
|||||||
* present before copying packet data.
|
* present before copying packet data.
|
||||||
*/
|
*/
|
||||||
hdrlen += crypt_len;
|
hdrlen += crypt_len;
|
||||||
|
|
||||||
|
if (WARN_ONCE(headlen < hdrlen,
|
||||||
|
"invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n",
|
||||||
|
hdrlen, len, crypt_len)) {
|
||||||
|
/*
|
||||||
|
* We warn and trace because we want to be able to see
|
||||||
|
* it in trace-cmd as well.
|
||||||
|
*/
|
||||||
|
IWL_DEBUG_RX(mvm,
|
||||||
|
"invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n",
|
||||||
|
hdrlen, len, crypt_len);
|
||||||
|
return -EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
skb_put_data(skb, hdr, hdrlen);
|
skb_put_data(skb, hdr, hdrlen);
|
||||||
skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen);
|
skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen);
|
||||||
|
|
||||||
@ -216,6 +230,8 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
|
|||||||
skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
|
skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
|
||||||
fraglen, rxb->truesize);
|
fraglen, rxb->truesize);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void iwl_mvm_add_rtap_sniffer_config(struct iwl_mvm *mvm,
|
static void iwl_mvm_add_rtap_sniffer_config(struct iwl_mvm *mvm,
|
||||||
@ -1671,7 +1687,11 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
|
|||||||
rx_status->boottime_ns = ktime_get_boot_ns();
|
rx_status->boottime_ns = ktime_get_boot_ns();
|
||||||
}
|
}
|
||||||
|
|
||||||
iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb);
|
if (iwl_mvm_create_skb(mvm, skb, hdr, len, crypt_len, rxb)) {
|
||||||
|
kfree_skb(skb);
|
||||||
|
goto out;
|
||||||
|
}
|
||||||
|
|
||||||
if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc))
|
if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc))
|
||||||
iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue,
|
iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue,
|
||||||
sta, csi);
|
sta, csi);
|
||||||
|
@ -3654,20 +3654,27 @@ out_no_pci:
|
|||||||
|
|
||||||
void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
|
void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
|
||||||
{
|
{
|
||||||
|
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||||
unsigned long timeout = jiffies + IWL_TRANS_NMI_TIMEOUT;
|
unsigned long timeout = jiffies + IWL_TRANS_NMI_TIMEOUT;
|
||||||
|
u32 inta_addr, sw_err_bit;
|
||||||
|
|
||||||
|
if (trans_pcie->msix_enabled) {
|
||||||
|
inta_addr = CSR_MSIX_HW_INT_CAUSES_AD;
|
||||||
|
sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR;
|
||||||
|
} else {
|
||||||
|
inta_addr = CSR_INT;
|
||||||
|
sw_err_bit = CSR_INT_BIT_SW_ERR;
|
||||||
|
}
|
||||||
|
|
||||||
iwl_disable_interrupts(trans);
|
iwl_disable_interrupts(trans);
|
||||||
iwl_force_nmi(trans);
|
iwl_force_nmi(trans);
|
||||||
while (time_after(timeout, jiffies)) {
|
while (time_after(timeout, jiffies)) {
|
||||||
u32 inta_hw = iwl_read32(trans,
|
u32 inta_hw = iwl_read32(trans, inta_addr);
|
||||||
CSR_MSIX_HW_INT_CAUSES_AD);
|
|
||||||
|
|
||||||
/* Error detected by uCode */
|
/* Error detected by uCode */
|
||||||
if (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR) {
|
if (inta_hw & sw_err_bit) {
|
||||||
/* Clear causes register */
|
/* Clear causes register */
|
||||||
iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD,
|
iwl_write32(trans, inta_addr, inta_hw & sw_err_bit);
|
||||||
inta_hw &
|
|
||||||
MSIX_HW_INT_CAUSES_REG_SW_ERR);
|
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -181,7 +181,7 @@ static int mwifiex_sdio_resume(struct device *dev)
|
|||||||
|
|
||||||
adapter = card->adapter;
|
adapter = card->adapter;
|
||||||
|
|
||||||
if (test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
|
if (!test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
|
||||||
mwifiex_dbg(adapter, WARN,
|
mwifiex_dbg(adapter, WARN,
|
||||||
"device already resumed\n");
|
"device already resumed\n");
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -6262,8 +6262,7 @@ static int __init pci_setup(char *str)
|
|||||||
} else if (!strncmp(str, "pcie_scan_all", 13)) {
|
} else if (!strncmp(str, "pcie_scan_all", 13)) {
|
||||||
pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
|
pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
|
||||||
} else if (!strncmp(str, "disable_acs_redir=", 18)) {
|
} else if (!strncmp(str, "disable_acs_redir=", 18)) {
|
||||||
disable_acs_redir_param =
|
disable_acs_redir_param = str + 18;
|
||||||
kstrdup(str + 18, GFP_KERNEL);
|
|
||||||
} else {
|
} else {
|
||||||
printk(KERN_ERR "PCI: Unknown option `%s'\n",
|
printk(KERN_ERR "PCI: Unknown option `%s'\n",
|
||||||
str);
|
str);
|
||||||
@ -6274,3 +6273,19 @@ static int __init pci_setup(char *str)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
early_param("pci", pci_setup);
|
early_param("pci", pci_setup);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* 'disable_acs_redir_param' is initialized in pci_setup(), above, to point
|
||||||
|
* to data in the __initdata section which will be freed after the init
|
||||||
|
* sequence is complete. We can't allocate memory in pci_setup() because some
|
||||||
|
* architectures do not have any memory allocation service available during
|
||||||
|
* an early_param() call. So we allocate memory and copy the variable here
|
||||||
|
* before the init section is freed.
|
||||||
|
*/
|
||||||
|
static int __init pci_realloc_setup_params(void)
|
||||||
|
{
|
||||||
|
disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
pure_initcall(pci_realloc_setup_params);
|
||||||
|
@ -142,3 +142,11 @@ config PCIE_PTM
|
|||||||
|
|
||||||
This is only useful if you have devices that support PTM, but it
|
This is only useful if you have devices that support PTM, but it
|
||||||
is safe to enable even if you don't.
|
is safe to enable even if you don't.
|
||||||
|
|
||||||
|
config PCIE_BW
|
||||||
|
bool "PCI Express Bandwidth Change Notification"
|
||||||
|
depends on PCIEPORTBUS
|
||||||
|
help
|
||||||
|
This enables PCI Express Bandwidth Change Notification. If
|
||||||
|
you know link width or rate changes occur only to correct
|
||||||
|
unreliable links, you may answer Y.
|
||||||
|
@ -3,7 +3,6 @@
|
|||||||
# Makefile for PCI Express features and port driver
|
# Makefile for PCI Express features and port driver
|
||||||
|
|
||||||
pcieportdrv-y := portdrv_core.o portdrv_pci.o err.o
|
pcieportdrv-y := portdrv_core.o portdrv_pci.o err.o
|
||||||
pcieportdrv-y += bw_notification.o
|
|
||||||
|
|
||||||
obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o
|
obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o
|
||||||
|
|
||||||
@ -13,3 +12,4 @@ obj-$(CONFIG_PCIEAER_INJECT) += aer_inject.o
|
|||||||
obj-$(CONFIG_PCIE_PME) += pme.o
|
obj-$(CONFIG_PCIE_PME) += pme.o
|
||||||
obj-$(CONFIG_PCIE_DPC) += dpc.o
|
obj-$(CONFIG_PCIE_DPC) += dpc.o
|
||||||
obj-$(CONFIG_PCIE_PTM) += ptm.o
|
obj-$(CONFIG_PCIE_PTM) += ptm.o
|
||||||
|
obj-$(CONFIG_PCIE_BW) += bw_notification.o
|
||||||
|
@ -49,7 +49,11 @@ int pcie_dpc_init(void);
|
|||||||
static inline int pcie_dpc_init(void) { return 0; }
|
static inline int pcie_dpc_init(void) { return 0; }
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
#ifdef CONFIG_PCIE_BW
|
||||||
int pcie_bandwidth_notification_init(void);
|
int pcie_bandwidth_notification_init(void);
|
||||||
|
#else
|
||||||
|
static inline int pcie_bandwidth_notification_init(void) { return 0; }
|
||||||
|
#endif
|
||||||
|
|
||||||
/* Port Type */
|
/* Port Type */
|
||||||
#define PCIE_ANY_PORT (~0)
|
#define PCIE_ANY_PORT (~0)
|
||||||
|
@ -55,7 +55,8 @@ static int pcie_message_numbers(struct pci_dev *dev, int mask,
|
|||||||
* 7.8.2, 7.10.10, 7.31.2.
|
* 7.8.2, 7.10.10, 7.31.2.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP)) {
|
if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP |
|
||||||
|
PCIE_PORT_SERVICE_BWNOTIF)) {
|
||||||
pcie_capability_read_word(dev, PCI_EXP_FLAGS, ®16);
|
pcie_capability_read_word(dev, PCI_EXP_FLAGS, ®16);
|
||||||
*pme = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9;
|
*pme = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9;
|
||||||
nvec = *pme + 1;
|
nvec = *pme + 1;
|
||||||
|
@@ -221,6 +221,9 @@ static int cpcap_battery_cc_raw_div(struct cpcap_battery_ddata *ddata,
 	int avg_current;
 	u32 cc_lsb;
 
+	if (!divider)
+		return 0;
+
 	sample &= 0xffffff;		/* 24-bits, unsigned */
 	offset &= 0x7ff;		/* 10-bits, signed */
 
@@ -383,15 +383,11 @@ int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env)
 	char *prop_buf;
 	char *attrname;
 
-	dev_dbg(dev, "uevent\n");
-
 	if (!psy || !psy->desc) {
 		dev_dbg(dev, "No power supply yet\n");
 		return ret;
 	}
 
-	dev_dbg(dev, "POWER_SUPPLY_NAME=%s\n", psy->desc->name);
-
 	ret = add_uevent_var(env, "POWER_SUPPLY_NAME=%s", psy->desc->name);
 	if (ret)
 		return ret;
@@ -427,8 +423,6 @@ int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env)
 			goto out;
 		}
 
-		dev_dbg(dev, "prop %s=%s\n", attrname, prop_buf);
-
 		ret = add_uevent_var(env, "POWER_SUPPLY_%s=%s", attrname, prop_buf);
 		kfree(attrname);
 		if (ret)
@@ -473,11 +473,6 @@ static int usb_unbind_interface(struct device *dev)
 		pm_runtime_disable(dev);
 	pm_runtime_set_suspended(dev);
 
-	/* Undo any residual pm_autopm_get_interface_* calls */
-	for (r = atomic_read(&intf->pm_usage_cnt); r > 0; --r)
-		usb_autopm_put_interface_no_suspend(intf);
-	atomic_set(&intf->pm_usage_cnt, 0);
-
 	if (!error)
 		usb_autosuspend_device(udev);
 
@@ -1633,7 +1628,6 @@ void usb_autopm_put_interface(struct usb_interface *intf)
 	int			status;
 
 	usb_mark_last_busy(udev);
-	atomic_dec(&intf->pm_usage_cnt);
 	status = pm_runtime_put_sync(&intf->dev);
 	dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
 			__func__, atomic_read(&intf->dev.power.usage_count),
@@ -1662,7 +1656,6 @@ void usb_autopm_put_interface_async(struct usb_interface *intf)
 	int			status;
 
 	usb_mark_last_busy(udev);
-	atomic_dec(&intf->pm_usage_cnt);
 	status = pm_runtime_put(&intf->dev);
 	dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
 			__func__, atomic_read(&intf->dev.power.usage_count),
@@ -1684,7 +1677,6 @@ void usb_autopm_put_interface_no_suspend(struct usb_interface *intf)
 	struct usb_device	*udev = interface_to_usbdev(intf);
 
 	usb_mark_last_busy(udev);
-	atomic_dec(&intf->pm_usage_cnt);
 	pm_runtime_put_noidle(&intf->dev);
 }
 EXPORT_SYMBOL_GPL(usb_autopm_put_interface_no_suspend);
@@ -1715,8 +1707,6 @@ int usb_autopm_get_interface(struct usb_interface *intf)
 	status = pm_runtime_get_sync(&intf->dev);
 	if (status < 0)
 		pm_runtime_put_sync(&intf->dev);
-	else
-		atomic_inc(&intf->pm_usage_cnt);
 	dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
 			__func__, atomic_read(&intf->dev.power.usage_count),
 			status);
@@ -1750,8 +1740,6 @@ int usb_autopm_get_interface_async(struct usb_interface *intf)
 	status = pm_runtime_get(&intf->dev);
 	if (status < 0 && status != -EINPROGRESS)
 		pm_runtime_put_noidle(&intf->dev);
-	else
-		atomic_inc(&intf->pm_usage_cnt);
 	dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
 			__func__, atomic_read(&intf->dev.power.usage_count),
 			status);
@@ -1775,7 +1763,6 @@ void usb_autopm_get_interface_no_resume(struct usb_interface *intf)
 	struct usb_device	*udev = interface_to_usbdev(intf);
 
 	usb_mark_last_busy(udev);
-	atomic_inc(&intf->pm_usage_cnt);
 	pm_runtime_get_noresume(&intf->dev);
 }
 EXPORT_SYMBOL_GPL(usb_autopm_get_interface_no_resume);
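
With the private pm_usage_cnt gone in the hunks above, the runtime-PM usage count in intf->dev is the only bookkeeping left, so a driver's "get"s and "put"s must pair up exactly. A minimal sketch of the balanced pattern, with a hypothetical my_driver_do_io():

#include <linux/usb.h>

static int my_driver_do_io(struct usb_interface *intf)
{
	int ret;

	ret = usb_autopm_get_interface(intf);	/* resume and block autosuspend */
	if (ret < 0)
		return ret;

	/* ... submit and reap URBs here ... */

	usb_autopm_put_interface(intf);		/* re-enable autosuspend */
	return 0;
}
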
@@ -820,9 +820,11 @@ int usb_string(struct usb_device *dev, int index, char *buf, size_t size)
 
 	if (dev->state == USB_STATE_SUSPENDED)
 		return -EHOSTUNREACH;
-	if (size <= 0 || !buf || !index)
+	if (size <= 0 || !buf)
 		return -EINVAL;
 	buf[0] = 0;
+	if (index <= 0 || index >= 256)
+		return -EINVAL;
 	tbuf = kmalloc(256, GFP_NOIO);
 	if (!tbuf)
 		return -ENOMEM;
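
The reordering above clears buf[0] before the index range check, so even an -EINVAL return leaves a terminated empty string behind. That matters for callers that ignore the return value, as in this illustrative sketch (my_report_product is hypothetical):

#include <linux/usb.h>

static void my_report_product(struct usb_device *udev)
{
	char name[64];

	/* iProduct may be 0 (no descriptor); usb_string() then fails with
	 * -EINVAL but name[] still holds a valid empty string. */
	usb_string(udev, udev->descriptor.iProduct, name, sizeof(name));
	dev_info(&udev->dev, "product: \"%s\"\n", name);
}
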
@@ -979,8 +979,18 @@ static int dummy_udc_start(struct usb_gadget *g,
 	struct dummy_hcd	*dum_hcd = gadget_to_dummy_hcd(g);
 	struct dummy		*dum = dum_hcd->dum;
 
-	if (driver->max_speed == USB_SPEED_UNKNOWN)
+	switch (g->speed) {
+	/* All the speeds we support */
+	case USB_SPEED_LOW:
+	case USB_SPEED_FULL:
+	case USB_SPEED_HIGH:
+	case USB_SPEED_SUPER:
+		break;
+	default:
+		dev_err(dummy_dev(dum_hcd), "Unsupported driver max speed %d\n",
+				driver->max_speed);
 		return -EINVAL;
+	}
 
 	/*
 	 * SLAVE side init ... the layer above hardware, which
@@ -1784,9 +1794,10 @@ static void dummy_timer(struct timer_list *t)
 		/* Bus speed is 500000 bytes/ms, so use a little less */
 		total = 490000;
 		break;
-	default:
+	default:	/* Can't happen */
 		dev_err(dummy_dev(dum_hcd), "bogus device speed\n");
-		return;
+		total = 0;
+		break;
 	}
 
 	/* FIXME if HZ != 1000 this will probably misbehave ... */
@@ -1828,7 +1839,7 @@ restart:
 
 		/* Used up this frame's bandwidth? */
 		if (total <= 0)
-			break;
+			continue;
 
 		/* find the gadget's ep for this request (if configured) */
 		address = usb_pipeendpoint (urb->pipe);
@@ -314,6 +314,7 @@ static void yurex_disconnect(struct usb_interface *interface)
 	usb_deregister_dev(interface, &yurex_class);
 
 	/* prevent more I/O from starting */
+	usb_poison_urb(dev->urb);
 	mutex_lock(&dev->io_mutex);
 	dev->interface = NULL;
 	mutex_unlock(&dev->io_mutex);
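
The hunk above illustrates a disconnect-side pattern: poison the URB so no new submissions succeed and any in-flight one completes before the driver's interface pointer is cleared. A sketch of the same shape, with hypothetical structure and field names (struct my_dev, my_disconnect):

#include <linux/usb.h>
#include <linux/mutex.h>

struct my_dev {
	struct urb *urb;
	struct usb_interface *interface;
	struct mutex io_mutex;
};

static void my_disconnect(struct usb_interface *interface)
{
	struct my_dev *dev = usb_get_intfdata(interface);

	usb_poison_urb(dev->urb);	/* waits for a pending URB to finish */
	mutex_lock(&dev->io_mutex);
	dev->interface = NULL;		/* I/O paths check this under the mutex */
	mutex_unlock(&dev->io_mutex);
}
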
@@ -763,18 +763,16 @@ static void rts51x_suspend_timer_fn(struct timer_list *t)
 		break;
 	case RTS51X_STAT_IDLE:
 	case RTS51X_STAT_SS:
-		usb_stor_dbg(us, "RTS51X_STAT_SS, intf->pm_usage_cnt:%d, power.usage:%d\n",
-			     atomic_read(&us->pusb_intf->pm_usage_cnt),
+		usb_stor_dbg(us, "RTS51X_STAT_SS, power.usage:%d\n",
 			     atomic_read(&us->pusb_intf->dev.power.usage_count));
 
-		if (atomic_read(&us->pusb_intf->pm_usage_cnt) > 0) {
+		if (atomic_read(&us->pusb_intf->dev.power.usage_count) > 0) {
 			usb_stor_dbg(us, "Ready to enter SS state\n");
 			rts51x_set_stat(chip, RTS51X_STAT_SS);
 			/* ignore mass storage interface's children */
 			pm_suspend_ignore_children(&us->pusb_intf->dev, true);
 			usb_autopm_put_interface_async(us->pusb_intf);
-			usb_stor_dbg(us, "RTS51X_STAT_SS 01, intf->pm_usage_cnt:%d, power.usage:%d\n",
-				     atomic_read(&us->pusb_intf->pm_usage_cnt),
+			usb_stor_dbg(us, "RTS51X_STAT_SS 01, power.usage:%d\n",
 				     atomic_read(&us->pusb_intf->dev.power.usage_count));
 		}
 		break;
@@ -807,11 +805,10 @@ static void rts51x_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
 	int ret;
 
 	if (working_scsi(srb)) {
-		usb_stor_dbg(us, "working scsi, intf->pm_usage_cnt:%d, power.usage:%d\n",
-			     atomic_read(&us->pusb_intf->pm_usage_cnt),
+		usb_stor_dbg(us, "working scsi, power.usage:%d\n",
 			     atomic_read(&us->pusb_intf->dev.power.usage_count));
 
-		if (atomic_read(&us->pusb_intf->pm_usage_cnt) <= 0) {
+		if (atomic_read(&us->pusb_intf->dev.power.usage_count) <= 0) {
 			ret = usb_autopm_get_interface(us->pusb_intf);
 			usb_stor_dbg(us, "working scsi, ret=%d\n", ret);
 		}
@@ -361,16 +361,10 @@ static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
 	}
 
 	if (usb_endpoint_xfer_isoc(epd)) {
-		/* validate packet size and number of packets */
-		unsigned int maxp, packets, bytes;
-
-		maxp = usb_endpoint_maxp(epd);
-		maxp *= usb_endpoint_maxp_mult(epd);
-		bytes = pdu->u.cmd_submit.transfer_buffer_length;
-		packets = DIV_ROUND_UP(bytes, maxp);
-
+		/* validate number of packets */
 		if (pdu->u.cmd_submit.number_of_packets < 0 ||
-		    pdu->u.cmd_submit.number_of_packets > packets) {
+		    pdu->u.cmd_submit.number_of_packets >
+		    USBIP_MAX_ISO_PACKETS) {
 			dev_err(&sdev->udev->dev,
 				"CMD_SUBMIT: isoc invalid num packets %d\n",
 				pdu->u.cmd_submit.number_of_packets);
@@ -121,6 +121,13 @@ extern struct device_attribute dev_attr_usbip_debug;
 #define USBIP_DIR_OUT	0x00
 #define USBIP_DIR_IN	0x01
 
+/*
+ * Arbitrary limit for the maximum number of isochronous packets in an URB,
+ * compare for example the uhci_submit_isochronous function in
+ * drivers/usb/host/uhci-q.c
+ */
+#define USBIP_MAX_ISO_PACKETS 1024
+
 /**
  * struct usbip_header_basic - data pertinent to every request
  * @command: the usbip request type
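
Taken together with the stub_rx hunk above, the new macro reduces the isochronous sanity check to a fixed bound rather than one derived from the endpoint's advertised maxpacket. A sketch of the resulting predicate (the helper name is illustrative):

#include <linux/types.h>

static bool usbip_iso_packets_valid(int number_of_packets)
{
	return number_of_packets >= 0 &&
	       number_of_packets <= USBIP_MAX_ISO_PACKETS;
}
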
@@ -1016,15 +1016,15 @@ static int ds_probe(struct usb_interface *intf,
 	/* alternative 3, 1ms interrupt (greatly speeds search), 64 byte bulk */
 	alt = 3;
 	err = usb_set_interface(dev->udev,
-		intf->altsetting[alt].desc.bInterfaceNumber, alt);
+		intf->cur_altsetting->desc.bInterfaceNumber, alt);
 	if (err) {
 		dev_err(&dev->udev->dev, "Failed to set alternative setting %d "
 			"for %d interface: err=%d.\n", alt,
-			intf->altsetting[alt].desc.bInterfaceNumber, err);
+			intf->cur_altsetting->desc.bInterfaceNumber, err);
 		goto err_out_clear;
 	}
 
-	iface_desc = &intf->altsetting[alt];
+	iface_desc = intf->cur_altsetting;
 	if (iface_desc->desc.bNumEndpoints != NUM_EP-1) {
 		pr_info("Num endpoints=%d. It is not DS9490R.\n",
 			iface_desc->desc.bNumEndpoints);
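
intf->altsetting[] is ordered by descriptor position, not by bAlternateSetting value, so indexing it with the desired setting number can pick the wrong entry, or run past the array on a misbehaving device; intf->cur_altsetting is always the setting currently installed. A minimal sketch of the safe access pattern (my_probe_check is hypothetical):

#include <linux/usb.h>
#include <linux/errno.h>

static int my_probe_check(struct usb_interface *intf)
{
	struct usb_host_interface *iface_desc = intf->cur_altsetting;

	if (iface_desc->desc.bNumEndpoints < 1)
		return -ENODEV;	/* not the device we expect */
	return 0;
}
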
@@ -264,6 +264,7 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
 	bio_for_each_segment_all(bvec, &bio, i, iter_all) {
 		if (should_dirty && !PageCompound(bvec->bv_page))
 			set_page_dirty_lock(bvec->bv_page);
-		put_page(bvec->bv_page);
+		if (!bio_flagged(&bio, BIO_NO_PAGE_REF))
+			put_page(bvec->bv_page);
 	}
 
@@ -7,6 +7,7 @@
 #include <linux/slab.h>
 #include <linux/pagemap.h>
 #include <linux/highmem.h>
+#include <linux/sched/mm.h>
 #include "ctree.h"
 #include "disk-io.h"
 #include "transaction.h"
@@ -427,9 +428,13 @@ blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
 	unsigned long this_sum_bytes = 0;
 	int i;
 	u64 offset;
+	unsigned nofs_flag;
+
+	nofs_flag = memalloc_nofs_save();
+	sums = kvzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
+		       GFP_KERNEL);
+	memalloc_nofs_restore(nofs_flag);
 
-	sums = kzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
-		       GFP_NOFS);
 	if (!sums)
 		return BLK_STS_RESOURCE;
 
@@ -472,8 +477,10 @@ blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
 
			bytes_left = bio->bi_iter.bi_size - total_bytes;
 
-			sums = kzalloc(btrfs_ordered_sum_size(fs_info, bytes_left),
-				       GFP_NOFS);
+			nofs_flag = memalloc_nofs_save();
+			sums = kvzalloc(btrfs_ordered_sum_size(fs_info,
+					      bytes_left), GFP_KERNEL);
+			memalloc_nofs_restore(nofs_flag);
			BUG_ON(!sums); /* -ENOMEM */
			sums->len = bytes_left;
			ordered = btrfs_lookup_ordered_extent(inode,
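
Both hunks switch to the same allocation idiom: scope the NOFS constraint with memalloc_nofs_save()/memalloc_nofs_restore() so GFP_KERNEL behaves as GFP_NOFS, and use kvzalloc() so large checksum arrays can fall back to vmalloc instead of demanding contiguous pages. A condensed sketch (alloc_sums_nofs and its size argument are placeholders); note the matching kvfree() in the ordered-data.c hunk further down:

#include <linux/sched/mm.h>
#include <linux/mm.h>

static void *alloc_sums_nofs(size_t size)
{
	unsigned int nofs_flag;
	void *sums;

	nofs_flag = memalloc_nofs_save();	/* GFP_KERNEL now implies no FS recursion */
	sums = kvzalloc(size, GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);
	return sums;				/* must be released with kvfree() */
}
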
@@ -6783,7 +6783,7 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
 	u64 extent_start = 0;
 	u64 extent_end = 0;
 	u64 objectid = btrfs_ino(inode);
-	u8 extent_type;
+	int extent_type = -1;
 	struct btrfs_path *path = NULL;
 	struct btrfs_root *root = inode->root;
 	struct btrfs_file_extent_item *item;
@@ -6,6 +6,7 @@
 #include <linux/slab.h>
 #include <linux/blkdev.h>
 #include <linux/writeback.h>
+#include <linux/sched/mm.h>
 #include "ctree.h"
 #include "transaction.h"
 #include "btrfs_inode.h"
@@ -442,7 +443,7 @@ void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
 		cur = entry->list.next;
 		sum = list_entry(cur, struct btrfs_ordered_sum, list);
 		list_del(&sum->list);
-		kfree(sum);
+		kvfree(sum);
 	}
 	kmem_cache_free(btrfs_ordered_extent_cache, entry);
 }
@@ -1766,6 +1766,7 @@ static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
 unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
 {
 	struct ceph_inode_info *dci = ceph_inode(dir);
+	unsigned hash;
 
 	switch (dci->i_dir_layout.dl_dir_hash) {
 	case 0:	/* for backward compat */
@@ -1773,8 +1774,11 @@ unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
 		return dn->d_name.hash;
 
 	default:
-		return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
+		spin_lock(&dn->d_lock);
+		hash = ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
 				     dn->d_name.name, dn->d_name.len);
+		spin_unlock(&dn->d_lock);
+		return hash;
 	}
 }
 
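
The lock added above reflects a general rule: dentry->d_name can be swapped by a concurrent rename, so any by-value use of the name must happen under d_lock. A generic sketch of the same discipline (the helper and its callback type are illustrative):

#include <linux/dcache.h>
#include <linux/spinlock.h>

static unsigned int hash_name_stable(struct dentry *dn,
		unsigned int (*hashfn)(const unsigned char *, unsigned int))
{
	unsigned int hash;

	spin_lock(&dn->d_lock);		/* pins d_name against rename */
	hash = hashfn(dn->d_name.name, dn->d_name.len);
	spin_unlock(&dn->d_lock);
	return hash;
}
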
@@ -1163,6 +1163,19 @@ static int splice_dentry(struct dentry **pdn, struct inode *in)
 	return 0;
 }
 
+static int d_name_cmp(struct dentry *dentry, const char *name, size_t len)
+{
+	int ret;
+
+	/* take d_lock to ensure dentry->d_name stability */
+	spin_lock(&dentry->d_lock);
+	ret = dentry->d_name.len - len;
+	if (!ret)
+		ret = memcmp(dentry->d_name.name, name, len);
+	spin_unlock(&dentry->d_lock);
+	return ret;
+}
+
 /*
  * Incorporate results into the local cache.  This is either just
  * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
@@ -1412,7 +1425,8 @@ retry_lookup:
 		err = splice_dentry(&req->r_dentry, in);
 		if (err < 0)
 			goto done;
-	} else if (rinfo->head->is_dentry) {
+	} else if (rinfo->head->is_dentry &&
+		   !d_name_cmp(req->r_dentry, rinfo->dname, rinfo->dname_len)) {
 		struct ceph_vino *ptvino = NULL;
 
 		if ((le32_to_cpu(rinfo->diri.in->cap.caps) & CEPH_CAP_FILE_SHARED) ||
Some files were not shown because too many files have changed in this diff.