Linux 3.8-rc7

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.13 (GNU/Linux)

iQEcBAABAgAGBQJRFWw4AAoJEHm+PkMAQRiGnTAH/jBHA2umNc3lc7VkUpusve4q
GGIlNzYh6iuvIGwKQVj9YPsl37qtQnkDUmY8f8WxZjfxiIDw3TkRKDgNLJaM3Jy5
E426/FGlRx/Iia5+4tuBeoVYMoIPnndgW5lEAMRK1SvhTByhIYAXsaM0UwPBetb+
Z5NMdH1f1HVF7RCCmHAkzEk9z78UpdeCzI0t0XuasP2hp2ARAcE1okdO7fNaLiyo
EmenGhRvy9bAsbRwV0rCdl0rQiZXEYM353DWS7n6q4fyVm8MXFwloUxnWCJTzOIL
ZLJaz18adFj7Ip/X6ksnMQiQU2Q3B7aThs5ycv0QGxxL2rDFveYRRQ5ICrXOy3M=
=jjBc
-----END PGP SIGNATURE-----

Merge tag 'v3.8-rc7' into next/boards

Linux 3.8-rc7

commit 2cb6a0708e
@@ -2438,7 +2438,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			real-time workloads. It can also improve energy
 			efficiency for asymmetric multiprocessors.
 
-	rcu_nocbs_poll	[KNL,BOOT]
+	rcu_nocb_poll	[KNL,BOOT]
 			Rather than requiring that offloaded CPUs
 			(specified by rcu_nocbs= above) explicitly
 			awaken the corresponding "rcuoN" kthreads,
@@ -57,7 +57,7 @@ Protocol 2.10: (Kernel 2.6.31) Added a protocol for relaxed alignment
 Protocol 2.11: (Kernel 3.6) Added a field for offset of EFI handover
 		protocol entry point.
 
-Protocol 2.12: (Kernel 3.9) Added the xloadflags field and extension fields
+Protocol 2.12: (Kernel 3.8) Added the xloadflags field and extension fields
 		to struct boot_params for for loading bzImage and ramdisk
 		above 4G in 64bit.
 
@@ -1489,7 +1489,7 @@ AVR32 ARCHITECTURE
 M:	Haavard Skinnemoen <hskinnemoen@gmail.com>
 M:	Hans-Christian Egtvedt <egtvedt@samfundet.no>
 W:	http://www.atmel.com/products/AVR32/
-W:	http://avr32linux.org/
+W:	http://mirror.egtvedt.no/avr32linux.org/
 W:	http://avrfreaks.net/
 S:	Maintained
 F:	arch/avr32/
Makefile

@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 8
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Unicycling Gorilla
 
 # *DOCUMENTATION*
@@ -351,6 +351,25 @@ void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
     irq_set_chained_handler(irq, gic_handle_cascade_irq);
 }
 
+static u8 gic_get_cpumask(struct gic_chip_data *gic)
+{
+    void __iomem *base = gic_data_dist_base(gic);
+    u32 mask, i;
+
+    for (i = mask = 0; i < 32; i += 4) {
+        mask = readl_relaxed(base + GIC_DIST_TARGET + i);
+        mask |= mask >> 16;
+        mask |= mask >> 8;
+        if (mask)
+            break;
+    }
+
+    if (!mask)
+        pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");
+
+    return mask;
+}
+
 static void __init gic_dist_init(struct gic_chip_data *gic)
 {
     unsigned int i;
@@ -369,7 +388,9 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
     /*
      * Set all global interrupts to this CPU only.
      */
-    cpumask = readl_relaxed(base + GIC_DIST_TARGET + 0);
+    cpumask = gic_get_cpumask(gic);
+    cpumask |= cpumask << 8;
+    cpumask |= cpumask << 16;
     for (i = 32; i < gic_irqs; i += 4)
         writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
 
@@ -400,7 +421,7 @@ static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
     * Get what the GIC says our CPU mask is.
     */
     BUG_ON(cpu >= NR_GIC_CPU_IF);
-    cpu_mask = readl_relaxed(dist_base + GIC_DIST_TARGET + 0);
+    cpu_mask = gic_get_cpumask(gic);
     gic_cpu_map[cpu] = cpu_mask;
 
     /*
@@ -37,7 +37,7 @@
 */
 #define PAGE_OFFSET		UL(CONFIG_PAGE_OFFSET)
 #define TASK_SIZE		(UL(CONFIG_PAGE_OFFSET) - UL(0x01000000))
-#define TASK_UNMAPPED_BASE	(UL(CONFIG_PAGE_OFFSET) / 3)
+#define TASK_UNMAPPED_BASE	ALIGN(TASK_SIZE / 3, SZ_16M)
 
 /*
 * The maximum size of a 26-bit user space task.
@@ -414,7 +414,7 @@ config MACH_EXYNOS4_DT
 	select CPU_EXYNOS4210
 	select HAVE_SAMSUNG_KEYPAD if INPUT_KEYBOARD
 	select PINCTRL
-	select PINCTRL_EXYNOS4
+	select PINCTRL_EXYNOS
 	select USE_OF
 	help
 	  Machine support for Samsung Exynos4 machine with device tree enabled.
@@ -115,7 +115,7 @@
 /*
 * Only define NR_IRQS if less than NR_IRQS_EB
 */
-#define NR_IRQS_EB		(IRQ_EB_GIC_START + 96)
+#define NR_IRQS_EB		(IRQ_EB_GIC_START + 128)
 
 #if defined(CONFIG_MACH_REALVIEW_EB) \
 	&& (!defined(NR_IRQS) || (NR_IRQS < NR_IRQS_EB))
@@ -640,7 +640,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 
     if (is_coherent || nommu())
         addr = __alloc_simple_buffer(dev, size, gfp, &page);
-    else if (gfp & GFP_ATOMIC)
+    else if (!(gfp & __GFP_WAIT))
         addr = __alloc_from_pool(size, &page);
     else if (!IS_ENABLED(CONFIG_CMA))
         addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
@@ -336,4 +336,14 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 
+/* drivers/base/dma-mapping.c */
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+        void *cpu_addr, dma_addr_t dma_addr, size_t size);
+extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+        void *cpu_addr, dma_addr_t dma_addr,
+        size_t size);
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+
 #endif /* __ASM_AVR32_DMA_MAPPING_H */
@@ -154,4 +154,14 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
     _dma_sync((dma_addr_t)vaddr, size, dir);
 }
 
+/* drivers/base/dma-mapping.c */
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+        void *cpu_addr, dma_addr_t dma_addr, size_t size);
+extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+        void *cpu_addr, dma_addr_t dma_addr,
+        size_t size);
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+
 #endif /* _BLACKFIN_DMA_MAPPING_H */
@@ -89,4 +89,19 @@ extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent((d), (s), (h), (f))
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent((d), (s), (v), (h))
 
+/* Not supported for now */
+static inline int dma_mmap_coherent(struct device *dev,
+        struct vm_area_struct *vma, void *cpu_addr,
+        dma_addr_t dma_addr, size_t size)
+{
+    return -EINVAL;
+}
+
+static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+        void *cpu_addr, dma_addr_t dma_addr,
+        size_t size)
+{
+    return -EINVAL;
+}
+
 #endif /* _ASM_C6X_DMA_MAPPING_H */
@@ -158,5 +158,15 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 {
 }
 
+/* drivers/base/dma-mapping.c */
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+        void *cpu_addr, dma_addr_t dma_addr, size_t size);
+extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+        void *cpu_addr, dma_addr_t dma_addr,
+        size_t size);
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+
 
 #endif
@@ -132,4 +132,19 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
     flush_write_buffers();
 }
 
+/* Not supported for now */
+static inline int dma_mmap_coherent(struct device *dev,
+        struct vm_area_struct *vma, void *cpu_addr,
+        dma_addr_t dma_addr, size_t size)
+{
+    return -EINVAL;
+}
+
+static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+        void *cpu_addr, dma_addr_t dma_addr,
+        size_t size)
+{
+    return -EINVAL;
+}
+
 #endif /* _ASM_DMA_MAPPING_H */
@@ -115,4 +115,14 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t handle)
 #include <asm-generic/dma-mapping-broken.h>
 #endif
 
+/* drivers/base/dma-mapping.c */
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+        void *cpu_addr, dma_addr_t dma_addr, size_t size);
+extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+        void *cpu_addr, dma_addr_t dma_addr,
+        size_t size);
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+
 #endif /* _M68K_DMA_MAPPING_H */
@@ -168,4 +168,19 @@ void dma_cache_sync(void *vaddr, size_t size,
     mn10300_dcache_flush_inv();
 }
 
+/* Not supported for now */
+static inline int dma_mmap_coherent(struct device *dev,
+        struct vm_area_struct *vma, void *cpu_addr,
+        dma_addr_t dma_addr, size_t size)
+{
+    return -EINVAL;
+}
+
+static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+        void *cpu_addr, dma_addr_t dma_addr,
+        size_t size)
+{
+    return -EINVAL;
+}
+
 #endif
@@ -238,4 +238,19 @@ void * sba_get_iommu(struct parisc_device *dev);
 /* At the moment, we panic on error for IOMMU resource exaustion */
 #define dma_mapping_error(dev, x)	0
 
+/* This API cannot be supported on PA-RISC */
+static inline int dma_mmap_coherent(struct device *dev,
+        struct vm_area_struct *vma, void *cpu_addr,
+        dma_addr_t dma_addr, size_t size)
+{
+    return -EINVAL;
+}
+
+static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+        void *cpu_addr, dma_addr_t dma_addr,
+        size_t size)
+{
+    return -EINVAL;
+}
+
 #endif
@@ -115,11 +115,13 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
     sldi    r29,r5,SID_SHIFT - VPN_SHIFT
     rldicl  r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
     or      r29,r28,r29
 
-    /* Calculate hash value for primary slot and store it in r28 */
-    rldicl  r5,r5,0,25      /* vsid & 0x0000007fffffffff */
-    rldicl  r0,r3,64-12,48  /* (ea >> 12) & 0xffff */
-    xor     r28,r5,r0
+    /*
+     * Calculate hash value for primary slot and store it in r28
+     * r3 = va, r5 = vsid
+     * r0 = (va >> 12) & ((1ul << (28 - 12)) -1)
+     */
+    rldicl  r0,r3,64-12,48
+    xor     r28,r5,r0       /* hash */
     b       4f
 
 3:  /* Calc vpn and put it in r29 */
@@ -130,11 +132,12 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
     /*
      * calculate hash value for primary slot and
      * store it in r28 for 1T segment
+     * r3 = va, r5 = vsid
      */
-    rldic   r28,r5,25,25    /* (vsid << 25) & 0x7fffffffff */
-    clrldi  r5,r5,40        /* vsid & 0xffffff */
-    rldicl  r0,r3,64-12,36  /* (ea >> 12) & 0xfffffff */
-    xor     r28,r28,r5
+    sldi    r28,r5,25       /* vsid << 25 */
+    /* r0 = (va >> 12) & ((1ul << (40 - 12)) -1) */
+    rldicl  r0,r3,64-12,36
+    xor     r28,r28,r5      /* vsid ^ ( vsid << 25) */
     xor     r28,r28,r0      /* hash */
 
     /* Convert linux PTE bits into HW equivalents */
@@ -407,11 +410,13 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
     */
     rldicl  r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
     or      r29,r28,r29
 
-    /* Calculate hash value for primary slot and store it in r28 */
-    rldicl  r5,r5,0,25      /* vsid & 0x0000007fffffffff */
-    rldicl  r0,r3,64-12,48  /* (ea >> 12) & 0xffff */
-    xor     r28,r5,r0
+    /*
+     * Calculate hash value for primary slot and store it in r28
+     * r3 = va, r5 = vsid
+     * r0 = (va >> 12) & ((1ul << (28 - 12)) -1)
+     */
+    rldicl  r0,r3,64-12,48
+    xor     r28,r5,r0       /* hash */
     b       4f
 
 3:  /* Calc vpn and put it in r29 */
@@ -426,11 +431,12 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
     /*
      * calculate hash value for primary slot and
      * store it in r28 for 1T segment
+     * r3 = va, r5 = vsid
      */
-    rldic   r28,r5,25,25    /* (vsid << 25) & 0x7fffffffff */
-    clrldi  r5,r5,40        /* vsid & 0xffffff */
-    rldicl  r0,r3,64-12,36  /* (ea >> 12) & 0xfffffff */
-    xor     r28,r28,r5
+    sldi    r28,r5,25       /* vsid << 25 */
+    /* r0 = (va >> 12) & ((1ul << (40 - 12)) -1) */
+    rldicl  r0,r3,64-12,36
+    xor     r28,r28,r5      /* vsid ^ ( vsid << 25) */
     xor     r28,r28,r0      /* hash */
 
     /* Convert linux PTE bits into HW equivalents */
@@ -752,25 +758,27 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
     rldicl  r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
     or      r29,r28,r29
 
-    /* Calculate hash value for primary slot and store it in r28 */
-    rldicl  r5,r5,0,25      /* vsid & 0x0000007fffffffff */
-    rldicl  r0,r3,64-16,52  /* (ea >> 16) & 0xfff */
-    xor     r28,r5,r0
+    /* Calculate hash value for primary slot and store it in r28
+     * r3 = va, r5 = vsid
+     * r0 = (va >> 16) & ((1ul << (28 - 16)) -1)
+     */
+    rldicl  r0,r3,64-16,52
+    xor     r28,r5,r0       /* hash */
     b       4f
 
 3:  /* Calc vpn and put it in r29 */
     sldi    r29,r5,SID_SHIFT_1T - VPN_SHIFT
     rldicl  r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)
     or      r29,r28,r29
 
     /*
      * calculate hash value for primary slot and
      * store it in r28 for 1T segment
+     * r3 = va, r5 = vsid
      */
-    rldic   r28,r5,25,25    /* (vsid << 25) & 0x7fffffffff */
-    clrldi  r5,r5,40        /* vsid & 0xffffff */
-    rldicl  r0,r3,64-16,40  /* (ea >> 16) & 0xffffff */
-    xor     r28,r28,r5
+    sldi    r28,r5,25       /* vsid << 25 */
+    /* r0 = (va >> 16) & ((1ul << (40 - 16)) -1) */
+    rldicl  r0,r3,64-16,40
+    xor     r28,r28,r5      /* vsid ^ ( vsid << 25) */
     xor     r28,r28,r0      /* hash */
 
     /* Convert linux PTE bits into HW equivalents */
@@ -207,7 +207,7 @@ sysexit_from_sys_call:
     testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
     jnz ia32_ret_from_sys_call
     TRACE_IRQS_ON
-    sti
+    ENABLE_INTERRUPTS(CLBR_NONE)
     movl %eax,%esi          /* second arg, syscall return value */
     cmpl $-MAX_ERRNO,%eax   /* is it an error ? */
     jbe 1f
@@ -217,7 +217,7 @@ sysexit_from_sys_call:
     call __audit_syscall_exit
     movq RAX-ARGOFFSET(%rsp),%rax   /* reload syscall return value */
    movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
-    cli
+    DISABLE_INTERRUPTS(CLBR_NONE)
     TRACE_IRQS_OFF
     testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
     jz \exit
@@ -298,8 +298,7 @@ struct _cache_attr {
         unsigned int);
 };
 
-#ifdef CONFIG_AMD_NB
-
+#if defined(CONFIG_AMD_NB) && defined(CONFIG_SYSFS)
 /*
  * L3 cache descriptors
  */
@@ -524,9 +523,9 @@ store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
 static struct _cache_attr subcaches =
     __ATTR(subcaches, 0644, show_subcaches, store_subcaches);
 
-#else	/* CONFIG_AMD_NB */
+#else
 #define amd_init_l3_cache(x, y)
-#endif /* CONFIG_AMD_NB */
+#endif /* CONFIG_AMD_NB && CONFIG_SYSFS */
 
 static int
 __cpuinit cpuid4_cache_lookup_regs(int index,
@@ -2019,7 +2019,10 @@ __init int intel_pmu_init(void)
         break;
 
     case 28: /* Atom */
-    case 54: /* Cedariew */
+    case 38: /* Lincroft */
+    case 39: /* Penwell */
+    case 53: /* Cloverview */
+    case 54: /* Cedarview */
         memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
                sizeof(hw_cache_event_ids));
 
@@ -2084,6 +2087,7 @@ __init int intel_pmu_init(void)
         pr_cont("SandyBridge events, ");
         break;
     case 58: /* IvyBridge */
+    case 62: /* IvyBridge EP */
         memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
                sizeof(hw_cache_event_ids));
         memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
@@ -19,7 +19,7 @@ static const u64 p6_perfmon_event_map[] =
 
 };
 
-static __initconst u64 p6_hw_cache_event_ids
+static u64 p6_hw_cache_event_ids
         [PERF_COUNT_HW_CACHE_MAX]
         [PERF_COUNT_HW_CACHE_OP_MAX]
         [PERF_COUNT_HW_CACHE_RESULT_MAX] =
@@ -55,7 +55,7 @@ static FILE *input_file;	/* Input file name */
 static void usage(const char *err)
 {
     if (err)
-        fprintf(stderr, "Error: %s\n\n", err);
+        fprintf(stderr, "%s: Error: %s\n\n", prog, err);
     fprintf(stderr, "Usage: %s [-y|-n|-v] [-s seed[,no]] [-m max] [-i input]\n", prog);
     fprintf(stderr, "\t-y	64bit mode\n");
     fprintf(stderr, "\t-n	32bit mode\n");
@@ -269,7 +269,13 @@ int main(int argc, char **argv)
         insns++;
     }
 
-    fprintf(stdout, "%s: decoded and checked %d %s instructions with %d errors (seed:0x%x)\n", (errors) ? "Failure" : "Success", insns, (input_file) ? "given" : "random", errors, seed);
+    fprintf(stdout, "%s: %s: decoded and checked %d %s instructions with %d errors (seed:0x%x)\n",
+        prog,
+        (errors) ? "Failure" : "Success",
+        insns,
+        (input_file) ? "given" : "random",
+        errors,
+        seed);
 
     return errors ? 1 : 0;
 }
@@ -170,4 +170,19 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
     consistent_sync(vaddr, size, direction);
 }
 
+/* Not supported for now */
+static inline int dma_mmap_coherent(struct device *dev,
+        struct vm_area_struct *vma, void *cpu_addr,
+        dma_addr_t dma_addr, size_t size)
+{
+    return -EINVAL;
+}
+
+static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+        void *cpu_addr, dma_addr_t dma_addr,
+        size_t size)
+{
+    return -EINVAL;
+}
+
 #endif /* _XTENSA_DMA_MAPPING_H */
@@ -35,6 +35,8 @@ static DEFINE_IDR(ext_devt_idr);
 
 static struct device_type disk_type;
 
+static void disk_check_events(struct disk_events *ev,
+                              unsigned int *clearing_ptr);
 static void disk_alloc_events(struct gendisk *disk);
 static void disk_add_events(struct gendisk *disk);
 static void disk_del_events(struct gendisk *disk);
@@ -1549,6 +1551,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
     const struct block_device_operations *bdops = disk->fops;
     struct disk_events *ev = disk->ev;
     unsigned int pending;
+    unsigned int clearing = mask;
 
     if (!ev) {
         /* for drivers still using the old ->media_changed method */
@@ -1558,34 +1561,53 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
         return 0;
     }
 
-    /* tell the workfn about the events being cleared */
+    disk_block_events(disk);
+
+    /*
+     * store the union of mask and ev->clearing on the stack so that the
+     * race with disk_flush_events does not cause ambiguity (ev->clearing
+     * can still be modified even if events are blocked).
+     */
     spin_lock_irq(&ev->lock);
-    ev->clearing |= mask;
+    clearing |= ev->clearing;
+    ev->clearing = 0;
     spin_unlock_irq(&ev->lock);
 
-    /* uncondtionally schedule event check and wait for it to finish */
-    disk_block_events(disk);
-    queue_delayed_work(system_freezable_wq, &ev->dwork, 0);
-    flush_delayed_work(&ev->dwork);
-    __disk_unblock_events(disk, false);
+    disk_check_events(ev, &clearing);
+    /*
+     * if ev->clearing is not 0, the disk_flush_events got called in the
+     * middle of this function, so we want to run the workfn without delay.
+     */
+    __disk_unblock_events(disk, ev->clearing ? true : false);
 
     /* then, fetch and clear pending events */
     spin_lock_irq(&ev->lock);
-    WARN_ON_ONCE(ev->clearing & mask);	/* cleared by workfn */
     pending = ev->pending & mask;
     ev->pending &= ~mask;
     spin_unlock_irq(&ev->lock);
+    WARN_ON_ONCE(clearing & mask);
 
     return pending;
 }
 
+/*
+ * Separate this part out so that a different pointer for clearing_ptr can be
+ * passed in for disk_clear_events.
+ */
 static void disk_events_workfn(struct work_struct *work)
 {
     struct delayed_work *dwork = to_delayed_work(work);
     struct disk_events *ev = container_of(dwork, struct disk_events, dwork);
 
+    disk_check_events(ev, &ev->clearing);
+}
+
+static void disk_check_events(struct disk_events *ev,
+                              unsigned int *clearing_ptr)
+{
     struct gendisk *disk = ev->disk;
     char *envp[ARRAY_SIZE(disk_uevents) + 1] = { };
-    unsigned int clearing = ev->clearing;
+    unsigned int clearing = *clearing_ptr;
     unsigned int events;
     unsigned long intv;
     int nr_events = 0, i;
@@ -1598,7 +1620,7 @@ static void disk_events_workfn(struct work_struct *work)
 
     events &= ~ev->pending;
     ev->pending |= events;
-    ev->clearing &= ~clearing;
+    *clearing_ptr &= ~clearing;
 
     intv = disk_events_poll_jiffies(disk);
     if (!ev->block && intv)
@@ -636,82 +636,82 @@ struct rx_buf_desc {
 #define SEG_BASE IPHASE5575_FRAG_CONTROL_REG_BASE
 #define REASS_BASE IPHASE5575_REASS_CONTROL_REG_BASE
 
-typedef volatile u_int freg_t;
+typedef volatile u_int ffreg_t;
 typedef u_int rreg_t;
 
 typedef struct _ffredn_t {
-    freg_t idlehead_high;   /* Idle cell header (high)              */
-    freg_t idlehead_low;    /* Idle cell header (low)               */
-    freg_t maxrate;         /* Maximum rate                         */
-    freg_t stparms;         /* Traffic Management Parameters        */
-    freg_t abrubr_abr;      /* ABRUBR Priority Byte 1, TCR Byte 0   */
-    freg_t rm_type;         /*                                      */
-    u_int filler5[0x17 - 0x06];
-    freg_t cmd_reg;         /* Command register                     */
-    u_int filler18[0x20 - 0x18];
-    freg_t cbr_base;        /* CBR Pointer Base                     */
-    freg_t vbr_base;        /* VBR Pointer Base                     */
-    freg_t abr_base;        /* ABR Pointer Base                     */
-    freg_t ubr_base;        /* UBR Pointer Base                     */
-    u_int filler24;
-    freg_t vbrwq_base;      /* VBR Wait Queue Base                  */
-    freg_t abrwq_base;      /* ABR Wait Queue Base                  */
-    freg_t ubrwq_base;      /* UBR Wait Queue Base                  */
-    freg_t vct_base;        /* Main VC Table Base                   */
-    freg_t vcte_base;       /* Extended Main VC Table Base          */
-    u_int filler2a[0x2C - 0x2A];
-    freg_t cbr_tab_beg;     /* CBR Table Begin                      */
-    freg_t cbr_tab_end;     /* CBR Table End                        */
-    freg_t cbr_pointer;     /* CBR Pointer                          */
-    u_int filler2f[0x30 - 0x2F];
-    freg_t prq_st_adr;      /* Packet Ready Queue Start Address     */
-    freg_t prq_ed_adr;      /* Packet Ready Queue End Address       */
-    freg_t prq_rd_ptr;      /* Packet Ready Queue read pointer      */
-    freg_t prq_wr_ptr;      /* Packet Ready Queue write pointer     */
-    freg_t tcq_st_adr;      /* Transmit Complete Queue Start Address*/
-    freg_t tcq_ed_adr;      /* Transmit Complete Queue End Address  */
-    freg_t tcq_rd_ptr;      /* Transmit Complete Queue read pointer */
-    freg_t tcq_wr_ptr;      /* Transmit Complete Queue write pointer*/
-    u_int filler38[0x40 - 0x38];
-    freg_t queue_base;      /* Base address for PRQ and TCQ         */
-    freg_t desc_base;       /* Base address of descriptor table     */
-    u_int filler42[0x45 - 0x42];
-    freg_t mode_reg_0;      /* Mode register 0                      */
-    freg_t mode_reg_1;      /* Mode register 1                      */
-    freg_t intr_status_reg;/* Interrupt Status register             */
-    freg_t mask_reg;        /* Mask Register                        */
-    freg_t cell_ctr_high1;  /* Total cell transfer count (high)     */
-    freg_t cell_ctr_lo1;    /* Total cell transfer count (low)      */
-    freg_t state_reg;       /* Status register                      */
-    u_int filler4c[0x58 - 0x4c];
-    freg_t curr_desc_num;   /* Contains the current descriptor num  */
-    freg_t next_desc;       /* Next descriptor                      */
-    freg_t next_vc;         /* Next VC                              */
-    u_int filler5b[0x5d - 0x5b];
-    freg_t present_slot_cnt;/* Present slot count                   */
-    u_int filler5e[0x6a - 0x5e];
-    freg_t new_desc_num;    /* New descriptor number                */
-    freg_t new_vc;          /* New VC                               */
-    freg_t sched_tbl_ptr;   /* Schedule table pointer               */
-    freg_t vbrwq_wptr;      /* VBR wait queue write pointer         */
-    freg_t vbrwq_rptr;      /* VBR wait queue read pointer          */
-    freg_t abrwq_wptr;      /* ABR wait queue write pointer         */
-    freg_t abrwq_rptr;      /* ABR wait queue read pointer          */
-    freg_t ubrwq_wptr;      /* UBR wait queue write pointer         */
-    freg_t ubrwq_rptr;      /* UBR wait queue read pointer          */
-    freg_t cbr_vc;          /* CBR VC                               */
-    freg_t vbr_sb_vc;       /* VBR SB VC                            */
-    freg_t abr_sb_vc;       /* ABR SB VC                            */
-    freg_t ubr_sb_vc;       /* UBR SB VC                            */
-    freg_t vbr_next_link;   /* VBR next link                        */
-    freg_t abr_next_link;   /* ABR next link                        */
-    freg_t ubr_next_link;   /* UBR next link                        */
-    u_int filler7a[0x7c-0x7a];
-    freg_t out_rate_head;   /* Out of rate head                     */
-    u_int filler7d[0xca-0x7d]; /* pad out to full address space     */
-    freg_t cell_ctr_high1_nc;/* Total cell transfer count (high)    */
-    freg_t cell_ctr_lo1_nc;/* Total cell transfer count (low)       */
-    u_int fillercc[0x100-0xcc]; /* pad out to full address space    */
+    ffreg_t idlehead_high;  /* Idle cell header (high)              */
+    ffreg_t idlehead_low;   /* Idle cell header (low)               */
+    ffreg_t maxrate;        /* Maximum rate                         */
+    ffreg_t stparms;        /* Traffic Management Parameters        */
+    ffreg_t abrubr_abr;     /* ABRUBR Priority Byte 1, TCR Byte 0   */
+    ffreg_t rm_type;        /*                                      */
+    u_int filler5[0x17 - 0x06];
+    ffreg_t cmd_reg;        /* Command register                     */
+    u_int filler18[0x20 - 0x18];
+    ffreg_t cbr_base;       /* CBR Pointer Base                     */
+    ffreg_t vbr_base;       /* VBR Pointer Base                     */
+    ffreg_t abr_base;       /* ABR Pointer Base                     */
+    ffreg_t ubr_base;       /* UBR Pointer Base                     */
+    u_int filler24;
+    ffreg_t vbrwq_base;     /* VBR Wait Queue Base                  */
+    ffreg_t abrwq_base;     /* ABR Wait Queue Base                  */
+    ffreg_t ubrwq_base;     /* UBR Wait Queue Base                  */
+    ffreg_t vct_base;       /* Main VC Table Base                   */
+    ffreg_t vcte_base;      /* Extended Main VC Table Base          */
+    u_int filler2a[0x2C - 0x2A];
+    ffreg_t cbr_tab_beg;    /* CBR Table Begin                      */
+    ffreg_t cbr_tab_end;    /* CBR Table End                        */
+    ffreg_t cbr_pointer;    /* CBR Pointer                          */
+    u_int filler2f[0x30 - 0x2F];
+    ffreg_t prq_st_adr;     /* Packet Ready Queue Start Address     */
+    ffreg_t prq_ed_adr;     /* Packet Ready Queue End Address       */
+    ffreg_t prq_rd_ptr;     /* Packet Ready Queue read pointer      */
+    ffreg_t prq_wr_ptr;     /* Packet Ready Queue write pointer     */
+    ffreg_t tcq_st_adr;     /* Transmit Complete Queue Start Address*/
+    ffreg_t tcq_ed_adr;     /* Transmit Complete Queue End Address  */
+    ffreg_t tcq_rd_ptr;     /* Transmit Complete Queue read pointer */
+    ffreg_t tcq_wr_ptr;     /* Transmit Complete Queue write pointer*/
+    u_int filler38[0x40 - 0x38];
+    ffreg_t queue_base;     /* Base address for PRQ and TCQ         */
+    ffreg_t desc_base;      /* Base address of descriptor table     */
+    u_int filler42[0x45 - 0x42];
+    ffreg_t mode_reg_0;     /* Mode register 0                      */
+    ffreg_t mode_reg_1;     /* Mode register 1                      */
+    ffreg_t intr_status_reg;/* Interrupt Status register            */
+    ffreg_t mask_reg;       /* Mask Register                        */
+    ffreg_t cell_ctr_high1; /* Total cell transfer count (high)     */
+    ffreg_t cell_ctr_lo1;   /* Total cell transfer count (low)      */
+    ffreg_t state_reg;      /* Status register                      */
+    u_int filler4c[0x58 - 0x4c];
+    ffreg_t curr_desc_num;  /* Contains the current descriptor num  */
+    ffreg_t next_desc;      /* Next descriptor                      */
+    ffreg_t next_vc;        /* Next VC                              */
+    u_int filler5b[0x5d - 0x5b];
+    ffreg_t present_slot_cnt;/* Present slot count                  */
+    u_int filler5e[0x6a - 0x5e];
+    ffreg_t new_desc_num;   /* New descriptor number                */
+    ffreg_t new_vc;         /* New VC                               */
+    ffreg_t sched_tbl_ptr;  /* Schedule table pointer               */
+    ffreg_t vbrwq_wptr;     /* VBR wait queue write pointer         */
+    ffreg_t vbrwq_rptr;     /* VBR wait queue read pointer          */
+    ffreg_t abrwq_wptr;     /* ABR wait queue write pointer         */
+    ffreg_t abrwq_rptr;     /* ABR wait queue read pointer          */
+    ffreg_t ubrwq_wptr;     /* UBR wait queue write pointer         */
+    ffreg_t ubrwq_rptr;     /* UBR wait queue read pointer          */
+    ffreg_t cbr_vc;         /* CBR VC                               */
+    ffreg_t vbr_sb_vc;      /* VBR SB VC                            */
+    ffreg_t abr_sb_vc;      /* ABR SB VC                            */
+    ffreg_t ubr_sb_vc;      /* UBR SB VC                            */
+    ffreg_t vbr_next_link;  /* VBR next link                        */
+    ffreg_t abr_next_link;  /* ABR next link                        */
+    ffreg_t ubr_next_link;  /* UBR next link                        */
+    u_int filler7a[0x7c-0x7a];
+    ffreg_t out_rate_head;  /* Out of rate head                     */
+    u_int filler7d[0xca-0x7d]; /* pad out to full address space     */
+    ffreg_t cell_ctr_high1_nc;/* Total cell transfer count (high)   */
+    ffreg_t cell_ctr_lo1_nc;/* Total cell transfer count (low)      */
+    u_int fillercc[0x100-0xcc]; /* pad out to full address space    */
 } ffredn_t;
 
 typedef struct _rfredn_t {
@@ -94,11 +94,16 @@ void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc);
 #ifdef CONFIG_BCMA_DRIVER_GPIO
 /* driver_gpio.c */
 int bcma_gpio_init(struct bcma_drv_cc *cc);
+int bcma_gpio_unregister(struct bcma_drv_cc *cc);
 #else
 static inline int bcma_gpio_init(struct bcma_drv_cc *cc)
 {
     return -ENOTSUPP;
 }
+static inline int bcma_gpio_unregister(struct bcma_drv_cc *cc)
+{
+    return 0;
+}
 #endif /* CONFIG_BCMA_DRIVER_GPIO */
 
 #endif
@@ -21,7 +21,7 @@ int bcma_nflash_init(struct bcma_drv_cc *cc)
     struct bcma_bus *bus = cc->core->bus;
 
     if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4706 &&
-        cc->core->id.rev != 0x38) {
+        cc->core->id.rev != 38) {
         bcma_err(bus, "NAND flash on unsupported board!\n");
         return -ENOTSUPP;
     }
@@ -96,3 +96,8 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
 
     return gpiochip_add(chip);
 }
+
+int bcma_gpio_unregister(struct bcma_drv_cc *cc)
+{
+    return gpiochip_remove(&cc->gpio);
+}
@@ -268,6 +268,13 @@ int bcma_bus_register(struct bcma_bus *bus)
 void bcma_bus_unregister(struct bcma_bus *bus)
 {
     struct bcma_device *cores[3];
+    int err;
+
+    err = bcma_gpio_unregister(&bus->drv_cc);
+    if (err == -EBUSY)
+        bcma_err(bus, "Some GPIOs are still in use.\n");
+    else if (err)
+        bcma_err(bus, "Can not unregister GPIO driver: %i\n", err);
 
     cores[0] = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
     cores[1] = bcma_find_core(bus, BCMA_CORE_PCIE);
@@ -168,7 +168,7 @@ static void wake_all_senders(struct drbd_tconn *tconn) {
 }
 
 /* must hold resource->req_lock */
-static void start_new_tl_epoch(struct drbd_tconn *tconn)
+void start_new_tl_epoch(struct drbd_tconn *tconn)
 {
     /* no point closing an epoch, if it is empty, anyways. */
     if (tconn->current_tle_writes == 0)
@@ -267,6 +267,7 @@ struct bio_and_error {
     int error;
 };
 
+extern void start_new_tl_epoch(struct drbd_tconn *tconn);
 extern void drbd_req_destroy(struct kref *kref);
 extern void _req_may_be_done(struct drbd_request *req,
         struct bio_and_error *m);
@@ -931,6 +931,7 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
     enum drbd_state_rv rv = SS_SUCCESS;
     enum sanitize_state_warnings ssw;
     struct after_state_chg_work *ascw;
+    bool did_remote, should_do_remote;
 
     os = drbd_read_state(mdev);
 
@@ -981,11 +982,17 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
         (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
         atomic_inc(&mdev->local_cnt);
 
+    did_remote = drbd_should_do_remote(mdev->state);
     mdev->state.i = ns.i;
+    should_do_remote = drbd_should_do_remote(mdev->state);
     mdev->tconn->susp = ns.susp;
     mdev->tconn->susp_nod = ns.susp_nod;
     mdev->tconn->susp_fen = ns.susp_fen;
 
+    /* put replicated vs not-replicated requests in seperate epochs */
+    if (did_remote != should_do_remote)
+        start_new_tl_epoch(mdev->tconn);
+
     if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
         drbd_print_uuids(mdev, "attached to UUIDs");
 
@@ -626,12 +626,13 @@ static void mtip_timeout_function(unsigned long int data)
         }
     }
 
-    if (cmdto_cnt && !test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
+    if (cmdto_cnt) {
         print_tags(port->dd, "timed out", tagaccum, cmdto_cnt);
-
-        mtip_restart_port(port);
+        if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
+            mtip_restart_port(port);
+            wake_up_interruptible(&port->svc_wait);
+        }
         clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
-        wake_up_interruptible(&port->svc_wait);
     }
 
     if (port->ic_pause_timer) {
@@ -3887,7 +3888,12 @@ static int mtip_block_remove(struct driver_data *dd)
     * Delete our gendisk structure. This also removes the device
     * from /dev
     */
-    del_gendisk(dd->disk);
+    if (dd->disk) {
+        if (dd->disk->queue)
+            del_gendisk(dd->disk);
+        else
+            put_disk(dd->disk);
+    }
 
     spin_lock(&rssd_index_lock);
     ida_remove(&rssd_index_ida, dd->index);
@@ -3921,7 +3927,13 @@ static int mtip_block_shutdown(struct driver_data *dd)
         "Shutting down %s ...\n", dd->disk->disk_name);
 
     /* Delete our gendisk structure, and cleanup the blk queue. */
-    del_gendisk(dd->disk);
+    if (dd->disk) {
+        if (dd->disk->queue)
+            del_gendisk(dd->disk);
+        else
+            put_disk(dd->disk);
+    }
 
 
     spin_lock(&rssd_index_lock);
     ida_remove(&rssd_index_ida, dd->index);
@@ -161,10 +161,12 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 static void make_response(struct xen_blkif *blkif, u64 id,
         unsigned short op, int st);
 
-#define foreach_grant(pos, rbtree, node) \
-    for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node); \
+#define foreach_grant_safe(pos, n, rbtree, node) \
+    for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
+         (n) = rb_next(&(pos)->node); \
          &(pos)->node != NULL; \
-         (pos) = container_of(rb_next(&(pos)->node), typeof(*(pos)), node))
+         (pos) = container_of(n, typeof(*(pos)), node), \
+         (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)
 
 
 static void add_persistent_gnt(struct rb_root *root,
@@ -217,10 +219,11 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num)
     struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
     struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
     struct persistent_gnt *persistent_gnt;
+    struct rb_node *n;
     int ret = 0;
     int segs_to_unmap = 0;
 
-    foreach_grant(persistent_gnt, root, node) {
+    foreach_grant_safe(persistent_gnt, n, root, node) {
         BUG_ON(persistent_gnt->handle ==
             BLKBACK_INVALID_HANDLE);
         gnttab_set_unmap_op(&unmap[segs_to_unmap],
@@ -230,9 +233,6 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num)
             persistent_gnt->handle);
 
         pages[segs_to_unmap] = persistent_gnt->page;
-        rb_erase(&persistent_gnt->node, root);
-        kfree(persistent_gnt);
-        num--;
 
         if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
             !rb_next(&persistent_gnt->node)) {
@@ -241,6 +241,10 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num)
             BUG_ON(ret);
             segs_to_unmap = 0;
         }
+
+        rb_erase(&persistent_gnt->node, root);
+        kfree(persistent_gnt);
+        num--;
     }
     BUG_ON(num != 0);
 }
@@ -792,6 +792,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
 {
     struct llist_node *all_gnts;
     struct grant *persistent_gnt;
+    struct llist_node *n;
 
     /* Prevent new requests being issued until we fix things up. */
     spin_lock_irq(&info->io_lock);
@@ -804,7 +805,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
     /* Remove all persistent grants */
     if (info->persistent_gnts_c) {
         all_gnts = llist_del_all(&info->persistent_gnts);
-        llist_for_each_entry(persistent_gnt, all_gnts, node) {
+        llist_for_each_entry_safe(persistent_gnt, n, all_gnts, node) {
             gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
             __free_page(pfn_to_page(persistent_gnt->pfn));
             kfree(persistent_gnt);
@@ -835,7 +836,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
 static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
         struct blkif_response *bret)
 {
-    int i;
+    int i = 0;
     struct bio_vec *bvec;
     struct req_iterator iter;
     unsigned long flags;
@@ -852,7 +853,8 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
         */
         rq_for_each_segment(bvec, s->request, iter) {
             BUG_ON((bvec->bv_offset + bvec->bv_len) > PAGE_SIZE);
-            i = offset >> PAGE_SHIFT;
+            if (bvec->bv_offset < offset)
+                i++;
             BUG_ON(i >= s->req.u.rw.nr_segments);
             shared_data = kmap_atomic(
                 pfn_to_page(s->grants_used[i]->pfn));
@@ -861,7 +863,7 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
                 bvec->bv_len);
             bvec_kunmap_irq(bvec_data, &flags);
             kunmap_atomic(shared_data);
-            offset += bvec->bv_len;
+            offset = bvec->bv_offset + bvec->bv_len;
         }
     }
     /* Add the persistent grant into the list of free grants */
@@ -2062,7 +2062,8 @@ static void virtcons_remove(struct virtio_device *vdev)
     /* Disable interrupts for vqs */
     vdev->config->reset(vdev);
     /* Finish up work that's lined up */
-    cancel_work_sync(&portdev->control_work);
+    if (use_multiport(portdev))
+        cancel_work_sync(&portdev->control_work);
 
     list_for_each_entry_safe(port, port2, &portdev->ports, list)
         unplug_port(port);
@@ -1313,14 +1313,18 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
             if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
                 radeon_wait_for_vblank(rdev, i);
                 tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+                WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
                 WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+                WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
             }
         } else {
             tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
             if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
                 radeon_wait_for_vblank(rdev, i);
                 tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+                WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
                 WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+                WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
             }
         }
         /* wait for the next frame */
@@ -1345,6 +1349,8 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
         blackout &= ~BLACKOUT_MODE_MASK;
         WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
     }
+    /* wait for the MC to settle */
+    udelay(100);
 }
 
 void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
@@ -1378,11 +1384,15 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
         if (ASIC_IS_DCE6(rdev)) {
             tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
             tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+            WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
             WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+            WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
         } else {
             tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
             tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+            WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
             WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+            WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
         }
         /* wait for the next frame */
         frame_count = radeon_get_vblank_counter(rdev, i);
@@ -2036,9 +2046,20 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
     WREG32(HDP_ADDR_CONFIG, gb_addr_config);
     WREG32(DMA_TILING_CONFIG, gb_addr_config);
 
-    tmp = gb_addr_config & NUM_PIPES_MASK;
-    tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
-                    EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
+    if ((rdev->config.evergreen.max_backends == 1) &&
+        (rdev->flags & RADEON_IS_IGP)) {
+        if ((disabled_rb_mask & 3) == 1) {
+            /* RB0 disabled, RB1 enabled */
+            tmp = 0x11111111;
+        } else {
+            /* RB1 disabled, RB0 enabled */
+            tmp = 0x00000000;
+        }
+    } else {
+        tmp = gb_addr_config & NUM_PIPES_MASK;
+        tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
+                        EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
+    }
     WREG32(GB_BACKEND_MAP, tmp);
 
     WREG32(CGTS_SYS_TCC_DISABLE, 0);
@@ -1462,12 +1462,15 @@ u32 r6xx_remap_render_backend(struct radeon_device *rdev,
         u32 disabled_rb_mask)
 {
     u32 rendering_pipe_num, rb_num_width, req_rb_num;
-    u32 pipe_rb_ratio, pipe_rb_remain;
+    u32 pipe_rb_ratio, pipe_rb_remain, tmp;
     u32 data = 0, mask = 1 << (max_rb_num - 1);
     unsigned i, j;
 
     /* mask out the RBs that don't exist on that asic */
-    disabled_rb_mask |= (0xff << max_rb_num) & 0xff;
+    tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff);
+    /* make sure at least one RB is available */
+    if ((tmp & 0xff) != 0xff)
+        disabled_rb_mask = tmp;
 
     rendering_pipe_num = 1 << tiling_pipe_num;
     req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
@@ -1445,7 +1445,7 @@ static struct radeon_asic cayman_asic = {
     .vm = {
         .init = &cayman_vm_init,
         .fini = &cayman_vm_fini,
-        .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
+        .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
         .set_page = &cayman_vm_set_page,
     },
     .ring = {
@@ -1572,7 +1572,7 @@ static struct radeon_asic trinity_asic = {
     .vm = {
         .init = &cayman_vm_init,
         .fini = &cayman_vm_fini,
-        .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
+        .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
         .set_page = &cayman_vm_set_page,
     },
     .ring = {
@@ -1699,7 +1699,7 @@ static struct radeon_asic si_asic = {
     .vm = {
         .init = &si_vm_init,
        .fini = &si_vm_fini,
-        .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
+        .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
         .set_page = &si_vm_set_page,
     },
     .ring = {
@@ -2470,6 +2470,14 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
                         1),
                       ATOM_DEVICE_CRT1_SUPPORT);
         }
+        /* RV100 board with external TDMS bit mis-set.
+         * Actually uses internal TMDS, clear the bit.
+         */
+        if (dev->pdev->device == 0x5159 &&
+            dev->pdev->subsystem_vendor == 0x1014 &&
+            dev->pdev->subsystem_device == 0x029A) {
+            tmp &= ~(1 << 4);
+        }
         if ((tmp >> 4) & 0x1) {
             devices |= ATOM_DEVICE_DFP2_SUPPORT;
             radeon_add_legacy_encoder(dev,
@@ -1115,8 +1115,10 @@ radeon_user_framebuffer_create(struct drm_device *dev,
     }
 
     radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
-    if (radeon_fb == NULL)
+    if (radeon_fb == NULL) {
+        drm_gem_object_unreference_unlocked(obj);
         return ERR_PTR(-ENOMEM);
+    }
 
     ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
     if (ret) {
@@ -377,6 +377,9 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi
 {
     int r;
 
+    /* make sure we aren't trying to allocate more space than there is on the ring */
+    if (ndw > (ring->ring_size / 4))
+        return -ENOMEM;
     /* Align requested size with padding so unlock_commit can
      * pad safely */
     ndw = (ndw + ring->align_mask) & ~ring->align_mask;
@@ -1,5 +1,6 @@
 cayman 0x9400
 0x0000802C GRBM_GFX_INDEX
+0x00008040 WAIT_UNTIL
 0x000084FC CP_STRMOUT_CNTL
 0x000085F0 CP_COHER_CNTL
 0x000085F4 CP_COHER_SIZE
@@ -336,6 +336,8 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
             WREG32(R600_CITF_CNTL, blackout);
         }
     }
+    /* wait for the MC to settle */
+    udelay(100);
 }
 
 void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
@@ -429,7 +429,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
     struct ttm_bo_device *bdev = bo->bdev;
     struct ttm_bo_driver *driver = bdev->driver;
 
-    fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
+    fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
     if (!fbo)
         return -ENOMEM;
 
@@ -448,7 +448,12 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
     fbo->vm_node = NULL;
     atomic_set(&fbo->cpu_writers, 0);
 
-    fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
+    spin_lock(&bdev->fence_lock);
+    if (bo->sync_obj)
+        fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
+    else
+        fbo->sync_obj = NULL;
+    spin_unlock(&bdev->fence_lock);
     kref_init(&fbo->list_kref);
     kref_init(&fbo->kref);
     fbo->destroy = &ttm_transfered_destroy;
@@ -661,13 +666,11 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
         */
 
         set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
-
-        /* ttm_buffer_object_transfer accesses bo->sync_obj */
-        ret = ttm_buffer_object_transfer(bo, &ghost_obj);
         spin_unlock(&bdev->fence_lock);
         if (tmp_obj)
             driver->sync_obj_unref(&tmp_obj);
 
+        ret = ttm_buffer_object_transfer(bo, &ghost_obj);
         if (ret)
             return ret;
 
@@ -263,20 +263,15 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
         struct qib_qp __rcu **qpp;
 
         qpp = &dev->qp_table[n];
-        q = rcu_dereference_protected(*qpp,
-            lockdep_is_held(&dev->qpt_lock));
-        for (; q; qpp = &q->next) {
+        for (; (q = rcu_dereference_protected(*qpp,
+                lockdep_is_held(&dev->qpt_lock))) != NULL;
+                qpp = &q->next)
             if (q == qp) {
                 atomic_dec(&qp->refcount);
                 *qpp = qp->next;
                 rcu_assign_pointer(qp->next, NULL);
-                q = rcu_dereference_protected(*qpp,
-                    lockdep_is_held(&dev->qpt_lock));
                 break;
             }
-            q = rcu_dereference_protected(*qpp,
-                lockdep_is_held(&dev->qpt_lock));
-        }
     }
 
     spin_unlock_irqrestore(&dev->qpt_lock, flags);
@@ -741,6 +741,9 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
 
     tx_req->mapping = addr;
 
+    skb_orphan(skb);
+    skb_dst_drop(skb);
+
     rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
                addr, skb->len);
     if (unlikely(rc)) {
@@ -752,9 +755,6 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
         dev->trans_start = jiffies;
         ++tx->tx_head;
 
-        skb_orphan(skb);
-        skb_dst_drop(skb);
-
         if (++priv->tx_outstanding == ipoib_sendq_size) {
             ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
                   tx->qp->qp_num);
@@ -600,6 +600,9 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
         netif_stop_queue(dev);
     }
 
+    skb_orphan(skb);
+    skb_dst_drop(skb);
+
     rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
                address->ah, qpn, tx_req, phead, hlen);
     if (unlikely(rc)) {
@@ -615,9 +618,6 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
 
         address->last_send = priv->tx_head;
         ++priv->tx_head;
-
-        skb_orphan(skb);
-        skb_dst_drop(skb);
     }
 
     if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
@@ -374,6 +374,7 @@ static int usb_keene_probe(struct usb_interface *intf,
     radio->vdev.ioctl_ops = &usb_keene_ioctl_ops;
     radio->vdev.lock = &radio->lock;
     radio->vdev.release = video_device_release_empty;
+    radio->vdev.vfl_dir = VFL_DIR_TX;
 
     radio->usbdev = interface_to_usbdev(intf);
     radio->intf = intf;
@@ -250,6 +250,7 @@ static struct video_device radio_si4713_vdev_template = {
     .name           = "radio-si4713",
     .release        = video_device_release,
     .ioctl_ops      = &radio_si4713_ioctl_ops,
+    .vfl_dir        = VFL_DIR_TX,
 };
 
 /* Platform driver interface */
@@ -1971,6 +1971,7 @@ static struct video_device wl1273_viddev_template = {
     .ioctl_ops      = &wl1273_ioctl_ops,
     .name           = WL1273_FM_DRIVER_NAME,
     .release        = wl1273_vdev_release,
+    .vfl_dir        = VFL_DIR_TX,
 };
 
 static int wl1273_fm_radio_remove(struct platform_device *pdev)
@@ -518,6 +518,16 @@ static struct video_device fm_viddev_template = {
     .ioctl_ops = &fm_drv_ioctl_ops,
     .name = FM_DRV_NAME,
     .release = video_device_release,
+    /*
+     * To ensure both the tuner and modulator ioctls are accessible we
+     * set the vfl_dir to M2M to indicate this.
+     *
+     * It is not really a mem2mem device of course, but it can both receive
+     * and transmit using the same radio device. It's the only radio driver
+     * that does this and it should really be split in two radio devices,
+     * but that would affect applications using this driver.
+     */
+    .vfl_dir = VFL_DIR_M2M,
 };
 
 int fm_v4l2_init_video_device(struct fmdev *fmdev, int radio_nr)
@@ -272,6 +272,7 @@ config MTD_DOCG3
 	tristate "M-Systems Disk-On-Chip G3"
 	select BCH
 	select BCH_CONST_PARAMS
+	select BITREVERSE
 	---help---
 	  This provides an MTD device driver for the M-Systems DiskOnChip
 	  G3 devices.
@@ -170,7 +170,7 @@ static int of_flash_probe(struct platform_device *dev)
     resource_size_t res_size;
     struct mtd_part_parser_data ppdata;
     bool map_indirect;
-    const char *mtd_name;
+    const char *mtd_name = NULL;
 
     match = of_match_device(of_flash_match, &dev->dev);
     if (!match)
@@ -17,8 +17,8 @@
 #include "bcm47xxnflash.h"
 
 /* Broadcom uses 1'000'000 but it seems to be too many. Tests on WNDR4500 has
- * shown 164 retries as maxiumum. */
-#define NFLASH_READY_RETRIES		1000
+ * shown ~1000 retries as maxiumum. */
+#define NFLASH_READY_RETRIES		10000
 
 #define NFLASH_SECTOR_SIZE		512
 
@@ -523,7 +523,7 @@ static struct nand_ecclayout hwecc4_2048 __initconst = {
 static const struct of_device_id davinci_nand_of_match[] = {
     {.compatible = "ti,davinci-nand", },
     {},
-}
+};
 MODULE_DEVICE_TABLE(of, davinci_nand_of_match);
 
 static struct davinci_nand_pdata
@@ -2857,8 +2857,11 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
     int i;
     int val;
 
-    /* ONFI need to be probed in 8 bits mode */
-    WARN_ON(chip->options & NAND_BUSWIDTH_16);
-
+    /* ONFI need to be probed in 8 bits mode, and 16 bits should be selected with NAND_BUSWIDTH_AUTO */
+    if (chip->options & NAND_BUSWIDTH_16) {
+        pr_err("Trying ONFI probe in 16 bits mode, aborting !\n");
+        return 0;
+    }
     /* Try ONFI for unknown chip or LP */
     chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
     if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
@@ -1053,6 +1053,7 @@ static ssize_t bonding_store_primary(struct device *d,
             pr_info("%s: Setting primary slave to None.\n",
                 bond->dev->name);
             bond->primary_slave = NULL;
+            memset(bond->params.primary, 0, sizeof(bond->params.primary));
             bond_select_active_slave(bond);
             goto out;
         }
|
||||
|
||||
priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface),
|
||||
IFX_WRITE_LOW_16BIT(mask));
|
||||
|
||||
/* According to C_CAN documentation, the reserved bit
|
||||
* in IFx_MASK2 register is fixed 1
|
||||
*/
|
||||
priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface),
|
||||
IFX_WRITE_HIGH_16BIT(mask));
|
||||
IFX_WRITE_HIGH_16BIT(mask) | BIT(13));
|
||||
|
||||
priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),
|
||||
IFX_WRITE_LOW_16BIT(id));
|
||||
|
@@ -36,13 +36,13 @@
 
 #define DRV_VER			"4.4.161.0u"
 #define DRV_NAME		"be2net"
-#define BE_NAME			"ServerEngines BladeEngine2 10Gbps NIC"
-#define BE3_NAME		"ServerEngines BladeEngine3 10Gbps NIC"
-#define OC_NAME			"Emulex OneConnect 10Gbps NIC"
+#define BE_NAME			"Emulex BladeEngine2"
+#define BE3_NAME		"Emulex BladeEngine3"
+#define OC_NAME			"Emulex OneConnect"
 #define OC_NAME_BE		OC_NAME "(be3)"
 #define OC_NAME_LANCER		OC_NAME "(Lancer)"
 #define OC_NAME_SH		OC_NAME "(Skyhawk)"
-#define DRV_DESC		"ServerEngines BladeEngine 10Gbps NIC Driver"
+#define DRV_DESC		"Emulex OneConnect 10Gbps NIC Driver"
 
 #define BE_VENDOR_ID		0x19a2
 #define EMULEX_VENDOR_ID	0x10df
@@ -25,7 +25,7 @@
 MODULE_VERSION(DRV_VER);
 MODULE_DEVICE_TABLE(pci, be_dev_ids);
 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
-MODULE_AUTHOR("ServerEngines Corporation");
+MODULE_AUTHOR("Emulex Corporation");
 MODULE_LICENSE("GPL");
 
 static unsigned int num_vfs;
@@ -232,6 +232,7 @@
 #define E1000_CTRL_FRCDPX	0x00001000	/* Force Duplex */
 #define E1000_CTRL_LANPHYPC_OVERRIDE	0x00010000	/* SW control of LANPHYPC */
 #define E1000_CTRL_LANPHYPC_VALUE	0x00020000	/* SW value of LANPHYPC */
+#define E1000_CTRL_MEHE		0x00080000	/* Memory Error Handling Enable */
 #define E1000_CTRL_SWDPIN0	0x00040000	/* SWDPIN 0 value */
 #define E1000_CTRL_SWDPIN1	0x00080000	/* SWDPIN 1 value */
 #define E1000_CTRL_SWDPIO0	0x00400000	/* SWDPIN 0 Input or output */
@@ -389,6 +390,12 @@
 
 #define E1000_PBS_16K E1000_PBA_16K
 
+/* Uncorrectable/correctable ECC Error counts and enable bits */
+#define E1000_PBECCSTS_CORR_ERR_CNT_MASK	0x000000FF
+#define E1000_PBECCSTS_UNCORR_ERR_CNT_MASK	0x0000FF00
+#define E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT	8
+#define E1000_PBECCSTS_ECC_ENABLE		0x00010000
+
 #define IFS_MAX 80
 #define IFS_MIN 40
 #define IFS_RATIO 4
@@ -408,6 +415,7 @@
 #define E1000_ICR_RXSEQ		0x00000008 /* Rx sequence error */
 #define E1000_ICR_RXDMT0	0x00000010 /* Rx desc min. threshold (0) */
 #define E1000_ICR_RXT0		0x00000080 /* Rx timer intr (ring 0) */
+#define E1000_ICR_ECCER		0x00400000 /* Uncorrectable ECC Error */
 #define E1000_ICR_INT_ASSERTED	0x80000000 /* If this bit asserted, the driver should claim the interrupt */
 #define E1000_ICR_RXQ0		0x00100000 /* Rx Queue 0 Interrupt */
 #define E1000_ICR_RXQ1		0x00200000 /* Rx Queue 1 Interrupt */
@@ -443,6 +451,7 @@
 #define E1000_IMS_RXSEQ		E1000_ICR_RXSEQ	/* Rx sequence error */
 #define E1000_IMS_RXDMT0	E1000_ICR_RXDMT0 /* Rx desc min. threshold */
 #define E1000_IMS_RXT0		E1000_ICR_RXT0	/* Rx timer intr */
+#define E1000_IMS_ECCER		E1000_ICR_ECCER	/* Uncorrectable ECC Error */
 #define E1000_IMS_RXQ0		E1000_ICR_RXQ0	/* Rx Queue 0 Interrupt */
 #define E1000_IMS_RXQ1		E1000_ICR_RXQ1	/* Rx Queue 1 Interrupt */
 #define E1000_IMS_TXQ0		E1000_ICR_TXQ0	/* Tx Queue 0 Interrupt */
@ -309,6 +309,8 @@ struct e1000_adapter {

struct napi_struct napi;

unsigned int uncorr_errors; /* uncorrectable ECC errors */
unsigned int corr_errors; /* correctable ECC errors */
unsigned int restart_queue;
u32 txd_cmd;

@ -108,6 +108,8 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
E1000_STAT("dropped_smbus", stats.mgpdc),
E1000_STAT("rx_dma_failed", rx_dma_failed),
E1000_STAT("tx_dma_failed", tx_dma_failed),
E1000_STAT("uncorr_ecc_errors", uncorr_errors),
E1000_STAT("corr_ecc_errors", corr_errors),
};

#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats)

@ -77,6 +77,7 @@ enum e1e_registers {
#define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */
E1000_PBA = 0x01000, /* Packet Buffer Allocation - RW */
E1000_PBS = 0x01008, /* Packet Buffer Size */
E1000_PBECCSTS = 0x0100C, /* Packet Buffer ECC Status - RW */
E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */
E1000_EEWR = 0x0102C, /* EEPROM Write Register - RW */
E1000_FLOP = 0x0103C, /* FLASH Opcode Register */
@ -3624,6 +3624,17 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
if (hw->mac.type == e1000_ich8lan)
reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
ew32(RFCTL, reg);

/* Enable ECC on Lynxpoint */
if (hw->mac.type == e1000_pch_lpt) {
reg = er32(PBECCSTS);
reg |= E1000_PBECCSTS_ECC_ENABLE;
ew32(PBECCSTS, reg);

reg = er32(CTRL);
reg |= E1000_CTRL_MEHE;
ew32(CTRL, reg);
}
}

/**
@ -1678,6 +1678,23 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}

/* Reset on uncorrectable ECC error */
if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
u32 pbeccsts = er32(PBECCSTS);

adapter->corr_errors +=
pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
adapter->uncorr_errors +=
(pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;

/* Do the reset outside of interrupt context */
schedule_work(&adapter->reset_task);

/* return immediately since reset is imminent */
return IRQ_HANDLED;
}

if (napi_schedule_prep(&adapter->napi)) {
adapter->total_tx_bytes = 0;
adapter->total_tx_packets = 0;
@ -1741,6 +1758,23 @@ static irqreturn_t e1000_intr(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}

/* Reset on uncorrectable ECC error */
if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
u32 pbeccsts = er32(PBECCSTS);

adapter->corr_errors +=
pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
adapter->uncorr_errors +=
(pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;

/* Do the reset outside of interrupt context */
schedule_work(&adapter->reset_task);

/* return immediately since reset is imminent */
return IRQ_HANDLED;
}

if (napi_schedule_prep(&adapter->napi)) {
adapter->total_tx_bytes = 0;
adapter->total_tx_packets = 0;
@ -2104,6 +2138,8 @@ static void e1000_irq_enable(struct e1000_adapter *adapter)
if (adapter->msix_entries) {
ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
} else if (hw->mac.type == e1000_pch_lpt) {
ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER);
} else {
ew32(IMS, IMS_ENABLE_MASK);
}
@ -4251,6 +4287,16 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
adapter->stats.mgptc += er32(MGTPTC);
adapter->stats.mgprc += er32(MGTPRC);
adapter->stats.mgpdc += er32(MGTPDC);

/* Correctable ECC Errors */
if (hw->mac.type == e1000_pch_lpt) {
u32 pbeccsts = er32(PBECCSTS);
adapter->corr_errors +=
pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
adapter->uncorr_errors +=
(pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
}
}

/**
@ -380,7 +380,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
}
}

if ((dev_cap->flags &
if ((dev->caps.flags &
(MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) &&
mlx4_is_master(dev))
dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;
@ -1801,7 +1801,7 @@ static void rhine_tx(struct net_device *dev)
rp->tx_skbuff[entry]->len,
PCI_DMA_TODEVICE);
}
dev_kfree_skb_irq(rp->tx_skbuff[entry]);
dev_kfree_skb(rp->tx_skbuff[entry]);
rp->tx_skbuff[entry] = NULL;
entry = (++rp->dirty_tx) % TX_RING_SIZE;
}
@ -2010,11 +2010,7 @@ static void rhine_slow_event_task(struct work_struct *work)
if (intr_status & IntrPCIErr)
netif_warn(rp, hw, dev, "PCI error\n");

napi_disable(&rp->napi);
rhine_irq_disable(rp);
/* Slow and safe. Consider __napi_schedule as a replacement ? */
napi_enable(&rp->napi);
napi_schedule(&rp->napi);
iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);

out_unlock:
mutex_unlock(&rp->task_lock);
@ -298,11 +298,12 @@ static void tun_flow_cleanup(unsigned long data)
}

static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
u16 queue_index)
struct tun_file *tfile)
{
struct hlist_head *head;
struct tun_flow_entry *e;
unsigned long delay = tun->ageing_time;
u16 queue_index = tfile->queue_index;

if (!rxhash)
return;
@ -311,7 +312,9 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash,

rcu_read_lock();

if (tun->numqueues == 1)
/* We may get a very small possibility of OOO during switching, not
* worth to optimize.*/
if (tun->numqueues == 1 || tfile->detached)
goto unlock;

e = tun_flow_find(head, rxhash);
@ -411,21 +414,21 @@ static void __tun_detach(struct tun_file *tfile, bool clean)

tun = rtnl_dereference(tfile->tun);

if (tun) {
if (tun && !tfile->detached) {
u16 index = tfile->queue_index;
BUG_ON(index >= tun->numqueues);
dev = tun->dev;

rcu_assign_pointer(tun->tfiles[index],
tun->tfiles[tun->numqueues - 1]);
rcu_assign_pointer(tfile->tun, NULL);
ntfile = rtnl_dereference(tun->tfiles[index]);
ntfile->queue_index = index;

--tun->numqueues;
if (clean)
if (clean) {
rcu_assign_pointer(tfile->tun, NULL);
sock_put(&tfile->sk);
else
} else
tun_disable_queue(tun, tfile);

synchronize_net();
@ -439,10 +442,13 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
}

if (clean) {
if (tun && tun->numqueues == 0 && tun->numdisabled == 0 &&
!(tun->flags & TUN_PERSIST))
if (tun->dev->reg_state == NETREG_REGISTERED)
if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
netif_carrier_off(tun->dev);

if (!(tun->flags & TUN_PERSIST) &&
tun->dev->reg_state == NETREG_REGISTERED)
unregister_netdevice(tun->dev);
}

BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED,
&tfile->socket.flags));
@ -470,6 +476,10 @@ static void tun_detach_all(struct net_device *dev)
rcu_assign_pointer(tfile->tun, NULL);
--tun->numqueues;
}
list_for_each_entry(tfile, &tun->disabled, next) {
wake_up_all(&tfile->wq.wait);
rcu_assign_pointer(tfile->tun, NULL);
}
BUG_ON(tun->numqueues != 0);

synchronize_net();
@ -500,7 +510,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file)
goto out;

err = -EINVAL;
if (rtnl_dereference(tfile->tun))
if (rtnl_dereference(tfile->tun) && !tfile->detached)
goto out;

err = -EBUSY;
@ -1199,7 +1209,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
tun->dev->stats.rx_packets++;
tun->dev->stats.rx_bytes += len;

tun_flow_update(tun, rxhash, tfile->queue_index);
tun_flow_update(tun, rxhash, tfile);
return total_len;
}

@ -1658,10 +1668,10 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
device_create_file(&tun->dev->dev, &dev_attr_owner) ||
device_create_file(&tun->dev->dev, &dev_attr_group))
pr_err("Failed to create tun sysfs files\n");

netif_carrier_on(tun->dev);
}

netif_carrier_on(tun->dev);

tun_debug(KERN_INFO, tun, "tun_set_iff\n");

if (ifr->ifr_flags & IFF_NO_PI)
@ -1813,7 +1823,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
ret = tun_attach(tun, file);
} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
tun = rtnl_dereference(tfile->tun);
if (!tun || !(tun->flags & TUN_TAP_MQ))
if (!tun || !(tun->flags & TUN_TAP_MQ) || tfile->detached)
ret = -EINVAL;
else
__tun_detach(tfile, false);
@ -1215,6 +1215,9 @@ static const struct usb_device_id cdc_devs[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x46),
.driver_info = (unsigned long)&wwan_info,
},
{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76),
.driver_info = (unsigned long)&wwan_info,
},

/* Infineon(now Intel) HSPA Modem platform */
{ USB_DEVICE_AND_INTERFACE_INFO(0x1519, 0x0443,
@ -351,6 +351,10 @@ static const struct usb_device_id products[] = {
USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 57),
.driver_info = (unsigned long)&qmi_wwan_info,
},
{ /* HUAWEI_INTERFACE_NDIS_CONTROL_QUALCOMM */
USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x69),
.driver_info = (unsigned long)&qmi_wwan_info,
},

/* 2. Combined interface devices matching on class+protocol */
{ /* Huawei E367 and possibly others in "Windows mode" */
@ -361,6 +365,14 @@ static const struct usb_device_id products[] = {
USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 17),
.driver_info = (unsigned long)&qmi_wwan_info,
},
{ /* HUAWEI_NDIS_SINGLE_INTERFACE_VDF */
USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x37),
.driver_info = (unsigned long)&qmi_wwan_info,
},
{ /* HUAWEI_INTERFACE_NDIS_HW_QUALCOMM */
USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x67),
.driver_info = (unsigned long)&qmi_wwan_info,
},
{ /* Pantech UML290, P4200 and more */
USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf0, 0xff),
.driver_info = (unsigned long)&qmi_wwan_info,
@ -461,6 +473,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */

/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
@ -380,6 +380,12 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
unsigned long lockflags;
size_t size = dev->rx_urb_size;

/* prevent rx skb allocation when error ratio is high */
if (test_bit(EVENT_RX_KILL, &dev->flags)) {
usb_free_urb(urb);
return -ENOLINK;
}

skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
if (!skb) {
netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
@ -539,6 +545,17 @@ block:
break;
}

/* stop rx if packet error rate is high */
if (++dev->pkt_cnt > 30) {
dev->pkt_cnt = 0;
dev->pkt_err = 0;
} else {
if (state == rx_cleanup)
dev->pkt_err++;
if (dev->pkt_err > 20)
set_bit(EVENT_RX_KILL, &dev->flags);
}

state = defer_bh(dev, skb, &dev->rxq, state);

if (urb) {
@ -791,6 +808,11 @@ int usbnet_open (struct net_device *net)
(dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" :
"simple");

/* reset rx error state */
dev->pkt_cnt = 0;
dev->pkt_err = 0;
clear_bit(EVENT_RX_KILL, &dev->flags);

// delay posting reads until we're fully open
tasklet_schedule (&dev->bh);
if (info->manage_power) {
@ -1103,13 +1125,11 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
if (info->tx_fixup) {
skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
if (!skb) {
if (netif_msg_tx_err(dev)) {
netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
goto drop;
} else {
/* cdc_ncm collected packet; waits for more */
/* packet collected; minidriver waiting for more */
if (info->flags & FLAG_MULTI_PACKET)
goto not_drop;
}
netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
goto drop;
}
}
length = skb->len;
@ -1254,6 +1274,9 @@ static void usbnet_bh (unsigned long param)
}
}

/* restart RX again after disabling due to high error rate */
clear_bit(EVENT_RX_KILL, &dev->flags);

// waiting for all pending urbs to complete?
if (dev->wait) {
if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
@ -154,8 +154,7 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
if (ret & 1) { /* Link is up. */
printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
adapter->netdev->name, adapter->link_speed);
if (!netif_carrier_ok(adapter->netdev))
netif_carrier_on(adapter->netdev);
netif_carrier_on(adapter->netdev);

if (affectTxQueue) {
for (i = 0; i < adapter->num_tx_queues; i++)
@ -165,8 +164,7 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
} else {
printk(KERN_INFO "%s: NIC Link is Down\n",
adapter->netdev->name);
if (netif_carrier_ok(adapter->netdev))
netif_carrier_off(adapter->netdev);
netif_carrier_off(adapter->netdev);

if (affectTxQueue) {
for (i = 0; i < adapter->num_tx_queues; i++)
@ -3061,6 +3059,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);

netif_carrier_off(netdev);
err = register_netdev(netdev);

if (err) {
@ -36,6 +36,7 @@
#include "debug.h"

#define N_TX_QUEUES 4 /* #tx queues on mac80211<->driver interface */
#define BRCMS_FLUSH_TIMEOUT 500 /* msec */

/* Flags we support */
#define MAC_FILTERS (FIF_PROMISC_IN_BSS | \
@ -708,16 +709,29 @@ static void brcms_ops_rfkill_poll(struct ieee80211_hw *hw)
wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, blocked);
}

static bool brcms_tx_flush_completed(struct brcms_info *wl)
{
bool result;

spin_lock_bh(&wl->lock);
result = brcms_c_tx_flush_completed(wl->wlc);
spin_unlock_bh(&wl->lock);
return result;
}

static void brcms_ops_flush(struct ieee80211_hw *hw, bool drop)
{
struct brcms_info *wl = hw->priv;
int ret;

no_printk("%s: drop = %s\n", __func__, drop ? "true" : "false");

/* wait for packet queue and dma fifos to run empty */
spin_lock_bh(&wl->lock);
brcms_c_wait_for_tx_completion(wl->wlc, drop);
spin_unlock_bh(&wl->lock);
ret = wait_event_timeout(wl->tx_flush_wq,
brcms_tx_flush_completed(wl),
msecs_to_jiffies(BRCMS_FLUSH_TIMEOUT));

brcms_dbg_mac80211(wl->wlc->hw->d11core,
"ret=%d\n", jiffies_to_msecs(ret));
}

static const struct ieee80211_ops brcms_ops = {
@ -772,6 +786,7 @@ void brcms_dpc(unsigned long data)

done:
spin_unlock_bh(&wl->lock);
wake_up(&wl->tx_flush_wq);
}

/*
@ -1020,6 +1035,8 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev)

atomic_set(&wl->callbacks, 0);

init_waitqueue_head(&wl->tx_flush_wq);

/* setup the bottom half handler */
tasklet_init(&wl->tasklet, brcms_dpc, (unsigned long) wl);

@ -1609,13 +1626,3 @@ bool brcms_rfkill_set_hw_state(struct brcms_info *wl)
spin_lock_bh(&wl->lock);
return blocked;
}

/*
* precondition: perimeter lock has been acquired
*/
void brcms_msleep(struct brcms_info *wl, uint ms)
{
spin_unlock_bh(&wl->lock);
msleep(ms);
spin_lock_bh(&wl->lock);
}
@ -68,6 +68,8 @@ struct brcms_info {
spinlock_t lock; /* per-device perimeter lock */
spinlock_t isr_lock; /* per-device ISR synchronization lock */

/* tx flush */
wait_queue_head_t tx_flush_wq;

/* timer related fields */
atomic_t callbacks; /* # outstanding callback functions */
@ -100,7 +102,6 @@ extern struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
extern void brcms_free_timer(struct brcms_timer *timer);
extern void brcms_add_timer(struct brcms_timer *timer, uint ms, int periodic);
extern bool brcms_del_timer(struct brcms_timer *timer);
extern void brcms_msleep(struct brcms_info *wl, uint ms);
extern void brcms_dpc(unsigned long data);
extern void brcms_timer(struct brcms_timer *t);
extern void brcms_fatal_error(struct brcms_info *wl);
@ -1027,7 +1027,6 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
static bool
brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
{
bool morepending = false;
struct bcma_device *core;
struct tx_status txstatus, *txs;
u32 s1, s2;
@ -1041,23 +1040,20 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
txs = &txstatus;
core = wlc_hw->d11core;
*fatal = false;
s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
while (!(*fatal)
&& (s1 & TXS_V)) {
/* !give others some time to run! */
if (n >= max_tx_num) {
morepending = true;
break;
}

while (n < max_tx_num) {
s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
if (s1 == 0xffffffff) {
brcms_err(core, "wl%d: %s: dead chip\n", wlc_hw->unit,
__func__);
*fatal = true;
return false;
}
s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2));
/* only process when valid */
if (!(s1 & TXS_V))
break;

s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2));
txs->status = s1 & TXS_STATUS_MASK;
txs->frameid = (s1 & TXS_FID_MASK) >> TXS_FID_SHIFT;
txs->sequence = s2 & TXS_SEQ_MASK;
@ -1065,15 +1061,12 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
txs->lasttxtime = 0;

*fatal = brcms_c_dotxstatus(wlc_hw->wlc, txs);

s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
if (*fatal == true)
return false;
n++;
}

if (*fatal)
return false;

return morepending;
return n >= max_tx_num;
}

static void brcms_c_tbtt(struct brcms_c_info *wlc)
@ -7518,25 +7511,16 @@ int brcms_c_get_curband(struct brcms_c_info *wlc)
return wlc->band->bandunit;
}

void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc, bool drop)
bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc)
{
int timeout = 20;
int i;

/* Kick DMA to send any pending AMPDU */
for (i = 0; i < ARRAY_SIZE(wlc->hw->di); i++)
if (wlc->hw->di[i])
dma_txflush(wlc->hw->di[i]);
dma_kick_tx(wlc->hw->di[i]);

/* wait for queue and DMA fifos to run dry */
while (brcms_txpktpendtot(wlc) > 0) {
brcms_msleep(wlc->wl, 1);

if (--timeout == 0)
break;
}

WARN_ON_ONCE(timeout == 0);
return !brcms_txpktpendtot(wlc);
}

void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, u8 interval)
@ -314,8 +314,6 @@ extern void brcms_c_associate_upd(struct brcms_c_info *wlc, bool state);
extern void brcms_c_scan_start(struct brcms_c_info *wlc);
extern void brcms_c_scan_stop(struct brcms_c_info *wlc);
extern int brcms_c_get_curband(struct brcms_c_info *wlc);
extern void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc,
bool drop);
extern int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel);
extern int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl);
extern void brcms_c_get_current_rateset(struct brcms_c_info *wlc,
@ -332,5 +330,6 @@ extern int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr);
extern int brcms_c_get_tx_power(struct brcms_c_info *wlc);
extern bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc);
extern void brcms_c_mute(struct brcms_c_info *wlc, bool on);
extern bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc);

#endif /* _BRCM_PUB_H_ */
@ -1153,6 +1153,13 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
next_reclaimed = ssn;
}

if (tid != IWL_TID_NON_QOS) {
priv->tid_data[sta_id][tid].next_reclaimed =
next_reclaimed;
IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
next_reclaimed);
}

iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);

iwlagn_check_ratid_empty(priv, sta_id, tid);
@ -1203,28 +1210,11 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
if (!is_agg)
iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);

/*
* W/A for FW bug - the seq_ctl isn't updated when the
* queues are flushed. Fetch it from the packet itself
*/
if (!is_agg && status == TX_STATUS_FAIL_FIFO_FLUSHED) {
next_reclaimed = le16_to_cpu(hdr->seq_ctrl);
next_reclaimed =
SEQ_TO_SN(next_reclaimed + 0x10);
}

is_offchannel_skb =
(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN);
freed++;
}

if (tid != IWL_TID_NON_QOS) {
priv->tid_data[sta_id][tid].next_reclaimed =
next_reclaimed;
IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
next_reclaimed);
}

WARN_ON(!is_agg && freed != 1);

/*
@ -1563,7 +1563,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
dev_err(adapter->dev, "SCAN_RESP: too many AP returned (%d)\n",
scan_rsp->number_of_sets);
ret = -1;
goto done;
goto check_next_scan;
}

bytes_left = le16_to_cpu(scan_rsp->bss_descript_size);
@ -1634,7 +1634,8 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
if (!beacon_size || beacon_size > bytes_left) {
bss_info += bytes_left;
bytes_left = 0;
return -1;
ret = -1;
goto check_next_scan;
}

/* Initialize the current working beacon pointer for this BSS
@ -1690,7 +1691,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
dev_err(priv->adapter->dev,
"%s: bytes left < IE length\n",
__func__);
goto done;
goto check_next_scan;
}
if (element_id == WLAN_EID_DS_PARAMS) {
channel = *(current_ptr + sizeof(struct ieee_types_header));
@ -1753,6 +1754,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
}
}

check_next_scan:
spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
if (list_empty(&adapter->scan_pending_q)) {
spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
@ -1813,7 +1815,6 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
}
}

done:
return ret;
}

@ -1004,7 +1004,8 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
is_tx ? "Tx" : "Rx");

if (is_tx) {
rtl_lps_leave(hw);
schedule_work(&rtlpriv->
works.lps_leave_work);
ppsc->last_delaylps_stamp_jiffies =
jiffies;
}
@ -1014,7 +1015,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
}
} else if (ETH_P_ARP == ether_type) {
if (is_tx) {
rtl_lps_leave(hw);
schedule_work(&rtlpriv->works.lps_leave_work);
ppsc->last_delaylps_stamp_jiffies = jiffies;
}

@ -1024,7 +1025,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
"802.1X %s EAPOL pkt!!\n", is_tx ? "Tx" : "Rx");

if (is_tx) {
rtl_lps_leave(hw);
schedule_work(&rtlpriv->works.lps_leave_work);
ppsc->last_delaylps_stamp_jiffies = jiffies;
}

@ -542,8 +542,8 @@ static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
WARN_ON(skb_queue_empty(&rx_queue));
while (!skb_queue_empty(&rx_queue)) {
_skb = skb_dequeue(&rx_queue);
_rtl_usb_rx_process_agg(hw, skb);
ieee80211_rx_irqsafe(hw, skb);
_rtl_usb_rx_process_agg(hw, _skb);
ieee80211_rx_irqsafe(hw, _skb);
}
}

@ -151,6 +151,9 @@ void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
/* Notify xenvif that ring now has space to send an skb to the frontend */
void xenvif_notify_tx_completion(struct xenvif *vif);

/* Prevent the device from generating any further traffic. */
void xenvif_carrier_off(struct xenvif *vif);

/* Returns number of ring slots required to send an skb to the frontend */
unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);

@ -343,17 +343,22 @@ err:
return err;
}

void xenvif_disconnect(struct xenvif *vif)
void xenvif_carrier_off(struct xenvif *vif)
{
struct net_device *dev = vif->dev;
if (netif_carrier_ok(dev)) {
rtnl_lock();
netif_carrier_off(dev); /* discard queued packets */
if (netif_running(dev))
xenvif_down(vif);
rtnl_unlock();
xenvif_put(vif);
}

rtnl_lock();
netif_carrier_off(dev); /* discard queued packets */
if (netif_running(dev))
xenvif_down(vif);
rtnl_unlock();
xenvif_put(vif);
}

void xenvif_disconnect(struct xenvif *vif)
{
if (netif_carrier_ok(vif->dev))
xenvif_carrier_off(vif);

atomic_dec(&vif->refcnt);
wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
@ -147,7 +147,8 @@ void xen_netbk_remove_xenvif(struct xenvif *vif)
atomic_dec(&netbk->netfront_count);
}

static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx);
static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
u8 status);
static void make_tx_response(struct xenvif *vif,
struct xen_netif_tx_request *txp,
s8 st);
@ -879,7 +880,7 @@ static void netbk_tx_err(struct xenvif *vif,

do {
make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
if (cons >= end)
if (cons == end)
break;
txp = RING_GET_REQUEST(&vif->tx, cons++);
} while (1);
@ -888,6 +889,13 @@ static void netbk_tx_err(struct xenvif *vif,
xenvif_put(vif);
}

static void netbk_fatal_tx_err(struct xenvif *vif)
{
netdev_err(vif->dev, "fatal error; disabling device\n");
xenvif_carrier_off(vif);
xenvif_put(vif);
}

static int netbk_count_requests(struct xenvif *vif,
struct xen_netif_tx_request *first,
struct xen_netif_tx_request *txp,
@ -901,19 +909,22 @@ static int netbk_count_requests(struct xenvif *vif,

do {
if (frags >= work_to_do) {
netdev_dbg(vif->dev, "Need more frags\n");
netdev_err(vif->dev, "Need more frags\n");
netbk_fatal_tx_err(vif);
return -frags;
}

if (unlikely(frags >= MAX_SKB_FRAGS)) {
netdev_dbg(vif->dev, "Too many frags\n");
netdev_err(vif->dev, "Too many frags\n");
netbk_fatal_tx_err(vif);
return -frags;
}

memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
sizeof(*txp));
if (txp->size > first->size) {
netdev_dbg(vif->dev, "Frags galore\n");
netdev_err(vif->dev, "Frag is bigger than frame.\n");
netbk_fatal_tx_err(vif);
return -frags;
}

@ -921,8 +932,9 @@ static int netbk_count_requests(struct xenvif *vif,
frags++;

if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n",
netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
txp->offset, txp->size);
netbk_fatal_tx_err(vif);
return -frags;
}
} while ((txp++)->flags & XEN_NETTXF_more_data);
@ -966,7 +978,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
pending_idx = netbk->pending_ring[index];
page = xen_netbk_alloc_page(netbk, skb, pending_idx);
if (!page)
return NULL;
goto err;

gop->source.u.ref = txp->gref;
gop->source.domid = vif->domid;
@ -988,6 +1000,17 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
}

return gop;
err:
/* Unwind, freeing all pages and sending error responses. */
while (i-- > start) {
xen_netbk_idx_release(netbk, frag_get_pending_idx(&frags[i]),
XEN_NETIF_RSP_ERROR);
}
/* The head too, if necessary. */
if (start)
xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);

return NULL;
}

static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
@ -996,30 +1019,20 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
{
struct gnttab_copy *gop = *gopp;
u16 pending_idx = *((u16 *)skb->data);
struct pending_tx_info *pending_tx_info = netbk->pending_tx_info;
struct xenvif *vif = pending_tx_info[pending_idx].vif;
struct xen_netif_tx_request *txp;
struct skb_shared_info *shinfo = skb_shinfo(skb);
int nr_frags = shinfo->nr_frags;
int i, err, start;

/* Check status of header. */
err = gop->status;
if (unlikely(err)) {
pending_ring_idx_t index;
index = pending_index(netbk->pending_prod++);
txp = &pending_tx_info[pending_idx].req;
make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
netbk->pending_ring[index] = pending_idx;
xenvif_put(vif);
}
if (unlikely(err))
xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);

/* Skip first skb fragment if it is on same page as header fragment. */
start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

for (i = start; i < nr_frags; i++) {
int j, newerr;
pending_ring_idx_t index;

pending_idx = frag_get_pending_idx(&shinfo->frags[i]);

@ -1028,16 +1041,12 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
if (likely(!newerr)) {
/* Had a previous error? Invalidate this fragment. */
if (unlikely(err))
xen_netbk_idx_release(netbk, pending_idx);
xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
continue;
}

/* Error on this fragment: respond to client with an error. */
txp = &netbk->pending_tx_info[pending_idx].req;
make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
index = pending_index(netbk->pending_prod++);
netbk->pending_ring[index] = pending_idx;
xenvif_put(vif);
xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);

/* Not the first error? Preceding frags already invalidated. */
if (err)
@ -1045,10 +1054,10 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,

/* First error: invalidate header and preceding fragments. */
pending_idx = *((u16 *)skb->data);
xen_netbk_idx_release(netbk, pending_idx);
xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
for (j = start; j < i; j++) {
pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
xen_netbk_idx_release(netbk, pending_idx);
xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
}

/* Remember the error: invalidate all subsequent fragments. */
@ -1082,7 +1091,7 @@ static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)

/* Take an extra reference to offset xen_netbk_idx_release */
get_page(netbk->mmap_pages[pending_idx]);
xen_netbk_idx_release(netbk, pending_idx);
xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
}
}

@ -1095,7 +1104,8 @@ static int xen_netbk_get_extras(struct xenvif *vif,

do {
if (unlikely(work_to_do-- <= 0)) {
netdev_dbg(vif->dev, "Missing extra info\n");
netdev_err(vif->dev, "Missing extra info\n");
netbk_fatal_tx_err(vif);
return -EBADR;
}

@ -1104,8 +1114,9 @@ static int xen_netbk_get_extras(struct xenvif *vif,
if (unlikely(!extra.type ||
extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
vif->tx.req_cons = ++cons;
netdev_dbg(vif->dev,
netdev_err(vif->dev,
"Invalid extra type: %d\n", extra.type);
netbk_fatal_tx_err(vif);
return -EINVAL;
}

@ -1121,13 +1132,15 @@ static int netbk_set_skb_gso(struct xenvif *vif,
struct xen_netif_extra_info *gso)
{
if (!gso->u.gso.size) {
netdev_dbg(vif->dev, "GSO size must not be zero.\n");
netdev_err(vif->dev, "GSO size must not be zero.\n");
netbk_fatal_tx_err(vif);
return -EINVAL;
}

/* Currently only TCPv4 S.O. is supported. */
if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
netbk_fatal_tx_err(vif);
return -EINVAL;
}

@ -1264,9 +1277,25 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)

/* Get a netif from the list with work to do. */
vif = poll_net_schedule_list(netbk);
/* This can sometimes happen because the test of
* list_empty(net_schedule_list) at the top of the
* loop is unlocked. Just go back and have another
* look.
*/
if (!vif)
continue;

if (vif->tx.sring->req_prod - vif->tx.req_cons >
XEN_NETIF_TX_RING_SIZE) {
netdev_err(vif->dev,
"Impossible number of requests. "
"req_prod %d, req_cons %d, size %ld\n",
vif->tx.sring->req_prod, vif->tx.req_cons,
XEN_NETIF_TX_RING_SIZE);
netbk_fatal_tx_err(vif);
continue;
}

RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
if (!work_to_do) {
xenvif_put(vif);
@ -1294,17 +1323,14 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
work_to_do = xen_netbk_get_extras(vif, extras,
work_to_do);
idx = vif->tx.req_cons;
if (unlikely(work_to_do < 0)) {
netbk_tx_err(vif, &txreq, idx);
if (unlikely(work_to_do < 0))
continue;
}
}

ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
if (unlikely(ret < 0)) {
netbk_tx_err(vif, &txreq, idx - ret);
if (unlikely(ret < 0))
continue;
}

idx += ret;

if (unlikely(txreq.size < ETH_HLEN)) {
@ -1316,11 +1342,11 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)

/* No crossing a page as the payload mustn't fragment. */
if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
netdev_dbg(vif->dev,
netdev_err(vif->dev,
"txreq.offset: %x, size: %u, end: %lu\n",
txreq.offset, txreq.size,
(txreq.offset&~PAGE_MASK) + txreq.size);
netbk_tx_err(vif, &txreq, idx);
netbk_fatal_tx_err(vif);
continue;
}

@ -1348,8 +1374,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

if (netbk_set_skb_gso(vif, skb, gso)) {
/* Failure in netbk_set_skb_gso is fatal. */
kfree_skb(skb);
netbk_tx_err(vif, &txreq, idx);
continue;
}
}
@ -1448,7 +1474,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
txp->size -= data_len;
} else {
/* Schedule a response immediately. */
xen_netbk_idx_release(netbk, pending_idx);
xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
}

if (txp->flags & XEN_NETTXF_csum_blank)
@ -1500,7 +1526,8 @@ static void xen_netbk_tx_action(struct xen_netbk *netbk)
xen_netbk_tx_submit(netbk);
}

static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
u8 status)
{
struct xenvif *vif;
struct pending_tx_info *pending_tx_info;
@ -1514,7 +1541,7 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)

vif = pending_tx_info->vif;

make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY);
make_tx_response(vif, &pending_tx_info->req, status);

index = pending_index(netbk->pending_prod++);
netbk->pending_ring[index] = pending_idx;
@ -184,8 +184,8 @@ config PINCTRL_SAMSUNG
select PINMUX
select PINCONF

config PINCTRL_EXYNOS4
bool "Pinctrl driver data for Exynos4 SoC"
config PINCTRL_EXYNOS
bool "Pinctrl driver data for Samsung EXYNOS SoCs"
depends on OF && GPIOLIB
select PINCTRL_SAMSUNG

@ -36,7 +36,7 @@ obj-$(CONFIG_PINCTRL_TEGRA30) += pinctrl-tegra30.o
obj-$(CONFIG_PINCTRL_U300) += pinctrl-u300.o
obj-$(CONFIG_PINCTRL_COH901) += pinctrl-coh901.o
obj-$(CONFIG_PINCTRL_SAMSUNG) += pinctrl-samsung.o
obj-$(CONFIG_PINCTRL_EXYNOS4) += pinctrl-exynos.o
obj-$(CONFIG_PINCTRL_EXYNOS) += pinctrl-exynos.o
obj-$(CONFIG_PINCTRL_EXYNOS5440) += pinctrl-exynos5440.o
obj-$(CONFIG_PINCTRL_XWAY) += pinctrl-xway.o
obj-$(CONFIG_PINCTRL_LANTIQ) += pinctrl-lantiq.o
@ -1246,6 +1246,22 @@ static void __iomem *sirfsoc_rsc_of_iomap(void)
return of_iomap(np, 0);
}

static int sirfsoc_gpio_of_xlate(struct gpio_chip *gc,
const struct of_phandle_args *gpiospec,
u32 *flags)
{
if (gpiospec->args[0] > SIRFSOC_GPIO_NO_OF_BANKS * SIRFSOC_GPIO_BANK_SIZE)
return -EINVAL;

if (gc != &sgpio_bank[gpiospec->args[0] / SIRFSOC_GPIO_BANK_SIZE].chip.gc)
return -EINVAL;

if (flags)
*flags = gpiospec->args[1];

return gpiospec->args[0] % SIRFSOC_GPIO_BANK_SIZE;
}

static int sirfsoc_pinmux_probe(struct platform_device *pdev)
{
int ret;
@ -1736,6 +1752,8 @@ static int sirfsoc_gpio_probe(struct device_node *np)
bank->chip.gc.ngpio = SIRFSOC_GPIO_BANK_SIZE;
bank->chip.gc.label = kstrdup(np->full_name, GFP_KERNEL);
bank->chip.gc.of_node = np;
bank->chip.gc.of_xlate = sirfsoc_gpio_of_xlate;
bank->chip.gc.of_gpio_n_cells = 2;
bank->chip.regs = regs;
bank->id = i;
bank->is_marco = is_marco;
@ -379,9 +379,10 @@ static struct regulator_desc regulators[] = {
};

#ifdef CONFIG_OF
static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev,
static int max77686_pmic_dt_parse_pdata(struct platform_device *pdev,
struct max77686_platform_data *pdata)
{
struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent);
struct device_node *pmic_np, *regulators_np;
struct max77686_regulator_data *rdata;
struct of_regulator_match rmatch;
@ -390,15 +391,15 @@ static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev,
pmic_np = iodev->dev->of_node;
regulators_np = of_find_node_by_name(pmic_np, "voltage-regulators");
if (!regulators_np) {
dev_err(iodev->dev, "could not find regulators sub-node\n");
dev_err(&pdev->dev, "could not find regulators sub-node\n");
return -EINVAL;
}

pdata->num_regulators = ARRAY_SIZE(regulators);
rdata = devm_kzalloc(iodev->dev, sizeof(*rdata) *
rdata = devm_kzalloc(&pdev->dev, sizeof(*rdata) *
pdata->num_regulators, GFP_KERNEL);
if (!rdata) {
dev_err(iodev->dev,
dev_err(&pdev->dev,
"could not allocate memory for regulator data\n");
return -ENOMEM;
}
@ -407,7 +408,7 @@ static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev,
rmatch.name = regulators[i].name;
rmatch.init_data = NULL;
rmatch.of_node = NULL;
of_regulator_match(iodev->dev, regulators_np, &rmatch, 1);
of_regulator_match(&pdev->dev, regulators_np, &rmatch, 1);
rdata[i].initdata = rmatch.init_data;
rdata[i].of_node = rmatch.of_node;
}
@ -417,7 +418,7 @@ static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev,
return 0;
}
#else
static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev,
static int max77686_pmic_dt_parse_pdata(struct platform_device *pdev,
struct max77686_platform_data *pdata)
{
return 0;
@ -440,7 +441,7 @@ static int max77686_pmic_probe(struct platform_device *pdev)
}

if (iodev->dev->of_node) {
ret = max77686_pmic_dt_parse_pdata(iodev, pdata);
ret = max77686_pmic_dt_parse_pdata(pdev, pdata);
if (ret)
return ret;
}
@ -237,8 +237,7 @@ static int max8907_regulator_parse_dt(struct platform_device *pdev)
return -EINVAL;
}

ret = of_regulator_match(pdev->dev.parent, regulators,
max8907_matches,
ret = of_regulator_match(&pdev->dev, regulators, max8907_matches,
ARRAY_SIZE(max8907_matches));
if (ret < 0) {
dev_err(&pdev->dev, "Error parsing regulator init data: %d\n",
@ -934,7 +934,7 @@ static struct regulator_desc regulators[] = {
};

#ifdef CONFIG_OF
static int max8997_pmic_dt_parse_dvs_gpio(struct max8997_dev *iodev,
static int max8997_pmic_dt_parse_dvs_gpio(struct platform_device *pdev,
struct max8997_platform_data *pdata,
struct device_node *pmic_np)
{
@ -944,7 +944,7 @@ static int max8997_pmic_dt_parse_dvs_gpio(struct max8997_dev *iodev,
gpio = of_get_named_gpio(pmic_np,
"max8997,pmic-buck125-dvs-gpios", i);
if (!gpio_is_valid(gpio)) {
dev_err(iodev->dev, "invalid gpio[%d]: %d\n", i, gpio);
dev_err(&pdev->dev, "invalid gpio[%d]: %d\n", i, gpio);
return -EINVAL;
}
pdata->buck125_gpios[i] = gpio;
@ -952,22 +952,23 @@ static int max8997_pmic_dt_parse_dvs_gpio(struct max8997_dev *iodev,
return 0;
}

static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev,
static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev,
struct max8997_platform_data *pdata)
{
struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
struct device_node *pmic_np, *regulators_np, *reg_np;
struct max8997_regulator_data *rdata;
unsigned int i, dvs_voltage_nr = 1, ret;

pmic_np = iodev->dev->of_node;
if (!pmic_np) {
dev_err(iodev->dev, "could not find pmic sub-node\n");
dev_err(&pdev->dev, "could not find pmic sub-node\n");
return -ENODEV;
}

regulators_np = of_find_node_by_name(pmic_np, "regulators");
if (!regulators_np) {
dev_err(iodev->dev, "could not find regulators sub-node\n");
dev_err(&pdev->dev, "could not find regulators sub-node\n");
return -EINVAL;
}

@ -976,11 +977,10 @@ static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev,
for_each_child_of_node(regulators_np, reg_np)
pdata->num_regulators++;

rdata = devm_kzalloc(iodev->dev, sizeof(*rdata) *
rdata = devm_kzalloc(&pdev->dev, sizeof(*rdata) *
pdata->num_regulators, GFP_KERNEL);
if (!rdata) {
dev_err(iodev->dev, "could not allocate memory for "
"regulator data\n");
dev_err(&pdev->dev, "could not allocate memory for regulator data\n");
return -ENOMEM;
}

@ -991,14 +991,14 @@ static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev,
break;

if (i == ARRAY_SIZE(regulators)) {
dev_warn(iodev->dev, "don't know how to configure "
"regulator %s\n", reg_np->name);
dev_warn(&pdev->dev, "don't know how to configure regulator %s\n",
reg_np->name);
continue;
}

rdata->id = i;
rdata->initdata = of_get_regulator_init_data(
iodev->dev, reg_np);
rdata->initdata = of_get_regulator_init_data(&pdev->dev,
reg_np);
rdata->reg_node = reg_np;
rdata++;
}
@ -1014,7 +1014,7 @@ static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev,

if (pdata->buck1_gpiodvs || pdata->buck2_gpiodvs ||
pdata->buck5_gpiodvs) {
ret = max8997_pmic_dt_parse_dvs_gpio(iodev, pdata, pmic_np);
ret = max8997_pmic_dt_parse_dvs_gpio(pdev, pdata, pmic_np);
if (ret)
return -EINVAL;

@ -1025,8 +1025,7 @@ static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev,
} else {
if (pdata->buck125_default_idx >= 8) {
pdata->buck125_default_idx = 0;
dev_info(iodev->dev, "invalid value for "
"default dvs index, using 0 instead\n");
dev_info(&pdev->dev, "invalid value for default dvs index, using 0 instead\n");
}
}

@ -1040,28 +1039,28 @@ static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev,
if (of_property_read_u32_array(pmic_np,
"max8997,pmic-buck1-dvs-voltage",
pdata->buck1_voltage, dvs_voltage_nr)) {
dev_err(iodev->dev, "buck1 voltages not specified\n");
dev_err(&pdev->dev, "buck1 voltages not specified\n");
return -EINVAL;
}

if (of_property_read_u32_array(pmic_np,
"max8997,pmic-buck2-dvs-voltage",
pdata->buck2_voltage, dvs_voltage_nr)) {
dev_err(iodev->dev, "buck2 voltages not specified\n");
dev_err(&pdev->dev, "buck2 voltages not specified\n");
return -EINVAL;
}

if (of_property_read_u32_array(pmic_np,
"max8997,pmic-buck5-dvs-voltage",
pdata->buck5_voltage, dvs_voltage_nr)) {
dev_err(iodev->dev, "buck5 voltages not specified\n");
dev_err(&pdev->dev, "buck5 voltages not specified\n");
return -EINVAL;
}

return 0;
}
#else
static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev,
static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev,
struct max8997_platform_data *pdata)
{
return 0;
@ -1085,7 +1084,7 @@ static int max8997_pmic_probe(struct platform_device *pdev)
}

if (iodev->dev->of_node) {
ret = max8997_pmic_dt_parse_pdata(iodev, pdata);
ret = max8997_pmic_dt_parse_pdata(pdev, pdata);
if (ret)
return ret;
}
@ -65,7 +65,7 @@ static const struct voltage_map_desc ldo9_voltage_map_desc = {
.min = 2800000, .step = 100000, .max = 3100000,
};
static const struct voltage_map_desc ldo10_voltage_map_desc = {
.min = 95000, .step = 50000, .max = 1300000,
.min = 950000, .step = 50000, .max = 1300000,
};
static const struct voltage_map_desc ldo1213_voltage_map_desc = {
.min = 800000, .step = 100000, .max = 3300000,
@ -120,6 +120,12 @@ int of_regulator_match(struct device *dev, struct device_node *node,
if (!dev || !node)
return -EINVAL;

for (i = 0; i < num_matches; i++) {
struct of_regulator_match *match = &matches[i];
match->init_data = NULL;
match->of_node = NULL;
}

for_each_child_of_node(node, child) {
name = of_get_property(child,
"regulator-compatible", NULL);
@ -174,9 +174,9 @@ static struct regulator_ops s2mps11_buck_ops = {
.min_uV = S2MPS11_BUCK_MIN2, \
.uV_step = S2MPS11_BUCK_STEP2, \
.n_voltages = S2MPS11_BUCK_N_VOLTAGES, \
.vsel_reg = S2MPS11_REG_B9CTRL2, \
.vsel_reg = S2MPS11_REG_B10CTRL2, \
.vsel_mask = S2MPS11_BUCK_VSEL_MASK, \
.enable_reg = S2MPS11_REG_B9CTRL1, \
.enable_reg = S2MPS11_REG_B10CTRL1, \
.enable_mask = S2MPS11_ENABLE_MASK \
}

@ -305,8 +305,8 @@ static struct tps65217_board *tps65217_parse_dt(struct platform_device *pdev)
if (!regs)
return NULL;

count = of_regulator_match(pdev->dev.parent, regs,
reg_matches, TPS65217_NUM_REGULATOR);
count = of_regulator_match(&pdev->dev, regs, reg_matches,
TPS65217_NUM_REGULATOR);
of_node_put(regs);
if ((count < 0) || (count > TPS65217_NUM_REGULATOR))
return NULL;
@ -998,7 +998,7 @@ static struct tps65910_board *tps65910_parse_dt_reg_data(
return NULL;
}

ret = of_regulator_match(pdev->dev.parent, regulators, matches, count);
ret = of_regulator_match(&pdev->dev, regulators, matches, count);
if (ret < 0) {
dev_err(&pdev->dev, "Error parsing regulator init data: %d\n",
ret);
@ -506,6 +506,7 @@ isl1208_rtc_interrupt(int irq, void *data)
{
unsigned long timeout = jiffies + msecs_to_jiffies(1000);
struct i2c_client *client = data;
struct rtc_device *rtc = i2c_get_clientdata(client);
int handled = 0, sr, err;

/*
@ -528,6 +529,8 @@ isl1208_rtc_interrupt(int irq, void *data)
if (sr & ISL1208_REG_SR_ALM) {
dev_dbg(&client->dev, "alarm!\n");

rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF);

/* Clear the alarm */
sr &= ~ISL1208_REG_SR_ALM;
sr = i2c_smbus_write_byte_data(client, ISL1208_REG_SR, sr);
@ -44,6 +44,7 @@
#define RTC_YMR 0x34 /* Year match register */
#define RTC_YLR 0x38 /* Year data load register */

#define RTC_CR_EN (1 << 0) /* counter enable bit */
#define RTC_CR_CWEN (1 << 26) /* Clockwatch enable bit */

#define RTC_TCR_EN (1 << 1) /* Periodic timer enable bit */
@ -320,7 +321,7 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
struct pl031_local *ldata;
struct pl031_vendor_data *vendor = id->data;
struct rtc_class_ops *ops = &vendor->ops;
unsigned long time;
unsigned long time, data;

ret = amba_request_regions(adev, NULL);
if (ret)
@ -345,10 +346,11 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
dev_dbg(&adev->dev, "designer ID = 0x%02x\n", amba_manf(adev));
dev_dbg(&adev->dev, "revision = 0x%01x\n", amba_rev(adev));

data = readl(ldata->base + RTC_CR);
/* Enable the clockwatch on ST Variants */
if (vendor->clockwatch)
writel(readl(ldata->base + RTC_CR) | RTC_CR_CWEN,
ldata->base + RTC_CR);
data |= RTC_CR_CWEN;
writel(data | RTC_CR_EN, ldata->base + RTC_CR);

/*
* On ST PL031 variants, the RTC reset value does not provide correct
@ -137,7 +137,7 @@ static int vt8500_rtc_set_time(struct device *dev, struct rtc_time *tm)
return -EINVAL;
}

writel((bin2bcd(tm->tm_year - 100) << DATE_YEAR_S)
writel((bin2bcd(tm->tm_year % 100) << DATE_YEAR_S)
| (bin2bcd(tm->tm_mon + 1) << DATE_MONTH_S)
| (bin2bcd(tm->tm_mday))
| ((tm->tm_year >= 200) << DATE_CENTURY_S),