Merge branch 'perf/urgent' into perf/core to fix conflicts

Conflicts:
	tools/perf/bench/numa.c

Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 2a3ede8cb2
Author: Ingo Molnar
Date:   2013-11-04 07:49:35 +01:00
83 changed files with 730 additions and 682 deletions


@ -8917,61 +8917,14 @@ W: http://pegasus2.sourceforge.net/
S: Maintained S: Maintained
F: drivers/net/usb/rtl8150.c F: drivers/net/usb/rtl8150.c
USB SERIAL BELKIN F5U103 DRIVER USB SERIAL SUBSYSTEM
M: William Greathouse <wgreathouse@smva.com> M: Johan Hovold <jhovold@gmail.com>
L: linux-usb@vger.kernel.org L: linux-usb@vger.kernel.org
S: Maintained S: Maintained
F: drivers/usb/serial/belkin_sa.*
USB SERIAL CYPRESS M8 DRIVER
M: Lonnie Mendez <dignome@gmail.com>
L: linux-usb@vger.kernel.org
S: Maintained
W: http://geocities.com/i0xox0i
W: http://firstlight.net/cvs
F: drivers/usb/serial/cypress_m8.*
USB SERIAL CYBERJACK DRIVER
M: Matthias Bruestle and Harald Welte <support@reiner-sct.com>
W: http://www.reiner-sct.de/support/treiber_cyberjack.php
S: Maintained
F: drivers/usb/serial/cyberjack.c
USB SERIAL DIGI ACCELEPORT DRIVER
M: Peter Berger <pberger@brimson.com>
M: Al Borchers <alborchers@steinerpoint.com>
L: linux-usb@vger.kernel.org
S: Maintained
F: drivers/usb/serial/digi_acceleport.c
USB SERIAL DRIVER
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
L: linux-usb@vger.kernel.org
S: Supported
F: Documentation/usb/usb-serial.txt F: Documentation/usb/usb-serial.txt
F: drivers/usb/serial/generic.c F: drivers/usb/serial/
F: drivers/usb/serial/usb-serial.c
F: include/linux/usb/serial.h F: include/linux/usb/serial.h
USB SERIAL EMPEG EMPEG-CAR MARK I/II DRIVER
M: Gary Brubaker <xavyer@ix.netcom.com>
L: linux-usb@vger.kernel.org
S: Maintained
F: drivers/usb/serial/empeg.c
USB SERIAL KEYSPAN DRIVER
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
L: linux-usb@vger.kernel.org
S: Maintained
F: drivers/usb/serial/*keyspan*
USB SERIAL WHITEHEAT DRIVER
M: Support Department <support@connecttech.com>
L: linux-usb@vger.kernel.org
W: http://www.connecttech.com
S: Supported
F: drivers/usb/serial/whiteheat*
USB SMSC75XX ETHERNET DRIVER USB SMSC75XX ETHERNET DRIVER
M: Steve Glendinning <steve.glendinning@shawell.net> M: Steve Glendinning <steve.glendinning@shawell.net>
L: netdev@vger.kernel.org L: netdev@vger.kernel.org


@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 12
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION =
 NAME = One Giant Leap for Frogkind
 # *DOCUMENTATION*


@@ -17,7 +17,7 @@
 #include <asm/pgalloc.h>
 #include <asm/mmu.h>
-static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long address)
+static int handle_vmalloc_fault(unsigned long address)
 {
 /*
 * Synchronize this task's top level page-table
@@ -27,7 +27,7 @@ static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long address)
 pud_t *pud, *pud_k;
 pmd_t *pmd, *pmd_k;
-pgd = pgd_offset_fast(mm, address);
+pgd = pgd_offset_fast(current->active_mm, address);
 pgd_k = pgd_offset_k(address);
 if (!pgd_present(*pgd_k))
@@ -72,7 +72,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address)
 * nothing more.
 */
 if (address >= VMALLOC_START && address <= VMALLOC_END) {
-ret = handle_vmalloc_fault(mm, address);
+ret = handle_vmalloc_fault(address);
 if (unlikely(ret))
 goto bad_area_nosemaphore;
 else


@@ -971,11 +971,11 @@ static const struct mips_perf_event mipsxx74Kcore_cache_map
 [C(LL)] = {
 [C(OP_READ)] = {
 [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
-[C(RESULT_MISS)] = { 0x1d, CNTR_EVEN | CNTR_ODD, P },
+[C(RESULT_MISS)] = { 0x1d, CNTR_EVEN, P },
 },
 [C(OP_WRITE)] = {
 [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
-[C(RESULT_MISS)] = { 0x1d, CNTR_EVEN | CNTR_ODD, P },
+[C(RESULT_MISS)] = { 0x1d, CNTR_EVEN, P },
 },
 },
 [C(ITLB)] = {


@@ -473,7 +473,7 @@ static void __init fill_ipi_map(void)
 {
 int cpu;
-for (cpu = 0; cpu < NR_CPUS; cpu++) {
+for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
 fill_ipi_map1(gic_resched_int_base, cpu, GIC_CPU_INT1);
 fill_ipi_map1(gic_call_int_base, cpu, GIC_CPU_INT2);
 }
@@ -574,8 +574,9 @@ void __init arch_init_irq(void)
 /* FIXME */
 int i;
 #if defined(CONFIG_MIPS_MT_SMP)
-gic_call_int_base = GIC_NUM_INTRS - NR_CPUS;
-gic_resched_int_base = gic_call_int_base - NR_CPUS;
+gic_call_int_base = GIC_NUM_INTRS -
+(NR_CPUS - nr_cpu_ids) * 2 - nr_cpu_ids;
+gic_resched_int_base = gic_call_int_base - nr_cpu_ids;
 fill_ipi_map();
 #endif
 gic_init(GIC_BASE_ADDR, GIC_ADDRSPACE_SZ, gic_intr_map,
@@ -599,7 +600,7 @@ void __init arch_init_irq(void)
 printk("CPU%d: status register now %08x\n", smp_processor_id(), read_c0_status());
 write_c0_status(0x1100dc00);
 printk("CPU%d: status register frc %08x\n", smp_processor_id(), read_c0_status());
-for (i = 0; i < NR_CPUS; i++) {
+for (i = 0; i < nr_cpu_ids; i++) {
 arch_init_ipiirq(MIPS_GIC_IRQ_BASE +
 GIC_RESCHED_INT(i), &irq_resched);
 arch_init_ipiirq(MIPS_GIC_IRQ_BASE +


@@ -126,7 +126,7 @@ static int rt_timer_probe(struct platform_device *pdev)
 return -ENOENT;
 }
-rt->membase = devm_request_and_ioremap(&pdev->dev, res);
+rt->membase = devm_ioremap_resource(&pdev->dev, res);
 if (IS_ERR(rt->membase))
 return PTR_ERR(rt->membase);


@@ -40,9 +40,11 @@ static ssize_t exitcode_proc_write(struct file *file,
 const char __user *buffer, size_t count, loff_t *pos)
 {
 char *end, buf[sizeof("nnnnn\0")];
+size_t size;
 int tmp;
-if (copy_from_user(buf, buffer, count))
+size = min(count, sizeof(buf));
+if (copy_from_user(buf, buffer, size))
 return -EFAULT;
 tmp = simple_strtol(buf, &end, 0);
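The fix above clamps the caller-supplied count before copying into the small on-stack buffer. A minimal plain-C sketch of the same bounds-clamping pattern (the function name, buffer size and MIN macro are illustrative, not the kernel code itself):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Copy at most sizeof(buf) - 1 bytes so a caller-controlled length can
 * never overrun the stack buffer, then parse the number. */
static long parse_exitcode(const char *data, size_t count)
{
    char buf[sizeof("nnnnn")] = "";
    size_t size = MIN(count, sizeof(buf) - 1);

    memcpy(buf, data, size);
    return strtol(buf, NULL, 0);
}

int main(void)
{
    printf("%ld\n", parse_exitcode("1234567890", 10)); /* clamped to "12345" */
    return 0;
}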


@@ -128,7 +128,8 @@ do { \
 do { \
 typedef typeof(var) pao_T__; \
 const int pao_ID__ = (__builtin_constant_p(val) && \
-((val) == 1 || (val) == -1)) ? (val) : 0; \
+((val) == 1 || (val) == -1)) ? \
+(int)(val) : 0; \
 if (0) { \
 pao_T__ pao_tmp__; \
 pao_tmp__ = (val); \


@@ -1276,16 +1276,16 @@ void perf_events_lapic_init(void)
 static int __kprobes
 perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
 {
-int ret;
 u64 start_clock;
 u64 finish_clock;
+int ret;
 if (!atomic_read(&active_events))
 return NMI_DONE;
-start_clock = local_clock();
+start_clock = sched_clock();
 ret = x86_pmu.handle_irq(regs);
-finish_clock = local_clock();
+finish_clock = sched_clock();
 perf_sample_event_took(finish_clock - start_clock);


@@ -609,7 +609,7 @@ static struct dentry *d_kvm_debug;
 struct dentry *kvm_init_debugfs(void)
 {
-d_kvm_debug = debugfs_create_dir("kvm", NULL);
+d_kvm_debug = debugfs_create_dir("kvm-guest", NULL);
 if (!d_kvm_debug)
 printk(KERN_WARNING "Could not create 'kvm' debugfs directory\n");


@@ -113,10 +113,10 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2
 u64 before, delta, whole_msecs;
 int remainder_ns, decimal_msecs, thishandled;
-before = local_clock();
+before = sched_clock();
 thishandled = a->handler(type, regs);
 handled += thishandled;
-delta = local_clock() - before;
+delta = sched_clock() - before;
 trace_nmi_handler(a->handler, (int)delta, thishandled);
 if (delta < nmi_longest_ns)


@ -1122,7 +1122,7 @@ ENDPROC(fast_syscall_spill_registers)
* a3: exctable, original value in excsave1 * a3: exctable, original value in excsave1
*/ */
fast_syscall_spill_registers_fixup: ENTRY(fast_syscall_spill_registers_fixup)
rsr a2, windowbase # get current windowbase (a2 is saved) rsr a2, windowbase # get current windowbase (a2 is saved)
xsr a0, depc # restore depc and a0 xsr a0, depc # restore depc and a0
@ -1134,22 +1134,26 @@ fast_syscall_spill_registers_fixup:
*/ */
xsr a3, excsave1 # get spill-mask xsr a3, excsave1 # get spill-mask
slli a2, a3, 1 # shift left by one slli a3, a3, 1 # shift left by one
slli a3, a2, 32-WSBITS slli a2, a3, 32-WSBITS
src a2, a2, a3 # a1 = xxwww1yyxxxwww1yy...... src a2, a3, a2 # a2 = xxwww1yyxxxwww1yy......
wsr a2, windowstart # set corrected windowstart wsr a2, windowstart # set corrected windowstart
rsr a3, excsave1 srli a3, a3, 1
l32i a2, a3, EXC_TABLE_DOUBLE_SAVE # restore a2 rsr a2, excsave1
l32i a3, a3, EXC_TABLE_PARAM # original WB (in user task) l32i a2, a2, EXC_TABLE_DOUBLE_SAVE # restore a2
xsr a2, excsave1
s32i a3, a2, EXC_TABLE_DOUBLE_SAVE # save a3
l32i a3, a2, EXC_TABLE_PARAM # original WB (in user task)
xsr a2, excsave1
/* Return to the original (user task) WINDOWBASE. /* Return to the original (user task) WINDOWBASE.
* We leave the following frame behind: * We leave the following frame behind:
* a0, a1, a2 same * a0, a1, a2 same
* a3: trashed (saved in excsave_1) * a3: trashed (saved in EXC_TABLE_DOUBLE_SAVE)
* depc: depc (we have to return to that address) * depc: depc (we have to return to that address)
* excsave_1: a3 * excsave_1: exctable
*/ */
wsr a3, windowbase wsr a3, windowbase
@ -1159,9 +1163,9 @@ fast_syscall_spill_registers_fixup:
* a0: return address * a0: return address
* a1: used, stack pointer * a1: used, stack pointer
* a2: kernel stack pointer * a2: kernel stack pointer
* a3: available, saved in EXCSAVE_1 * a3: available
* depc: exception address * depc: exception address
* excsave: a3 * excsave: exctable
* Note: This frame might be the same as above. * Note: This frame might be the same as above.
*/ */
@ -1181,9 +1185,12 @@ fast_syscall_spill_registers_fixup:
rsr a0, exccause rsr a0, exccause
addx4 a0, a0, a3 # find entry in table addx4 a0, a0, a3 # find entry in table
l32i a0, a0, EXC_TABLE_FAST_USER # load handler l32i a0, a0, EXC_TABLE_FAST_USER # load handler
l32i a3, a3, EXC_TABLE_DOUBLE_SAVE
jx a0 jx a0
fast_syscall_spill_registers_fixup_return: ENDPROC(fast_syscall_spill_registers_fixup)
ENTRY(fast_syscall_spill_registers_fixup_return)
/* When we return here, all registers have been restored (a2: DEPC) */ /* When we return here, all registers have been restored (a2: DEPC) */
@ -1191,13 +1198,13 @@ fast_syscall_spill_registers_fixup_return:
/* Restore fixup handler. */ /* Restore fixup handler. */
xsr a3, excsave1 rsr a2, excsave1
movi a2, fast_syscall_spill_registers_fixup s32i a3, a2, EXC_TABLE_DOUBLE_SAVE
s32i a2, a3, EXC_TABLE_FIXUP movi a3, fast_syscall_spill_registers_fixup
s32i a0, a3, EXC_TABLE_DOUBLE_SAVE s32i a3, a2, EXC_TABLE_FIXUP
rsr a2, windowbase rsr a3, windowbase
s32i a2, a3, EXC_TABLE_PARAM s32i a3, a2, EXC_TABLE_PARAM
l32i a2, a3, EXC_TABLE_KSTK l32i a2, a2, EXC_TABLE_KSTK
/* Load WB at the time the exception occurred. */ /* Load WB at the time the exception occurred. */
@ -1206,8 +1213,12 @@ fast_syscall_spill_registers_fixup_return:
wsr a3, windowbase wsr a3, windowbase
rsync rsync
rsr a3, excsave1
l32i a3, a3, EXC_TABLE_DOUBLE_SAVE
rfde rfde
ENDPROC(fast_syscall_spill_registers_fixup_return)
/* /*
* spill all registers. * spill all registers.


@@ -341,7 +341,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 sp = regs->areg[1];
-if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! on_sig_stack(sp)) {
+if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && sas_ss_flags(sp) == 0) {
 sp = current->sas_ss_sp + current->sas_ss_size;
 }


@@ -737,7 +737,8 @@ static int __init iss_net_setup(char *str)
 return 1;
 }
-if ((new = alloc_bootmem(sizeof new)) == NULL) {
+new = alloc_bootmem(sizeof(*new));
+if (new == NULL) {
 printk("Alloc_bootmem failed\n");
 return 1;
 }
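The hunk above replaces alloc_bootmem(sizeof new) with alloc_bootmem(sizeof(*new)): the old call allocated only pointer-sized storage. A small userspace illustration of the difference, with malloc standing in for alloc_bootmem and made-up struct fields:

#include <stdio.h>
#include <stdlib.h>

struct iss_net_private {   /* fields are illustrative only */
    char name[32];
    int index;
};

int main(void)
{
    struct iss_net_private *new_dev;

    printf("sizeof(new_dev)  = %zu\n", sizeof(new_dev));   /* size of the pointer, e.g. 8 */
    printf("sizeof(*new_dev) = %zu\n", sizeof(*new_dev));  /* size of the whole struct */

    new_dev = malloc(sizeof(*new_dev)); /* correct: allocate the struct, not the pointer */
    if (new_dev == NULL)
        return 1;
    free(new_dev);
    return 0;
}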


@@ -27,6 +27,14 @@
 */
 #define SRC_CR 0x00U
+#define SRC_CR_T0_ENSEL BIT(15)
+#define SRC_CR_T1_ENSEL BIT(17)
+#define SRC_CR_T2_ENSEL BIT(19)
+#define SRC_CR_T3_ENSEL BIT(21)
+#define SRC_CR_T4_ENSEL BIT(23)
+#define SRC_CR_T5_ENSEL BIT(25)
+#define SRC_CR_T6_ENSEL BIT(27)
+#define SRC_CR_T7_ENSEL BIT(29)
 #define SRC_XTALCR 0x0CU
 #define SRC_XTALCR_XTALTIMEN BIT(20)
 #define SRC_XTALCR_SXTALDIS BIT(19)
@@ -543,6 +551,19 @@ void __init nomadik_clk_init(void)
 __func__, np->name);
 return;
 }
+/* Set all timers to use the 2.4 MHz TIMCLK */
+val = readl(src_base + SRC_CR);
+val |= SRC_CR_T0_ENSEL;
+val |= SRC_CR_T1_ENSEL;
+val |= SRC_CR_T2_ENSEL;
+val |= SRC_CR_T3_ENSEL;
+val |= SRC_CR_T4_ENSEL;
+val |= SRC_CR_T5_ENSEL;
+val |= SRC_CR_T6_ENSEL;
+val |= SRC_CR_T7_ENSEL;
+writel(val, src_base + SRC_CR);
 val = readl(src_base + SRC_XTALCR);
 pr_info("SXTALO is %s\n",
 (val & SRC_XTALCR_SXTALDIS) ? "disabled" : "enabled");


@@ -39,8 +39,8 @@ static const struct coreclk_ratio a370_coreclk_ratios[] __initconst = {
 };
 static const u32 a370_tclk_freqs[] __initconst = {
-16600000,
-20000000,
+166000000,
+200000000,
 };
 static u32 __init a370_get_tclk_freq(void __iomem *sar)


@@ -49,7 +49,7 @@
 #define SOCFPGA_L4_SP_CLK "l4_sp_clk"
 #define SOCFPGA_NAND_CLK "nand_clk"
 #define SOCFPGA_NAND_X_CLK "nand_x_clk"
-#define SOCFPGA_MMC_CLK "mmc_clk"
+#define SOCFPGA_MMC_CLK "sdmmc_clk"
 #define SOCFPGA_DB_CLK "gpio_db_clk"
 #define div_mask(width) ((1 << (width)) - 1)


@@ -107,7 +107,7 @@ static int icst_set_rate(struct clk_hw *hw, unsigned long rate,
 vco = icst_hz_to_vco(icst->params, rate);
 icst->rate = icst_hz(icst->params, vco);
-vco_set(icst->vcoreg, icst->lockreg, vco);
+vco_set(icst->lockreg, icst->vcoreg, vco);
 return 0;
 }


@@ -61,7 +61,7 @@ static int drm_version(struct drm_device *dev, void *data,
 /** Ioctl table */
 static const struct drm_ioctl_desc drm_ioctls[] = {
-DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED),
+DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW),
 DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
 DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),


@ -83,8 +83,7 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
return true; return true;
} }
static void intel_crt_get_config(struct intel_encoder *encoder, static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
struct intel_crtc_config *pipe_config)
{ {
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_crt *crt = intel_encoder_to_crt(encoder); struct intel_crt *crt = intel_encoder_to_crt(encoder);
@ -102,7 +101,25 @@ static void intel_crt_get_config(struct intel_encoder *encoder,
else else
flags |= DRM_MODE_FLAG_NVSYNC; flags |= DRM_MODE_FLAG_NVSYNC;
pipe_config->adjusted_mode.flags |= flags; return flags;
}
static void intel_crt_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
}
static void hsw_crt_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
intel_ddi_get_config(encoder, pipe_config);
pipe_config->adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
DRM_MODE_FLAG_NHSYNC |
DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_NVSYNC);
pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
} }
/* Note: The caller is required to filter out dpms modes not supported by the /* Note: The caller is required to filter out dpms modes not supported by the
@ -799,7 +816,10 @@ void intel_crt_init(struct drm_device *dev)
crt->base.mode_set = intel_crt_mode_set; crt->base.mode_set = intel_crt_mode_set;
crt->base.disable = intel_disable_crt; crt->base.disable = intel_disable_crt;
crt->base.enable = intel_enable_crt; crt->base.enable = intel_enable_crt;
crt->base.get_config = intel_crt_get_config; if (IS_HASWELL(dev))
crt->base.get_config = hsw_crt_get_config;
else
crt->base.get_config = intel_crt_get_config;
if (I915_HAS_HOTPLUG(dev)) if (I915_HAS_HOTPLUG(dev))
crt->base.hpd_pin = HPD_CRT; crt->base.hpd_pin = HPD_CRT;
if (HAS_DDI(dev)) if (HAS_DDI(dev))


@ -1249,8 +1249,8 @@ static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
intel_dp_check_link_status(intel_dp); intel_dp_check_link_status(intel_dp);
} }
static void intel_ddi_get_config(struct intel_encoder *encoder, void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config) struct intel_crtc_config *pipe_config)
{ {
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
@ -1268,6 +1268,23 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
flags |= DRM_MODE_FLAG_NVSYNC; flags |= DRM_MODE_FLAG_NVSYNC;
pipe_config->adjusted_mode.flags |= flags; pipe_config->adjusted_mode.flags |= flags;
switch (temp & TRANS_DDI_BPC_MASK) {
case TRANS_DDI_BPC_6:
pipe_config->pipe_bpp = 18;
break;
case TRANS_DDI_BPC_8:
pipe_config->pipe_bpp = 24;
break;
case TRANS_DDI_BPC_10:
pipe_config->pipe_bpp = 30;
break;
case TRANS_DDI_BPC_12:
pipe_config->pipe_bpp = 36;
break;
default:
break;
}
} }
static void intel_ddi_destroy(struct drm_encoder *encoder) static void intel_ddi_destroy(struct drm_encoder *encoder)


@ -2327,9 +2327,10 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
FDI_FE_ERRC_ENABLE); FDI_FE_ERRC_ENABLE);
} }
static bool pipe_has_enabled_pch(struct intel_crtc *intel_crtc) static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
{ {
return intel_crtc->base.enabled && intel_crtc->config.has_pch_encoder; return crtc->base.enabled && crtc->active &&
crtc->config.has_pch_encoder;
} }
static void ivb_modeset_global_resources(struct drm_device *dev) static void ivb_modeset_global_resources(struct drm_device *dev)
@ -2979,6 +2980,48 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
I915_READ(VSYNCSHIFT(cpu_transcoder))); I915_READ(VSYNCSHIFT(cpu_transcoder)));
} }
static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t temp;
temp = I915_READ(SOUTH_CHICKEN1);
if (temp & FDI_BC_BIFURCATION_SELECT)
return;
WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
temp |= FDI_BC_BIFURCATION_SELECT;
DRM_DEBUG_KMS("enabling fdi C rx\n");
I915_WRITE(SOUTH_CHICKEN1, temp);
POSTING_READ(SOUTH_CHICKEN1);
}
static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
switch (intel_crtc->pipe) {
case PIPE_A:
break;
case PIPE_B:
if (intel_crtc->config.fdi_lanes > 2)
WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
else
cpt_enable_fdi_bc_bifurcation(dev);
break;
case PIPE_C:
cpt_enable_fdi_bc_bifurcation(dev);
break;
default:
BUG();
}
}
/* /*
* Enable PCH resources required for PCH ports: * Enable PCH resources required for PCH ports:
* - PCH PLLs * - PCH PLLs
@ -2997,6 +3040,9 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
assert_pch_transcoder_disabled(dev_priv, pipe); assert_pch_transcoder_disabled(dev_priv, pipe);
if (IS_IVYBRIDGE(dev))
ivybridge_update_fdi_bc_bifurcation(intel_crtc);
/* Write the TU size bits before fdi link training, so that error /* Write the TU size bits before fdi link training, so that error
* detection works. */ * detection works. */
I915_WRITE(FDI_RX_TUSIZE1(pipe), I915_WRITE(FDI_RX_TUSIZE1(pipe),
@ -4983,6 +5029,22 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
if (!(tmp & PIPECONF_ENABLE)) if (!(tmp & PIPECONF_ENABLE))
return false; return false;
if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
switch (tmp & PIPECONF_BPC_MASK) {
case PIPECONF_6BPC:
pipe_config->pipe_bpp = 18;
break;
case PIPECONF_8BPC:
pipe_config->pipe_bpp = 24;
break;
case PIPECONF_10BPC:
pipe_config->pipe_bpp = 30;
break;
default:
break;
}
}
intel_get_pipe_timings(crtc, pipe_config); intel_get_pipe_timings(crtc, pipe_config);
i9xx_get_pfit_config(crtc, pipe_config); i9xx_get_pfit_config(crtc, pipe_config);
@ -5576,48 +5638,6 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
return true; return true;
} }
static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t temp;
temp = I915_READ(SOUTH_CHICKEN1);
if (temp & FDI_BC_BIFURCATION_SELECT)
return;
WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
temp |= FDI_BC_BIFURCATION_SELECT;
DRM_DEBUG_KMS("enabling fdi C rx\n");
I915_WRITE(SOUTH_CHICKEN1, temp);
POSTING_READ(SOUTH_CHICKEN1);
}
static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
switch (intel_crtc->pipe) {
case PIPE_A:
break;
case PIPE_B:
if (intel_crtc->config.fdi_lanes > 2)
WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
else
cpt_enable_fdi_bc_bifurcation(dev);
break;
case PIPE_C:
cpt_enable_fdi_bc_bifurcation(dev);
break;
default:
BUG();
}
}
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp) int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
{ {
/* /*
@ -5811,9 +5831,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
&intel_crtc->config.fdi_m_n); &intel_crtc->config.fdi_m_n);
} }
if (IS_IVYBRIDGE(dev))
ivybridge_update_fdi_bc_bifurcation(intel_crtc);
ironlake_set_pipeconf(crtc); ironlake_set_pipeconf(crtc);
/* Set up the display plane register */ /* Set up the display plane register */
@ -5881,6 +5898,23 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
if (!(tmp & PIPECONF_ENABLE)) if (!(tmp & PIPECONF_ENABLE))
return false; return false;
switch (tmp & PIPECONF_BPC_MASK) {
case PIPECONF_6BPC:
pipe_config->pipe_bpp = 18;
break;
case PIPECONF_8BPC:
pipe_config->pipe_bpp = 24;
break;
case PIPECONF_10BPC:
pipe_config->pipe_bpp = 30;
break;
case PIPECONF_12BPC:
pipe_config->pipe_bpp = 36;
break;
default:
break;
}
if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) { if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
struct intel_shared_dpll *pll; struct intel_shared_dpll *pll;
@ -8612,6 +8646,9 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_X(dpll_hw_state.fp0); PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
PIPE_CONF_CHECK_X(dpll_hw_state.fp1); PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);
#undef PIPE_CONF_CHECK_X #undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I #undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_FLAGS #undef PIPE_CONF_CHECK_FLAGS


@@ -1401,6 +1401,26 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
 else
 pipe_config->port_clock = 270000;
 }
+if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
+pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
+/*
+ * This is a big fat ugly hack.
+ *
+ * Some machines in UEFI boot mode provide us a VBT that has 18
+ * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
+ * unknown we fail to light up. Yet the same BIOS boots up with
+ * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
+ * max, not what it tells us to use.
+ *
+ * Note: This will still be broken if the eDP panel is not lit
+ * up by the BIOS, and thus we can't get the mode at module
+ * load.
+ */
+DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
+pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
+dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
+}
 }
 static bool is_edp_psr(struct intel_dp *intel_dp)


@@ -765,6 +765,8 @@ extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
 extern bool
 intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
 extern void intel_ddi_fdi_disable(struct drm_crtc *crtc);
+extern void intel_ddi_get_config(struct intel_encoder *encoder,
+struct intel_crtc_config *pipe_config);
 extern void intel_display_handle_reset(struct drm_device *dev);
 extern bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,


@@ -698,6 +698,22 @@ static const struct dmi_system_id intel_no_lvds[] = {
 DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Q900"),
 },
 },
+{
+.callback = intel_no_lvds_dmi_callback,
+.ident = "Intel D410PT",
+.matches = {
+DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
+DMI_MATCH(DMI_BOARD_NAME, "D410PT"),
+},
+},
+{
+.callback = intel_no_lvds_dmi_callback,
+.ident = "Intel D425KT",
+.matches = {
+DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
+DMI_EXACT_MATCH(DMI_BOARD_NAME, "D425KT"),
+},
+},
 {
 .callback = intel_no_lvds_dmi_callback,
 .ident = "Intel D510MO",


@@ -291,6 +291,7 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
 /* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */
 WREG32(HDMI_ACR_PACKET_CONTROL + offset,
+HDMI_ACR_SOURCE | /* select SW CTS value */
 HDMI_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */
 evergreen_hdmi_update_ACR(encoder, mode->clock);


@@ -2635,7 +2635,7 @@ int kv_dpm_init(struct radeon_device *rdev)
 pi->caps_sclk_ds = true;
 pi->enable_auto_thermal_throttling = true;
 pi->disable_nb_ps3_in_battery = false;
-pi->bapm_enable = true;
+pi->bapm_enable = false;
 pi->voltage_drop_t = 0;
 pi->caps_sclk_throttle_low_notification = false;
 pi->caps_fps = false; /* true? */


@@ -1272,8 +1272,8 @@ struct radeon_blacklist_clocks
 struct radeon_clock_and_voltage_limits {
 u32 sclk;
 u32 mclk;
-u32 vddc;
-u32 vddci;
+u16 vddc;
+u16 vddci;
 };
 struct radeon_clock_array {


@ -1734,6 +1734,7 @@ EXPORT_SYMBOL_GPL(input_class);
*/ */
struct input_dev *input_allocate_device(void) struct input_dev *input_allocate_device(void)
{ {
static atomic_t input_no = ATOMIC_INIT(0);
struct input_dev *dev; struct input_dev *dev;
dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL); dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
@ -1743,9 +1744,13 @@ struct input_dev *input_allocate_device(void)
device_initialize(&dev->dev); device_initialize(&dev->dev);
mutex_init(&dev->mutex); mutex_init(&dev->mutex);
spin_lock_init(&dev->event_lock); spin_lock_init(&dev->event_lock);
init_timer(&dev->timer);
INIT_LIST_HEAD(&dev->h_list); INIT_LIST_HEAD(&dev->h_list);
INIT_LIST_HEAD(&dev->node); INIT_LIST_HEAD(&dev->node);
dev_set_name(&dev->dev, "input%ld",
(unsigned long) atomic_inc_return(&input_no) - 1);
__module_get(THIS_MODULE); __module_get(THIS_MODULE);
} }
@ -2019,7 +2024,6 @@ static void devm_input_device_unregister(struct device *dev, void *res)
*/ */
int input_register_device(struct input_dev *dev) int input_register_device(struct input_dev *dev)
{ {
static atomic_t input_no = ATOMIC_INIT(0);
struct input_devres *devres = NULL; struct input_devres *devres = NULL;
struct input_handler *handler; struct input_handler *handler;
unsigned int packet_size; unsigned int packet_size;
@ -2059,7 +2063,6 @@ int input_register_device(struct input_dev *dev)
* If delay and period are pre-set by the driver, then autorepeating * If delay and period are pre-set by the driver, then autorepeating
* is handled by the driver itself and we don't do it in input.c. * is handled by the driver itself and we don't do it in input.c.
*/ */
init_timer(&dev->timer);
if (!dev->rep[REP_DELAY] && !dev->rep[REP_PERIOD]) { if (!dev->rep[REP_DELAY] && !dev->rep[REP_PERIOD]) {
dev->timer.data = (long) dev; dev->timer.data = (long) dev;
dev->timer.function = input_repeat_key; dev->timer.function = input_repeat_key;
@ -2073,9 +2076,6 @@ int input_register_device(struct input_dev *dev)
if (!dev->setkeycode) if (!dev->setkeycode)
dev->setkeycode = input_default_setkeycode; dev->setkeycode = input_default_setkeycode;
dev_set_name(&dev->dev, "input%ld",
(unsigned long) atomic_inc_return(&input_no) - 1);
error = device_add(&dev->dev); error = device_add(&dev->dev);
if (error) if (error)
goto err_free_vals; goto err_free_vals;


@ -786,10 +786,17 @@ static int pxa27x_keypad_probe(struct platform_device *pdev)
input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP); input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
input_set_capability(input_dev, EV_MSC, MSC_SCAN); input_set_capability(input_dev, EV_MSC, MSC_SCAN);
if (pdata) if (pdata) {
error = pxa27x_keypad_build_keycode(keypad); error = pxa27x_keypad_build_keycode(keypad);
else } else {
error = pxa27x_keypad_build_keycode_from_dt(keypad); error = pxa27x_keypad_build_keycode_from_dt(keypad);
/*
* Data that we get from DT resides in dynamically
* allocated memory so we need to update our pdata
* pointer.
*/
pdata = keypad->pdata;
}
if (error) { if (error) {
dev_err(&pdev->dev, "failed to build keycode\n"); dev_err(&pdev->dev, "failed to build keycode\n");
goto failed_put_clk; goto failed_put_clk;


@ -351,7 +351,9 @@ static void cm109_urb_irq_callback(struct urb *urb)
if (status) { if (status) {
if (status == -ESHUTDOWN) if (status == -ESHUTDOWN)
return; return;
dev_err(&dev->intf->dev, "%s: urb status %d\n", __func__, status); dev_err_ratelimited(&dev->intf->dev, "%s: urb status %d\n",
__func__, status);
goto out;
} }
/* Special keys */ /* Special keys */
@ -418,8 +420,12 @@ static void cm109_urb_ctl_callback(struct urb *urb)
dev->ctl_data->byte[2], dev->ctl_data->byte[2],
dev->ctl_data->byte[3]); dev->ctl_data->byte[3]);
if (status) if (status) {
dev_err(&dev->intf->dev, "%s: urb status %d\n", __func__, status); if (status == -ESHUTDOWN)
return;
dev_err_ratelimited(&dev->intf->dev, "%s: urb status %d\n",
__func__, status);
}
spin_lock(&dev->ctl_submit_lock); spin_lock(&dev->ctl_submit_lock);
@ -427,7 +433,7 @@ static void cm109_urb_ctl_callback(struct urb *urb)
if (likely(!dev->shutdown)) { if (likely(!dev->shutdown)) {
if (dev->buzzer_pending) { if (dev->buzzer_pending || status) {
dev->buzzer_pending = 0; dev->buzzer_pending = 0;
dev->ctl_urb_pending = 1; dev->ctl_urb_pending = 1;
cm109_submit_buzz_toggle(dev); cm109_submit_buzz_toggle(dev);


@@ -103,6 +103,7 @@ static const struct alps_model_info alps_model_data[] = {
 /* Dell Latitude E5500, E6400, E6500, Precision M4400 */
 { { 0x62, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf,
 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },
+{ { 0x73, 0x00, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_DUALPOINT }, /* Dell XT2 */
 { { 0x73, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */
 { { 0x52, 0x01, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff,
 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */


@@ -223,21 +223,26 @@ static int i8042_flush(void)
 {
 unsigned long flags;
 unsigned char data, str;
-int i = 0;
+int count = 0;
+int retval = 0;
 spin_lock_irqsave(&i8042_lock, flags);
-while (((str = i8042_read_status()) & I8042_STR_OBF) && (i < I8042_BUFFER_SIZE)) {
-udelay(50);
-data = i8042_read_data();
-i++;
+while ((str = i8042_read_status()) & I8042_STR_OBF) {
+if (count++ < I8042_BUFFER_SIZE) {
+udelay(50);
+data = i8042_read_data();
 dbg("%02x <- i8042 (flush, %s)\n",
 data, str & I8042_STR_AUXDATA ? "aux" : "kbd");
+} else {
+retval = -EIO;
+break;
+}
 }
 spin_unlock_irqrestore(&i8042_lock, flags);
-return i;
+return retval;
 }
 /*
@@ -849,7 +854,7 @@ static int __init i8042_check_aux(void)
 static int i8042_controller_check(void)
 {
-if (i8042_flush() == I8042_BUFFER_SIZE) {
+if (i8042_flush()) {
 pr_err("No controller found\n");
 return -ENODEV;
 }
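The reworked i8042_flush() above drains the controller's output buffer but reports an error if the buffer never empties, instead of returning a byte count for the caller to compare against the buffer size. A hedged plain-C sketch of that drain-with-bailout pattern (the status/data helpers only simulate a stuck controller and are not the real port accessors):

#include <stdio.h>

#define BUFFER_SIZE 16

/* Simulated hardware: pretend 40 stale bytes are pending. */
static int pending = 40;
static int status_has_data(void) { return pending > 0; }
static int read_data(void) { return pending--; }

/* Drain the buffer; if more than BUFFER_SIZE bytes keep arriving,
 * assume the controller is absent or stuck and return an error. */
static int flush_controller(void)
{
    int count = 0;

    while (status_has_data()) {
        if (count++ < BUFFER_SIZE)
            (void)read_data();  /* discard a stale byte */
        else
            return -1;
    }
    return 0;
}

int main(void)
{
    printf("flush: %d\n", flush_controller()); /* prints -1: buffer never drained */
    return 0;
}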


@@ -1031,6 +1031,7 @@ static void wacom_destroy_leds(struct wacom *wacom)
 }
 static enum power_supply_property wacom_battery_props[] = {
+POWER_SUPPLY_PROP_SCOPE,
 POWER_SUPPLY_PROP_CAPACITY
 };
@@ -1042,6 +1043,9 @@ static int wacom_battery_get_property(struct power_supply *psy,
 int ret = 0;
 switch (psp) {
+case POWER_SUPPLY_PROP_SCOPE:
+val->intval = POWER_SUPPLY_SCOPE_DEVICE;
+break;
 case POWER_SUPPLY_PROP_CAPACITY:
 val->intval =
 wacom->wacom_wac.battery_capacity * 100 / 31;


@@ -2054,6 +2054,12 @@ static const struct wacom_features wacom_features_0x101 =
 static const struct wacom_features wacom_features_0x10D =
 { "Wacom ISDv4 10D", WACOM_PKGLEN_MTTPC, 26202, 16325, 255,
 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x10E =
+{ "Wacom ISDv4 10E", WACOM_PKGLEN_MTTPC, 27760, 15694, 255,
+0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x10F =
+{ "Wacom ISDv4 10F", WACOM_PKGLEN_MTTPC, 27760, 15694, 255,
+0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
 static const struct wacom_features wacom_features_0x4001 =
 { "Wacom ISDv4 4001", WACOM_PKGLEN_MTTPC, 26202, 16325, 255,
 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -2248,6 +2254,8 @@ const struct usb_device_id wacom_ids[] = {
 { USB_DEVICE_WACOM(0x100) },
 { USB_DEVICE_WACOM(0x101) },
 { USB_DEVICE_WACOM(0x10D) },
+{ USB_DEVICE_WACOM(0x10E) },
+{ USB_DEVICE_WACOM(0x10F) },
 { USB_DEVICE_WACOM(0x300) },
 { USB_DEVICE_WACOM(0x301) },
 { USB_DEVICE_WACOM(0x304) },


@@ -552,9 +552,8 @@ static void __ref enable_slot(struct acpiphp_slot *slot)
 struct acpiphp_func *func;
 int max, pass;
 LIST_HEAD(add_list);
-int nr_found;
-nr_found = acpiphp_rescan_slot(slot);
+acpiphp_rescan_slot(slot);
 max = acpiphp_max_busnr(bus);
 for (pass = 0; pass < 2; pass++) {
 list_for_each_entry(dev, &bus->devices, bus_list) {
@@ -574,9 +573,6 @@ static void __ref enable_slot(struct acpiphp_slot *slot)
 }
 }
 __pci_bus_assign_resources(bus, &add_list, NULL);
-/* Nothing more to do here if there are no new devices on this bus. */
-if (!nr_found && (slot->flags & SLOT_ENABLED))
-return;
 acpiphp_sanitize_bus(bus);
 acpiphp_set_hpp_values(bus);


@@ -771,6 +771,8 @@ static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long
 static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
 {
 struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
+if (!capable(CAP_SYS_RAWIO))
+return -EPERM;
 return aac_compat_do_ioctl(dev, cmd, (unsigned long)arg);
 }


@ -105,8 +105,11 @@ static int scatter_elem_sz_prev = SG_SCATTER_SZ;
static int sg_add(struct device *, struct class_interface *); static int sg_add(struct device *, struct class_interface *);
static void sg_remove(struct device *, struct class_interface *); static void sg_remove(struct device *, struct class_interface *);
static DEFINE_SPINLOCK(sg_open_exclusive_lock);
static DEFINE_IDR(sg_index_idr); static DEFINE_IDR(sg_index_idr);
static DEFINE_RWLOCK(sg_index_lock); static DEFINE_RWLOCK(sg_index_lock); /* Also used to lock
file descriptor list for device */
static struct class_interface sg_interface = { static struct class_interface sg_interface = {
.add_dev = sg_add, .add_dev = sg_add,
@ -143,7 +146,8 @@ typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
} Sg_request; } Sg_request;
typedef struct sg_fd { /* holds the state of a file descriptor */ typedef struct sg_fd { /* holds the state of a file descriptor */
struct list_head sfd_siblings; /* protected by sfd_lock of device */ /* sfd_siblings is protected by sg_index_lock */
struct list_head sfd_siblings;
struct sg_device *parentdp; /* owning device */ struct sg_device *parentdp; /* owning device */
wait_queue_head_t read_wait; /* queue read until command done */ wait_queue_head_t read_wait; /* queue read until command done */
rwlock_t rq_list_lock; /* protect access to list in req_arr */ rwlock_t rq_list_lock; /* protect access to list in req_arr */
@ -166,12 +170,13 @@ typedef struct sg_fd { /* holds the state of a file descriptor */
typedef struct sg_device { /* holds the state of each scsi generic device */ typedef struct sg_device { /* holds the state of each scsi generic device */
struct scsi_device *device; struct scsi_device *device;
wait_queue_head_t o_excl_wait; /* queue open() when O_EXCL in use */
int sg_tablesize; /* adapter's max scatter-gather table size */ int sg_tablesize; /* adapter's max scatter-gather table size */
u32 index; /* device index number */ u32 index; /* device index number */
spinlock_t sfd_lock; /* protect file descriptor list for device */ /* sfds is protected by sg_index_lock */
struct list_head sfds; struct list_head sfds;
struct rw_semaphore o_sem; /* exclude open should hold this rwsem */
volatile char detached; /* 0->attached, 1->detached pending removal */ volatile char detached; /* 0->attached, 1->detached pending removal */
/* exclude protected by sg_open_exclusive_lock */
char exclude; /* opened for exclusive access */ char exclude; /* opened for exclusive access */
char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */ char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */
struct gendisk *disk; struct gendisk *disk;
@ -220,14 +225,35 @@ static int sg_allow_access(struct file *filp, unsigned char *cmd)
return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE); return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE);
} }
static int get_exclude(Sg_device *sdp)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&sg_open_exclusive_lock, flags);
ret = sdp->exclude;
spin_unlock_irqrestore(&sg_open_exclusive_lock, flags);
return ret;
}
static int set_exclude(Sg_device *sdp, char val)
{
unsigned long flags;
spin_lock_irqsave(&sg_open_exclusive_lock, flags);
sdp->exclude = val;
spin_unlock_irqrestore(&sg_open_exclusive_lock, flags);
return val;
}
static int sfds_list_empty(Sg_device *sdp) static int sfds_list_empty(Sg_device *sdp)
{ {
unsigned long flags; unsigned long flags;
int ret; int ret;
spin_lock_irqsave(&sdp->sfd_lock, flags); read_lock_irqsave(&sg_index_lock, flags);
ret = list_empty(&sdp->sfds); ret = list_empty(&sdp->sfds);
spin_unlock_irqrestore(&sdp->sfd_lock, flags); read_unlock_irqrestore(&sg_index_lock, flags);
return ret; return ret;
} }
@ -239,6 +265,7 @@ sg_open(struct inode *inode, struct file *filp)
struct request_queue *q; struct request_queue *q;
Sg_device *sdp; Sg_device *sdp;
Sg_fd *sfp; Sg_fd *sfp;
int res;
int retval; int retval;
nonseekable_open(inode, filp); nonseekable_open(inode, filp);
@ -267,52 +294,54 @@ sg_open(struct inode *inode, struct file *filp)
goto error_out; goto error_out;
} }
if ((flags & O_EXCL) && (O_RDONLY == (flags & O_ACCMODE))) { if (flags & O_EXCL) {
retval = -EPERM; /* Can't lock it with read only access */ if (O_RDONLY == (flags & O_ACCMODE)) {
retval = -EPERM; /* Can't lock it with read only access */
goto error_out;
}
if (!sfds_list_empty(sdp) && (flags & O_NONBLOCK)) {
retval = -EBUSY;
goto error_out;
}
res = wait_event_interruptible(sdp->o_excl_wait,
((!sfds_list_empty(sdp) || get_exclude(sdp)) ? 0 : set_exclude(sdp, 1)));
if (res) {
retval = res; /* -ERESTARTSYS because signal hit process */
goto error_out;
}
} else if (get_exclude(sdp)) { /* some other fd has an exclusive lock on dev */
if (flags & O_NONBLOCK) {
retval = -EBUSY;
goto error_out;
}
res = wait_event_interruptible(sdp->o_excl_wait, !get_exclude(sdp));
if (res) {
retval = res; /* -ERESTARTSYS because signal hit process */
goto error_out;
}
}
if (sdp->detached) {
retval = -ENODEV;
goto error_out; goto error_out;
} }
if (flags & O_NONBLOCK) {
if (flags & O_EXCL) {
if (!down_write_trylock(&sdp->o_sem)) {
retval = -EBUSY;
goto error_out;
}
} else {
if (!down_read_trylock(&sdp->o_sem)) {
retval = -EBUSY;
goto error_out;
}
}
} else {
if (flags & O_EXCL)
down_write(&sdp->o_sem);
else
down_read(&sdp->o_sem);
}
/* Since write lock is held, no need to check sfd_list */
if (flags & O_EXCL)
sdp->exclude = 1; /* used by release lock */
if (sfds_list_empty(sdp)) { /* no existing opens on this device */ if (sfds_list_empty(sdp)) { /* no existing opens on this device */
sdp->sgdebug = 0; sdp->sgdebug = 0;
q = sdp->device->request_queue; q = sdp->device->request_queue;
sdp->sg_tablesize = queue_max_segments(q); sdp->sg_tablesize = queue_max_segments(q);
} }
sfp = sg_add_sfp(sdp, dev); if ((sfp = sg_add_sfp(sdp, dev)))
if (!IS_ERR(sfp))
filp->private_data = sfp; filp->private_data = sfp;
/* retval is already provably zero at this point because of the
* check after retval = scsi_autopm_get_device(sdp->device))
*/
else { else {
retval = PTR_ERR(sfp);
if (flags & O_EXCL) { if (flags & O_EXCL) {
sdp->exclude = 0; /* undo if error */ set_exclude(sdp, 0); /* undo if error */
up_write(&sdp->o_sem); wake_up_interruptible(&sdp->o_excl_wait);
} else }
up_read(&sdp->o_sem); retval = -ENOMEM;
goto error_out;
}
retval = 0;
error_out: error_out:
if (retval) {
scsi_autopm_put_device(sdp->device); scsi_autopm_put_device(sdp->device);
sdp_put: sdp_put:
scsi_device_put(sdp->device); scsi_device_put(sdp->device);
@ -329,18 +358,13 @@ sg_release(struct inode *inode, struct file *filp)
{ {
Sg_device *sdp; Sg_device *sdp;
Sg_fd *sfp; Sg_fd *sfp;
int excl;
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO; return -ENXIO;
SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name)); SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name));
excl = sdp->exclude; set_exclude(sdp, 0);
sdp->exclude = 0; wake_up_interruptible(&sdp->o_excl_wait);
if (excl)
up_write(&sdp->o_sem);
else
up_read(&sdp->o_sem);
scsi_autopm_put_device(sdp->device); scsi_autopm_put_device(sdp->device);
kref_put(&sfp->f_ref, sg_remove_sfp); kref_put(&sfp->f_ref, sg_remove_sfp);
@ -1391,9 +1415,8 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
disk->first_minor = k; disk->first_minor = k;
sdp->disk = disk; sdp->disk = disk;
sdp->device = scsidp; sdp->device = scsidp;
spin_lock_init(&sdp->sfd_lock);
INIT_LIST_HEAD(&sdp->sfds); INIT_LIST_HEAD(&sdp->sfds);
init_rwsem(&sdp->o_sem); init_waitqueue_head(&sdp->o_excl_wait);
sdp->sg_tablesize = queue_max_segments(q); sdp->sg_tablesize = queue_max_segments(q);
sdp->index = k; sdp->index = k;
kref_init(&sdp->d_ref); kref_init(&sdp->d_ref);
@ -1526,13 +1549,11 @@ static void sg_remove(struct device *cl_dev, struct class_interface *cl_intf)
/* Need a write lock to set sdp->detached. */ /* Need a write lock to set sdp->detached. */
write_lock_irqsave(&sg_index_lock, iflags); write_lock_irqsave(&sg_index_lock, iflags);
spin_lock(&sdp->sfd_lock);
sdp->detached = 1; sdp->detached = 1;
list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) { list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) {
wake_up_interruptible(&sfp->read_wait); wake_up_interruptible(&sfp->read_wait);
kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP); kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP);
} }
spin_unlock(&sdp->sfd_lock);
write_unlock_irqrestore(&sg_index_lock, iflags); write_unlock_irqrestore(&sg_index_lock, iflags);
sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic"); sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
@ -2043,7 +2064,7 @@ sg_add_sfp(Sg_device * sdp, int dev)
sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN); sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN);
if (!sfp) if (!sfp)
return ERR_PTR(-ENOMEM); return NULL;
init_waitqueue_head(&sfp->read_wait); init_waitqueue_head(&sfp->read_wait);
rwlock_init(&sfp->rq_list_lock); rwlock_init(&sfp->rq_list_lock);
@ -2057,13 +2078,9 @@ sg_add_sfp(Sg_device * sdp, int dev)
sfp->cmd_q = SG_DEF_COMMAND_Q; sfp->cmd_q = SG_DEF_COMMAND_Q;
sfp->keep_orphan = SG_DEF_KEEP_ORPHAN; sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
sfp->parentdp = sdp; sfp->parentdp = sdp;
spin_lock_irqsave(&sdp->sfd_lock, iflags); write_lock_irqsave(&sg_index_lock, iflags);
if (sdp->detached) {
spin_unlock_irqrestore(&sdp->sfd_lock, iflags);
return ERR_PTR(-ENODEV);
}
list_add_tail(&sfp->sfd_siblings, &sdp->sfds); list_add_tail(&sfp->sfd_siblings, &sdp->sfds);
spin_unlock_irqrestore(&sdp->sfd_lock, iflags); write_unlock_irqrestore(&sg_index_lock, iflags);
SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp)); SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp));
if (unlikely(sg_big_buff != def_reserved_size)) if (unlikely(sg_big_buff != def_reserved_size))
sg_big_buff = def_reserved_size; sg_big_buff = def_reserved_size;
@ -2113,9 +2130,10 @@ static void sg_remove_sfp(struct kref *kref)
struct sg_device *sdp = sfp->parentdp; struct sg_device *sdp = sfp->parentdp;
unsigned long iflags; unsigned long iflags;
spin_lock_irqsave(&sdp->sfd_lock, iflags); write_lock_irqsave(&sg_index_lock, iflags);
list_del(&sfp->sfd_siblings); list_del(&sfp->sfd_siblings);
spin_unlock_irqrestore(&sdp->sfd_lock, iflags); write_unlock_irqrestore(&sg_index_lock, iflags);
wake_up_interruptible(&sdp->o_excl_wait);
INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext); INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext);
schedule_work(&sfp->ew.work); schedule_work(&sfp->ew.work);
@ -2502,7 +2520,7 @@ static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
return 0; return 0;
} }
/* must be called while holding sg_index_lock and sfd_lock */ /* must be called while holding sg_index_lock */
static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp) static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
{ {
int k, m, new_interface, blen, usg; int k, m, new_interface, blen, usg;
@ -2587,26 +2605,22 @@ static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
read_lock_irqsave(&sg_index_lock, iflags); read_lock_irqsave(&sg_index_lock, iflags);
sdp = it ? sg_lookup_dev(it->index) : NULL; sdp = it ? sg_lookup_dev(it->index) : NULL;
if (sdp) { if (sdp && !list_empty(&sdp->sfds)) {
spin_lock(&sdp->sfd_lock); struct scsi_device *scsidp = sdp->device;
if (!list_empty(&sdp->sfds)) {
struct scsi_device *scsidp = sdp->device;
seq_printf(s, " >>> device=%s ", sdp->disk->disk_name); seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
if (sdp->detached) if (sdp->detached)
seq_printf(s, "detached pending close "); seq_printf(s, "detached pending close ");
else else
seq_printf seq_printf
(s, "scsi%d chan=%d id=%d lun=%d em=%d", (s, "scsi%d chan=%d id=%d lun=%d em=%d",
scsidp->host->host_no, scsidp->host->host_no,
scsidp->channel, scsidp->id, scsidp->channel, scsidp->id,
scsidp->lun, scsidp->lun,
scsidp->host->hostt->emulated); scsidp->host->hostt->emulated);
seq_printf(s, " sg_tablesize=%d excl=%d\n", seq_printf(s, " sg_tablesize=%d excl=%d\n",
sdp->sg_tablesize, sdp->exclude); sdp->sg_tablesize, get_exclude(sdp));
sg_proc_debug_helper(s, sdp); sg_proc_debug_helper(s, sdp);
}
spin_unlock(&sdp->sfd_lock);
} }
read_unlock_irqrestore(&sg_index_lock, iflags); read_unlock_irqrestore(&sg_index_lock, iflags);
return 0; return 0;


@@ -1960,6 +1960,7 @@ cntrlEnd:
 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Called IOCTL_BCM_GET_DEVICE_DRIVER_INFO\n");
+memset(&DevInfo, 0, sizeof(DevInfo));
 DevInfo.MaxRDMBufferSize = BUFFER_4K;
 DevInfo.u32DSDStartOffset = EEPROM_CALPARAM_START;
 DevInfo.u32RxAlignmentCorrection = 0;


@@ -155,6 +155,9 @@ static ssize_t oz_cdev_write(struct file *filp, const char __user *buf,
 struct oz_app_hdr *app_hdr;
 struct oz_serial_ctx *ctx;
+if (count > sizeof(ei->data) - sizeof(*elt) - sizeof(*app_hdr))
+return -EINVAL;
 spin_lock_bh(&g_cdev.lock);
 pd = g_cdev.active_pd;
 if (pd)


@@ -1063,7 +1063,7 @@ static int mp_wait_modem_status(struct sb_uart_state *state, unsigned long arg)
 static int mp_get_count(struct sb_uart_state *state, struct serial_icounter_struct *icnt)
 {
-struct serial_icounter_struct icount;
+struct serial_icounter_struct icount = {};
 struct sb_uart_icount cnow;
 struct sb_uart_port *port = state->port;
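This hunk and the DevInfo memset() a few hunks earlier fix the same class of bug: a structure that is later copied to user space must be fully zeroed first, or uninitialized stack bytes leak out. A minimal plain-C illustration (the struct layout and its padding are hypothetical, not the driver's real type):

#include <stdio.h>
#include <string.h>

struct icounter {   /* hypothetical layout with implicit padding */
    char flag;
    /* compilers typically insert 3 padding bytes here */
    int rx;
    int tx;
};

int main(void)
{
    struct icounter a;          /* uninitialized: every byte is indeterminate */
    struct icounter b = {0};    /* named fields zeroed; padding is usually, but not always, cleared */
    struct icounter c;

    memset(&c, 0, sizeof(c));   /* memset guarantees every byte, padding included, is zero */

    (void)a;
    printf("b.rx=%d c.rx=%d\n", b.rx, c.rx);
    return 0;
}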


@ -570,6 +570,7 @@ int wvlan_uil_put_info(struct uilreq *urq, struct wl_private *lp)
ltv_t *pLtv; ltv_t *pLtv;
bool_t ltvAllocated = FALSE; bool_t ltvAllocated = FALSE;
ENCSTRCT sEncryption; ENCSTRCT sEncryption;
size_t len;
#ifdef USE_WDS #ifdef USE_WDS
hcf_16 hcfPort = HCF_PORT_0; hcf_16 hcfPort = HCF_PORT_0;
@ -686,7 +687,8 @@ int wvlan_uil_put_info(struct uilreq *urq, struct wl_private *lp)
break; break;
case CFG_CNF_OWN_NAME: case CFG_CNF_OWN_NAME:
memset(lp->StationName, 0, sizeof(lp->StationName)); memset(lp->StationName, 0, sizeof(lp->StationName));
memcpy((void *)lp->StationName, (void *)&pLtv->u.u8[2], (size_t)pLtv->u.u16[0]); len = min_t(size_t, pLtv->u.u16[0], sizeof(lp->StationName));
strlcpy(lp->StationName, &pLtv->u.u8[2], len);
pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]); pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
break; break;
case CFG_CNF_LOAD_BALANCING: case CFG_CNF_LOAD_BALANCING:
@ -1783,6 +1785,7 @@ int wvlan_set_station_nickname(struct net_device *dev,
{ {
struct wl_private *lp = wl_priv(dev); struct wl_private *lp = wl_priv(dev);
unsigned long flags; unsigned long flags;
size_t len;
int ret = 0; int ret = 0;
/*------------------------------------------------------------------------*/ /*------------------------------------------------------------------------*/
@ -1793,8 +1796,8 @@ int wvlan_set_station_nickname(struct net_device *dev,
wl_lock(lp, &flags); wl_lock(lp, &flags);
memset(lp->StationName, 0, sizeof(lp->StationName)); memset(lp->StationName, 0, sizeof(lp->StationName));
len = min_t(size_t, wrqu->data.length, sizeof(lp->StationName));
memcpy(lp->StationName, extra, wrqu->data.length); strlcpy(lp->StationName, extra, len);
/* Commit the adapter parameters */ /* Commit the adapter parameters */
wl_apply(lp); wl_apply(lp);


@ -1499,7 +1499,7 @@ static void atmel_set_ops(struct uart_port *port)
/* /*
* Get ip name usart or uart * Get ip name usart or uart
*/ */
static int atmel_get_ip_name(struct uart_port *port) static void atmel_get_ip_name(struct uart_port *port)
{ {
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
int name = UART_GET_IP_NAME(port); int name = UART_GET_IP_NAME(port);
@ -1518,10 +1518,7 @@ static int atmel_get_ip_name(struct uart_port *port)
atmel_port->is_usart = false; atmel_port->is_usart = false;
} else { } else {
dev_err(port->dev, "Not supported ip name, set to uart\n"); dev_err(port->dev, "Not supported ip name, set to uart\n");
return -EINVAL;
} }
return 0;
} }
/* /*
@ -2405,9 +2402,7 @@ static int atmel_serial_probe(struct platform_device *pdev)
/* /*
* Get port name of usart or uart * Get port name of usart or uart
*/ */
ret = atmel_get_ip_name(&port->uart); atmel_get_ip_name(&port->uart);
if (ret < 0)
goto err_add_port;
return 0; return 0;


@ -642,16 +642,29 @@ static int uio_mmap_physical(struct vm_area_struct *vma)
{ {
struct uio_device *idev = vma->vm_private_data; struct uio_device *idev = vma->vm_private_data;
int mi = uio_find_mem_index(vma); int mi = uio_find_mem_index(vma);
struct uio_mem *mem;
if (mi < 0) if (mi < 0)
return -EINVAL; return -EINVAL;
mem = idev->info->mem + mi;
if (vma->vm_end - vma->vm_start > mem->size)
return -EINVAL;
vma->vm_ops = &uio_physical_vm_ops; vma->vm_ops = &uio_physical_vm_ops;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
/*
* We cannot use the vm_iomap_memory() helper here,
* because vma->vm_pgoff is the map index we looked
* up above in uio_find_mem_index(), rather than an
* actual page offset into the mmap.
*
* So we just do the physical mmap without a page
* offset.
*/
return remap_pfn_range(vma, return remap_pfn_range(vma,
vma->vm_start, vma->vm_start,
idev->info->mem[mi].addr >> PAGE_SHIFT, mem->addr >> PAGE_SHIFT,
vma->vm_end - vma->vm_start, vma->vm_end - vma->vm_start,
vma->vm_page_prot); vma->vm_page_prot);
} }
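
The new check above refuses mappings that are larger than the backing memory region before doing the remap. A minimal user-space sketch of that bounds check, with made-up region and address values:

#include <stdio.h>

struct region {
    unsigned long addr;
    unsigned long size;
};

/* Mirror of the added sanity check: a requested mapping may not be larger
 * than the region backing it (-1 plays the role of -EINVAL). */
static int check_map(const struct region *mem, unsigned long vm_start,
                     unsigned long vm_end)
{
    if (vm_end - vm_start > mem->size)
        return -1;
    return 0;
}

int main(void)
{
    struct region mem = { 0x40000000UL, 4096 };

    printf("%d\n", check_map(&mem, 0x10000000UL, 0x10001000UL)); /* fits: 0 */
    printf("%d\n", check_map(&mem, 0x10000000UL, 0x10002000UL)); /* too big: -1 */
    return 0;
}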

View File

@ -904,6 +904,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_LUMEL_PD12_PID) }, { USB_DEVICE(FTDI_VID, FTDI_LUMEL_PD12_PID) },
/* Crucible Devices */ /* Crucible Devices */
{ USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_Z3X_PID) },
{ } /* Terminating entry */ { } /* Terminating entry */
}; };

View File

@ -1307,3 +1307,9 @@
* Manufacturer: Crucible Technologies * Manufacturer: Crucible Technologies
*/ */
#define FTDI_CT_COMET_PID 0x8e08 #define FTDI_CT_COMET_PID 0x8e08
/*
* Product: Z3X Box
* Manufacturer: Smart GSM Team
*/
#define FTDI_Z3X_PID 0x0011

View File

@ -4,11 +4,6 @@
* Copyright (C) 2001-2007 Greg Kroah-Hartman (greg@kroah.com) * Copyright (C) 2001-2007 Greg Kroah-Hartman (greg@kroah.com)
* Copyright (C) 2003 IBM Corp. * Copyright (C) 2003 IBM Corp.
* *
* Copyright (C) 2009, 2013 Frank Schäfer <fschaefer.oss@googlemail.com>
* - fixes, improvements and documentation for the baud rate encoding methods
* Copyright (C) 2013 Reinhard Max <max@suse.de>
* - fixes and improvements for the divisor based baud rate encoding method
*
* Original driver for 2.2.x by anonymous * Original driver for 2.2.x by anonymous
* *
* This program is free software; you can redistribute it and/or * This program is free software; you can redistribute it and/or
@ -134,18 +129,10 @@ MODULE_DEVICE_TABLE(usb, id_table);
enum pl2303_type { enum pl2303_type {
type_0, /* H version ? */ type_0, /* don't know the difference between type 0 and */
type_1, /* H version ? */ type_1, /* type 1, until someone from prolific tells us... */
HX_TA, /* HX(A) / X(A) / TA version */ /* TODO: improve */ HX, /* HX version of the pl2303 chip */
HXD_EA_RA_SA, /* HXD / EA / RA / SA version */ /* TODO: improve */
TB, /* TB version */
HX_CLONE, /* Cheap and less functional clone of the HX chip */
}; };
/*
* NOTE: don't know the difference between type 0 and type 1,
* until someone from Prolific tells us...
* TODO: distinguish between X/HX, TA and HXD, EA, RA, SA variants
*/
struct pl2303_serial_private { struct pl2303_serial_private {
enum pl2303_type type; enum pl2303_type type;
@ -185,7 +172,6 @@ static int pl2303_startup(struct usb_serial *serial)
{ {
struct pl2303_serial_private *spriv; struct pl2303_serial_private *spriv;
enum pl2303_type type = type_0; enum pl2303_type type = type_0;
char *type_str = "unknown (treating as type_0)";
unsigned char *buf; unsigned char *buf;
spriv = kzalloc(sizeof(*spriv), GFP_KERNEL); spriv = kzalloc(sizeof(*spriv), GFP_KERNEL);
@ -198,53 +184,15 @@ static int pl2303_startup(struct usb_serial *serial)
return -ENOMEM; return -ENOMEM;
} }
if (serial->dev->descriptor.bDeviceClass == 0x02) { if (serial->dev->descriptor.bDeviceClass == 0x02)
type = type_0; type = type_0;
type_str = "type_0"; else if (serial->dev->descriptor.bMaxPacketSize0 == 0x40)
} else if (serial->dev->descriptor.bMaxPacketSize0 == 0x40) { type = HX;
/* else if (serial->dev->descriptor.bDeviceClass == 0x00)
* NOTE: The bcdDevice version is the only difference between
* the device descriptors of the X/HX, HXD, EA, RA, SA, TA, TB
*/
if (le16_to_cpu(serial->dev->descriptor.bcdDevice) == 0x300) {
/* Check if the device is a clone */
pl2303_vendor_read(0x9494, 0, serial, buf);
/*
* NOTE: Not sure if this read is really needed.
* The HX returns 0x00, the clone 0x02, but the Windows
* driver seems to ignore the value and continues.
*/
pl2303_vendor_write(0x0606, 0xaa, serial);
pl2303_vendor_read(0x8686, 0, serial, buf);
if (buf[0] != 0xaa) {
type = HX_CLONE;
type_str = "X/HX clone (limited functionality)";
} else {
type = HX_TA;
type_str = "X/HX/TA";
}
pl2303_vendor_write(0x0606, 0x00, serial);
} else if (le16_to_cpu(serial->dev->descriptor.bcdDevice)
== 0x400) {
type = HXD_EA_RA_SA;
type_str = "HXD/EA/RA/SA";
} else if (le16_to_cpu(serial->dev->descriptor.bcdDevice)
== 0x500) {
type = TB;
type_str = "TB";
} else {
dev_info(&serial->interface->dev,
"unknown/unsupported device type\n");
kfree(spriv);
kfree(buf);
return -ENODEV;
}
} else if (serial->dev->descriptor.bDeviceClass == 0x00
|| serial->dev->descriptor.bDeviceClass == 0xFF) {
type = type_1; type = type_1;
type_str = "type_1"; else if (serial->dev->descriptor.bDeviceClass == 0xFF)
} type = type_1;
dev_dbg(&serial->interface->dev, "device type: %s\n", type_str); dev_dbg(&serial->interface->dev, "device type: %d\n", type);
spriv->type = type; spriv->type = type;
usb_set_serial_data(serial, spriv); usb_set_serial_data(serial, spriv);
@ -259,10 +207,10 @@ static int pl2303_startup(struct usb_serial *serial)
pl2303_vendor_read(0x8383, 0, serial, buf); pl2303_vendor_read(0x8383, 0, serial, buf);
pl2303_vendor_write(0, 1, serial); pl2303_vendor_write(0, 1, serial);
pl2303_vendor_write(1, 0, serial); pl2303_vendor_write(1, 0, serial);
if (type == type_0 || type == type_1) if (type == HX)
pl2303_vendor_write(2, 0x24, serial);
else
pl2303_vendor_write(2, 0x44, serial); pl2303_vendor_write(2, 0x44, serial);
else
pl2303_vendor_write(2, 0x24, serial);
kfree(buf); kfree(buf);
return 0; return 0;
@ -316,174 +264,65 @@ static int pl2303_set_control_lines(struct usb_serial_port *port, u8 value)
return retval; return retval;
} }
static int pl2303_baudrate_encode_direct(int baud, enum pl2303_type type, static void pl2303_encode_baudrate(struct tty_struct *tty,
u8 buf[4]) struct usb_serial_port *port,
u8 buf[4])
{ {
/*
* NOTE: Only the values defined in baud_sup are supported !
* => if unsupported values are set, the PL2303 uses 9600 baud instead
* => HX clones just don't work at unsupported baud rates < 115200 baud,
* for baud rates > 115200 they run at 115200 baud
*/
const int baud_sup[] = { 75, 150, 300, 600, 1200, 1800, 2400, 3600, const int baud_sup[] = { 75, 150, 300, 600, 1200, 1800, 2400, 3600,
4800, 7200, 9600, 14400, 19200, 28800, 38400, 4800, 7200, 9600, 14400, 19200, 28800, 38400,
57600, 115200, 230400, 460800, 614400, 921600, 57600, 115200, 230400, 460800, 500000, 614400,
1228800, 2457600, 3000000, 6000000, 12000000 }; 921600, 1228800, 2457600, 3000000, 6000000 };
/*
* NOTE: With the exception of type_0/1 devices, the following struct usb_serial *serial = port->serial;
* additional baud rates are supported (tested with HX rev. 3A only): struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
* 110*, 56000*, 128000, 134400, 161280, 201600, 256000*, 268800, int baud;
* 403200, 806400. (*: not HX and HX clones)
*
* Maximum values: HXD, TB: 12000000; HX, TA: 6000000;
* type_0+1: 1228800; RA: 921600; HX clones, SA: 115200
*
* As long as we are not using this encoding method for anything else
* than the type_0+1, HX and HX clone chips, there is no point in
* complicating the code to support them.
*/
int i; int i;
/*
* NOTE: Only the values defined in baud_sup are supported!
* => if unsupported values are set, the PL2303 seems to use
* 9600 baud (at least my PL2303X always does)
*/
baud = tty_get_baud_rate(tty);
dev_dbg(&port->dev, "baud requested = %d\n", baud);
if (!baud)
return;
/* Set baudrate to nearest supported value */ /* Set baudrate to nearest supported value */
for (i = 0; i < ARRAY_SIZE(baud_sup); ++i) { for (i = 0; i < ARRAY_SIZE(baud_sup); ++i) {
if (baud_sup[i] > baud) if (baud_sup[i] > baud)
break; break;
} }
if (i == ARRAY_SIZE(baud_sup)) if (i == ARRAY_SIZE(baud_sup))
baud = baud_sup[i - 1]; baud = baud_sup[i - 1];
else if (i > 0 && (baud_sup[i] - baud) > (baud - baud_sup[i - 1])) else if (i > 0 && (baud_sup[i] - baud) > (baud - baud_sup[i - 1]))
baud = baud_sup[i - 1]; baud = baud_sup[i - 1];
else else
baud = baud_sup[i]; baud = baud_sup[i];
/* Respect the chip type specific baud rate limits */
/* /* type_0, type_1 only support up to 1228800 baud */
* FIXME: as long as we don't know how to distinguish between the if (spriv->type != HX)
* HXD, EA, RA, and SA chip variants, allow the max. value of 12M.
*/
if (type == HX_TA)
baud = min_t(int, baud, 6000000);
else if (type == type_0 || type == type_1)
baud = min_t(int, baud, 1228800); baud = min_t(int, baud, 1228800);
else if (type == HX_CLONE)
baud = min_t(int, baud, 115200);
/* Direct (standard) baud rate encoding method */
put_unaligned_le32(baud, buf);
return baud; if (baud <= 115200) {
} put_unaligned_le32(baud, buf);
static int pl2303_baudrate_encode_divisor(int baud, enum pl2303_type type,
u8 buf[4])
{
/*
* Divisor based baud rate encoding method
*
* NOTE: HX clones do NOT support this method.
* It's not clear if the type_0/1 chips support it.
*
* divisor = 12MHz * 32 / baudrate = 2^A * B
*
* with
*
* A = buf[1] & 0x0e
* B = buf[0] + (buf[1] & 0x01) << 8
*
* Special cases:
* => 8 < B < 16: device seems to work not properly
* => B <= 8: device uses the max. value B = 512 instead
*/
unsigned int A, B;
/*
* NOTE: The Windows driver allows maximum baud rates of 110% of the
* specified maximum value.
* Quick tests with early (2004) HX (rev. A) chips suggest that even
* higher baud rates (up to the maximum of 24M baud !) are working fine,
* but that should really be tested carefully in "real life" scenarios
* before removing the upper limit completely.
* Baud rates smaller than the specified 75 baud are definitely working
* fine.
*/
if (type == type_0 || type == type_1)
baud = min_t(int, baud, 1228800 * 1.1);
else if (type == HX_TA)
baud = min_t(int, baud, 6000000 * 1.1);
else if (type == HXD_EA_RA_SA)
/* HXD, EA: 12Mbps; RA: 1Mbps; SA: 115200 bps */
/*
* FIXME: as long as we don't know how to distinguish between
* these chip variants, allow the max. of these values
*/
baud = min_t(int, baud, 12000000 * 1.1);
else if (type == TB)
baud = min_t(int, baud, 12000000 * 1.1);
/* Determine factors A and B */
A = 0;
B = 12000000 * 32 / baud; /* 12MHz */
B <<= 1; /* Add one bit for rounding */
while (B > (512 << 1) && A <= 14) {
A += 2;
B >>= 2;
}
if (A > 14) { /* max. divisor = min. baudrate reached */
A = 14;
B = 512;
/* => ~45.78 baud */
} else { } else {
B = (B + 1) >> 1; /* Round the last bit */
}
/* Handle special cases */
if (B == 512)
B = 0; /* also: 1 to 8 */
else if (B < 16)
/* /*
* NOTE: With the current algorithm this happens * Apparently the formula for higher speeds is:
* only for A=0 and means that the min. divisor * baudrate = 12M * 32 / (2^buf[1]) / buf[0]
* (respectively: the max. baudrate) is reached.
*/ */
B = 16; /* => 24 MBaud */ unsigned tmp = 12000000 * 32 / baud;
/* Encode the baud rate */ buf[3] = 0x80;
buf[3] = 0x80; /* Select divisor encoding method */ buf[2] = 0;
buf[2] = 0; buf[1] = (tmp >= 256);
buf[1] = (A & 0x0e); /* A */ while (tmp >= 256) {
buf[1] |= ((B & 0x100) >> 8); /* MSB of B */ tmp >>= 2;
buf[0] = B & 0xff; /* 8 LSBs of B */ buf[1] <<= 1;
/* Calculate the actual/resulting baud rate */ }
if (B <= 8) buf[0] = tmp;
B = 512; }
baud = 12000000 * 32 / ((1 << A) * B);
return baud;
}
static void pl2303_encode_baudrate(struct tty_struct *tty,
struct usb_serial_port *port,
enum pl2303_type type,
u8 buf[4])
{
int baud;
baud = tty_get_baud_rate(tty);
dev_dbg(&port->dev, "baud requested = %d\n", baud);
if (!baud)
return;
/*
* There are two methods for setting/encoding the baud rate
* 1) Direct method: encodes the baud rate value directly
* => supported by all chip types
* 2) Divisor based method: encodes a divisor to a base value (12MHz*32)
* => not supported by HX clones (and likely type_0/1 chips)
*
* NOTE: Although the divisor based baud rate encoding method is much
* more flexible, some of the standard baud rate values can not be
* realized exactly. But the difference is very small (max. 0.2%) and
* the device likely uses the same baud rate generator for both methods
* so that there is likely no difference.
*/
if (type == type_0 || type == type_1 || type == HX_CLONE)
baud = pl2303_baudrate_encode_direct(baud, type, buf);
else
baud = pl2303_baudrate_encode_divisor(baud, type, buf);
/* Save resulting baud rate */ /* Save resulting baud rate */
tty_encode_baud_rate(tty, baud, baud); tty_encode_baud_rate(tty, baud, baud);
dev_dbg(&port->dev, "baud set = %d\n", baud); dev_dbg(&port->dev, "baud set = %d\n", baud);
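
For reference, a stand-alone sketch that mirrors the two encodings kept in the simplified pl2303_encode_baudrate() above: the direct little-endian baud value up to 115200, and the 12 MHz * 32 divisor form beyond that. The helper name and the sample rates are illustrative only.

#include <stdio.h>
#include <stdint.h>

/* Same arithmetic as the simplified driver: direct encoding up to 115200,
 * otherwise baud = 12M * 32 / (2^buf[1]) / buf[0] with buf[3] = 0x80. */
static void encode_rate(unsigned int baud, uint8_t buf[4])
{
    if (baud <= 115200) {
        buf[0] = baud & 0xff;
        buf[1] = (baud >> 8) & 0xff;
        buf[2] = (baud >> 16) & 0xff;
        buf[3] = (baud >> 24) & 0xff;
    } else {
        unsigned int tmp = 12000000u * 32u / baud;

        buf[3] = 0x80;
        buf[2] = 0;
        buf[1] = (tmp >= 256);
        while (tmp >= 256) {
            tmp >>= 2;
            buf[1] <<= 1;
        }
        buf[0] = tmp;
    }
}

int main(void)
{
    unsigned int rates[] = { 9600, 115200, 230400, 3000000 };
    uint8_t buf[4];

    for (int i = 0; i < 4; i++) {
        encode_rate(rates[i], buf);
        printf("%7u -> %02x %02x %02x %02x\n",
               rates[i], buf[0], buf[1], buf[2], buf[3]);
    }
    return 0;
}

Decoding the divisor form as 12M * 32 / (2^buf[1]) / buf[0] recovers a rate within a fraction of a percent of the request (for example, 230400 encodes to roughly 230769).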
@ -540,8 +379,8 @@ static void pl2303_set_termios(struct tty_struct *tty,
dev_dbg(&port->dev, "data bits = %d\n", buf[6]); dev_dbg(&port->dev, "data bits = %d\n", buf[6]);
} }
/* For reference: buf[0]:buf[3] baud rate value */ /* For reference buf[0]:buf[3] baud rate value */
pl2303_encode_baudrate(tty, port, spriv->type, buf); pl2303_encode_baudrate(tty, port, &buf[0]);
/* For reference buf[4]=0 is 1 stop bits */ /* For reference buf[4]=0 is 1 stop bits */
/* For reference buf[4]=1 is 1.5 stop bits */ /* For reference buf[4]=1 is 1.5 stop bits */
@ -618,10 +457,10 @@ static void pl2303_set_termios(struct tty_struct *tty,
dev_dbg(&port->dev, "0xa1:0x21:0:0 %d - %7ph\n", i, buf); dev_dbg(&port->dev, "0xa1:0x21:0:0 %d - %7ph\n", i, buf);
if (C_CRTSCTS(tty)) { if (C_CRTSCTS(tty)) {
if (spriv->type == type_0 || spriv->type == type_1) if (spriv->type == HX)
pl2303_vendor_write(0x0, 0x41, serial);
else
pl2303_vendor_write(0x0, 0x61, serial); pl2303_vendor_write(0x0, 0x61, serial);
else
pl2303_vendor_write(0x0, 0x41, serial);
} else { } else {
pl2303_vendor_write(0x0, 0x0, serial); pl2303_vendor_write(0x0, 0x0, serial);
} }
@ -658,7 +497,7 @@ static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
struct pl2303_serial_private *spriv = usb_get_serial_data(serial); struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
int result; int result;
if (spriv->type == type_0 || spriv->type == type_1) { if (spriv->type != HX) {
usb_clear_halt(serial->dev, port->write_urb->pipe); usb_clear_halt(serial->dev, port->write_urb->pipe);
usb_clear_halt(serial->dev, port->read_urb->pipe); usb_clear_halt(serial->dev, port->read_urb->pipe);
} else { } else {
@ -833,7 +672,6 @@ static void pl2303_break_ctl(struct tty_struct *tty, int break_state)
result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
BREAK_REQUEST, BREAK_REQUEST_TYPE, state, BREAK_REQUEST, BREAK_REQUEST_TYPE, state,
0, NULL, 0, 100); 0, NULL, 0, 100);
/* NOTE: HX clones don't support sending breaks, -EPIPE is returned */
if (result) if (result)
dev_err(&port->dev, "error sending break = %d\n", result); dev_err(&port->dev, "error sending break = %d\n", result);
} }

View File

@ -361,37 +361,13 @@ void au1100fb_fb_rotate(struct fb_info *fbi, int angle)
int au1100fb_fb_mmap(struct fb_info *fbi, struct vm_area_struct *vma) int au1100fb_fb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
{ {
struct au1100fb_device *fbdev; struct au1100fb_device *fbdev;
unsigned int len;
unsigned long start=0, off;
fbdev = to_au1100fb_device(fbi); fbdev = to_au1100fb_device(fbi);
if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
return -EINVAL;
}
start = fbdev->fb_phys & PAGE_MASK;
len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len);
off = vma->vm_pgoff << PAGE_SHIFT;
if ((vma->vm_end - vma->vm_start + off) > len) {
return -EINVAL;
}
off += start;
vma->vm_pgoff = off >> PAGE_SHIFT;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
pgprot_val(vma->vm_page_prot) |= (6 << 9); //CCA=6 pgprot_val(vma->vm_page_prot) |= (6 << 9); //CCA=6
if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT, return vm_iomap_memory(vma, fbdev->fb_phys, fbdev->fb_len);
vma->vm_end - vma->vm_start,
vma->vm_page_prot)) {
return -EAGAIN;
}
return 0;
} }
static struct fb_ops au1100fb_ops = static struct fb_ops au1100fb_ops =

View File

@ -1233,34 +1233,13 @@ static int au1200fb_fb_blank(int blank_mode, struct fb_info *fbi)
* method mainly to allow the use of the TLB streaming flag (CCA=6) * method mainly to allow the use of the TLB streaming flag (CCA=6)
*/ */
static int au1200fb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) static int au1200fb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{ {
unsigned int len;
unsigned long start=0, off;
struct au1200fb_device *fbdev = info->par; struct au1200fb_device *fbdev = info->par;
if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
return -EINVAL;
}
start = fbdev->fb_phys & PAGE_MASK;
len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len);
off = vma->vm_pgoff << PAGE_SHIFT;
if ((vma->vm_end - vma->vm_start + off) > len) {
return -EINVAL;
}
off += start;
vma->vm_pgoff = off >> PAGE_SHIFT;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
pgprot_val(vma->vm_page_prot) |= _CACHE_MASK; /* CCA=7 */ pgprot_val(vma->vm_page_prot) |= _CACHE_MASK; /* CCA=7 */
return io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT, return vm_iomap_memory(vma, fbdev->fb_phys, fbdev->fb_len);
vma->vm_end - vma->vm_start,
vma->vm_page_prot);
} }
static void set_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata) static void set_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata)

View File

@ -542,7 +542,7 @@ EXPORT_SYMBOL(d_drop);
* If ref is non-zero, then decrement the refcount too. * If ref is non-zero, then decrement the refcount too.
* Returns dentry requiring refcount drop, or NULL if we're done. * Returns dentry requiring refcount drop, or NULL if we're done.
*/ */
static inline struct dentry * static struct dentry *
dentry_kill(struct dentry *dentry, int unlock_on_failure) dentry_kill(struct dentry *dentry, int unlock_on_failure)
__releases(dentry->d_lock) __releases(dentry->d_lock)
{ {
@ -630,7 +630,8 @@ repeat:
goto kill_it; goto kill_it;
} }
dentry->d_flags |= DCACHE_REFERENCED; if (!(dentry->d_flags & DCACHE_REFERENCED))
dentry->d_flags |= DCACHE_REFERENCED;
dentry_lru_add(dentry); dentry_lru_add(dentry);
dentry->d_lockref.count--; dentry->d_lockref.count--;

View File

@ -34,7 +34,6 @@
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/anon_inodes.h> #include <linux/anon_inodes.h>
#include <linux/device.h> #include <linux/device.h>
#include <linux/freezer.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/io.h> #include <asm/io.h>
#include <asm/mman.h> #include <asm/mman.h>
@ -1605,8 +1604,7 @@ fetch_events:
} }
spin_unlock_irqrestore(&ep->lock, flags); spin_unlock_irqrestore(&ep->lock, flags);
if (!freezable_schedule_hrtimeout_range(to, slack, if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS))
HRTIMER_MODE_ABS))
timed_out = 1; timed_out = 1;
spin_lock_irqsave(&ep->lock, flags); spin_lock_irqsave(&ep->lock, flags);

View File

@ -238,8 +238,7 @@ int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
set_current_state(state); set_current_state(state);
if (!pwq->triggered) if (!pwq->triggered)
rc = freezable_schedule_hrtimeout_range(expires, slack, rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS);
HRTIMER_MODE_ABS);
__set_current_state(TASK_RUNNING); __set_current_state(TASK_RUNNING);
/* /*

View File

@ -34,9 +34,9 @@ struct ipc_namespace {
int sem_ctls[4]; int sem_ctls[4];
int used_sems; int used_sems;
int msg_ctlmax; unsigned int msg_ctlmax;
int msg_ctlmnb; unsigned int msg_ctlmnb;
int msg_ctlmni; unsigned int msg_ctlmni;
atomic_t msg_bytes; atomic_t msg_bytes;
atomic_t msg_hdrs; atomic_t msg_hdrs;
int auto_msgmni; int auto_msgmni;

View File

@ -332,7 +332,7 @@ do { \
#endif #endif
#ifndef this_cpu_sub #ifndef this_cpu_sub
# define this_cpu_sub(pcp, val) this_cpu_add((pcp), -(val)) # define this_cpu_sub(pcp, val) this_cpu_add((pcp), -(typeof(pcp))(val))
#endif #endif
#ifndef this_cpu_inc #ifndef this_cpu_inc
@ -418,7 +418,7 @@ do { \
# define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val) # define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
#endif #endif
#define this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(val)) #define this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(typeof(pcp))(val))
#define this_cpu_inc_return(pcp) this_cpu_add_return(pcp, 1) #define this_cpu_inc_return(pcp) this_cpu_add_return(pcp, 1)
#define this_cpu_dec_return(pcp) this_cpu_add_return(pcp, -1) #define this_cpu_dec_return(pcp) this_cpu_add_return(pcp, -1)
@ -586,7 +586,7 @@ do { \
#endif #endif
#ifndef __this_cpu_sub #ifndef __this_cpu_sub
# define __this_cpu_sub(pcp, val) __this_cpu_add((pcp), -(val)) # define __this_cpu_sub(pcp, val) __this_cpu_add((pcp), -(typeof(pcp))(val))
#endif #endif
#ifndef __this_cpu_inc #ifndef __this_cpu_inc
@ -668,7 +668,7 @@ do { \
__pcpu_size_call_return2(__this_cpu_add_return_, pcp, val) __pcpu_size_call_return2(__this_cpu_add_return_, pcp, val)
#endif #endif
#define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(val)) #define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(typeof(pcp))(val))
#define __this_cpu_inc_return(pcp) __this_cpu_add_return(pcp, 1) #define __this_cpu_inc_return(pcp) __this_cpu_add_return(pcp, 1)
#define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1) #define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1)
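
Why the added typeof cast matters: negating an unsigned subtrahend before adding it to a wider per-cpu variable otherwise adds a huge positive value instead of subtracting. A stand-alone illustration (assumes an LP64 machine where long is 64-bit):

#include <stdio.h>

int main(void)
{
    long counter_buggy = 100, counter_fixed = 100;
    unsigned int val = 1;

    counter_buggy += -val;          /* -val is 0xffffffff as unsigned int */
    counter_fixed += -(long)val;    /* what -(typeof(pcp))(val) achieves */

    printf("without cast: %ld\n", counter_buggy);   /* 4294967395 */
    printf("with cast:    %ld\n", counter_fixed);   /* 99 */
    return 0;
}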

View File

@ -479,13 +479,15 @@ struct perf_event_mmap_page {
/* /*
* Control data for the mmap() data buffer. * Control data for the mmap() data buffer.
* *
* User-space reading the @data_head value should issue an rmb(), on * User-space reading the @data_head value should issue an smp_rmb(),
* SMP capable platforms, after reading this value -- see * after reading this value.
* perf_event_wakeup().
* *
* When the mapping is PROT_WRITE the @data_tail value should be * When the mapping is PROT_WRITE the @data_tail value should be
* written by userspace to reflect the last read data. In this case * written by userspace to reflect the last read data, after issuing
* the kernel will not over-write unread data. * an smp_mb() to separate the data read from the ->data_tail store.
* In this case the kernel will not over-write unread data.
*
* See perf_output_put_handle() for the data ordering.
*/ */
__u64 data_head; /* head in the data section */ __u64 data_head; /* head in the data section */
__u64 data_tail; /* user-space written tail */ __u64 data_tail; /* user-space written tail */

View File

@ -62,7 +62,7 @@ static int proc_ipc_dointvec_minmax_orphans(ctl_table *table, int write,
return err; return err;
} }
static int proc_ipc_callback_dointvec(ctl_table *table, int write, static int proc_ipc_callback_dointvec_minmax(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos) void __user *buffer, size_t *lenp, loff_t *ppos)
{ {
struct ctl_table ipc_table; struct ctl_table ipc_table;
@ -72,7 +72,7 @@ static int proc_ipc_callback_dointvec(ctl_table *table, int write,
memcpy(&ipc_table, table, sizeof(ipc_table)); memcpy(&ipc_table, table, sizeof(ipc_table));
ipc_table.data = get_ipc(table); ipc_table.data = get_ipc(table);
rc = proc_dointvec(&ipc_table, write, buffer, lenp, ppos); rc = proc_dointvec_minmax(&ipc_table, write, buffer, lenp, ppos);
if (write && !rc && lenp_bef == *lenp) if (write && !rc && lenp_bef == *lenp)
/* /*
@ -152,15 +152,13 @@ static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write,
#define proc_ipc_dointvec NULL #define proc_ipc_dointvec NULL
#define proc_ipc_dointvec_minmax NULL #define proc_ipc_dointvec_minmax NULL
#define proc_ipc_dointvec_minmax_orphans NULL #define proc_ipc_dointvec_minmax_orphans NULL
#define proc_ipc_callback_dointvec NULL #define proc_ipc_callback_dointvec_minmax NULL
#define proc_ipcauto_dointvec_minmax NULL #define proc_ipcauto_dointvec_minmax NULL
#endif #endif
static int zero; static int zero;
static int one = 1; static int one = 1;
#ifdef CONFIG_CHECKPOINT_RESTORE
static int int_max = INT_MAX; static int int_max = INT_MAX;
#endif
static struct ctl_table ipc_kern_table[] = { static struct ctl_table ipc_kern_table[] = {
{ {
@ -198,21 +196,27 @@ static struct ctl_table ipc_kern_table[] = {
.data = &init_ipc_ns.msg_ctlmax, .data = &init_ipc_ns.msg_ctlmax,
.maxlen = sizeof (init_ipc_ns.msg_ctlmax), .maxlen = sizeof (init_ipc_ns.msg_ctlmax),
.mode = 0644, .mode = 0644,
.proc_handler = proc_ipc_dointvec, .proc_handler = proc_ipc_dointvec_minmax,
.extra1 = &zero,
.extra2 = &int_max,
}, },
{ {
.procname = "msgmni", .procname = "msgmni",
.data = &init_ipc_ns.msg_ctlmni, .data = &init_ipc_ns.msg_ctlmni,
.maxlen = sizeof (init_ipc_ns.msg_ctlmni), .maxlen = sizeof (init_ipc_ns.msg_ctlmni),
.mode = 0644, .mode = 0644,
.proc_handler = proc_ipc_callback_dointvec, .proc_handler = proc_ipc_callback_dointvec_minmax,
.extra1 = &zero,
.extra2 = &int_max,
}, },
{ {
.procname = "msgmnb", .procname = "msgmnb",
.data = &init_ipc_ns.msg_ctlmnb, .data = &init_ipc_ns.msg_ctlmnb,
.maxlen = sizeof (init_ipc_ns.msg_ctlmnb), .maxlen = sizeof (init_ipc_ns.msg_ctlmnb),
.mode = 0644, .mode = 0644,
.proc_handler = proc_ipc_dointvec, .proc_handler = proc_ipc_dointvec_minmax,
.extra1 = &zero,
.extra2 = &int_max,
}, },
{ {
.procname = "sem", .procname = "sem",

View File

@ -87,10 +87,31 @@ again:
goto out; goto out;
/* /*
* Publish the known good head. Rely on the full barrier implied * Since the mmap() consumer (userspace) can run on a different CPU:
* by atomic_dec_and_test() order the rb->head read and this *
* write. * kernel user
*
* READ ->data_tail READ ->data_head
* smp_mb() (A) smp_rmb() (C)
* WRITE $data READ $data
* smp_wmb() (B) smp_mb() (D)
* STORE ->data_head WRITE ->data_tail
*
* Where A pairs with D, and B pairs with C.
*
* I don't think A needs to be a full barrier because we won't in fact
* write data until we see the store from userspace. So we simply don't
* issue the data WRITE until we observe it. Be conservative for now.
*
* OTOH, D needs to be a full barrier since it separates the data READ
* from the tail WRITE.
*
* For B a WMB is sufficient since it separates two WRITEs, and for C
* an RMB is sufficient since it separates two READs.
*
* See perf_output_begin().
*/ */
smp_wmb();
rb->user_page->data_head = head; rb->user_page->data_head = head;
/* /*
@ -154,9 +175,11 @@ int perf_output_begin(struct perf_output_handle *handle,
* Userspace could choose to issue a mb() before updating the * Userspace could choose to issue a mb() before updating the
* tail pointer. So that all reads will be completed before the * tail pointer. So that all reads will be completed before the
* write is issued. * write is issued.
*
* See perf_output_put_handle().
*/ */
tail = ACCESS_ONCE(rb->user_page->data_tail); tail = ACCESS_ONCE(rb->user_page->data_tail);
smp_rmb(); smp_mb();
offset = head = local_read(&rb->head); offset = head = local_read(&rb->head);
head += size; head += size;
if (unlikely(!perf_output_space(rb, tail, offset, head))) if (unlikely(!perf_output_space(rb, tail, offset, head)))
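
For the C/D half of the pairing documented above, a user-space reader of the mmap'ed buffer loads data_head, reads the data, and only then publishes data_tail. Below is a sketch with C11 fences standing in for the kernel's smp_rmb()/smp_mb(); the page layout is simplified and is not the real perf_event_mmap_page.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct ring_page {                  /* simplified stand-in for perf_event_mmap_page */
    volatile uint64_t data_head;
    volatile uint64_t data_tail;
};

/* Consumer side: READ ->data_head, rmb (C), read the data, mb (D),
 * then WRITE ->data_tail, following the pairing described above. */
static size_t consume(struct ring_page *pg, const unsigned char *data,
                      size_t data_size, unsigned char *out, size_t out_len)
{
    uint64_t head = pg->data_head;
    atomic_thread_fence(memory_order_acquire);      /* (C), pairs with (B) */

    uint64_t tail = pg->data_tail;
    size_t avail = head - tail;
    size_t n = avail < out_len ? avail : out_len;

    for (size_t i = 0; i < n; i++)
        out[i] = data[(tail + i) % data_size];

    atomic_thread_fence(memory_order_seq_cst);      /* (D), pairs with (A) */
    pg->data_tail = tail + n;
    return n;
}

int main(void)
{
    unsigned char data[64] = "hello from the ring buffer";
    struct ring_page pg = { .data_head = 26, .data_tail = 0 };
    unsigned char out[64];

    size_t n = consume(&pg, data, sizeof(data), out, sizeof(out) - 1);
    out[n] = '\0';
    printf("%zu bytes: %s\n", n, (char *)out);
    return 0;
}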

View File

@ -983,7 +983,7 @@ config DEBUG_KOBJECT
config DEBUG_KOBJECT_RELEASE config DEBUG_KOBJECT_RELEASE
bool "kobject release debugging" bool "kobject release debugging"
depends on DEBUG_KERNEL depends on DEBUG_OBJECTS_TIMERS
help help
kobjects are reference counted objects. This means that their kobjects are reference counted objects. This means that their
last reference count put is not predictable, and the kobject can last reference count put is not predictable, and the kobject can

View File

@ -577,7 +577,8 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
miter->__offset += miter->consumed; miter->__offset += miter->consumed;
miter->__remaining -= miter->consumed; miter->__remaining -= miter->consumed;
if (miter->__flags & SG_MITER_TO_SG) if ((miter->__flags & SG_MITER_TO_SG) &&
!PageSlab(miter->page))
flush_kernel_dcache_page(miter->page); flush_kernel_dcache_page(miter->page);
if (miter->__flags & SG_MITER_ATOMIC) { if (miter->__flags & SG_MITER_ATOMIC) {

View File

@ -1278,64 +1278,90 @@ out:
int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, pmd_t pmd, pmd_t *pmdp) unsigned long addr, pmd_t pmd, pmd_t *pmdp)
{ {
struct anon_vma *anon_vma = NULL;
struct page *page; struct page *page;
unsigned long haddr = addr & HPAGE_PMD_MASK; unsigned long haddr = addr & HPAGE_PMD_MASK;
int page_nid = -1, this_nid = numa_node_id();
int target_nid; int target_nid;
int current_nid = -1; bool page_locked;
bool migrated; bool migrated = false;
spin_lock(&mm->page_table_lock); spin_lock(&mm->page_table_lock);
if (unlikely(!pmd_same(pmd, *pmdp))) if (unlikely(!pmd_same(pmd, *pmdp)))
goto out_unlock; goto out_unlock;
page = pmd_page(pmd); page = pmd_page(pmd);
get_page(page); page_nid = page_to_nid(page);
current_nid = page_to_nid(page);
count_vm_numa_event(NUMA_HINT_FAULTS); count_vm_numa_event(NUMA_HINT_FAULTS);
if (current_nid == numa_node_id()) if (page_nid == this_nid)
count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL); count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
/*
* Acquire the page lock to serialise THP migrations but avoid dropping
* page_table_lock if at all possible
*/
page_locked = trylock_page(page);
target_nid = mpol_misplaced(page, vma, haddr); target_nid = mpol_misplaced(page, vma, haddr);
if (target_nid == -1) { if (target_nid == -1) {
put_page(page); /* If the page was locked, there are no parallel migrations */
goto clear_pmdnuma; if (page_locked)
goto clear_pmdnuma;
/*
* Otherwise wait for potential migrations and retry. We do
* relock and check_same as the page may no longer be mapped.
* As the fault is being retried, do not account for it.
*/
spin_unlock(&mm->page_table_lock);
wait_on_page_locked(page);
page_nid = -1;
goto out;
} }
/* Acquire the page lock to serialise THP migrations */ /* Page is misplaced, serialise migrations and parallel THP splits */
get_page(page);
spin_unlock(&mm->page_table_lock); spin_unlock(&mm->page_table_lock);
lock_page(page); if (!page_locked)
lock_page(page);
anon_vma = page_lock_anon_vma_read(page);
/* Confirm the PTE did not change while locked */ /* Confirm the PTE did not change while locked */
spin_lock(&mm->page_table_lock); spin_lock(&mm->page_table_lock);
if (unlikely(!pmd_same(pmd, *pmdp))) { if (unlikely(!pmd_same(pmd, *pmdp))) {
unlock_page(page); unlock_page(page);
put_page(page); put_page(page);
page_nid = -1;
goto out_unlock; goto out_unlock;
} }
spin_unlock(&mm->page_table_lock);
/* Migrate the THP to the requested node */ /*
* Migrate the THP to the requested node, returns with page unlocked
* and pmd_numa cleared.
*/
spin_unlock(&mm->page_table_lock);
migrated = migrate_misplaced_transhuge_page(mm, vma, migrated = migrate_misplaced_transhuge_page(mm, vma,
pmdp, pmd, addr, page, target_nid); pmdp, pmd, addr, page, target_nid);
if (!migrated) if (migrated)
goto check_same; page_nid = target_nid;
task_numa_fault(target_nid, HPAGE_PMD_NR, true); goto out;
return 0;
check_same:
spin_lock(&mm->page_table_lock);
if (unlikely(!pmd_same(pmd, *pmdp)))
goto out_unlock;
clear_pmdnuma: clear_pmdnuma:
BUG_ON(!PageLocked(page));
pmd = pmd_mknonnuma(pmd); pmd = pmd_mknonnuma(pmd);
set_pmd_at(mm, haddr, pmdp, pmd); set_pmd_at(mm, haddr, pmdp, pmd);
VM_BUG_ON(pmd_numa(*pmdp)); VM_BUG_ON(pmd_numa(*pmdp));
update_mmu_cache_pmd(vma, addr, pmdp); update_mmu_cache_pmd(vma, addr, pmdp);
unlock_page(page);
out_unlock: out_unlock:
spin_unlock(&mm->page_table_lock); spin_unlock(&mm->page_table_lock);
if (current_nid != -1)
task_numa_fault(current_nid, HPAGE_PMD_NR, false); out:
if (anon_vma)
page_unlock_anon_vma_read(anon_vma);
if (page_nid != -1)
task_numa_fault(page_nid, HPAGE_PMD_NR, migrated);
return 0; return 0;
} }

View File

@ -81,8 +81,9 @@ restart:
* decrement nr_to_walk first so that we don't livelock if we * decrement nr_to_walk first so that we don't livelock if we
* get stuck on large numbers of LRU_RETRY items * get stuck on large numbers of LRU_RETRY items
*/ */
if (--(*nr_to_walk) == 0) if (!*nr_to_walk)
break; break;
--*nr_to_walk;
ret = isolate(item, &nlru->lock, cb_arg); ret = isolate(item, &nlru->lock, cb_arg);
switch (ret) { switch (ret) {
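
The reordered budget test above stops the walk before isolating another item once nr_to_walk hits zero, rather than decrementing first (which also misbehaved when a caller passed zero). A tiny stand-alone model of the corrected loop:

#include <stdio.h>

/* Visit at most nr_to_walk items: test the budget before spending it,
 * as the corrected code above does. */
static int walk(int nitems, unsigned long nr_to_walk)
{
    int visited = 0;

    for (int i = 0; i < nitems; i++) {
        if (!nr_to_walk)
            break;
        nr_to_walk--;
        visited++;      /* isolate(item, ...) would run here */
    }
    return visited;
}

int main(void)
{
    printf("budget 3 -> %d visited\n", walk(10, 3)); /* 3 */
    printf("budget 0 -> %d visited\n", walk(10, 0)); /* 0 */
    return 0;
}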

View File

@ -54,6 +54,7 @@
#include <linux/page_cgroup.h> #include <linux/page_cgroup.h>
#include <linux/cpu.h> #include <linux/cpu.h>
#include <linux/oom.h> #include <linux/oom.h>
#include <linux/lockdep.h>
#include "internal.h" #include "internal.h"
#include <net/sock.h> #include <net/sock.h>
#include <net/ip.h> #include <net/ip.h>
@ -2046,6 +2047,12 @@ static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
return total; return total;
} }
#ifdef CONFIG_LOCKDEP
static struct lockdep_map memcg_oom_lock_dep_map = {
.name = "memcg_oom_lock",
};
#endif
static DEFINE_SPINLOCK(memcg_oom_lock); static DEFINE_SPINLOCK(memcg_oom_lock);
/* /*
@ -2083,7 +2090,8 @@ static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
} }
iter->oom_lock = false; iter->oom_lock = false;
} }
} } else
mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
spin_unlock(&memcg_oom_lock); spin_unlock(&memcg_oom_lock);
@ -2095,6 +2103,7 @@ static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
struct mem_cgroup *iter; struct mem_cgroup *iter;
spin_lock(&memcg_oom_lock); spin_lock(&memcg_oom_lock);
mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
for_each_mem_cgroup_tree(iter, memcg) for_each_mem_cgroup_tree(iter, memcg)
iter->oom_lock = false; iter->oom_lock = false;
spin_unlock(&memcg_oom_lock); spin_unlock(&memcg_oom_lock);
@ -2765,10 +2774,10 @@ done:
*ptr = memcg; *ptr = memcg;
return 0; return 0;
nomem: nomem:
*ptr = NULL; if (!(gfp_mask & __GFP_NOFAIL)) {
if (gfp_mask & __GFP_NOFAIL) *ptr = NULL;
return 0; return -ENOMEM;
return -ENOMEM; }
bypass: bypass:
*ptr = root_mem_cgroup; *ptr = root_mem_cgroup;
return -EINTR; return -EINTR;
@ -3773,8 +3782,7 @@ void mem_cgroup_move_account_page_stat(struct mem_cgroup *from,
{ {
/* Update stat data for mem_cgroup */ /* Update stat data for mem_cgroup */
preempt_disable(); preempt_disable();
WARN_ON_ONCE(from->stat->count[idx] < nr_pages); __this_cpu_sub(from->stat->count[idx], nr_pages);
__this_cpu_add(from->stat->count[idx], -nr_pages);
__this_cpu_add(to->stat->count[idx], nr_pages); __this_cpu_add(to->stat->count[idx], nr_pages);
preempt_enable(); preempt_enable();
} }
@ -4950,31 +4958,18 @@ static void mem_cgroup_reparent_charges(struct mem_cgroup *memcg)
} while (usage > 0); } while (usage > 0);
} }
/*
* This mainly exists for tests during the setting of set of use_hierarchy.
* Since this is the very setting we are changing, the current hierarchy value
* is meaningless
*/
static inline bool __memcg_has_children(struct mem_cgroup *memcg)
{
struct cgroup_subsys_state *pos;
/* bounce at first found */
css_for_each_child(pos, &memcg->css)
return true;
return false;
}
/*
* Must be called with memcg_create_mutex held, unless the cgroup is guaranteed
* to be already dead (as in mem_cgroup_force_empty, for instance). This is
* from mem_cgroup_count_children(), in the sense that we don't really care how
* many children we have; we only need to know if we have any. It also counts
* any memcg without hierarchy as infertile.
*/
static inline bool memcg_has_children(struct mem_cgroup *memcg) static inline bool memcg_has_children(struct mem_cgroup *memcg)
{ {
return memcg->use_hierarchy && __memcg_has_children(memcg); lockdep_assert_held(&memcg_create_mutex);
/*
* The lock does not prevent addition or deletion to the list
* of children, but it prevents a new child from being
* initialized based on this parent in css_online(), so it's
* enough to decide whether hierarchically inherited
* attributes can still be changed or not.
*/
return memcg->use_hierarchy &&
!list_empty(&memcg->css.cgroup->children);
} }
/* /*
@ -5054,7 +5049,7 @@ static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
*/ */
if ((!parent_memcg || !parent_memcg->use_hierarchy) && if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
(val == 1 || val == 0)) { (val == 1 || val == 0)) {
if (!__memcg_has_children(memcg)) if (list_empty(&memcg->css.cgroup->children))
memcg->use_hierarchy = val; memcg->use_hierarchy = val;
else else
retval = -EBUSY; retval = -EBUSY;

View File

@ -3521,12 +3521,12 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
} }
int numa_migrate_prep(struct page *page, struct vm_area_struct *vma, int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
unsigned long addr, int current_nid) unsigned long addr, int page_nid)
{ {
get_page(page); get_page(page);
count_vm_numa_event(NUMA_HINT_FAULTS); count_vm_numa_event(NUMA_HINT_FAULTS);
if (current_nid == numa_node_id()) if (page_nid == numa_node_id())
count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL); count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
return mpol_misplaced(page, vma, addr); return mpol_misplaced(page, vma, addr);
@ -3537,7 +3537,7 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
{ {
struct page *page = NULL; struct page *page = NULL;
spinlock_t *ptl; spinlock_t *ptl;
int current_nid = -1; int page_nid = -1;
int target_nid; int target_nid;
bool migrated = false; bool migrated = false;
@ -3567,15 +3567,10 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
return 0; return 0;
} }
current_nid = page_to_nid(page); page_nid = page_to_nid(page);
target_nid = numa_migrate_prep(page, vma, addr, current_nid); target_nid = numa_migrate_prep(page, vma, addr, page_nid);
pte_unmap_unlock(ptep, ptl); pte_unmap_unlock(ptep, ptl);
if (target_nid == -1) { if (target_nid == -1) {
/*
* Account for the fault against the current node if it not
* being replaced regardless of where the page is located.
*/
current_nid = numa_node_id();
put_page(page); put_page(page);
goto out; goto out;
} }
@ -3583,11 +3578,11 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
/* Migrate to the requested node */ /* Migrate to the requested node */
migrated = migrate_misplaced_page(page, target_nid); migrated = migrate_misplaced_page(page, target_nid);
if (migrated) if (migrated)
current_nid = target_nid; page_nid = target_nid;
out: out:
if (current_nid != -1) if (page_nid != -1)
task_numa_fault(current_nid, 1, migrated); task_numa_fault(page_nid, 1, migrated);
return 0; return 0;
} }
@ -3602,7 +3597,6 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long offset; unsigned long offset;
spinlock_t *ptl; spinlock_t *ptl;
bool numa = false; bool numa = false;
int local_nid = numa_node_id();
spin_lock(&mm->page_table_lock); spin_lock(&mm->page_table_lock);
pmd = *pmdp; pmd = *pmdp;
@ -3625,9 +3619,10 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
for (addr = _addr + offset; addr < _addr + PMD_SIZE; pte++, addr += PAGE_SIZE) { for (addr = _addr + offset; addr < _addr + PMD_SIZE; pte++, addr += PAGE_SIZE) {
pte_t pteval = *pte; pte_t pteval = *pte;
struct page *page; struct page *page;
int curr_nid = local_nid; int page_nid = -1;
int target_nid; int target_nid;
bool migrated; bool migrated = false;
if (!pte_present(pteval)) if (!pte_present(pteval))
continue; continue;
if (!pte_numa(pteval)) if (!pte_numa(pteval))
@ -3649,25 +3644,19 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
if (unlikely(page_mapcount(page) != 1)) if (unlikely(page_mapcount(page) != 1))
continue; continue;
/* page_nid = page_to_nid(page);
* Note that the NUMA fault is later accounted to either target_nid = numa_migrate_prep(page, vma, addr, page_nid);
* the node that is currently running or where the page is pte_unmap_unlock(pte, ptl);
* migrated to. if (target_nid != -1) {
*/ migrated = migrate_misplaced_page(page, target_nid);
curr_nid = local_nid; if (migrated)
target_nid = numa_migrate_prep(page, vma, addr, page_nid = target_nid;
page_to_nid(page)); } else {
if (target_nid == -1) {
put_page(page); put_page(page);
continue;
} }
/* Migrate to the requested node */ if (page_nid != -1)
pte_unmap_unlock(pte, ptl); task_numa_fault(page_nid, 1, migrated);
migrated = migrate_misplaced_page(page, target_nid);
if (migrated)
curr_nid = target_nid;
task_numa_fault(curr_nid, 1, migrated);
pte = pte_offset_map_lock(mm, pmdp, addr, &ptl); pte = pte_offset_map_lock(mm, pmdp, addr, &ptl);
} }

View File

@ -1715,12 +1715,12 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
unlock_page(new_page); unlock_page(new_page);
put_page(new_page); /* Free it */ put_page(new_page); /* Free it */
unlock_page(page); /* Retake the callers reference and putback on LRU */
get_page(page);
putback_lru_page(page); putback_lru_page(page);
mod_zone_page_state(page_zone(page),
count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR); NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
isolated = 0; goto out_fail;
goto out;
} }
/* /*
@ -1737,9 +1737,9 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
entry = pmd_mkhuge(entry); entry = pmd_mkhuge(entry);
page_add_new_anon_rmap(new_page, vma, haddr); pmdp_clear_flush(vma, haddr, pmd);
set_pmd_at(mm, haddr, pmd, entry); set_pmd_at(mm, haddr, pmd, entry);
page_add_new_anon_rmap(new_page, vma, haddr);
update_mmu_cache_pmd(vma, address, &entry); update_mmu_cache_pmd(vma, address, &entry);
page_remove_rmap(page); page_remove_rmap(page);
/* /*
@ -1758,7 +1758,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR); count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR); count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);
out:
mod_zone_page_state(page_zone(page), mod_zone_page_state(page_zone(page),
NR_ISOLATED_ANON + page_lru, NR_ISOLATED_ANON + page_lru,
-HPAGE_PMD_NR); -HPAGE_PMD_NR);
@ -1767,6 +1766,10 @@ out:
out_fail: out_fail:
count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR); count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
out_dropref: out_dropref:
entry = pmd_mknonnuma(entry);
set_pmd_at(mm, haddr, pmd, entry);
update_mmu_cache_pmd(vma, address, &entry);
unlock_page(page); unlock_page(page);
put_page(page); put_page(page);
return 0; return 0;

View File

@ -148,7 +148,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
split_huge_page_pmd(vma, addr, pmd); split_huge_page_pmd(vma, addr, pmd);
else if (change_huge_pmd(vma, pmd, addr, newprot, else if (change_huge_pmd(vma, pmd, addr, newprot,
prot_numa)) { prot_numa)) {
pages += HPAGE_PMD_NR; pages++;
continue; continue;
} }
/* fall through */ /* fall through */

View File

@ -242,7 +242,7 @@ int walk_page_range(unsigned long addr, unsigned long end,
if (err) if (err)
break; break;
pgd++; pgd++;
} while (addr = next, addr != end); } while (addr = next, addr < end);
return err; return err;
} }

View File

@ -55,6 +55,7 @@ static struct sym_entry *table;
static unsigned int table_size, table_cnt; static unsigned int table_size, table_cnt;
static int all_symbols = 0; static int all_symbols = 0;
static char symbol_prefix_char = '\0'; static char symbol_prefix_char = '\0';
static unsigned long long kernel_start_addr = 0;
int token_profit[0x10000]; int token_profit[0x10000];
@ -65,7 +66,10 @@ unsigned char best_table_len[256];
static void usage(void) static void usage(void)
{ {
fprintf(stderr, "Usage: kallsyms [--all-symbols] [--symbol-prefix=<prefix char>] < in.map > out.S\n"); fprintf(stderr, "Usage: kallsyms [--all-symbols] "
"[--symbol-prefix=<prefix char>] "
"[--page-offset=<CONFIG_PAGE_OFFSET>] "
"< in.map > out.S\n");
exit(1); exit(1);
} }
@ -194,6 +198,9 @@ static int symbol_valid(struct sym_entry *s)
int i; int i;
int offset = 1; int offset = 1;
if (s->addr < kernel_start_addr)
return 0;
/* skip prefix char */ /* skip prefix char */
if (symbol_prefix_char && *(s->sym + 1) == symbol_prefix_char) if (symbol_prefix_char && *(s->sym + 1) == symbol_prefix_char)
offset++; offset++;
@ -646,6 +653,9 @@ int main(int argc, char **argv)
if ((*p == '"' && *(p+2) == '"') || (*p == '\'' && *(p+2) == '\'')) if ((*p == '"' && *(p+2) == '"') || (*p == '\'' && *(p+2) == '\''))
p++; p++;
symbol_prefix_char = *p; symbol_prefix_char = *p;
} else if (strncmp(argv[i], "--page-offset=", 14) == 0) {
const char *p = &argv[i][14];
kernel_start_addr = strtoull(p, NULL, 16);
} else } else
usage(); usage();
} }
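
The new --page-offset option above is parsed with strncmp() and strtoull(..., 16) and then used to drop symbols below the kernel start address. A reduced, runnable sketch of that handling, with made-up sample addresses:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(int argc, char **argv)
{
    unsigned long long kernel_start_addr = 0;

    for (int i = 1; i < argc; i++) {
        if (strncmp(argv[i], "--page-offset=", 14) == 0)
            kernel_start_addr = strtoull(&argv[i][14], NULL, 16);
    }

    /* symbols below the configured start address would be filtered out */
    unsigned long long syms[] = { 0xbfff0000ULL, 0xc0001000ULL };

    for (int i = 0; i < 2; i++)
        printf("0x%llx %s\n", syms[i],
               syms[i] < kernel_start_addr ? "skipped" : "kept");
    return 0;
}

Running the sketch with --page-offset=c0000000 keeps only the second address.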

View File

@ -82,6 +82,8 @@ kallsyms()
kallsymopt="${kallsymopt} --all-symbols" kallsymopt="${kallsymopt} --all-symbols"
fi fi
kallsymopt="${kallsymopt} --page-offset=$CONFIG_PAGE_OFFSET"
local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \ local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \
${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS}" ${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS}"

View File

@ -49,6 +49,8 @@ static struct snd_pcm *snd_pcm_get(struct snd_card *card, int device)
struct snd_pcm *pcm; struct snd_pcm *pcm;
list_for_each_entry(pcm, &snd_pcm_devices, list) { list_for_each_entry(pcm, &snd_pcm_devices, list) {
if (pcm->internal)
continue;
if (pcm->card == card && pcm->device == device) if (pcm->card == card && pcm->device == device)
return pcm; return pcm;
} }
@ -60,6 +62,8 @@ static int snd_pcm_next(struct snd_card *card, int device)
struct snd_pcm *pcm; struct snd_pcm *pcm;
list_for_each_entry(pcm, &snd_pcm_devices, list) { list_for_each_entry(pcm, &snd_pcm_devices, list) {
if (pcm->internal)
continue;
if (pcm->card == card && pcm->device > device) if (pcm->card == card && pcm->device > device)
return pcm->device; return pcm->device;
else if (pcm->card->number > card->number) else if (pcm->card->number > card->number)

View File

@ -4864,8 +4864,8 @@ static void hda_power_work(struct work_struct *work)
spin_unlock(&codec->power_lock); spin_unlock(&codec->power_lock);
state = hda_call_codec_suspend(codec, true); state = hda_call_codec_suspend(codec, true);
codec->pm_down_notified = 0; if (!codec->pm_down_notified &&
if (!bus->power_keep_link_on && (state & AC_PWRST_CLK_STOP_OK)) { !bus->power_keep_link_on && (state & AC_PWRST_CLK_STOP_OK)) {
codec->pm_down_notified = 1; codec->pm_down_notified = 1;
hda_call_pm_notify(bus, false); hda_call_pm_notify(bus, false);
} }

View File

@ -4475,9 +4475,11 @@ int snd_hda_gen_build_controls(struct hda_codec *codec)
true, &spec->vmaster_mute.sw_kctl); true, &spec->vmaster_mute.sw_kctl);
if (err < 0) if (err < 0)
return err; return err;
if (spec->vmaster_mute.hook) if (spec->vmaster_mute.hook) {
snd_hda_add_vmaster_hook(codec, &spec->vmaster_mute, snd_hda_add_vmaster_hook(codec, &spec->vmaster_mute,
spec->vmaster_mute_enum); spec->vmaster_mute_enum);
snd_hda_sync_vmaster_hook(&spec->vmaster_mute);
}
} }
free_kctls(spec); /* no longer needed */ free_kctls(spec); /* no longer needed */

View File

@ -968,6 +968,15 @@ static void ad1884_fixup_hp_eapd(struct hda_codec *codec,
} }
} }
static void ad1884_fixup_thinkpad(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
struct ad198x_spec *spec = codec->spec;
if (action == HDA_FIXUP_ACT_PRE_PROBE)
spec->gen.keep_eapd_on = 1;
}
/* set magic COEFs for dmic */ /* set magic COEFs for dmic */
static const struct hda_verb ad1884_dmic_init_verbs[] = { static const struct hda_verb ad1884_dmic_init_verbs[] = {
{0x01, AC_VERB_SET_COEF_INDEX, 0x13f7}, {0x01, AC_VERB_SET_COEF_INDEX, 0x13f7},
@ -979,6 +988,7 @@ enum {
AD1884_FIXUP_AMP_OVERRIDE, AD1884_FIXUP_AMP_OVERRIDE,
AD1884_FIXUP_HP_EAPD, AD1884_FIXUP_HP_EAPD,
AD1884_FIXUP_DMIC_COEF, AD1884_FIXUP_DMIC_COEF,
AD1884_FIXUP_THINKPAD,
AD1884_FIXUP_HP_TOUCHSMART, AD1884_FIXUP_HP_TOUCHSMART,
}; };
@ -997,6 +1007,12 @@ static const struct hda_fixup ad1884_fixups[] = {
.type = HDA_FIXUP_VERBS, .type = HDA_FIXUP_VERBS,
.v.verbs = ad1884_dmic_init_verbs, .v.verbs = ad1884_dmic_init_verbs,
}, },
[AD1884_FIXUP_THINKPAD] = {
.type = HDA_FIXUP_FUNC,
.v.func = ad1884_fixup_thinkpad,
.chained = true,
.chain_id = AD1884_FIXUP_DMIC_COEF,
},
[AD1884_FIXUP_HP_TOUCHSMART] = { [AD1884_FIXUP_HP_TOUCHSMART] = {
.type = HDA_FIXUP_VERBS, .type = HDA_FIXUP_VERBS,
.v.verbs = ad1884_dmic_init_verbs, .v.verbs = ad1884_dmic_init_verbs,
@ -1008,7 +1024,7 @@ static const struct hda_fixup ad1884_fixups[] = {
static const struct snd_pci_quirk ad1884_fixup_tbl[] = { static const struct snd_pci_quirk ad1884_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x2a82, "HP Touchsmart", AD1884_FIXUP_HP_TOUCHSMART), SND_PCI_QUIRK(0x103c, 0x2a82, "HP Touchsmart", AD1884_FIXUP_HP_TOUCHSMART),
SND_PCI_QUIRK_VENDOR(0x103c, "HP", AD1884_FIXUP_HP_EAPD), SND_PCI_QUIRK_VENDOR(0x103c, "HP", AD1884_FIXUP_HP_EAPD),
SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo Thinkpad", AD1884_FIXUP_DMIC_COEF), SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo Thinkpad", AD1884_FIXUP_THINKPAD),
{} {}
}; };

View File

@ -4623,6 +4623,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800), SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_ASUS_MODE4), SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_ASUS_MODE4),
SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_ASUS_MODE4),
SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT), SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
SND_PCI_QUIRK(0x105b, 0x0cd6, "Foxconn", ALC662_FIXUP_ASUS_MODE2), SND_PCI_QUIRK(0x105b, 0x0cd6, "Foxconn", ALC662_FIXUP_ASUS_MODE2),
SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD), SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),

View File

@ -530,6 +530,7 @@ static int hp_supply_event(struct snd_soc_dapm_widget *w,
hubs->hp_startup_mode); hubs->hp_startup_mode);
break; break;
} }
break;
case SND_SOC_DAPM_PRE_PMD: case SND_SOC_DAPM_PRE_PMD:
snd_soc_update_bits(codec, WM8993_CHARGE_PUMP_1, snd_soc_update_bits(codec, WM8993_CHARGE_PUMP_1,

View File

@ -1949,7 +1949,7 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
w->active ? "active" : "inactive"); w->active ? "active" : "inactive");
list_for_each_entry(p, &w->sources, list_sink) { list_for_each_entry(p, &w->sources, list_sink) {
if (p->connected && !p->connected(w, p->sink)) if (p->connected && !p->connected(w, p->source))
continue; continue;
if (p->connect) if (p->connect)
@ -3495,6 +3495,7 @@ int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm,
if (!w) { if (!w) {
dev_err(dapm->dev, "ASoC: Failed to create %s widget\n", dev_err(dapm->dev, "ASoC: Failed to create %s widget\n",
dai->driver->playback.stream_name); dai->driver->playback.stream_name);
return -ENOMEM;
} }
w->priv = dai; w->priv = dai;
@ -3513,6 +3514,7 @@ int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm,
if (!w) { if (!w) {
dev_err(dapm->dev, "ASoC: Failed to create %s widget\n", dev_err(dapm->dev, "ASoC: Failed to create %s widget\n",
dai->driver->capture.stream_name); dai->driver->capture.stream_name);
return -ENOMEM;
} }
w->priv = dai; w->priv = dai;

View File

@ -44,9 +44,9 @@ Following tests are defined (with perf commands):
perf record -c 123 kill (test-record-count) perf record -c 123 kill (test-record-count)
perf record -d kill (test-record-data) perf record -d kill (test-record-data)
perf record -F 100 kill (test-record-freq) perf record -F 100 kill (test-record-freq)
perf record -g -- kill (test-record-graph-default) perf record -g kill (test-record-graph-default)
perf record -g dwarf -- kill (test-record-graph-dwarf) perf record --call-graph dwarf kill (test-record-graph-dwarf)
perf record -g fp kill (test-record-graph-fp) perf record --call-graph fp kill (test-record-graph-fp)
perf record --group -e cycles,instructions kill (test-record-group) perf record --group -e cycles,instructions kill (test-record-group)
perf record -e '{cycles,instructions}' kill (test-record-group1) perf record -e '{cycles,instructions}' kill (test-record-group1)
perf record -D kill (test-record-no-delay) perf record -D kill (test-record-no-delay)

View File

@ -1,6 +1,6 @@
[config] [config]
command = record command = record
args = -g -- kill >/dev/null 2>&1 args = -g kill >/dev/null 2>&1
[event:base-record] [event:base-record]
sample_type=295 sample_type=295

View File

@ -1,6 +1,6 @@
[config] [config]
command = record command = record
args = -g dwarf -- kill >/dev/null 2>&1 args = --call-graph dwarf -- kill >/dev/null 2>&1
[event:base-record] [event:base-record]
sample_type=12583 sample_type=12583

View File

@ -1,6 +1,6 @@
[config] [config]
command = record command = record
args = -g fp kill >/dev/null 2>&1 args = --call-graph fp kill >/dev/null 2>&1
[event:base-record] [event:base-record]
sample_type=295 sample_type=295

View File

@ -117,7 +117,7 @@ static int hpp__color_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
struct perf_hpp *hpp, struct hist_entry *he) \ struct perf_hpp *hpp, struct hist_entry *he) \
{ \ { \
return __hpp__fmt(hpp, he, he_get_##_field, " %6.2f%%", \ return __hpp__fmt(hpp, he, he_get_##_field, " %6.2f%%", \
(hpp_snprint_fn)percent_color_snprintf, true); \ percent_color_snprintf, true); \
} }
#define __HPP_ENTRY_PERCENT_FN(_type, _field) \ #define __HPP_ENTRY_PERCENT_FN(_type, _field) \

View File

@ -318,8 +318,15 @@ int percent_color_fprintf(FILE *fp, const char *fmt, double percent)
return r; return r;
} }
int percent_color_snprintf(char *bf, size_t size, const char *fmt, double percent) int percent_color_snprintf(char *bf, size_t size, const char *fmt, ...)
{ {
const char *color = get_percent_color(percent); va_list args;
double percent;
const char *color;
va_start(args, fmt);
percent = va_arg(args, double);
va_end(args);
color = get_percent_color(percent);
return color_snprintf(bf, size, color, fmt, percent); return color_snprintf(bf, size, color, fmt, percent);
} }
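
The new variadic signature above peeks at the percentage via va_arg() before choosing a color and formatting. A self-contained sketch of that calling convention; get_percent_color() and color_snprintf() are replaced by trivial stand-ins:

#include <stdarg.h>
#include <stdio.h>

static int percent_snprintf(char *bf, size_t size, const char *fmt, ...)
{
    va_list args;
    double percent;
    const char *color;
    int n;

    /* first pass: recover the percentage to choose a color */
    va_start(args, fmt);
    percent = va_arg(args, double);
    va_end(args);
    color = percent >= 50.0 ? "red" : "green";  /* stand-in for get_percent_color() */

    /* second pass: actually format, as color_snprintf() would */
    n = snprintf(bf, size, "<%s>", color);
    va_start(args, fmt);
    n += vsnprintf(bf + n, size > (size_t)n ? size - n : 0, fmt, args);
    va_end(args);
    return n;
}

int main(void)
{
    char buf[64];

    percent_snprintf(buf, sizeof(buf), " %6.2f%%", 73.5);
    puts(buf);      /* <red>  73.50% */
    return 0;
}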

View File

@ -39,7 +39,7 @@ int color_fprintf(FILE *fp, const char *color, const char *fmt, ...);
int color_snprintf(char *bf, size_t size, const char *color, const char *fmt, ...); int color_snprintf(char *bf, size_t size, const char *color, const char *fmt, ...);
int color_fprintf_ln(FILE *fp, const char *color, const char *fmt, ...); int color_fprintf_ln(FILE *fp, const char *color, const char *fmt, ...);
int color_fwrite_lines(FILE *fp, const char *color, size_t count, const char *buf); int color_fwrite_lines(FILE *fp, const char *color, size_t count, const char *buf);
int percent_color_snprintf(char *bf, size_t size, const char *fmt, double percent); int percent_color_snprintf(char *bf, size_t size, const char *fmt, ...);
int percent_color_fprintf(FILE *fp, const char *fmt, double percent); int percent_color_fprintf(FILE *fp, const char *fmt, double percent);
const char *get_percent_color(double percent); const char *get_percent_color(double percent);

View File

@ -3091,7 +3091,7 @@ static const struct file_operations *stat_fops[] = {
static int kvm_init_debug(void) static int kvm_init_debug(void)
{ {
int r = -EFAULT; int r = -EEXIST;
struct kvm_stats_debugfs_item *p; struct kvm_stats_debugfs_item *p;
kvm_debugfs_dir = debugfs_create_dir("kvm", NULL); kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);