Merge branch 'v3.5-for-usb' of git://git.kernel.org/pub/scm/linux/kernel/git/kgene/linux-samsung into usb-next
commit 32535bd563
@@ -1974,7 +1974,9 @@ S: Maintained
 F: drivers/net/ethernet/ti/cpmac.c

 CPU FREQUENCY DRIVERS
+M: Rafael J. Wysocki <rjw@sisk.pl>
 L: cpufreq@vger.kernel.org
+L: linux-pm@vger.kernel.org
 S: Maintained
 F: drivers/cpufreq/
 F: include/linux/cpufreq.h
@@ -4040,6 +4042,7 @@ F: Documentation/scsi/53c700.txt
 F: drivers/scsi/53c700*

 LED SUBSYSTEM
+M: Bryan Wu <bryan.wu@canonical.com>
 M: Richard Purdie <rpurdie@rpsys.net>
 S: Maintained
 F: drivers/leds/

Makefile | 2 +-
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 4
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Saber-toothed Squirrel

 # *DOCUMENTATION*
@@ -906,27 +906,14 @@ long arch_ptrace(struct task_struct *child, long request,
 return ret;
 }

-#ifdef __ARMEB__
-#define AUDIT_ARCH_NR AUDIT_ARCH_ARMEB
-#else
-#define AUDIT_ARCH_NR AUDIT_ARCH_ARM
-#endif
-
 asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
 {
 unsigned long ip;

-/*
-* Save IP. IP is used to denote syscall entry/exit:
-* IP = 0 -> entry, = 1 -> exit
-*/
-ip = regs->ARM_ip;
-regs->ARM_ip = why;
-
-if (!ip)
+if (why)
 audit_syscall_exit(regs);
 else
-audit_syscall_entry(AUDIT_ARCH_NR, scno, regs->ARM_r0,
+audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0,
 regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);

 if (!test_thread_flag(TIF_SYSCALL_TRACE))
@@ -936,6 +923,13 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)

 current_thread_info()->syscall = scno;

+/*
+* IP is used to denote syscall entry/exit:
+* IP = 0 -> entry, =1 -> exit
+*/
+ip = regs->ARM_ip;
+regs->ARM_ip = why;
+
 /* the 0x80 provides a way for the tracing parent to distinguish
 between a syscall stop and SIGTRAP delivery */
 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
@@ -251,8 +251,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 struct mm_struct *mm = &init_mm;
 unsigned int cpu = smp_processor_id();

-printk("CPU%u: Booted secondary processor\n", cpu);
-
 /*
 * All kernel threads share the same mm context; grab a
 * reference and switch to it.
@@ -264,6 +262,8 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 enter_lazy_tlb(mm, current);
 local_flush_tlb_all();

+printk("CPU%u: Booted secondary processor\n", cpu);
+
 cpu_init();
 preempt_disable();
 trace_hardirqs_off();
@@ -115,7 +115,7 @@ int kernel_execve(const char *filename,
 "Ir" (THREAD_START_SP - sizeof(regs)),
 "r" (&regs),
 "Ir" (sizeof(regs))
-: "r0", "r1", "r2", "r3", "ip", "lr", "memory");
+: "r0", "r1", "r2", "r3", "r8", "r9", "ip", "lr", "memory");

 out:
 return ret;
@@ -232,6 +232,9 @@ config MACH_ARMLEX4210
 config MACH_UNIVERSAL_C210
 bool "Mobile UNIVERSAL_C210 Board"
 select CPU_EXYNOS4210
+select S5P_HRT
+select CLKSRC_MMIO
+select HAVE_SCHED_CLOCK
 select S5P_GPIO_INT
 select S5P_DEV_FIMC0
 select S5P_DEV_FIMC1
@@ -247,6 +250,7 @@ config MACH_UNIVERSAL_C210
 select S3C_DEV_I2C1
 select S3C_DEV_I2C3
 select S3C_DEV_I2C5
+select S3C_DEV_USB_HSOTG
 select S5P_DEV_I2C_HDMIPHY
 select S5P_DEV_MFC
 select S5P_DEV_ONENAND
@@ -259,6 +263,7 @@ config MACH_UNIVERSAL_C210
 select EXYNOS4_SETUP_SDHCI
 select EXYNOS4_SETUP_FIMC
 select S5P_SETUP_MIPIPHY
+select EXYNOS4_SETUP_USB_PHY
 help
 Machine support for Samsung Mobile Universal S5PC210 Reference
 Board.
@@ -277,6 +282,7 @@ config MACH_NURI
 select S3C_DEV_I2C3
 select S3C_DEV_I2C5
 select S3C_DEV_I2C6
+select S3C_DEV_USB_HSOTG
 select S5P_DEV_CSIS0
 select S5P_DEV_JPEG
 select S5P_DEV_FIMC0
@@ -678,7 +678,7 @@ static struct clk exynos5_clk_pdma1 = {
 .name = "dma",
 .devname = "dma-pl330.1",
 .enable = exynos5_clk_ip_fsys_ctrl,
-.ctrlbit = (1 << 1),
+.ctrlbit = (1 << 2),
 };

 static struct clk exynos5_clk_mdma1 = {
@@ -189,6 +189,7 @@
 #define IRQ_IIC7 EXYNOS4_IRQ_IIC7

 #define IRQ_USB_HOST EXYNOS4_IRQ_USB_HOST
+#define IRQ_OTG EXYNOS4_IRQ_USB_HSOTG

 #define IRQ_HSMMC0 EXYNOS4_IRQ_HSMMC0
 #define IRQ_HSMMC1 EXYNOS4_IRQ_HSMMC1
@@ -130,6 +130,9 @@
 #define EXYNOS4_PA_HSMMC(x) (0x12510000 + ((x) * 0x10000))
 #define EXYNOS4_PA_DWMCI 0x12550000

+#define EXYNOS4_PA_HSOTG 0x12480000
+#define EXYNOS4_PA_USB_HSPHY 0x125B0000
+
 #define EXYNOS4_PA_SATA 0x12560000
 #define EXYNOS4_PA_SATAPHY 0x125D0000
 #define EXYNOS4_PA_SATAPHY_CTRL 0x126B0000
@@ -186,6 +189,7 @@
 #define S3C_PA_SPI0 EXYNOS4_PA_SPI0
 #define S3C_PA_SPI1 EXYNOS4_PA_SPI1
 #define S3C_PA_SPI2 EXYNOS4_PA_SPI2
+#define S3C_PA_USB_HSOTG EXYNOS4_PA_HSOTG

 #define S5P_PA_EHCI EXYNOS4_PA_EHCI
 #define S5P_PA_FIMC0 EXYNOS4_PA_FIMC0
@@ -163,6 +163,9 @@
 #define S5P_CHECK_SLEEP 0x00000BAD

 /* Only for EXYNOS4210 */
+#define S5P_USBDEVICE_PHY_CONTROL S5P_PMUREG(0x0704)
+#define S5P_USBDEVICE_PHY_ENABLE (1 << 0)
+
 #define S5P_USBHOST_PHY_CONTROL S5P_PMUREG(0x0708)
 #define S5P_USBHOST_PHY_ENABLE (1 << 0)

@@ -352,6 +352,7 @@ static struct regulator_consumer_supply __initdata max8997_ldo1_[] = {
 REGULATOR_SUPPLY("vdd", "s5p-adc"), /* Used by CPU's ADC drv */
 };
 static struct regulator_consumer_supply __initdata max8997_ldo3_[] = {
+REGULATOR_SUPPLY("vusb_d", "s3c-hsotg"), /* USB */
 REGULATOR_SUPPLY("vdd11", "s5p-mipi-csis.0"), /* MIPI */
 };
 static struct regulator_consumer_supply __initdata max8997_ldo4_[] = {
@@ -367,7 +368,7 @@ static struct regulator_consumer_supply __initdata max8997_ldo7_[] = {
 REGULATOR_SUPPLY("dig_18", "0-001f"), /* HCD803 */
 };
 static struct regulator_consumer_supply __initdata max8997_ldo8_[] = {
-REGULATOR_SUPPLY("vusb_d", NULL), /* Used by CPU */
+REGULATOR_SUPPLY("vusb_a", "s3c-hsotg"), /* USB */
 REGULATOR_SUPPLY("vdac", NULL), /* Used by CPU */
 };
 static struct regulator_consumer_supply __initdata max8997_ldo11_[] = {
@@ -823,6 +824,7 @@ static struct regulator_init_data __initdata max8997_esafeout1_data = {
 .constraints = {
 .name = "SAFEOUT1",
 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+.always_on = 1,
 .state_mem = {
 .disabled = 1,
 },
@@ -1080,6 +1082,9 @@ static void __init nuri_ehci_init(void)
 s5p_ehci_set_platdata(pdata);
 }

+/* USB OTG */
+static struct s3c_hsotg_plat nuri_hsotg_pdata;
+
 /* CAMERA */
 static struct regulator_consumer_supply cam_vt_cam15_supply =
 REGULATOR_SUPPLY("vdd_core", "6-003c");
@@ -1292,6 +1297,7 @@ static struct platform_device *nuri_devices[] __initdata = {
 &s5p_device_mfc_l,
 &s5p_device_mfc_r,
 &s5p_device_fimc_md,
+&s3c_device_usb_hsotg,

 /* NURI Devices */
 &nuri_gpio_keys,
@@ -1340,6 +1346,7 @@ static void __init nuri_machine_init(void)
 nuri_camera_init();

 nuri_ehci_init();
+s3c_hsotg_set_platdata(&nuri_hsotg_pdata);

 /* Last */
 platform_add_devices(nuri_devices, ARRAY_SIZE(nuri_devices));
@@ -41,6 +41,7 @@
 #include <plat/pd.h>
 #include <plat/regs-fb-v4.h>
 #include <plat/fimc-core.h>
+#include <plat/s5p-time.h>
 #include <plat/camport.h>
 #include <plat/mipi_csis.h>

@@ -205,6 +206,7 @@ static struct regulator_init_data lp3974_ldo2_data = {
 };

 static struct regulator_consumer_supply lp3974_ldo3_consumer[] = {
+REGULATOR_SUPPLY("vusb_a", "s3c-hsotg"),
 REGULATOR_SUPPLY("vdd", "exynos4-hdmi"),
 REGULATOR_SUPPLY("vdd_pll", "exynos4-hdmi"),
 REGULATOR_SUPPLY("vdd11", "s5p-mipi-csis.0"),
@@ -290,6 +292,7 @@ static struct regulator_init_data lp3974_ldo7_data = {
 };

 static struct regulator_consumer_supply lp3974_ldo8_consumer[] = {
+REGULATOR_SUPPLY("vusb_d", "s3c-hsotg"),
 REGULATOR_SUPPLY("vdd33a_dac", "s5p-sdo"),
 };

@@ -486,7 +489,10 @@ static struct regulator_init_data lp3974_vichg_data = {
 static struct regulator_init_data lp3974_esafeout1_data = {
 .constraints = {
 .name = "SAFEOUT1",
+.min_uV = 4800000,
+.max_uV = 4800000,
 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+.always_on = 1,
 .state_mem = {
 .enabled = 1,
 },
@@ -994,6 +1000,9 @@ static struct gpio universal_camera_gpios[] = {
 { GPIO_CAM_VGA_NSTBY, GPIOF_OUT_INIT_LOW, "CAM_VGA_NSTBY" },
 };

+/* USB OTG */
+static struct s3c_hsotg_plat universal_hsotg_pdata;
+
 static void __init universal_camera_init(void)
 {
 s3c_set_platdata(&mipi_csis_platdata, sizeof(mipi_csis_platdata),
@@ -1049,6 +1058,7 @@ static struct platform_device *universal_devices[] __initdata = {
 &s5p_device_onenand,
 &s5p_device_fimd0,
 &s5p_device_jpeg,
+&s3c_device_usb_hsotg,
 &s5p_device_mfc,
 &s5p_device_mfc_l,
 &s5p_device_mfc_r,
@@ -1064,6 +1074,7 @@ static void __init universal_map_io(void)
 exynos_init_io(NULL, 0);
 s3c24xx_init_clocks(24000000);
 s3c24xx_init_uarts(universal_uartcfgs, ARRAY_SIZE(universal_uartcfgs));
+s5p_set_timer_source(S5P_PWM2, S5P_PWM4);
 }

 static void s5p_tv_setup(void)
@@ -1101,6 +1112,7 @@ static void __init universal_machine_init(void)
 i2c_register_board_info(I2C_GPIO_BUS_12, i2c_gpio12_devs,
 ARRAY_SIZE(i2c_gpio12_devs));

+s3c_hsotg_set_platdata(&universal_hsotg_pdata);
 universal_camera_init();

 /* Last */
@@ -1114,7 +1126,7 @@ MACHINE_START(UNIVERSAL_C210, "UNIVERSAL_C210")
 .map_io = universal_map_io,
 .handle_irq = gic_handle_irq,
 .init_machine = universal_machine_init,
-.timer = &exynos4_timer,
+.timer = &s5p_timer,
 .reserve = &universal_reserve,
 .restart = exynos4_restart,
 MACHINE_END
@@ -26,11 +26,71 @@ static int exynos4_usb_host_phy_is_on(void)
 return (readl(EXYNOS4_PHYPWR) & PHY1_STD_ANALOG_POWERDOWN) ? 0 : 1;
 }

-static int exynos4_usb_phy1_init(struct platform_device *pdev)
+static void exynos4210_usb_phy_clkset(struct platform_device *pdev)
 {
-struct clk *otg_clk;
 struct clk *xusbxti_clk;
 u32 phyclk;
+
+/* set clock frequency for PLL */
+phyclk = readl(EXYNOS4_PHYCLK) & ~CLKSEL_MASK;
+
+xusbxti_clk = clk_get(&pdev->dev, "xusbxti");
+if (xusbxti_clk && !IS_ERR(xusbxti_clk)) {
+switch (clk_get_rate(xusbxti_clk)) {
+case 12 * MHZ:
+phyclk |= CLKSEL_12M;
+break;
+case 24 * MHZ:
+phyclk |= CLKSEL_24M;
+break;
+default:
+case 48 * MHZ:
+/* default reference clock */
+break;
+}
+clk_put(xusbxti_clk);
+}
+
+writel(phyclk, EXYNOS4_PHYCLK);
+}
+
+static int exynos4210_usb_phy0_init(struct platform_device *pdev)
+{
+u32 rstcon;
+
+writel(readl(S5P_USBDEVICE_PHY_CONTROL) | S5P_USBDEVICE_PHY_ENABLE,
+S5P_USBDEVICE_PHY_CONTROL);
+
+exynos4210_usb_phy_clkset(pdev);
+
+/* set to normal PHY0 */
+writel((readl(EXYNOS4_PHYPWR) & ~PHY0_NORMAL_MASK), EXYNOS4_PHYPWR);
+
+/* reset PHY0 and Link */
+rstcon = readl(EXYNOS4_RSTCON) | PHY0_SWRST_MASK;
+writel(rstcon, EXYNOS4_RSTCON);
+udelay(10);
+
+rstcon &= ~PHY0_SWRST_MASK;
+writel(rstcon, EXYNOS4_RSTCON);
+
+return 0;
+}
+
+static int exynos4210_usb_phy0_exit(struct platform_device *pdev)
+{
+writel((readl(EXYNOS4_PHYPWR) | PHY0_ANALOG_POWERDOWN |
+PHY0_OTG_DISABLE), EXYNOS4_PHYPWR);
+
+writel(readl(S5P_USBDEVICE_PHY_CONTROL) & ~S5P_USBDEVICE_PHY_ENABLE,
+S5P_USBDEVICE_PHY_CONTROL);
+
+return 0;
+}
+
+static int exynos4210_usb_phy1_init(struct platform_device *pdev)
+{
+struct clk *otg_clk;
 u32 rstcon;
 int err;

@@ -54,27 +114,7 @@ static int exynos4_usb_phy1_init(struct platform_device *pdev)
 writel(readl(S5P_USBHOST_PHY_CONTROL) | S5P_USBHOST_PHY_ENABLE,
 S5P_USBHOST_PHY_CONTROL);

-/* set clock frequency for PLL */
-phyclk = readl(EXYNOS4_PHYCLK) & ~CLKSEL_MASK;
-
-xusbxti_clk = clk_get(&pdev->dev, "xusbxti");
-if (xusbxti_clk && !IS_ERR(xusbxti_clk)) {
-switch (clk_get_rate(xusbxti_clk)) {
-case 12 * MHZ:
-phyclk |= CLKSEL_12M;
-break;
-case 24 * MHZ:
-phyclk |= CLKSEL_24M;
-break;
-default:
-case 48 * MHZ:
-/* default reference clock */
-break;
-}
-clk_put(xusbxti_clk);
-}
-
-writel(phyclk, EXYNOS4_PHYCLK);
+exynos4210_usb_phy_clkset(pdev);

 /* floating prevention logic: disable */
 writel((readl(EXYNOS4_PHY1CON) | FPENABLEN), EXYNOS4_PHY1CON);
@@ -102,7 +142,7 @@ static int exynos4_usb_phy1_init(struct platform_device *pdev)
 return 0;
 }

-static int exynos4_usb_phy1_exit(struct platform_device *pdev)
+static int exynos4210_usb_phy1_exit(struct platform_device *pdev)
 {
 struct clk *otg_clk;
 int err;
@@ -136,16 +176,20 @@ static int exynos4_usb_phy1_exit(struct platform_device *pdev)

 int s5p_usb_phy_init(struct platform_device *pdev, int type)
 {
-if (type == S5P_USB_PHY_HOST)
-return exynos4_usb_phy1_init(pdev);
+if (type == S5P_USB_PHY_DEVICE)
+return exynos4210_usb_phy0_init(pdev);
+else if (type == S5P_USB_PHY_HOST)
+return exynos4210_usb_phy1_init(pdev);

 return -EINVAL;
 }

 int s5p_usb_phy_exit(struct platform_device *pdev, int type)
 {
-if (type == S5P_USB_PHY_HOST)
-return exynos4_usb_phy1_exit(pdev);
+if (type == S5P_USB_PHY_DEVICE)
+return exynos4210_usb_phy0_exit(pdev);
+else if (type == S5P_USB_PHY_HOST)
+return exynos4210_usb_phy1_exit(pdev);

 return -EINVAL;
 }
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
+#include <linux/kexec.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 #include <mach/bridge-regs.h>
@@ -48,7 +48,7 @@ static irqreturn_t deferred_fiq(int irq, void *dev_id)
 struct irq_chip *irq_chip = NULL;
 int gpio, irq_num, fiq_count;

-irq_desc = irq_to_desc(IH_GPIO_BASE);
+irq_desc = irq_to_desc(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK));
 if (irq_desc)
 irq_chip = irq_desc->irq_data.chip;

@@ -641,7 +641,7 @@ static struct regulator_consumer_supply dummy_supplies[] = {

 static void __init igep_init(void)
 {
-regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
+regulator_register_fixed(1, dummy_supplies, ARRAY_SIZE(dummy_supplies));
 omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);

 /* Get IGEP2 hardware revision */
@@ -941,10 +941,10 @@
 #define OMAP4_DSI2_LANEENABLE_MASK (0x7 << 29)
 #define OMAP4_DSI1_LANEENABLE_SHIFT 24
 #define OMAP4_DSI1_LANEENABLE_MASK (0x1f << 24)
-#define OMAP4_DSI2_PIPD_SHIFT 19
-#define OMAP4_DSI2_PIPD_MASK (0x1f << 19)
-#define OMAP4_DSI1_PIPD_SHIFT 14
-#define OMAP4_DSI1_PIPD_MASK (0x1f << 14)
+#define OMAP4_DSI1_PIPD_SHIFT 19
+#define OMAP4_DSI1_PIPD_MASK (0x1f << 19)
+#define OMAP4_DSI2_PIPD_SHIFT 14
+#define OMAP4_DSI2_PIPD_MASK (0x1f << 14)

 /* CONTROL_MCBSPLP */
 #define OMAP4_ALBCTRLRX_FSX_SHIFT 31
@@ -65,8 +65,8 @@
 #define MPP8_GIGE MPP(8, 0x1, 0, 0, 1, 1, 1)

 #define MPP9_UNUSED MPP(9, 0x0, 0, 0, 1, 1, 1)
-#define MPP9_GPIO MPP(9, 0x0, 0, 0, 1, 1, 1)
-#define MPP9_GIGE MPP(9, 0x1, 1, 1, 1, 1, 1)
+#define MPP9_GPIO MPP(9, 0x0, 1, 1, 1, 1, 1)
+#define MPP9_GIGE MPP(9, 0x1, 0, 0, 1, 1, 1)

 #define MPP10_UNUSED MPP(10, 0x0, 0, 0, 1, 1, 1)
 #define MPP10_GPIO MPP(10, 0x0, 1, 1, 1, 1, 1)
@@ -85,6 +85,7 @@ config MACH_AQUILA
 select S5P_DEV_ONENAND
 select S5PV210_SETUP_FB_24BPP
 select S5PV210_SETUP_SDHCI
+select S5PV210_SETUP_USB_PHY
 help
 Machine support for the Samsung Aquila target based on S5PC110 SoC

@@ -279,6 +279,9 @@ static void __init goni_tsp_init(void)
 i2c2_devs[0].irq = gpio_to_irq(gpio);
 }

+/* USB OTG */
+static struct s3c_hsotg_plat goni_hsotg_pdata;
+
 static void goni_camera_init(void)
 {
 s5pv210_fimc_setup_gpio(S5P_CAMPORT_A);
@@ -942,6 +945,8 @@ static void __init goni_machine_init(void)
 s3c_set_platdata(&goni_fimc_md_platdata, sizeof(goni_fimc_md_platdata),
 &s5p_device_fimc_md);

+s3c_hsotg_set_platdata(&goni_hsotg_pdata);
+
 goni_camera_init();

 /* SPI */
@@ -365,23 +365,13 @@ static struct platform_device mipidsi0_device = {
 };

 /* SDHI0 */
-static irqreturn_t ag5evm_sdhi0_gpio_cd(int irq, void *arg)
-{
-struct device *dev = arg;
-struct sh_mobile_sdhi_info *info = dev->platform_data;
-struct tmio_mmc_data *pdata = info->pdata;
-
-tmio_mmc_cd_wakeup(pdata);
-
-return IRQ_HANDLED;
-}
-
 static struct sh_mobile_sdhi_info sdhi0_info = {
 .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX,
 .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX,
-.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT,
+.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_USE_GPIO_CD,
 .tmio_caps = MMC_CAP_SD_HIGHSPEED,
 .tmio_ocr_mask = MMC_VDD_27_28 | MMC_VDD_28_29,
+.cd_gpio = GPIO_PORT251,
 };

 static struct resource sdhi0_resources[] = {
@@ -557,7 +547,6 @@ static void __init ag5evm_init(void)
 lcd_backlight_reset();

 /* enable SDHI0 on CN15 [SD I/F] */
-gpio_request(GPIO_FN_SDHICD0, NULL);
 gpio_request(GPIO_FN_SDHIWP0, NULL);
 gpio_request(GPIO_FN_SDHICMD0, NULL);
 gpio_request(GPIO_FN_SDHICLK0, NULL);
@@ -566,13 +555,6 @@ static void __init ag5evm_init(void)
 gpio_request(GPIO_FN_SDHID0_1, NULL);
 gpio_request(GPIO_FN_SDHID0_0, NULL);
-
-if (!request_irq(intcs_evt2irq(0x3c0), ag5evm_sdhi0_gpio_cd,
-IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
-"sdhi0 cd", &sdhi0_device.dev))
-sdhi0_info.tmio_flags |= TMIO_MMC_HAS_COLD_CD;
-else
-pr_warn("Unable to setup SDHI0 GPIO IRQ\n");

 /* enable SDHI1 on CN4 [WLAN I/F] */
 gpio_request(GPIO_FN_SDHICLK1, NULL);
 gpio_request(GPIO_FN_SDHICMD1_PU, NULL);
@@ -1011,21 +1011,12 @@ static int slot_cn7_get_cd(struct platform_device *pdev)
 }

 /* SDHI0 */
-static irqreturn_t mackerel_sdhi0_gpio_cd(int irq, void *arg)
-{
-struct device *dev = arg;
-struct sh_mobile_sdhi_info *info = dev->platform_data;
-struct tmio_mmc_data *pdata = info->pdata;
-
-tmio_mmc_cd_wakeup(pdata);
-
-return IRQ_HANDLED;
-}
-
 static struct sh_mobile_sdhi_info sdhi0_info = {
 .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX,
 .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX,
+.tmio_flags = TMIO_MMC_USE_GPIO_CD,
 .tmio_caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
+.cd_gpio = GPIO_PORT172,
 };

 static struct resource sdhi0_resources[] = {
@@ -1384,7 +1375,6 @@ static void __init mackerel_init(void)
 {
 u32 srcr4;
 struct clk *clk;
-int ret;

 /* External clock source */
 clk_set_rate(&sh7372_dv_clki_clk, 27000000);
@@ -1481,7 +1471,6 @@ static void __init mackerel_init(void)
 irq_set_irq_type(IRQ21, IRQ_TYPE_LEVEL_HIGH);

 /* enable SDHI0 */
-gpio_request(GPIO_FN_SDHICD0, NULL);
 gpio_request(GPIO_FN_SDHIWP0, NULL);
 gpio_request(GPIO_FN_SDHICMD0, NULL);
 gpio_request(GPIO_FN_SDHICLK0, NULL);
@@ -1490,13 +1479,6 @@ static void __init mackerel_init(void)
 gpio_request(GPIO_FN_SDHID0_1, NULL);
 gpio_request(GPIO_FN_SDHID0_0, NULL);
-
-ret = request_irq(evt2irq(0x3340), mackerel_sdhi0_gpio_cd,
-IRQF_TRIGGER_FALLING, "sdhi0 cd", &sdhi0_device.dev);
-if (!ret)
-sdhi0_info.tmio_flags |= TMIO_MMC_HAS_COLD_CD;
-else
-pr_err("Cannot get IRQ #%d: %d\n", evt2irq(0x3340), ret);

 #if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
 /* enable SDHI1 */
 gpio_request(GPIO_FN_SDHICMD1, NULL);
@@ -16,6 +16,59 @@

 __CPUINIT

+/* Cache invalidation nicked from arch/arm/mach-imx/head-v7.S, thanks!
+*
+* The secondary kernel init calls v7_flush_dcache_all before it enables
+* the L1; however, the L1 comes out of reset in an undefined state, so
+* the clean + invalidate performed by v7_flush_dcache_all causes a bunch
+* of cache lines with uninitialized data and uninitialized tags to get
+* written out to memory, which does really unpleasant things to the main
+* processor. We fix this by performing an invalidate, rather than a
+* clean + invalidate, before jumping into the kernel.
+*
+* This funciton is cloned from arch/arm/mach-tegra/headsmp.S, and needs
+* to be called for both secondary cores startup and primary core resume
+* procedures. Ideally, it should be moved into arch/arm/mm/cache-v7.S.
+*/
+ENTRY(v7_invalidate_l1)
+mov r0, #0
+mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+mcr p15, 2, r0, c0, c0, 0
+mrc p15, 1, r0, c0, c0, 0
+
+ldr r1, =0x7fff
+and r2, r1, r0, lsr #13
+
+ldr r1, =0x3ff
+
+and r3, r1, r0, lsr #3 @ NumWays - 1
+add r2, r2, #1 @ NumSets
+
+and r0, r0, #0x7
+add r0, r0, #4 @ SetShift
+
+clz r1, r3 @ WayShift
+add r4, r3, #1 @ NumWays
+1: sub r2, r2, #1 @ NumSets--
+mov r3, r4 @ Temp = NumWays
+2: subs r3, r3, #1 @ Temp--
+mov r5, r3, lsl r1
+mov r6, r2, lsl r0
+orr r5, r5, r6 @ Reg = (Temp<<WayShift)|(NumSets<<SetShift)
+mcr p15, 0, r5, c7, c6, 2
+bgt 2b
+cmp r2, #0
+bgt 1b
+dsb
+isb
+mov pc, lr
+ENDPROC(v7_invalidate_l1)
+
+ENTRY(shmobile_invalidate_start)
+bl v7_invalidate_l1
+b secondary_startup
+ENDPROC(shmobile_invalidate_start)
+
 /*
 * Reset vector for secondary CPUs.
 * This will be mapped at address 0 by SBAR register.
@@ -24,4 +77,5 @@
 .align 12
 ENTRY(shmobile_secondary_vector)
 ldr pc, 1f
-1: .long secondary_startup - PAGE_OFFSET + PLAT_PHYS_OFFSET
+1: .long shmobile_invalidate_start - PAGE_OFFSET + PLAT_PHYS_OFFSET
+ENDPROC(shmobile_secondary_vector)
@@ -4,7 +4,6 @@
 extern void shmobile_earlytimer_init(void);
 extern struct sys_timer shmobile_timer;
 struct twd_local_timer;
-void shmobile_twd_init(struct twd_local_timer *twd_local_timer);
 extern void shmobile_setup_console(void);
 extern void shmobile_secondary_vector(void);
 extern int shmobile_platform_cpu_kill(unsigned int cpu);
@@ -82,5 +81,6 @@ extern int r8a7779_platform_cpu_kill(unsigned int cpu);
 extern void r8a7779_secondary_init(unsigned int cpu);
 extern int r8a7779_boot_secondary(unsigned int cpu);
 extern void r8a7779_smp_prepare_cpus(void);
+extern void r8a7779_register_twd(void);

 #endif /* __ARCH_MACH_COMMON_H */
@@ -262,10 +262,14 @@ void __init r8a7779_add_standard_devices(void)
 ARRAY_SIZE(r8a7779_late_devices));
 }

+/* do nothing for !CONFIG_SMP or !CONFIG_HAVE_TWD */
+void __init __weak r8a7779_register_twd(void) { }
+
 static void __init r8a7779_earlytimer_init(void)
 {
 r8a7779_clock_init();
 shmobile_earlytimer_init();
+r8a7779_register_twd();
 }

 void __init r8a7779_add_early_devices(void)
@@ -688,10 +688,14 @@ void __init sh73a0_add_standard_devices(void)
 ARRAY_SIZE(sh73a0_late_devices));
 }

+/* do nothing for !CONFIG_SMP or !CONFIG_HAVE_TWD */
+void __init __weak sh73a0_register_twd(void) { }
+
 static void __init sh73a0_earlytimer_init(void)
 {
 sh73a0_clock_init();
 shmobile_earlytimer_init();
+sh73a0_register_twd();
 }

 void __init sh73a0_add_early_devices(void)
@@ -64,8 +64,15 @@ static void __iomem *scu_base_addr(void)
 static DEFINE_SPINLOCK(scu_lock);
 static unsigned long tmp;

+#ifdef CONFIG_HAVE_ARM_TWD
 static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, 0xf0000600, 29);

+void __init r8a7779_register_twd(void)
+{
+twd_local_timer_register(&twd_local_timer);
+}
+#endif
+
 static void modify_scu_cpu_psr(unsigned long set, unsigned long clr)
 {
 void __iomem *scu_base = scu_base_addr();
@@ -84,7 +91,6 @@ unsigned int __init r8a7779_get_core_count(void)
 {
 void __iomem *scu_base = scu_base_addr();

-shmobile_twd_init(&twd_local_timer);
 return scu_get_core_count(scu_base);
 }

@@ -42,7 +42,13 @@ static void __iomem *scu_base_addr(void)
 static DEFINE_SPINLOCK(scu_lock);
 static unsigned long tmp;

+#ifdef CONFIG_HAVE_ARM_TWD
 static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, 0xf0000600, 29);
+void __init sh73a0_register_twd(void)
+{
+twd_local_timer_register(&twd_local_timer);
+}
+#endif

 static void modify_scu_cpu_psr(unsigned long set, unsigned long clr)
 {
@@ -62,7 +68,6 @@ unsigned int __init sh73a0_get_core_count(void)
 {
 void __iomem *scu_base = scu_base_addr();

-shmobile_twd_init(&twd_local_timer);
 return scu_get_core_count(scu_base);
 }

@@ -46,15 +46,6 @@ static void __init shmobile_timer_init(void)
 {
 }

-void __init shmobile_twd_init(struct twd_local_timer *twd_local_timer)
-{
-#ifdef CONFIG_HAVE_ARM_TWD
-int err = twd_local_timer_register(twd_local_timer);
-if (err)
-pr_err("twd_local_timer_register failed %d\n", err);
-#endif
-}
-
 struct sys_timer shmobile_timer = {
 .init = shmobile_timer_init,
 };
@@ -1174,7 +1174,7 @@ out:

 bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
 {
-return irqchip_in_kernel(vcpu->kcm) == (vcpu->arch.apic != NULL);
+return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
 }

 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
@@ -22,7 +22,7 @@

 /***************************************************************************/

-#ifdef CONFIG_SPI_COLDFIRE_QSPI
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)

 static void __init m520x_qspi_init(void)
 {
@@ -35,7 +35,7 @@ static void __init m520x_qspi_init(void)
 writew(par, MCF_GPIO_PAR_UART);
 }

-#endif /* CONFIG_SPI_COLDFIRE_QSPI */
+#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */

 /***************************************************************************/

@@ -79,7 +79,7 @@ void __init config_BSP(char *commandp, int size)
 mach_sched_init = hw_timer_init;
 m520x_uarts_init();
 m520x_fec_init();
-#ifdef CONFIG_SPI_COLDFIRE_QSPI
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
 m520x_qspi_init();
 #endif
 }
@@ -22,7 +22,7 @@

 /***************************************************************************/

-#ifdef CONFIG_SPI_COLDFIRE_QSPI
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)

 static void __init m523x_qspi_init(void)
 {
@@ -36,7 +36,7 @@ static void __init m523x_qspi_init(void)
 writew(par, MCFGPIO_PAR_TIMER);
 }

-#endif /* CONFIG_SPI_COLDFIRE_QSPI */
+#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */

 /***************************************************************************/

@@ -58,7 +58,7 @@ void __init config_BSP(char *commandp, int size)
 {
 mach_sched_init = hw_timer_init;
 m523x_fec_init();
-#ifdef CONFIG_SPI_COLDFIRE_QSPI
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
 m523x_qspi_init();
 #endif
 }
@@ -51,7 +51,7 @@ static struct platform_device *m5249_devices[] __initdata = {

 /***************************************************************************/

-#ifdef CONFIG_SPI_COLDFIRE_QSPI
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)

 static void __init m5249_qspi_init(void)
 {
@@ -61,7 +61,7 @@ static void __init m5249_qspi_init(void)
 mcf_mapirq2imr(MCF_IRQ_QSPI, MCFINTC_QSPI);
 }

-#endif /* CONFIG_SPI_COLDFIRE_QSPI */
+#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */

 /***************************************************************************/

@@ -90,7 +90,7 @@ void __init config_BSP(char *commandp, int size)
 #ifdef CONFIG_M5249C3
 m5249_smc91x_init();
 #endif
-#ifdef CONFIG_SPI_COLDFIRE_QSPI
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
 m5249_qspi_init();
 #endif
 }
@@ -23,7 +23,7 @@

 /***************************************************************************/

-#ifdef CONFIG_SPI_COLDFIRE_QSPI
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)

 static void __init m527x_qspi_init(void)
 {
@@ -42,7 +42,7 @@ static void __init m527x_qspi_init(void)
 #endif
 }

-#endif /* CONFIG_SPI_COLDFIRE_QSPI */
+#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */

 /***************************************************************************/

@@ -90,7 +90,7 @@ void __init config_BSP(char *commandp, int size)
 mach_sched_init = hw_timer_init;
 m527x_uarts_init();
 m527x_fec_init();
-#ifdef CONFIG_SPI_COLDFIRE_QSPI
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
 m527x_qspi_init();
 #endif
 }
@@ -24,7 +24,7 @@

 /***************************************************************************/

-#ifdef CONFIG_SPI_COLDFIRE_QSPI
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)

 static void __init m528x_qspi_init(void)
 {
@@ -32,7 +32,7 @@ static void __init m528x_qspi_init(void)
 __raw_writeb(0x07, MCFGPIO_PQSPAR);
 }

-#endif /* CONFIG_SPI_COLDFIRE_QSPI */
+#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */

 /***************************************************************************/

@@ -98,7 +98,7 @@ void __init config_BSP(char *commandp, int size)
 mach_sched_init = hw_timer_init;
 m528x_uarts_init();
 m528x_fec_init();
-#ifdef CONFIG_SPI_COLDFIRE_QSPI
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
 m528x_qspi_init();
 #endif
 }
@@ -30,7 +30,7 @@

 /***************************************************************************/

-#ifdef CONFIG_SPI_COLDFIRE_QSPI
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)

 static void __init m532x_qspi_init(void)
 {
@@ -38,7 +38,7 @@ static void __init m532x_qspi_init(void)
 writew(0x01f0, MCF_GPIO_PAR_QSPI);
 }

-#endif /* CONFIG_SPI_COLDFIRE_QSPI */
+#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */

 /***************************************************************************/

@@ -77,7 +77,7 @@ void __init config_BSP(char *commandp, int size)
 mach_sched_init = hw_timer_init;
 m532x_uarts_init();
 m532x_fec_init();
-#ifdef CONFIG_SPI_COLDFIRE_QSPI
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
 m532x_qspi_init();
 #endif

@@ -121,7 +121,7 @@ static struct platform_device mcf_fec1 = {
 #endif /* MCFFEC_BASE1 */
 #endif /* CONFIG_FEC */

-#ifdef CONFIG_SPI_COLDFIRE_QSPI
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
 /*
 * The ColdFire QSPI module is an SPI protocol hardware block used
 * on a number of different ColdFire CPUs.
@@ -274,7 +274,7 @@ static struct platform_device mcf_qspi = {
 .resource = mcf_qspi_resources,
 .dev.platform_data = &mcf_qspi_data,
 };
-#endif /* CONFIG_SPI_COLDFIRE_QSPI */
+#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */

 static struct platform_device *mcf_devices[] __initdata = {
 &mcf_uart,
@@ -284,7 +284,7 @@ static struct platform_device *mcf_devices[] __initdata = {
 &mcf_fec1,
 #endif
 #endif
-#ifdef CONFIG_SPI_COLDFIRE_QSPI
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
 &mcf_qspi,
 #endif
 };
@@ -2,7 +2,6 @@
 #define _PARISC_HARDWARE_H

 #include <linux/mod_devicetable.h>
-#include <asm/pdc.h>

 #define HWTYPE_ANY_ID PA_HWTYPE_ANY_ID
 #define HVERSION_ANY_ID PA_HVERSION_ANY_ID
@@ -95,12 +94,14 @@ struct bc_module {
 #define HPHW_MC 15
 #define HPHW_FAULTY 31

+struct parisc_device_id;

 /* hardware.c: */
 extern const char *parisc_hardware_description(struct parisc_device_id *id);
 extern enum cpu_type parisc_get_cpu_type(unsigned long hversion);

 struct pci_dev;
+struct hardware_path;

 /* drivers.c: */
 extern struct parisc_device *alloc_pa_dev(unsigned long hpa,
@@ -160,5 +160,11 @@ extern int npmem_ranges;

 #include <asm-generic/memory_model.h>
 #include <asm-generic/getorder.h>
+#include <asm/pdc.h>
+
+#define PAGE0 ((struct zeropage *)__PAGE_OFFSET)
+
+/* DEFINITION OF THE ZERO-PAGE (PAG0) */
+/* based on work by Jason Eckhardt (jason@equator.com) */

 #endif /* _PARISC_PAGE_H */
@@ -343,8 +343,6 @@

 #ifdef __KERNEL__

-#include <asm/page.h> /* for __PAGE_OFFSET */
-
 extern int pdc_type;

 /* Values for pdc_type */
@@ -677,11 +675,6 @@ static inline char * os_id_to_string(u16 os_id) {

 #endif /* __KERNEL__ */

-#define PAGE0 ((struct zeropage *)__PAGE_OFFSET)
-
-/* DEFINITION OF THE ZERO-PAGE (PAG0) */
-/* based on work by Jason Eckhardt (jason@equator.com) */
-
 /* flags of the device_path */
 #define PF_AUTOBOOT 0x80
 #define PF_AUTOSEARCH 0x40
@@ -44,6 +44,8 @@ struct vm_area_struct;

 #endif /* !__ASSEMBLY__ */

+#include <asm/page.h>
+
 #define pte_ERROR(e) \
 printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
 #define pmd_ERROR(e) \
@@ -1,6 +1,8 @@
 #ifndef __ASM_SPINLOCK_H
 #define __ASM_SPINLOCK_H

+#include <asm/barrier.h>
+#include <asm/ldcw.h>
 #include <asm/processor.h>
 #include <asm/spinlock_types.h>

@@ -50,6 +50,7 @@
 #include <linux/init.h>
 #include <linux/major.h>
 #include <linux/tty.h>
+#include <asm/page.h> /* for PAGE0 */
 #include <asm/pdc.h> /* for iodc_call() proto and friends */

 static DEFINE_SPINLOCK(pdc_console_lock);
@@ -29,6 +29,7 @@
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include <asm/irq.h>
+#include <asm/page.h>
 #include <asm/param.h>
 #include <asm/pdc.h>
 #include <asm/led.h>
@ -288,13 +288,6 @@ label##_hv: \
|
|||||||
/* Exception addition: Hard disable interrupts */
|
/* Exception addition: Hard disable interrupts */
|
||||||
#define DISABLE_INTS SOFT_DISABLE_INTS(r10,r11)
|
#define DISABLE_INTS SOFT_DISABLE_INTS(r10,r11)
|
||||||
|
|
||||||
/* Exception addition: Keep interrupt state */
|
|
||||||
#define ENABLE_INTS \
|
|
||||||
ld r11,PACAKMSR(r13); \
|
|
||||||
ld r12,_MSR(r1); \
|
|
||||||
rlwimi r11,r12,0,MSR_EE; \
|
|
||||||
mtmsrd r11,1
|
|
||||||
|
|
||||||
#define ADD_NVGPRS \
|
#define ADD_NVGPRS \
|
||||||
bl .save_nvgprs
|
bl .save_nvgprs
|
||||||
|
|
||||||
|
@@ -588,23 +588,19 @@ _GLOBAL(ret_from_except_lite)
 fast_exc_return_irq:
 restore:
 	/*
-	 * This is the main kernel exit path, we first check if we
-	 * have to change our interrupt state.
+	 * This is the main kernel exit path. First we check if we
+	 * are about to re-enable interrupts
 	 */
 	ld	r5,SOFTE(r1)
 	lbz	r6,PACASOFTIRQEN(r13)
-	cmpwi	cr1,r5,0
-	cmpw	cr0,r5,r6
-	beq	cr0,4f
+	cmpwi	cr0,r5,0
+	beq	restore_irq_off
 
-	/* We do, handle disable first, which is easy */
-	bne	cr1,3f;
-	li	r0,0
-	stb	r0,PACASOFTIRQEN(r13);
-	TRACE_DISABLE_INTS
-	b	4f
+	/* We are enabling, were we already enabled ? Yes, just return */
+	cmpwi	cr0,r6,1
+	beq	cr0,do_restore
 
-3:	/*
+	/*
 	 * We are about to soft-enable interrupts (we are hard disabled
 	 * at this point). We check if there's anything that needs to
 	 * be replayed first.

@@ -626,7 +622,7 @@ restore_no_replay:
 	/*
	 * Final return path. BookE is handled in a different file
 	 */
-4:
+do_restore:
 #ifdef CONFIG_PPC_BOOK3E
 	b	.exception_return_book3e
 #else

@@ -699,6 +695,25 @@ fast_exception_return:
 
 #endif /* CONFIG_PPC_BOOK3E */
 
+	/*
+	 * We are returning to a context with interrupts soft disabled.
+	 *
+	 * However, we may also about to hard enable, so we need to
+	 * make sure that in this case, we also clear PACA_IRQ_HARD_DIS
+	 * or that bit can get out of sync and bad things will happen
+	 */
+restore_irq_off:
+	ld	r3,_MSR(r1)
+	lbz	r7,PACAIRQHAPPENED(r13)
+	andi.	r0,r3,MSR_EE
+	beq	1f
+	rlwinm	r7,r7,0,~PACA_IRQ_HARD_DIS
+	stb	r7,PACAIRQHAPPENED(r13)
+1:	li	r0,0
+	stb	r0,PACASOFTIRQEN(r13);
+	TRACE_DISABLE_INTS
+	b	do_restore
+
 	/*
 	 * Something did happen, check if a re-emit is needed
 	 * (this also clears paca->irq_happened)
@@ -748,6 +763,9 @@ restore_check_irq_replay:
 #endif /* CONFIG_PPC_BOOK3E */
 1:	b	.ret_from_except /* What else to do here ? */
 
 
 
+3:
 do_work:
 #ifdef CONFIG_PREEMPT
 	andi.	r0,r3,MSR_PR	/* Returning to user mode? */
@@ -767,16 +785,6 @@ do_work:
 	SOFT_DISABLE_INTS(r3,r4)
 1:	bl	.preempt_schedule_irq
 
-	/* Hard-disable interrupts again (and update PACA) */
-#ifdef CONFIG_PPC_BOOK3E
-	wrteei	0
-#else
-	ld	r10,PACAKMSR(r13) /* Get kernel MSR without EE */
-	mtmsrd	r10,1
-#endif /* CONFIG_PPC_BOOK3E */
-	li	r0,PACA_IRQ_HARD_DIS
-	stb	r0,PACAIRQHAPPENED(r13)
-
 	/* Re-test flags and eventually loop */
 	clrrdi	r9,r1,THREAD_SHIFT
 	ld	r4,TI_FLAGS(r9)
@@ -787,14 +795,6 @@ do_work:
 user_work:
 #endif /* CONFIG_PREEMPT */
 
-	/* Enable interrupts */
-#ifdef CONFIG_PPC_BOOK3E
-	wrteei	1
-#else
-	ori	r10,r10,MSR_EE
-	mtmsrd	r10,1
-#endif /* CONFIG_PPC_BOOK3E */
-
 	andi.	r0,r4,_TIF_NEED_RESCHED
 	beq	1f
 	bl	.restore_interrupts
@@ -768,8 +768,8 @@ alignment_common:
 	std	r3,_DAR(r1)
 	std	r4,_DSISR(r1)
 	bl	.save_nvgprs
+	DISABLE_INTS
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	ENABLE_INTS
 	bl	.alignment_exception
 	b	.ret_from_except
 
@@ -229,6 +229,19 @@ notrace void arch_local_irq_restore(unsigned long en)
 	 */
 	if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
 		__hard_irq_disable();
+#ifdef CONFIG_TRACE_IRQFLAG
+	else {
+		/*
+		 * We should already be hard disabled here. We had bugs
+		 * where that wasn't the case so let's dbl check it and
+		 * warn if we are wrong. Only do that when IRQ tracing
+		 * is enabled as mfmsr() can be costly.
+		 */
+		if (WARN_ON(mfmsr() & MSR_EE))
+			__hard_irq_disable();
+	}
+#endif /* CONFIG_TRACE_IRQFLAG */
+
 	set_soft_enabled(0);
 
 	/*
|
|||||||
* if they are currently disabled. This is typically called before
|
* if they are currently disabled. This is typically called before
|
||||||
* schedule() or do_signal() when returning to userspace. We do it
|
* schedule() or do_signal() when returning to userspace. We do it
|
||||||
* in C to avoid the burden of dealing with lockdep etc...
|
* in C to avoid the burden of dealing with lockdep etc...
|
||||||
|
*
|
||||||
|
* NOTE: This is called with interrupts hard disabled but not marked
|
||||||
|
* as such in paca->irq_happened, so we need to resync this.
|
||||||
*/
|
*/
|
||||||
void restore_interrupts(void)
|
void restore_interrupts(void)
|
||||||
{
|
{
|
||||||
if (irqs_disabled())
|
if (irqs_disabled()) {
|
||||||
|
local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
|
||||||
local_irq_enable();
|
local_irq_enable();
|
||||||
|
} else
|
||||||
|
__hard_irq_enable();
|
||||||
}
|
}
|
||||||
|
|
||||||
#endif /* CONFIG_PPC64 */
|
#endif /* CONFIG_PPC64 */
|
||||||
|
@@ -248,7 +248,7 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
 			addr, regs->nip, regs->link, code);
 	}
 
-	if (!arch_irq_disabled_regs(regs))
+	if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
 		local_irq_enable();
 
 	memset(&info, 0, sizeof(info));
@@ -1019,7 +1019,9 @@ void __kprobes program_check_exception(struct pt_regs *regs)
 		return;
 	}
 
-	local_irq_enable();
+	/* We restore the interrupt state now */
+	if (!arch_irq_disabled_regs(regs))
+		local_irq_enable();
 
 #ifdef CONFIG_MATH_EMULATION
 	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
|
|||||||
{
|
{
|
||||||
int sig, code, fixed = 0;
|
int sig, code, fixed = 0;
|
||||||
|
|
||||||
|
/* We restore the interrupt state now */
|
||||||
|
if (!arch_irq_disabled_regs(regs))
|
||||||
|
local_irq_enable();
|
||||||
|
|
||||||
/* we don't implement logging of alignment exceptions */
|
/* we don't implement logging of alignment exceptions */
|
||||||
if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
|
if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
|
||||||
fixed = fix_alignment(regs);
|
fixed = fix_alignment(regs);
|
||||||
|
@@ -258,6 +258,8 @@ static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
 		    !(memslot->userspace_addr & (s - 1))) {
 			start &= ~(s - 1);
 			pgsize = s;
+			get_page(hpage);
+			put_page(page);
 			page = hpage;
 		}
 	}
|
|||||||
err = 0;
|
err = 0;
|
||||||
|
|
||||||
out:
|
out:
|
||||||
if (got) {
|
if (got)
|
||||||
if (PageHuge(page))
|
|
||||||
page = compound_head(page);
|
|
||||||
put_page(page);
|
put_page(page);
|
||||||
}
|
|
||||||
return err;
|
return err;
|
||||||
|
|
||||||
up_err:
|
up_err:
|
||||||
@@ -678,8 +677,15 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		SetPageDirty(page);
 
  out_put:
-	if (page)
-		put_page(page);
+	if (page) {
+		/*
+		 * We drop pages[0] here, not page because page might
+		 * have been set to the head page of a compound, but
+		 * we have to drop the reference on the correct tail
+		 * page to match the get inside gup()
+		 */
+		put_page(pages[0]);
+	}
 	return ret;
 
  out_unlock:
@@ -979,6 +985,7 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
 			pa = *physp;
 		}
 		page = pfn_to_page(pa >> PAGE_SHIFT);
+		get_page(page);
 	} else {
 		hva = gfn_to_hva_memslot(memslot, gfn);
 		npages = get_user_pages_fast(hva, 1, 1, pages);

@@ -991,8 +998,6 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
 		page = compound_head(page);
 		psize <<= compound_order(page);
 	}
-	if (!kvm->arch.using_mmu_notifiers)
-		get_page(page);
 	offset = gpa & (psize - 1);
 	if (nb_ret)
 		*nb_ret = psize - offset;
@@ -1003,7 +1008,6 @@ void kvmppc_unpin_guest_page(struct kvm *kvm, void *va)
 {
 	struct page *page = virt_to_page(va);
 
-	page = compound_head(page);
 	put_page(page);
 }
 

@@ -1192,8 +1192,6 @@ static void unpin_slot(struct kvm *kvm, int slot_id)
 			continue;
 		pfn = physp[j] >> PAGE_SHIFT;
 		page = pfn_to_page(pfn);
-		if (PageHuge(page))
-			page = compound_head(page);
 		SetPageDirty(page);
 		put_page(page);
 	}
|
@ -269,4 +269,4 @@ static int __init sunfire_init(void)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
subsys_initcall(sunfire_init);
|
fs_initcall(sunfire_init);
|
||||||
|
@ -495,11 +495,11 @@ xcall_fetch_glob_regs:
|
|||||||
stx %o7, [%g1 + GR_SNAP_O7]
|
stx %o7, [%g1 + GR_SNAP_O7]
|
||||||
stx %i7, [%g1 + GR_SNAP_I7]
|
stx %i7, [%g1 + GR_SNAP_I7]
|
||||||
/* Don't try this at home kids... */
|
/* Don't try this at home kids... */
|
||||||
rdpr %cwp, %g2
|
rdpr %cwp, %g3
|
||||||
sub %g2, 1, %g7
|
sub %g3, 1, %g7
|
||||||
wrpr %g7, %cwp
|
wrpr %g7, %cwp
|
||||||
mov %i7, %g7
|
mov %i7, %g7
|
||||||
wrpr %g2, %cwp
|
wrpr %g3, %cwp
|
||||||
stx %g7, [%g1 + GR_SNAP_RPC]
|
stx %g7, [%g1 + GR_SNAP_RPC]
|
||||||
sethi %hi(trap_block), %g7
|
sethi %hi(trap_block), %g7
|
||||||
or %g7, %lo(trap_block), %g7
|
or %g7, %lo(trap_block), %g7
|
||||||
|
@@ -79,7 +79,6 @@ struct kvm_task_sleep_node {
 	u32 token;
 	int cpu;
 	bool halted;
-	struct mm_struct *mm;
 };
 
 static struct kvm_task_sleep_head {

@@ -126,9 +125,7 @@ void kvm_async_pf_task_wait(u32 token)
 
 	n.token = token;
 	n.cpu = smp_processor_id();
-	n.mm = current->active_mm;
 	n.halted = idle || preempt_count() > 1;
-	atomic_inc(&n.mm->mm_count);
 	init_waitqueue_head(&n.wq);
 	hlist_add_head(&n.link, &b->list);
 	spin_unlock(&b->lock);

@@ -161,9 +158,6 @@ EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);
 static void apf_task_wake_one(struct kvm_task_sleep_node *n)
 {
 	hlist_del_init(&n->link);
-	if (!n->mm)
-		return;
-	mmdrop(n->mm);
 	if (n->halted)
 		smp_send_reschedule(n->cpu);
 	else if (waitqueue_active(&n->wq))

@@ -207,7 +201,7 @@ again:
 		 * async PF was not yet handled.
 		 * Add dummy entry for the token.
 		 */
-		n = kmalloc(sizeof(*n), GFP_ATOMIC);
+		n = kzalloc(sizeof(*n), GFP_ATOMIC);
 		if (!n) {
 			/*
 			 * Allocation failed! Busy wait while other cpu

@@ -219,7 +213,6 @@ again:
 		}
 		n->token = token;
 		n->cpu = smp_processor_id();
-		n->mm = NULL;
 		init_waitqueue_head(&n->wq);
 		hlist_add_head(&n->link, &b->list);
 	} else
|
@ -423,6 +423,7 @@ void set_personality_ia32(bool x32)
|
|||||||
current_thread_info()->status |= TS_COMPAT;
|
current_thread_info()->status |= TS_COMPAT;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(set_personality_ia32);
|
||||||
|
|
||||||
unsigned long get_wchan(struct task_struct *p)
|
unsigned long get_wchan(struct task_struct *p)
|
||||||
{
|
{
|
||||||
|
@ -185,10 +185,22 @@ void __init setup_per_cpu_areas(void)
|
|||||||
#endif
|
#endif
|
||||||
rc = -EINVAL;
|
rc = -EINVAL;
|
||||||
if (pcpu_chosen_fc != PCPU_FC_PAGE) {
|
if (pcpu_chosen_fc != PCPU_FC_PAGE) {
|
||||||
const size_t atom_size = cpu_has_pse ? PMD_SIZE : PAGE_SIZE;
|
|
||||||
const size_t dyn_size = PERCPU_MODULE_RESERVE +
|
const size_t dyn_size = PERCPU_MODULE_RESERVE +
|
||||||
PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE;
|
PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE;
|
||||||
|
size_t atom_size;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* On 64bit, use PMD_SIZE for atom_size so that embedded
|
||||||
|
* percpu areas are aligned to PMD. This, in the future,
|
||||||
|
* can also allow using PMD mappings in vmalloc area. Use
|
||||||
|
* PAGE_SIZE on 32bit as vmalloc space is highly contended
|
||||||
|
* and large vmalloc area allocs can easily fail.
|
||||||
|
*/
|
||||||
|
#ifdef CONFIG_X86_64
|
||||||
|
atom_size = PMD_SIZE;
|
||||||
|
#else
|
||||||
|
atom_size = PAGE_SIZE;
|
||||||
|
#endif
|
||||||
rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
|
rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
|
||||||
dyn_size, atom_size,
|
dyn_size, atom_size,
|
||||||
pcpu_cpu_distance,
|
pcpu_cpu_distance,
|
||||||
|
@@ -6581,6 +6581,7 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
 		kvm_inject_page_fault(vcpu, &fault);
 	}
 	vcpu->arch.apf.halted = false;
+	vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 }
 
 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
@@ -63,6 +63,7 @@
 #include <asm/stackprotector.h>
 #include <asm/hypervisor.h>
 #include <asm/mwait.h>
+#include <asm/pci_x86.h>
 
 #ifdef CONFIG_ACPI
 #include <linux/acpi.h>
@@ -809,9 +810,40 @@ static void xen_io_delay(void)
 }
 
 #ifdef CONFIG_X86_LOCAL_APIC
+static unsigned long xen_set_apic_id(unsigned int x)
+{
+	WARN_ON(1);
+	return x;
+}
+static unsigned int xen_get_apic_id(unsigned long x)
+{
+	return ((x)>>24) & 0xFFu;
+}
 static u32 xen_apic_read(u32 reg)
 {
-	return 0;
+	struct xen_platform_op op = {
+		.cmd = XENPF_get_cpuinfo,
+		.interface_version = XENPF_INTERFACE_VERSION,
+		.u.pcpu_info.xen_cpuid = 0,
+	};
+	int ret = 0;
+
+	/* Shouldn't need this as APIC is turned off for PV, and we only
+	 * get called on the bootup processor. But just in case. */
+	if (!xen_initial_domain() || smp_processor_id())
+		return 0;
+
+	if (reg == APIC_LVR)
+		return 0x10;
+
+	if (reg != APIC_ID)
+		return 0;
+
+	ret = HYPERVISOR_dom0_op(&op);
+	if (ret)
+		return 0;
+
+	return op.u.pcpu_info.apic_id << 24;
 }
 
 static void xen_apic_write(u32 reg, u32 val)

@@ -849,6 +881,8 @@ static void set_xen_basic_apic_ops(void)
 	apic->icr_write = xen_apic_icr_write;
 	apic->wait_icr_idle = xen_apic_wait_icr_idle;
 	apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle;
+	apic->set_apic_id = xen_set_apic_id;
+	apic->get_apic_id = xen_get_apic_id;
 }
 
 #endif
|
||||||
@ -1365,8 +1399,10 @@ asmlinkage void __init xen_start_kernel(void)
|
|||||||
/* Make sure ACS will be enabled */
|
/* Make sure ACS will be enabled */
|
||||||
pci_request_acs();
|
pci_request_acs();
|
||||||
}
|
}
|
||||||
|
#ifdef CONFIG_PCI
|
||||||
|
/* PCI BIOS service won't work from a PV guest. */
|
||||||
|
pci_probe &= ~PCI_PROBE_BIOS;
|
||||||
|
#endif
|
||||||
xen_raw_console_write("about to get started...\n");
|
xen_raw_console_write("about to get started...\n");
|
||||||
|
|
||||||
xen_setup_runstate_info(0);
|
xen_setup_runstate_info(0);
|
||||||
|
@@ -353,8 +353,13 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
 {
 	if (val & _PAGE_PRESENT) {
 		unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
+		unsigned long pfn = mfn_to_pfn(mfn);
+
 		pteval_t flags = val & PTE_FLAGS_MASK;
-		val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
+		if (unlikely(pfn == ~0))
+			val = flags & ~_PAGE_PRESENT;
+		else
+			val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
 	}
 
 	return val;
@@ -775,9 +775,11 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
 			map->format.parse_val(val + i);
 	} else {
 		for (i = 0; i < val_count; i++) {
-			ret = regmap_read(map, reg + i, val + (i * val_bytes));
+			unsigned int ival;
+			ret = regmap_read(map, reg + i, &ival);
 			if (ret != 0)
 				return ret;
+			memcpy(val + (i * val_bytes), &ival, val_bytes);
 		}
 	}
 
@@ -2297,7 +2297,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
 		return;
 	}
 
-	if (!cap_raised(current_cap(), CAP_SYS_ADMIN)) {
+	if (!capable(CAP_SYS_ADMIN)) {
 		retcode = ERR_PERM;
 		goto fail;
 	}
@@ -965,18 +965,15 @@ static void omap_gpio_mod_init(struct gpio_bank *bank)
 	}
 
 	_gpio_rmw(base, bank->regs->irqenable, l, bank->regs->irqenable_inv);
-	_gpio_rmw(base, bank->regs->irqstatus, l,
-					bank->regs->irqenable_inv == false);
-	_gpio_rmw(base, bank->regs->irqenable, l, bank->regs->debounce_en != 0);
-	_gpio_rmw(base, bank->regs->irqenable, l, bank->regs->ctrl != 0);
+	_gpio_rmw(base, bank->regs->irqstatus, l, !bank->regs->irqenable_inv);
 	if (bank->regs->debounce_en)
-		_gpio_rmw(base, bank->regs->debounce_en, 0, 1);
+		__raw_writel(0, base + bank->regs->debounce_en);
 
 	/* Save OE default value (0xffffffff) in the context */
 	bank->context.oe = __raw_readl(bank->base + bank->regs->direction);
 	/* Initialize interface clk ungated, module enabled */
 	if (bank->regs->ctrl)
-		_gpio_rmw(base, bank->regs->ctrl, 0, 1);
+		__raw_writel(0, base + bank->regs->ctrl);
 }
 
 static __devinit void
|
@ -230,16 +230,12 @@ static void pch_gpio_setup(struct pch_gpio *chip)
|
|||||||
|
|
||||||
static int pch_irq_type(struct irq_data *d, unsigned int type)
|
static int pch_irq_type(struct irq_data *d, unsigned int type)
|
||||||
{
|
{
|
||||||
u32 im;
|
|
||||||
u32 __iomem *im_reg;
|
|
||||||
u32 ien;
|
|
||||||
u32 im_pos;
|
|
||||||
int ch;
|
|
||||||
unsigned long flags;
|
|
||||||
u32 val;
|
|
||||||
int irq = d->irq;
|
|
||||||
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
|
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
|
||||||
struct pch_gpio *chip = gc->private;
|
struct pch_gpio *chip = gc->private;
|
||||||
|
u32 im, im_pos, val;
|
||||||
|
u32 __iomem *im_reg;
|
||||||
|
unsigned long flags;
|
||||||
|
int ch, irq = d->irq;
|
||||||
|
|
||||||
ch = irq - chip->irq_base;
|
ch = irq - chip->irq_base;
|
||||||
if (irq <= chip->irq_base + 7) {
|
if (irq <= chip->irq_base + 7) {
|
||||||
@@ -270,30 +266,22 @@ static int pch_irq_type(struct irq_data *d, unsigned int type)
 	case IRQ_TYPE_LEVEL_LOW:
 		val = PCH_LEVEL_L;
 		break;
-	case IRQ_TYPE_PROBE:
-		goto end;
 	default:
-		dev_warn(chip->dev, "%s: unknown type(%dd)",
-			__func__, type);
-		goto end;
+		goto unlock;
 	}
 
 	/* Set interrupt mode */
 	im = ioread32(im_reg) & ~(PCH_IM_MASK << (im_pos * 4));
 	iowrite32(im | (val << (im_pos * 4)), im_reg);
 
-	/* iclr */
-	iowrite32(BIT(ch), &chip->reg->iclr);
+	/* And the handler */
+	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
+		__irq_set_handler_locked(d->irq, handle_level_irq);
+	else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
+		__irq_set_handler_locked(d->irq, handle_edge_irq);
 
-	/* IMASKCLR */
-	iowrite32(BIT(ch), &chip->reg->imaskclr);
-
-	/* Enable interrupt */
-	ien = ioread32(&chip->reg->ien);
-	iowrite32(ien | BIT(ch), &chip->reg->ien);
-end:
+unlock:
 	spin_unlock_irqrestore(&chip->spinlock, flags);
 
 	return 0;
 }
 
@@ -313,18 +301,24 @@ static void pch_irq_mask(struct irq_data *d)
 	iowrite32(1 << (d->irq - chip->irq_base), &chip->reg->imask);
 }
 
+static void pch_irq_ack(struct irq_data *d)
+{
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+	struct pch_gpio *chip = gc->private;
+
+	iowrite32(1 << (d->irq - chip->irq_base), &chip->reg->iclr);
+}
+
 static irqreturn_t pch_gpio_handler(int irq, void *dev_id)
 {
 	struct pch_gpio *chip = dev_id;
 	u32 reg_val = ioread32(&chip->reg->istatus);
-	int i;
-	int ret = IRQ_NONE;
+	int i, ret = IRQ_NONE;
 
 	for (i = 0; i < gpio_pins[chip->ioh]; i++) {
 		if (reg_val & BIT(i)) {
 			dev_dbg(chip->dev, "%s:[%d]:irq=%d status=0x%x\n",
 				__func__, i, irq, reg_val);
-			iowrite32(BIT(i), &chip->reg->iclr);
 			generic_handle_irq(chip->irq_base + i);
 			ret = IRQ_HANDLED;
 		}

@@ -343,6 +337,7 @@ static __devinit void pch_gpio_alloc_generic_chip(struct pch_gpio *chip,
 	gc->private = chip;
 	ct = gc->chip_types;
 
+	ct->chip.irq_ack = pch_irq_ack;
 	ct->chip.irq_mask = pch_irq_mask;
 	ct->chip.irq_unmask = pch_irq_unmask;
 	ct->chip.irq_set_type = pch_irq_type;
@@ -357,6 +352,7 @@ static int __devinit pch_gpio_probe(struct pci_dev *pdev,
 	s32 ret;
 	struct pch_gpio *chip;
 	int irq_base;
+	u32 msk;
 
 	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
 	if (chip == NULL)

@@ -408,8 +404,13 @@ static int __devinit pch_gpio_probe(struct pci_dev *pdev,
 	}
 	chip->irq_base = irq_base;
 
+	/* Mask all interrupts, but enable them */
+	msk = (1 << gpio_pins[chip->ioh]) - 1;
+	iowrite32(msk, &chip->reg->imask);
+	iowrite32(msk, &chip->reg->ien);
+
 	ret = request_irq(pdev->irq, pch_gpio_handler,
 			     IRQF_SHARED, KBUILD_MODNAME, chip);
 	if (ret != 0) {
 		dev_err(&pdev->dev,
 			"%s request_irq failed\n", __func__);

@@ -418,8 +419,6 @@ static int __devinit pch_gpio_probe(struct pci_dev *pdev,
 
 	pch_gpio_alloc_generic_chip(chip, irq_base, gpio_pins[chip->ioh]);
 
-	/* Initialize interrupt ien register */
-	iowrite32(0, &chip->reg->ien);
 end:
 	return 0;
 
@@ -452,12 +452,14 @@ static struct samsung_gpio_cfg s3c24xx_gpiocfg_banka = {
 };
 #endif
 
+#if defined(CONFIG_ARCH_EXYNOS4) || defined(CONFIG_ARCH_EXYNOS5)
 static struct samsung_gpio_cfg exynos_gpio_cfg = {
 	.set_pull	= exynos_gpio_setpull,
 	.get_pull	= exynos_gpio_getpull,
 	.set_config	= samsung_gpio_setcfg_4bit,
 	.get_config	= samsung_gpio_getcfg_4bit,
 };
+#endif
 
 #if defined(CONFIG_CPU_S5P6440) || defined(CONFIG_CPU_S5P6450)
 static struct samsung_gpio_cfg s5p64x0_gpio_cfg_rbank = {

@@ -2123,8 +2125,8 @@ static struct samsung_gpio_chip s5pv210_gpios_4bit[] = {
 * uses the above macro and depends on the banks being listed in order here.
 */
 
-static struct samsung_gpio_chip exynos4_gpios_1[] = {
 #ifdef CONFIG_ARCH_EXYNOS4
+static struct samsung_gpio_chip exynos4_gpios_1[] = {
 	{
 		.chip	= {
 			.base	= EXYNOS4_GPA0(0),

@@ -2222,11 +2224,11 @@ static struct samsung_gpio_chip exynos4_gpios_1[] = {
 			.label	= "GPF3",
 		},
 	},
-#endif
 };
+#endif
 
-static struct samsung_gpio_chip exynos4_gpios_2[] = {
 #ifdef CONFIG_ARCH_EXYNOS4
+static struct samsung_gpio_chip exynos4_gpios_2[] = {
 	{
 		.chip	= {
 			.base	= EXYNOS4_GPJ0(0),

@@ -2367,11 +2369,11 @@ static struct samsung_gpio_chip exynos4_gpios_2[] = {
 		.to_irq	= samsung_gpiolib_to_irq,
 		},
 	},
-#endif
 };
+#endif
 
-static struct samsung_gpio_chip exynos4_gpios_3[] = {
 #ifdef CONFIG_ARCH_EXYNOS4
+static struct samsung_gpio_chip exynos4_gpios_3[] = {
 	{
 		.chip	= {
 			.base	= EXYNOS4_GPZ(0),

@@ -2379,8 +2381,8 @@ static struct samsung_gpio_chip exynos4_gpios_3[] = {
 			.label	= "GPZ",
 		},
 	},
-#endif
 };
+#endif
 
 #ifdef CONFIG_ARCH_EXYNOS5
 static struct samsung_gpio_chip exynos5_gpios_1[] = {

@@ -2719,7 +2721,9 @@ static __init int samsung_gpiolib_init(void)
 {
 	struct samsung_gpio_chip *chip;
 	int i, nr_chips;
+#if defined(CONFIG_CPU_EXYNOS4210) || defined(CONFIG_SOC_EXYNOS5250)
 	void __iomem *gpio_base1, *gpio_base2, *gpio_base3, *gpio_base4;
+#endif
 	int group = 0;
 
 	samsung_gpiolib_set_cfg(samsung_gpio_cfgs, ARRAY_SIZE(samsung_gpio_cfgs));

@@ -2971,6 +2975,7 @@ static __init int samsung_gpiolib_init(void)
 
 	return 0;
 
+#if defined(CONFIG_CPU_EXYNOS4210) || defined(CONFIG_SOC_EXYNOS5250)
 err_ioremap4:
 	iounmap(gpio_base3);
 err_ioremap3:

@@ -2979,6 +2984,7 @@ err_ioremap2:
 	iounmap(gpio_base1);
 err_ioremap1:
 	return -ENOMEM;
+#endif
 }
 core_initcall(samsung_gpiolib_init);
 
@@ -398,10 +398,8 @@ static int init_render_ring(struct intel_ring_buffer *ring)
 			return ret;
 	}
 
-	if (INTEL_INFO(dev)->gen >= 6) {
-		I915_WRITE(INSTPM,
-			   INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
 
+	if (IS_GEN6(dev)) {
 		/* From the Sandybridge PRM, volume 1 part 3, page 24:
 		 * "If this bit is set, STCunit will have LRA as replacement
 		 * policy. [...] This bit must be reset. LRA replacement

@@ -411,6 +409,11 @@ static int init_render_ring(struct intel_ring_buffer *ring)
 			   CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT);
 	}
 
+	if (INTEL_INFO(dev)->gen >= 6) {
+		I915_WRITE(INSTPM,
+			   INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
+	}
+
 	return ret;
 }
 
@@ -1220,8 +1220,14 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
 
 static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo)
 {
+	struct drm_device *dev = intel_sdvo->base.base.dev;
 	u8 response[2];
 
+	/* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise
+	 * on the line. */
+	if (IS_I945G(dev) || IS_I945GM(dev))
+		return false;
+
 	return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
 				    &response, 2) && response[0];
 }
|
@ -29,10 +29,6 @@
|
|||||||
#include "nouveau_i2c.h"
|
#include "nouveau_i2c.h"
|
||||||
#include "nouveau_hw.h"
|
#include "nouveau_hw.h"
|
||||||
|
|
||||||
#define T_TIMEOUT 2200000
|
|
||||||
#define T_RISEFALL 1000
|
|
||||||
#define T_HOLD 5000
|
|
||||||
|
|
||||||
static void
|
static void
|
||||||
i2c_drive_scl(void *data, int state)
|
i2c_drive_scl(void *data, int state)
|
||||||
{
|
{
|
||||||
@ -113,175 +109,6 @@ i2c_sense_sda(void *data)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
|
||||||
i2c_delay(struct nouveau_i2c_chan *port, u32 nsec)
|
|
||||||
{
|
|
||||||
udelay((nsec + 500) / 1000);
|
|
||||||
}
|
|
||||||
|
|
||||||
static bool
|
|
||||||
i2c_raise_scl(struct nouveau_i2c_chan *port)
|
|
||||||
{
|
|
||||||
u32 timeout = T_TIMEOUT / T_RISEFALL;
|
|
||||||
|
|
||||||
i2c_drive_scl(port, 1);
|
|
||||||
do {
|
|
||||||
i2c_delay(port, T_RISEFALL);
|
|
||||||
} while (!i2c_sense_scl(port) && --timeout);
|
|
||||||
|
|
||||||
return timeout != 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int
|
|
||||||
i2c_start(struct nouveau_i2c_chan *port)
|
|
||||||
{
|
|
||||||
int ret = 0;
|
|
||||||
|
|
||||||
port->state = i2c_sense_scl(port);
|
|
||||||
port->state |= i2c_sense_sda(port) << 1;
|
|
||||||
if (port->state != 3) {
|
|
||||||
i2c_drive_scl(port, 0);
|
|
||||||
i2c_drive_sda(port, 1);
|
|
||||||
if (!i2c_raise_scl(port))
|
|
||||||
ret = -EBUSY;
|
|
||||||
}
|
|
||||||
|
|
||||||
i2c_drive_sda(port, 0);
|
|
||||||
i2c_delay(port, T_HOLD);
|
|
||||||
i2c_drive_scl(port, 0);
|
|
||||||
i2c_delay(port, T_HOLD);
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
static void
|
|
||||||
i2c_stop(struct nouveau_i2c_chan *port)
|
|
||||||
{
|
|
||||||
i2c_drive_scl(port, 0);
|
|
||||||
i2c_drive_sda(port, 0);
|
|
||||||
i2c_delay(port, T_RISEFALL);
|
|
||||||
|
|
||||||
i2c_drive_scl(port, 1);
|
|
||||||
i2c_delay(port, T_HOLD);
|
|
||||||
i2c_drive_sda(port, 1);
|
|
||||||
i2c_delay(port, T_HOLD);
|
|
||||||
}
|
|
||||||
|
|
||||||
static int
|
|
||||||
i2c_bitw(struct nouveau_i2c_chan *port, int sda)
|
|
||||||
{
|
|
||||||
i2c_drive_sda(port, sda);
|
|
||||||
i2c_delay(port, T_RISEFALL);
|
|
||||||
|
|
||||||
if (!i2c_raise_scl(port))
|
|
||||||
return -ETIMEDOUT;
|
|
||||||
i2c_delay(port, T_HOLD);
|
|
||||||
|
|
||||||
i2c_drive_scl(port, 0);
|
|
||||||
i2c_delay(port, T_HOLD);
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int
|
|
||||||
i2c_bitr(struct nouveau_i2c_chan *port)
|
|
||||||
{
|
|
||||||
int sda;
|
|
||||||
|
|
||||||
i2c_drive_sda(port, 1);
|
|
||||||
i2c_delay(port, T_RISEFALL);
|
|
||||||
|
|
||||||
if (!i2c_raise_scl(port))
|
|
||||||
return -ETIMEDOUT;
|
|
||||||
i2c_delay(port, T_HOLD);
|
|
||||||
|
|
||||||
sda = i2c_sense_sda(port);
|
|
||||||
|
|
||||||
i2c_drive_scl(port, 0);
|
|
||||||
i2c_delay(port, T_HOLD);
|
|
||||||
return sda;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int
|
|
||||||
i2c_get_byte(struct nouveau_i2c_chan *port, u8 *byte, bool last)
|
|
||||||
{
|
|
||||||
int i, bit;
|
|
||||||
|
|
||||||
*byte = 0;
|
|
||||||
for (i = 7; i >= 0; i--) {
|
|
||||||
bit = i2c_bitr(port);
|
|
||||||
if (bit < 0)
|
|
||||||
return bit;
|
|
||||||
*byte |= bit << i;
|
|
||||||
}
|
|
||||||
|
|
||||||
return i2c_bitw(port, last ? 1 : 0);
|
|
||||||
}
|
|
||||||
|
|
||||||
static int
|
|
||||||
i2c_put_byte(struct nouveau_i2c_chan *port, u8 byte)
|
|
||||||
{
|
|
||||||
int i, ret;
|
|
||||||
for (i = 7; i >= 0; i--) {
|
|
||||||
ret = i2c_bitw(port, !!(byte & (1 << i)));
|
|
||||||
if (ret < 0)
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
ret = i2c_bitr(port);
|
|
||||||
if (ret == 1) /* nack */
|
|
||||||
ret = -EIO;
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int
|
|
||||||
i2c_addr(struct nouveau_i2c_chan *port, struct i2c_msg *msg)
|
|
||||||
{
|
|
||||||
u32 addr = msg->addr << 1;
|
|
||||||
if (msg->flags & I2C_M_RD)
|
|
||||||
addr |= 1;
|
|
||||||
return i2c_put_byte(port, addr);
|
|
||||||
}
|
|
||||||
|
|
||||||
static int
|
|
||||||
i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
|
|
||||||
{
|
|
||||||
struct nouveau_i2c_chan *port = (struct nouveau_i2c_chan *)adap;
|
|
||||||
struct i2c_msg *msg = msgs;
|
|
||||||
int ret = 0, mcnt = num;
|
|
||||||
|
|
||||||
while (!ret && mcnt--) {
|
|
||||||
u8 remaining = msg->len;
|
|
||||||
u8 *ptr = msg->buf;
|
|
||||||
|
|
||||||
ret = i2c_start(port);
|
|
||||||
if (ret == 0)
|
|
||||||
ret = i2c_addr(port, msg);
|
|
||||||
|
|
||||||
if (msg->flags & I2C_M_RD) {
|
|
||||||
while (!ret && remaining--)
|
|
||||||
ret = i2c_get_byte(port, ptr++, !remaining);
|
|
||||||
} else {
|
|
||||||
while (!ret && remaining--)
|
|
||||||
ret = i2c_put_byte(port, *ptr++);
|
|
||||||
}
|
|
||||||
|
|
||||||
msg++;
|
|
||||||
}
|
|
||||||
|
|
||||||
i2c_stop(port);
|
|
||||||
return (ret < 0) ? ret : num;
|
|
||||||
}
|
|
||||||
|
|
||||||
static u32
|
|
||||||
i2c_bit_func(struct i2c_adapter *adap)
|
|
||||||
{
|
|
||||||
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
|
|
||||||
}
|
|
||||||
|
|
||||||
const struct i2c_algorithm nouveau_i2c_bit_algo = {
|
|
||||||
.master_xfer = i2c_bit_xfer,
|
|
||||||
.functionality = i2c_bit_func
|
|
||||||
};
|
|
||||||
|
|
||||||
static const uint32_t nv50_i2c_port[] = {
|
static const uint32_t nv50_i2c_port[] = {
|
||||||
0x00e138, 0x00e150, 0x00e168, 0x00e180,
|
0x00e138, 0x00e150, 0x00e168, 0x00e180,
|
||||||
0x00e254, 0x00e274, 0x00e764, 0x00e780,
|
0x00e254, 0x00e274, 0x00e764, 0x00e780,
|
||||||
@ -384,12 +211,10 @@ nouveau_i2c_init(struct drm_device *dev)
|
|||||||
case 0: /* NV04:NV50 */
|
case 0: /* NV04:NV50 */
|
||||||
port->drive = entry[0];
|
port->drive = entry[0];
|
||||||
port->sense = entry[1];
|
port->sense = entry[1];
|
||||||
port->adapter.algo = &nouveau_i2c_bit_algo;
|
|
||||||
break;
|
break;
|
||||||
case 4: /* NV4E */
|
case 4: /* NV4E */
|
||||||
port->drive = 0x600800 + entry[1];
|
port->drive = 0x600800 + entry[1];
|
||||||
port->sense = port->drive;
|
port->sense = port->drive;
|
||||||
port->adapter.algo = &nouveau_i2c_bit_algo;
|
|
||||||
break;
|
break;
|
||||||
case 5: /* NV50- */
|
case 5: /* NV50- */
|
||||||
port->drive = entry[0] & 0x0f;
|
port->drive = entry[0] & 0x0f;
|
||||||
@ -402,7 +227,6 @@ nouveau_i2c_init(struct drm_device *dev)
|
|||||||
port->drive = 0x00d014 + (port->drive * 0x20);
|
port->drive = 0x00d014 + (port->drive * 0x20);
|
||||||
port->sense = port->drive;
|
port->sense = port->drive;
|
||||||
}
|
}
|
||||||
port->adapter.algo = &nouveau_i2c_bit_algo;
|
|
||||||
break;
|
break;
|
||||||
case 6: /* NV50- DP AUX */
|
case 6: /* NV50- DP AUX */
|
||||||
port->drive = entry[0];
|
port->drive = entry[0];
|
||||||
@ -413,7 +237,7 @@ nouveau_i2c_init(struct drm_device *dev)
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!port->adapter.algo) {
|
if (!port->adapter.algo && !port->drive) {
|
||||||
NV_ERROR(dev, "I2C%d: type %d index %x/%x unknown\n",
|
NV_ERROR(dev, "I2C%d: type %d index %x/%x unknown\n",
|
||||||
i, port->type, port->drive, port->sense);
|
i, port->type, port->drive, port->sense);
|
||||||
kfree(port);
|
kfree(port);
|
||||||
@ -429,7 +253,26 @@ nouveau_i2c_init(struct drm_device *dev)
|
|||||||
port->dcb = ROM32(entry[0]);
|
port->dcb = ROM32(entry[0]);
|
||||||
i2c_set_adapdata(&port->adapter, i2c);
|
i2c_set_adapdata(&port->adapter, i2c);
|
||||||
|
|
||||||
ret = i2c_add_adapter(&port->adapter);
|
if (port->adapter.algo != &nouveau_dp_i2c_algo) {
|
||||||
|
port->adapter.algo_data = &port->bit;
|
||||||
|
port->bit.udelay = 10;
|
||||||
|
port->bit.timeout = usecs_to_jiffies(2200);
|
||||||
|
port->bit.data = port;
|
||||||
|
port->bit.setsda = i2c_drive_sda;
|
||||||
|
port->bit.setscl = i2c_drive_scl;
|
||||||
|
port->bit.getsda = i2c_sense_sda;
|
||||||
|
port->bit.getscl = i2c_sense_scl;
|
||||||
|
|
||||||
|
i2c_drive_scl(port, 0);
|
||||||
|
i2c_drive_sda(port, 1);
|
||||||
|
i2c_drive_scl(port, 1);
|
||||||
|
|
||||||
|
ret = i2c_bit_add_bus(&port->adapter);
|
||||||
|
} else {
|
||||||
|
port->adapter.algo = &nouveau_dp_i2c_algo;
|
||||||
|
ret = i2c_add_adapter(&port->adapter);
|
||||||
|
}
|
||||||
|
|
||||||
if (ret) {
|
if (ret) {
|
||||||
NV_ERROR(dev, "I2C%d: failed register: %d\n", i, ret);
|
NV_ERROR(dev, "I2C%d: failed register: %d\n", i, ret);
|
||||||
kfree(port);
|
kfree(port);
|
||||||
|
@ -34,6 +34,7 @@
|
|||||||
struct nouveau_i2c_chan {
|
struct nouveau_i2c_chan {
|
||||||
struct i2c_adapter adapter;
|
struct i2c_adapter adapter;
|
||||||
struct drm_device *dev;
|
struct drm_device *dev;
|
||||||
|
struct i2c_algo_bit_data bit;
|
||||||
struct list_head head;
|
struct list_head head;
|
||||||
u8 index;
|
u8 index;
|
||||||
u8 type;
|
u8 type;
|
||||||
|
@ -112,7 +112,7 @@ err_free_addr:
|
|||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void __devexit gpio_ext_free(struct netxbig_gpio_ext *gpio_ext)
|
static void gpio_ext_free(struct netxbig_gpio_ext *gpio_ext)
|
||||||
{
|
{
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
@ -294,7 +294,7 @@ static ssize_t netxbig_led_sata_show(struct device *dev,
|
|||||||
|
|
||||||
static DEVICE_ATTR(sata, 0644, netxbig_led_sata_show, netxbig_led_sata_store);
|
static DEVICE_ATTR(sata, 0644, netxbig_led_sata_show, netxbig_led_sata_store);
|
||||||
|
|
||||||
static void __devexit delete_netxbig_led(struct netxbig_led_data *led_dat)
|
static void delete_netxbig_led(struct netxbig_led_data *led_dat)
|
||||||
{
|
{
|
||||||
if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE)
|
if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE)
|
||||||
device_remove_file(led_dat->cdev.dev, &dev_attr_sata);
|
device_remove_file(led_dat->cdev.dev, &dev_attr_sata);
|
||||||
|
@ -255,7 +255,7 @@ err_free_cmd:
|
|||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void __devexit delete_ns2_led(struct ns2_led_data *led_dat)
|
static void delete_ns2_led(struct ns2_led_data *led_dat)
|
||||||
{
|
{
|
||||||
device_remove_file(led_dat->cdev.dev, &dev_attr_sata);
|
device_remove_file(led_dat->cdev.dev, &dev_attr_sata);
|
||||||
led_classdev_unregister(&led_dat->cdev);
|
led_classdev_unregister(&led_dat->cdev);
|
||||||
|
@ -134,7 +134,7 @@ static void cn_ulog_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
|
|||||||
{
|
{
|
||||||
struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1);
|
struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1);
|
||||||
|
|
||||||
if (!cap_raised(current_cap(), CAP_SYS_ADMIN))
|
if (!capable(CAP_SYS_ADMIN))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
spin_lock(&receiving_list_lock);
|
spin_lock(&receiving_list_lock);
|
||||||
|
@ -718,8 +718,8 @@ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
|
|||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
|
m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
|
||||||
request_module("scsi_dh_%s", m->hw_handler_name);
|
if (!try_then_request_module(scsi_dh_handler_exist(m->hw_handler_name),
|
||||||
if (scsi_dh_handler_exist(m->hw_handler_name) == 0) {
|
"scsi_dh_%s", m->hw_handler_name)) {
|
||||||
ti->error = "unknown hardware handler type";
|
ti->error = "unknown hardware handler type";
|
||||||
ret = -EINVAL;
|
ret = -EINVAL;
|
||||||
goto fail;
|
goto fail;
|
||||||
|
@ -279,8 +279,10 @@ static void __cell_release(struct cell *cell, struct bio_list *inmates)
|
|||||||
|
|
||||||
hlist_del(&cell->list);
|
hlist_del(&cell->list);
|
||||||
|
|
||||||
bio_list_add(inmates, cell->holder);
|
if (inmates) {
|
||||||
bio_list_merge(inmates, &cell->bios);
|
bio_list_add(inmates, cell->holder);
|
||||||
|
bio_list_merge(inmates, &cell->bios);
|
||||||
|
}
|
||||||
|
|
||||||
mempool_free(cell, prison->cell_pool);
|
mempool_free(cell, prison->cell_pool);
|
||||||
}
|
}
|
||||||
@ -303,9 +305,10 @@ static void cell_release(struct cell *cell, struct bio_list *bios)
|
|||||||
*/
|
*/
|
||||||
static void __cell_release_singleton(struct cell *cell, struct bio *bio)
|
static void __cell_release_singleton(struct cell *cell, struct bio *bio)
|
||||||
{
|
{
|
||||||
hlist_del(&cell->list);
|
|
||||||
BUG_ON(cell->holder != bio);
|
BUG_ON(cell->holder != bio);
|
||||||
BUG_ON(!bio_list_empty(&cell->bios));
|
BUG_ON(!bio_list_empty(&cell->bios));
|
||||||
|
|
||||||
|
__cell_release(cell, NULL);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void cell_release_singleton(struct cell *cell, struct bio *bio)
|
static void cell_release_singleton(struct cell *cell, struct bio *bio)
|
||||||
@ -1177,6 +1180,7 @@ static void no_space(struct cell *cell)
|
|||||||
static void process_discard(struct thin_c *tc, struct bio *bio)
|
static void process_discard(struct thin_c *tc, struct bio *bio)
|
||||||
{
|
{
|
||||||
int r;
|
int r;
|
||||||
|
unsigned long flags;
|
||||||
struct pool *pool = tc->pool;
|
struct pool *pool = tc->pool;
|
||||||
struct cell *cell, *cell2;
|
struct cell *cell, *cell2;
|
||||||
struct cell_key key, key2;
|
struct cell_key key, key2;
|
||||||
@ -1218,7 +1222,9 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
|
|||||||
m->bio = bio;
|
m->bio = bio;
|
||||||
|
|
||||||
if (!ds_add_work(&pool->all_io_ds, &m->list)) {
|
if (!ds_add_work(&pool->all_io_ds, &m->list)) {
|
||||||
|
spin_lock_irqsave(&pool->lock, flags);
|
||||||
list_add(&m->list, &pool->prepared_discards);
|
list_add(&m->list, &pool->prepared_discards);
|
||||||
|
spin_unlock_irqrestore(&pool->lock, flags);
|
||||||
wake_worker(pool);
|
wake_worker(pool);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
@ -2626,8 +2632,10 @@ static int thin_endio(struct dm_target *ti,
|
|||||||
if (h->all_io_entry) {
|
if (h->all_io_entry) {
|
||||||
INIT_LIST_HEAD(&work);
|
INIT_LIST_HEAD(&work);
|
||||||
ds_dec(h->all_io_entry, &work);
|
ds_dec(h->all_io_entry, &work);
|
||||||
|
spin_lock_irqsave(&pool->lock, flags);
|
||||||
list_for_each_entry_safe(m, tmp, &work, list)
|
list_for_each_entry_safe(m, tmp, &work, list)
|
||||||
list_add(&m->list, &pool->prepared_discards);
|
list_add(&m->list, &pool->prepared_discards);
|
||||||
|
spin_unlock_irqrestore(&pool->lock, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
mempool_free(h, pool->endio_hook_pool);
|
mempool_free(h, pool->endio_hook_pool);
|
||||||
@ -2759,6 +2767,6 @@ static void dm_thin_exit(void)
|
|||||||
module_init(dm_thin_init);
|
module_init(dm_thin_init);
|
||||||
module_exit(dm_thin_exit);
|
module_exit(dm_thin_exit);
|
||||||
|
|
||||||
MODULE_DESCRIPTION(DM_NAME "device-mapper thin provisioning target");
|
MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
|
||||||
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
|
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
|
||||||
MODULE_LICENSE("GPL");
|
MODULE_LICENSE("GPL");
|
||||||
|
@ -2173,9 +2173,10 @@ re_arm:
|
|||||||
* received frames (loopback). Since only the payload is given to this
|
* received frames (loopback). Since only the payload is given to this
|
||||||
* function, it check for loopback.
|
* function, it check for loopback.
|
||||||
*/
|
*/
|
||||||
static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u16 length)
|
static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u16 length)
|
||||||
{
|
{
|
||||||
struct port *port;
|
struct port *port;
|
||||||
|
int ret = RX_HANDLER_ANOTHER;
|
||||||
|
|
||||||
if (length >= sizeof(struct lacpdu)) {
|
if (length >= sizeof(struct lacpdu)) {
|
||||||
|
|
||||||
@ -2184,11 +2185,12 @@ static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u
|
|||||||
if (!port->slave) {
|
if (!port->slave) {
|
||||||
pr_warning("%s: Warning: port of slave %s is uninitialized\n",
|
pr_warning("%s: Warning: port of slave %s is uninitialized\n",
|
||||||
slave->dev->name, slave->dev->master->name);
|
slave->dev->name, slave->dev->master->name);
|
||||||
return;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
switch (lacpdu->subtype) {
|
switch (lacpdu->subtype) {
|
||||||
case AD_TYPE_LACPDU:
|
case AD_TYPE_LACPDU:
|
||||||
|
ret = RX_HANDLER_CONSUMED;
|
||||||
pr_debug("Received LACPDU on port %d\n",
|
pr_debug("Received LACPDU on port %d\n",
|
||||||
port->actor_port_number);
|
port->actor_port_number);
|
||||||
/* Protect against concurrent state machines */
|
/* Protect against concurrent state machines */
|
||||||
@ -2198,6 +2200,7 @@ static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u
|
|||||||
break;
|
break;
|
||||||
|
|
||||||
case AD_TYPE_MARKER:
|
case AD_TYPE_MARKER:
|
||||||
|
ret = RX_HANDLER_CONSUMED;
|
||||||
// No need to convert fields to Little Endian since we don't use the marker's fields.
|
// No need to convert fields to Little Endian since we don't use the marker's fields.
|
||||||
|
|
||||||
switch (((struct bond_marker *)lacpdu)->tlv_type) {
|
switch (((struct bond_marker *)lacpdu)->tlv_type) {
|
||||||
@ -2219,6 +2222,7 @@ static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@ -2456,18 +2460,20 @@ out:
|
|||||||
return NETDEV_TX_OK;
|
return NETDEV_TX_OK;
|
||||||
}
|
}
|
||||||
|
|
||||||
void bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond,
|
int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond,
|
||||||
struct slave *slave)
|
struct slave *slave)
|
||||||
{
|
{
|
||||||
|
int ret = RX_HANDLER_ANOTHER;
|
||||||
if (skb->protocol != PKT_TYPE_LACPDU)
|
if (skb->protocol != PKT_TYPE_LACPDU)
|
||||||
return;
|
return ret;
|
||||||
|
|
||||||
if (!pskb_may_pull(skb, sizeof(struct lacpdu)))
|
if (!pskb_may_pull(skb, sizeof(struct lacpdu)))
|
||||||
return;
|
return ret;
|
||||||
|
|
||||||
read_lock(&bond->lock);
|
read_lock(&bond->lock);
|
||||||
bond_3ad_rx_indication((struct lacpdu *) skb->data, slave, skb->len);
|
ret = bond_3ad_rx_indication((struct lacpdu *) skb->data, slave, skb->len);
|
||||||
read_unlock(&bond->lock);
|
read_unlock(&bond->lock);
|
||||||
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -274,7 +274,7 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave);
|
|||||||
void bond_3ad_handle_link_change(struct slave *slave, char link);
|
void bond_3ad_handle_link_change(struct slave *slave, char link);
|
||||||
int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info);
|
int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info);
|
||||||
int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev);
|
int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev);
|
||||||
void bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond,
|
int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond,
|
||||||
struct slave *slave);
|
struct slave *slave);
|
||||||
int bond_3ad_set_carrier(struct bonding *bond);
|
int bond_3ad_set_carrier(struct bonding *bond);
|
||||||
void bond_3ad_update_lacp_rate(struct bonding *bond);
|
void bond_3ad_update_lacp_rate(struct bonding *bond);
|
||||||
|
@ -1444,8 +1444,9 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
|
|||||||
struct sk_buff *skb = *pskb;
|
struct sk_buff *skb = *pskb;
|
||||||
struct slave *slave;
|
struct slave *slave;
|
||||||
struct bonding *bond;
|
struct bonding *bond;
|
||||||
void (*recv_probe)(struct sk_buff *, struct bonding *,
|
int (*recv_probe)(struct sk_buff *, struct bonding *,
|
||||||
struct slave *);
|
struct slave *);
|
||||||
|
int ret = RX_HANDLER_ANOTHER;
|
||||||
|
|
||||||
skb = skb_share_check(skb, GFP_ATOMIC);
|
skb = skb_share_check(skb, GFP_ATOMIC);
|
||||||
if (unlikely(!skb))
|
if (unlikely(!skb))
|
||||||
@ -1464,8 +1465,12 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
|
|||||||
struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
|
struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
|
||||||
|
|
||||||
if (likely(nskb)) {
|
if (likely(nskb)) {
|
||||||
recv_probe(nskb, bond, slave);
|
ret = recv_probe(nskb, bond, slave);
|
||||||
dev_kfree_skb(nskb);
|
dev_kfree_skb(nskb);
|
||||||
|
if (ret == RX_HANDLER_CONSUMED) {
|
||||||
|
consume_skb(skb);
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1487,7 +1492,7 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
|
|||||||
memcpy(eth_hdr(skb)->h_dest, bond->dev->dev_addr, ETH_ALEN);
|
memcpy(eth_hdr(skb)->h_dest, bond->dev->dev_addr, ETH_ALEN);
|
||||||
}
|
}
|
||||||
|
|
||||||
return RX_HANDLER_ANOTHER;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* enslave device <slave> to bond device <master> */
|
/* enslave device <slave> to bond device <master> */
|
||||||
@ -2723,7 +2728,7 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void bond_arp_rcv(struct sk_buff *skb, struct bonding *bond,
|
static int bond_arp_rcv(struct sk_buff *skb, struct bonding *bond,
|
||||||
struct slave *slave)
|
struct slave *slave)
|
||||||
{
|
{
|
||||||
struct arphdr *arp;
|
struct arphdr *arp;
|
||||||
@ -2731,7 +2736,7 @@ static void bond_arp_rcv(struct sk_buff *skb, struct bonding *bond,
|
|||||||
__be32 sip, tip;
|
__be32 sip, tip;
|
||||||
|
|
||||||
if (skb->protocol != __cpu_to_be16(ETH_P_ARP))
|
if (skb->protocol != __cpu_to_be16(ETH_P_ARP))
|
||||||
return;
|
return RX_HANDLER_ANOTHER;
|
||||||
|
|
||||||
read_lock(&bond->lock);
|
read_lock(&bond->lock);
|
||||||
|
|
||||||
@ -2776,6 +2781,7 @@ static void bond_arp_rcv(struct sk_buff *skb, struct bonding *bond,
|
|||||||
|
|
||||||
out_unlock:
|
out_unlock:
|
||||||
read_unlock(&bond->lock);
|
read_unlock(&bond->lock);
|
||||||
|
return RX_HANDLER_ANOTHER;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -9122,13 +9122,34 @@ static int __devinit bnx2x_prev_unload_common(struct bnx2x *bp)
 	return bnx2x_prev_mcp_done(bp);
 }
 
+/* previous driver DMAE transaction may have occurred when pre-boot stage ended
+ * and boot began, or when kdump kernel was loaded. Either case would invalidate
+ * the addresses of the transaction, resulting in was-error bit set in the pci
+ * causing all hw-to-host pcie transactions to timeout. If this happened we want
+ * to clear the interrupt which detected this from the pglueb and the was done
+ * bit
+ */
+static void __devinit bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
+{
+	u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
+	if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
+		BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing");
+		REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << BP_FUNC(bp));
+	}
+}
+
 static int __devinit bnx2x_prev_unload(struct bnx2x *bp)
 {
 	int time_counter = 10;
 	u32 rc, fw, hw_lock_reg, hw_lock_val;
 	BNX2X_DEV_INFO("Entering Previous Unload Flow\n");
 
+	/* clear hw from errors which may have resulted from an interrupted
+	 * dmae transaction.
+	 */
+	bnx2x_prev_interrupted_dmae(bp);
+
 	/* Release previously held locks */
 	hw_lock_reg = (BP_FUNC(bp) <= 5) ?
 		      (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) :
 		      (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8);
@ -3335,6 +3335,8 @@ static int __devinit ehea_probe_adapter(struct platform_device *dev,
 		goto out_shutdown_ports;
 	}
 
+	/* Handle any events that might be pending. */
+	tasklet_hi_schedule(&adapter->neq_tasklet);
 
 	ret = 0;
 	goto out;
@ -3380,7 +3380,7 @@ static void e1000_dump(struct e1000_adapter *adapter)
 	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
 		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
 		struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i];
-		struct my_u { u64 a; u64 b; };
+		struct my_u { __le64 a; __le64 b; };
 		struct my_u *u = (struct my_u *)tx_desc;
 		const char *type;
 
@ -3424,7 +3424,7 @@ rx_ring_summary:
 	for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
 		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
 		struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i];
-		struct my_u { u64 a; u64 b; };
+		struct my_u { __le64 a; __le64 b; };
 		struct my_u *u = (struct my_u *)rx_desc;
 		const char *type;
 
@ -1111,9 +1111,12 @@ msi_only:
 	adapter->flags |= IGB_FLAG_HAS_MSI;
 out:
 	/* Notify the stack of the (possibly) reduced queue counts. */
+	rtnl_lock();
 	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
-	return netif_set_real_num_rx_queues(adapter->netdev,
+	err = netif_set_real_num_rx_queues(adapter->netdev,
 					    adapter->num_rx_queues);
+	rtnl_unlock();
+	return err;
 }
 
 /**
@ -2771,8 +2774,6 @@ void igb_configure_tx_ring(struct igb_adapter *adapter,
 
 	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
 	wr32(E1000_TXDCTL(reg_idx), txdctl);
-
-	netdev_tx_reset_queue(txring_txq(ring));
 }
 
 /**
@ -3282,6 +3283,8 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
 		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
 	}
 
+	netdev_tx_reset_queue(txring_txq(tx_ring));
+
 	size = sizeof(struct igb_tx_buffer) * tx_ring->count;
 	memset(tx_ring->tx_buffer_info, 0, size);
 
@ -6796,18 +6799,7 @@ static int igb_resume(struct device *dev)
 	pci_enable_wake(pdev, PCI_D3hot, 0);
 	pci_enable_wake(pdev, PCI_D3cold, 0);
 
-	if (!rtnl_is_locked()) {
-		/*
-		 * shut up ASSERT_RTNL() warning in
-		 * netif_set_real_num_tx/rx_queues.
-		 */
-		rtnl_lock();
-		err = igb_init_interrupt_scheme(adapter);
-		rtnl_unlock();
-	} else {
-		err = igb_init_interrupt_scheme(adapter);
-	}
-	if (err) {
+	if (igb_init_interrupt_scheme(adapter)) {
 		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
 		return -ENOMEM;
 	}
@ -574,9 +574,6 @@ extern struct ixgbe_info ixgbe_82599_info;
 extern struct ixgbe_info ixgbe_X540_info;
 #ifdef CONFIG_IXGBE_DCB
 extern const struct dcbnl_rtnl_ops dcbnl_ops;
-extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
-			      struct ixgbe_dcb_config *dst_dcb_cfg,
-			      int tc_max);
 #endif
 
 extern char ixgbe_driver_name[];
@ -44,18 +44,26 @@
 #define DCB_NO_HW_CHG   1  /* DCB configuration did not change */
 #define DCB_HW_CHG      2  /* DCB configuration changed, no reset */
 
-int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *scfg,
-		       struct ixgbe_dcb_config *dcfg, int tc_max)
+static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max)
 {
+	struct ixgbe_dcb_config *scfg = &adapter->temp_dcb_cfg;
+	struct ixgbe_dcb_config *dcfg = &adapter->dcb_cfg;
 	struct tc_configuration *src = NULL;
 	struct tc_configuration *dst = NULL;
 	int i, j;
 	int tx = DCB_TX_CONFIG;
 	int rx = DCB_RX_CONFIG;
 	int changes = 0;
+#ifdef IXGBE_FCOE
+	struct dcb_app app = {
+			      .selector = DCB_APP_IDTYPE_ETHTYPE,
+			      .protocol = ETH_P_FCOE,
+			     };
+	u8 up = dcb_getapp(adapter->netdev, &app);
 
-	if (!scfg || !dcfg)
-		return changes;
+	if (up && !(up & (1 << adapter->fcoe.up)))
+		changes |= BIT_APP_UPCHG;
+#endif
 
 	for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) {
 		src = &scfg->tc_config[i - DCB_PG_ATTR_TC_0];
@ -332,28 +340,12 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	int ret = DCB_NO_HW_CHG;
 	int i;
-#ifdef IXGBE_FCOE
-	struct dcb_app app = {
-			      .selector = DCB_APP_IDTYPE_ETHTYPE,
-			      .protocol = ETH_P_FCOE,
-			     };
-	u8 up;
-
-	/* In IEEE mode, use the IEEE Ethertype selector value */
-	if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) {
-		app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
-		up = dcb_ieee_getapp_mask(netdev, &app);
-	} else {
-		up = dcb_getapp(netdev, &app);
-	}
-#endif
 
 	/* Fail command if not in CEE mode */
 	if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
 		return ret;
 
-	adapter->dcb_set_bitmap |= ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg,
-						      &adapter->dcb_cfg,
+	adapter->dcb_set_bitmap |= ixgbe_copy_dcb_cfg(adapter,
 						      MAX_TRAFFIC_CLASS);
 	if (!adapter->dcb_set_bitmap)
 		return ret;
@ -440,8 +432,13 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
 	 * FCoE is using changes. This happens if the APP info
 	 * changes or the up2tc mapping is updated.
 	 */
-	if ((up && !(up & (1 << adapter->fcoe.up))) ||
-	    (adapter->dcb_set_bitmap & BIT_APP_UPCHG)) {
+	if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
+		struct dcb_app app = {
+				      .selector = DCB_APP_IDTYPE_ETHTYPE,
+				      .protocol = ETH_P_FCOE,
+				     };
+		u8 up = dcb_getapp(netdev, &app);
+
 		adapter->fcoe.up = ffs(up) - 1;
 		ixgbe_dcbnl_devreset(netdev);
 		ret = DCB_HW_CHG_RST;
@ -1780,6 +1780,8 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
 		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
 	}
 
+	netdev_tx_reset_queue(txring_txq(tx_ring));
+
 	/* re-map buffers to ring, store next to clean values */
 	ixgbe_alloc_rx_buffers(rx_ring, count);
 	rx_ring->next_to_clean = rx_ntc;
@ -2671,8 +2671,6 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
 	/* enable queue */
 	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
 
-	netdev_tx_reset_queue(txring_txq(ring));
-
 	/* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
 	if (hw->mac.type == ixgbe_mac_82598EB &&
 	    !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
@ -4167,6 +4165,8 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
 		ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
 	}
 
+	netdev_tx_reset_queue(txring_txq(tx_ring));
+
 	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
 	memset(tx_ring->tx_buffer_info, 0, size);
 
@ -4418,8 +4418,8 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 	adapter->dcb_cfg.pfc_mode_enable = false;
 	adapter->dcb_set_bitmap = 0x00;
 	adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
-	ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
-			   MAX_TRAFFIC_CLASS);
+	memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
+	       sizeof(adapter->temp_dcb_cfg));
 
 #endif
 
@ -4866,10 +4866,12 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
 	netif_device_detach(netdev);
 
 	if (netif_running(netdev)) {
+		rtnl_lock();
 		ixgbe_down(adapter);
 		ixgbe_free_irq(adapter);
 		ixgbe_free_all_tx_resources(adapter);
 		ixgbe_free_all_rx_resources(adapter);
+		rtnl_unlock();
 	}
 
 	ixgbe_clear_interrupt_scheme(adapter);
@ -618,10 +618,8 @@ static void ks8851_irq_work(struct work_struct *work)
 	netif_dbg(ks, intr, ks->netdev,
 		  "%s: status 0x%04x\n", __func__, status);
 
-	if (status & IRQ_LCI) {
-		/* should do something about checking link status */
+	if (status & IRQ_LCI)
 		handled |= IRQ_LCI;
-	}
 
 	if (status & IRQ_LDI) {
 		u16 pmecr = ks8851_rdreg16(ks, KS_PMECR);
@ -684,6 +682,9 @@ static void ks8851_irq_work(struct work_struct *work)
 
 	mutex_unlock(&ks->lock);
 
+	if (status & IRQ_LCI)
+		mii_check_link(&ks->mii);
+
 	if (status & IRQ_TXI)
 		netif_wake_queue(ks->netdev);
 
@ -61,8 +61,12 @@
 #define R8169_MSG_DEFAULT \
 	(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
 
-#define TX_BUFFS_AVAIL(tp) \
-	(tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1)
+#define TX_SLOTS_AVAIL(tp) \
+	(tp->dirty_tx + NUM_TX_DESC - tp->cur_tx)
+
+/* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
+#define TX_FRAGS_READY_FOR(tp,nr_frags) \
+	(TX_SLOTS_AVAIL(tp) >= (nr_frags + 1))
 
 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
    The RTL chips use a 64 element hash table based on the Ethernet CRC. */
@ -5115,7 +5119,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 	u32 opts[2];
 	int frags;
 
-	if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
+	if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
 		netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
 		goto err_stop_0;
 	}
@ -5169,7 +5173,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 
 	mmiowb();
 
-	if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) {
+	if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
 		/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
 		 * not miss a ring update when it notices a stopped queue.
 		 */
@ -5183,7 +5187,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 		 * can't.
 		 */
 		smp_mb();
-		if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)
+		if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS))
 			netif_wake_queue(dev);
 	}
 
@ -5306,7 +5310,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
 		 */
 		smp_mb();
 		if (netif_queue_stopped(dev) &&
-		    (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
+		    TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
 			netif_wake_queue(dev);
 		}
 		/*
@ -1349,7 +1349,7 @@ static int efx_probe_interrupts(struct efx_nic *efx)
 	}
 
 	/* RSS might be usable on VFs even if it is disabled on the PF */
-	efx->rss_spread = (efx->n_rx_channels > 1 ?
+	efx->rss_spread = ((efx->n_rx_channels > 1 || !efx_sriov_wanted(efx)) ?
 			   efx->n_rx_channels : efx_vf_size(efx));
 
 	return 0;
@ -259,7 +259,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
 
 xmit_world:
 	skb->ip_summed = ip_summed;
-	skb_set_dev(skb, vlan->lowerdev);
+	skb->dev = vlan->lowerdev;
 	return dev_queue_xmit(skb);
 }
 
@ -1,5 +1,6 @@
 #include <linux/etherdevice.h>
 #include <linux/if_macvlan.h>
+#include <linux/if_vlan.h>
 #include <linux/interrupt.h>
 #include <linux/nsproxy.h>
 #include <linux/compat.h>
@ -759,6 +760,8 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
 	struct macvlan_dev *vlan;
 	int ret;
 	int vnet_hdr_len = 0;
+	int vlan_offset = 0;
+	int copied;
 
 	if (q->flags & IFF_VNET_HDR) {
 		struct virtio_net_hdr vnet_hdr;
@ -773,18 +776,48 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
 		if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr)))
 			return -EFAULT;
 	}
+	copied = vnet_hdr_len;
+
+	if (!vlan_tx_tag_present(skb))
+		len = min_t(int, skb->len, len);
+	else {
+		int copy;
+		struct {
+			__be16 h_vlan_proto;
+			__be16 h_vlan_TCI;
+		} veth;
+		veth.h_vlan_proto = htons(ETH_P_8021Q);
+		veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));
+
+		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
+		len = min_t(int, skb->len + VLAN_HLEN, len);
+
+		copy = min_t(int, vlan_offset, len);
+		ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
+		len -= copy;
+		copied += copy;
+		if (ret || !len)
+			goto done;
+
+		copy = min_t(int, sizeof(veth), len);
+		ret = memcpy_toiovecend(iv, (void *)&veth, copied, copy);
+		len -= copy;
+		copied += copy;
+		if (ret || !len)
+			goto done;
+	}
 
-	len = min_t(int, skb->len, len);
-
-	ret = skb_copy_datagram_const_iovec(skb, 0, iv, vnet_hdr_len, len);
+	ret = skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len);
+	copied += len;
 
+done:
 	rcu_read_lock_bh();
 	vlan = rcu_dereference_bh(q->vlan);
 	if (vlan)
-		macvlan_count_rx(vlan, len, ret == 0, 0);
+		macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0);
 	rcu_read_unlock_bh();
 
-	return ret ? ret : (len + vnet_hdr_len);
+	return ret ? ret : copied;
 }
 
 static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
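As a side note on the frame layout the macvtap hunk above reconstructs: the userspace sketch below (illustrative only; the priority and VLAN ID are made-up values) shows the 4-byte 802.1Q header that is copied out at offsetof(struct vlan_ethhdr, h_vlan_proto), i.e. byte 12 of the frame, right after the destination and source MAC addresses and before the original EtherType.

/* Illustrative only -- not kernel code. */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define ETH_P_8021Q 0x8100	/* 802.1Q VLAN Extended Header */

int main(void)
{
	struct {
		uint16_t h_vlan_proto;	/* always ETH_P_8021Q, network order */
		uint16_t h_vlan_TCI;	/* PCP(3) | DEI(1) | VLAN ID(12) */
	} veth;
	uint16_t tci = (5 << 13) | 100;	/* priority 5, VLAN 100 (example) */
	unsigned char *p = (unsigned char *)&veth;

	veth.h_vlan_proto = htons(ETH_P_8021Q);
	veth.h_vlan_TCI = htons(tci);

	/* prints "81 00 a0 64": the 4 bytes inserted at frame offset 12 */
	printf("%02x %02x %02x %02x\n", p[0], p[1], p[2], p[3]);
	return 0;
}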
@ -83,6 +83,7 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
 	struct cdc_state *info = (void *) &dev->data;
 	int status;
 	int rndis;
+	bool android_rndis_quirk = false;
 	struct usb_driver *driver = driver_of(intf);
 	struct usb_cdc_mdlm_desc *desc = NULL;
 	struct usb_cdc_mdlm_detail_desc *detail = NULL;
@ -195,6 +196,11 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
 					info->control,
 					info->u->bSlaveInterface0,
 					info->data);
+				/* fall back to hard-wiring for RNDIS */
+				if (rndis) {
+					android_rndis_quirk = true;
+					goto next_desc;
+				}
 				goto bad_desc;
 			}
 			if (info->control != intf) {
@ -271,11 +277,15 @@ next_desc:
 	/* Microsoft ActiveSync based and some regular RNDIS devices lack the
 	 * CDC descriptors, so we'll hard-wire the interfaces and not check
 	 * for descriptors.
+	 *
+	 * Some Android RNDIS devices have a CDC Union descriptor pointing
+	 * to non-existing interfaces. Ignore that and attempt the same
+	 * hard-wired 0 and 1 interfaces.
 	 */
-	if (rndis && !info->u) {
+	if (rndis && (!info->u || android_rndis_quirk)) {
 		info->control = usb_ifnum_to_if(dev->udev, 0);
 		info->data = usb_ifnum_to_if(dev->udev, 1);
-		if (!info->control || !info->data) {
+		if (!info->control || !info->data || info->control != intf) {
 			dev_dbg(&intf->dev,
 				"rndis: master #0/%p slave #1/%p\n",
 				info->control,
@ -373,7 +373,7 @@ static void ar9003_hw_spur_ofdm_work(struct ath_hw *ah,
 		else
 			spur_subchannel_sd = 0;
 
-		spur_freq_sd = (freq_offset << 9) / 11;
+		spur_freq_sd = ((freq_offset + 10) << 9) / 11;
 
 	} else {
 		if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL,
@ -382,7 +382,7 @@ static void ar9003_hw_spur_ofdm_work(struct ath_hw *ah,
 		else
 			spur_subchannel_sd = 1;
 
-		spur_freq_sd = (freq_offset << 9) / 11;
+		spur_freq_sd = ((freq_offset - 10) << 9) / 11;
 
 	}
 
@ -2637,6 +2637,7 @@ static int brcmf_sdbrcm_dpc_thread(void *data)
 			/* after stopping the bus, exit thread */
 			brcmf_sdbrcm_bus_stop(bus->sdiodev->dev);
 			bus->dpc_tsk = NULL;
+			spin_lock_irqsave(&bus->dpc_tl_lock, flags);
 			break;
 		}
 
@ -773,8 +773,7 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
 	struct sk_buff *skb;
 	__le16 fc = hdr->frame_control;
 	struct iwl_rxon_context *ctx;
-	struct page *p;
-	int offset;
+	unsigned int hdrlen, fraglen;
 
 	/* We only process data packets if the interface is open */
 	if (unlikely(!priv->is_open)) {
@ -788,16 +787,24 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
 	    iwlagn_set_decrypted_flag(priv, hdr, ampdu_status, stats))
 		return;
 
-	skb = dev_alloc_skb(128);
+	/* Dont use dev_alloc_skb(), we'll have enough headroom once
+	 * ieee80211_hdr pulled.
+	 */
+	skb = alloc_skb(128, GFP_ATOMIC);
 	if (!skb) {
-		IWL_ERR(priv, "dev_alloc_skb failed\n");
+		IWL_ERR(priv, "alloc_skb failed\n");
 		return;
 	}
+	hdrlen = min_t(unsigned int, len, skb_tailroom(skb));
+	memcpy(skb_put(skb, hdrlen), hdr, hdrlen);
+	fraglen = len - hdrlen;
 
-	offset = (void *)hdr - rxb_addr(rxb);
-	p = rxb_steal_page(rxb);
-	skb_add_rx_frag(skb, 0, p, offset, len, len);
+	if (fraglen) {
+		int offset = (void *)hdr + hdrlen - rxb_addr(rxb);
 
+		skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
+				fraglen, rxb->truesize);
+	}
 	iwl_update_stats(priv, false, fc, len);
 
 	/*
@ -374,8 +374,9 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
 	if (WARN_ON(!rxb))
 		return;
 
+	rxcb.truesize = PAGE_SIZE << hw_params(trans).rx_page_order;
 	dma_unmap_page(trans->dev, rxb->page_dma,
-		       PAGE_SIZE << hw_params(trans).rx_page_order,
+		       rxcb.truesize,
 		       DMA_FROM_DEVICE);
 
 	rxcb._page = rxb->page;
@ -260,6 +260,7 @@ static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
 
 struct iwl_rx_cmd_buffer {
 	struct page *_page;
+	unsigned int truesize;
 };
 
 static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
@ -44,6 +44,7 @@
 #include <asm/ropes.h>
 #include <asm/mckinley.h>	/* for proc_mckinley_root */
 #include <asm/runway.h>		/* for proc_runway_root */
+#include <asm/page.h>		/* for PAGE0 */
 #include <asm/pdc.h>		/* for PDC_MODEL_* */
 #include <asm/pdcpat.h>		/* for is_pdc_pat() */
 #include <asm/parisc-device.h>
@ -1431,7 +1431,10 @@ void devm_regulator_put(struct regulator *regulator)
 
 	rc = devres_destroy(regulator->dev, devm_regulator_release,
 			    devm_regulator_match, regulator);
-	WARN_ON(rc);
+	if (rc == 0)
+		regulator_put(regulator);
+	else
+		WARN_ON(rc);
 }
 EXPORT_SYMBOL_GPL(devm_regulator_put);
 
@ -684,7 +684,7 @@ static int max8997_set_voltage_buck(struct regulator_dev *rdev,
 		}
 
 		new_val++;
-	} while (desc->min + desc->step + new_val <= desc->max);
+	} while (desc->min + desc->step * new_val <= desc->max);
 
 	new_idx = tmp_idx;
 	new_val = tmp_val;
@ -218,6 +218,9 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
 
 	if (!shost->shost_gendev.parent)
 		shost->shost_gendev.parent = dev ? dev : &platform_bus;
+	if (!dma_dev)
+		dma_dev = shost->shost_gendev.parent;
+
 	shost->dma_dev = dma_dev;
 
 	error = device_add(&shost->shost_gendev);
@ -1367,6 +1367,9 @@ qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
 	struct qla_hw_data *ha = vha->hw;
 	int rval = 0;
 
+	if (ha->flags.isp82xx_reset_hdlr_active)
+		return -EBUSY;
+
 	rval = qla2x00_optrom_setup(bsg_job, vha, 0);
 	if (rval)
 		return rval;
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user