Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/ethernet/intel/e1000e/param.c
	drivers/net/wireless/iwlwifi/iwl-agn-rx.c
	drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
	drivers/net/wireless/iwlwifi/iwl-trans.h

Resolved the iwlwifi conflict with mainline using 3-way diff posted
by John Linville and Stephen Rothwell.  In 'net' we added a bug
fix to make iwlwifi report a more accurate skb->truesize but this
conflicted with RX path changes that happened meanwhile in net-next.

In e1000e a conflict arose in the validation code for settings of
adapter->itr.  'net-next' had more sophisticated logic so that
logic was used.

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 0d6c4a2e46

Documentation/ABI/testing/sysfs-bus-hsi (new file, 19 lines)
@@ -0,0 +1,19 @@
+What: /sys/bus/hsi
+Date: April 2012
+KernelVersion: 3.4
+Contact: Carlos Chinea <carlos.chinea@nokia.com>
+Description:
+High Speed Synchronous Serial Interface (HSI) is a
+serial interface mainly used for connecting application
+engines (APE) with cellular modem engines (CMT) in cellular
+handsets.
+The bus will be populated with devices (hsi_clients) representing
+the protocols available in the system. Bus drivers implement
+those protocols.
+
+What: /sys/bus/hsi/devices/.../modalias
+Date: April 2012
+KernelVersion: 3.4
+Contact: Carlos Chinea <carlos.chinea@nokia.com>
+Description: Stores the same MODALIAS value emitted by uevent
+Format: hsi:<hsi_client device name>
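The modalias attribute documented above is an ordinary sysfs text file, so its value can be read from user space much as udev does when matching modules. A minimal sketch in C; the path is passed on the command line, and no particular device name is assumed:

/*
 * Minimal sketch: print the MODALIAS value of an hsi_client device.
 * Usage: ./hsi_modalias /sys/bus/hsi/devices/<device>/modalias
 * No particular device name is assumed; pass the sysfs path yourself.
 */
#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
	char buf[256];
	FILE *f;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <modalias path>\n", argv[0]);
		return 1;
	}

	f = fopen(argv[1], "r");
	if (!f) {
		perror("fopen");
		return 1;
	}

	if (fgets(buf, sizeof(buf), f)) {
		buf[strcspn(buf, "\n")] = '\0';
		printf("modalias: %s\n", buf);
		/* Documented format: hsi:<hsi_client device name> */
		if (!strncmp(buf, "hsi:", 4))
			printf("hsi_client name: %s\n", buf + 4);
	}
	fclose(f);
	return 0;
}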
@@ -1,10 +1,10 @@
-* Calxeda SATA Controller
+* AHCI SATA Controller
 
 SATA nodes are defined to describe on-chip Serial ATA controllers.
 Each SATA controller should have its own node.
 
 Required properties:
-- compatible : compatible list, contains "calxeda,hb-ahci"
+- compatible : compatible list, contains "calxeda,hb-ahci" or "snps,spear-ahci"
 - interrupts : <interrupt mapping for SATA IRQ>
 - reg : <registers mapping>
 
@@ -14,4 +14,3 @@ Example:
 reg = <0xffe08000 0x1000>;
 interrupts = <115>;
 };
-
@@ -147,7 +147,7 @@ tcp_adv_win_scale - INTEGER
 (if tcp_adv_win_scale > 0) or bytes-bytes/2^(-tcp_adv_win_scale),
 if it is <= 0.
 Possible values are [-31, 31], inclusive.
-Default: 2
+Default: 1
 
 tcp_allowed_congestion_control - STRING
 Show/set the congestion control choices available to non-privileged
@@ -424,7 +424,7 @@ tcp_rmem - vector of 3 INTEGERs: min, default, max
 net.core.rmem_max. Calling setsockopt() with SO_RCVBUF disables
 automatic tuning of that socket's receive buffer size, in which
 case this value is ignored.
-Default: between 87380B and 4MB, depending on RAM size.
+Default: between 87380B and 6MB, depending on RAM size.
 
 tcp_sack - BOOLEAN
 Enable select acknowledgments (SACKS).
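As the tcp_rmem text above notes, a process that sets SO_RCVBUF explicitly opts out of receive-buffer autotuning, so the larger default ceiling introduced here no longer applies to that socket. A minimal user-space sketch of that interaction; the 256 KiB request is an arbitrary illustrative size, and the kernel doubles and caps the stored value as described in socket(7):

/*
 * Minimal sketch: pin a socket's receive buffer, which disables the
 * per-socket autotuning described in the tcp_rmem entry above.
 * The 256 KiB size is an arbitrary example, not a recommendation.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int rcvbuf = 256 * 1024;
	socklen_t len = sizeof(rcvbuf);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* From here on tcp_rmem[1] (the autotuning default) no longer applies
	 * to this socket; the requested value, capped by net.core.rmem_max
	 * and doubled by the kernel for bookkeeping, is used instead. */
	if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf, sizeof(rcvbuf)) < 0)
		perror("setsockopt(SO_RCVBUF)");

	if (getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf, &len) == 0)
		printf("effective SO_RCVBUF: %d bytes\n", rcvbuf);

	close(fd);
	return 0;
}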
@@ -9,7 +9,7 @@ architectures).
 
 II. How does it work?
 
-There are four per-task flags used for that, PF_NOFREEZE, PF_FROZEN, TIF_FREEZE
+There are three per-task flags used for that, PF_NOFREEZE, PF_FROZEN
 and PF_FREEZER_SKIP (the last one is auxiliary). The tasks that have
 PF_NOFREEZE unset (all user space processes and some kernel threads) are
 regarded as 'freezable' and treated in a special way before the system enters a
@@ -17,30 +17,31 @@ suspend state as well as before a hibernation image is created (in what follows
 we only consider hibernation, but the description also applies to suspend).
 
 Namely, as the first step of the hibernation procedure the function
-freeze_processes() (defined in kernel/power/process.c) is called. It executes
-try_to_freeze_tasks() that sets TIF_FREEZE for all of the freezable tasks and
-either wakes them up, if they are kernel threads, or sends fake signals to them,
-if they are user space processes. A task that has TIF_FREEZE set, should react
-to it by calling the function called __refrigerator() (defined in
-kernel/freezer.c), which sets the task's PF_FROZEN flag, changes its state
-to TASK_UNINTERRUPTIBLE and makes it loop until PF_FROZEN is cleared for it.
-Then, we say that the task is 'frozen' and therefore the set of functions
-handling this mechanism is referred to as 'the freezer' (these functions are
-defined in kernel/power/process.c, kernel/freezer.c & include/linux/freezer.h).
-User space processes are generally frozen before kernel threads.
+freeze_processes() (defined in kernel/power/process.c) is called. A system-wide
+variable system_freezing_cnt (as opposed to a per-task flag) is used to indicate
+whether the system is to undergo a freezing operation. And freeze_processes()
+sets this variable. After this, it executes try_to_freeze_tasks() that sends a
+fake signal to all user space processes, and wakes up all the kernel threads.
+All freezable tasks must react to that by calling try_to_freeze(), which
+results in a call to __refrigerator() (defined in kernel/freezer.c), which sets
+the task's PF_FROZEN flag, changes its state to TASK_UNINTERRUPTIBLE and makes
+it loop until PF_FROZEN is cleared for it. Then, we say that the task is
+'frozen' and therefore the set of functions handling this mechanism is referred
+to as 'the freezer' (these functions are defined in kernel/power/process.c,
+kernel/freezer.c & include/linux/freezer.h). User space processes are generally
+frozen before kernel threads.
 
 __refrigerator() must not be called directly. Instead, use the
 try_to_freeze() function (defined in include/linux/freezer.h), that checks
-the task's TIF_FREEZE flag and makes the task enter __refrigerator() if the
-flag is set.
+if the task is to be frozen and makes the task enter __refrigerator().
 
 For user space processes try_to_freeze() is called automatically from the
 signal-handling code, but the freezable kernel threads need to call it
 explicitly in suitable places or use the wait_event_freezable() or
 wait_event_freezable_timeout() macros (defined in include/linux/freezer.h)
-that combine interruptible sleep with checking if TIF_FREEZE is set and calling
-try_to_freeze(). The main loop of a freezable kernel thread may look like the
-following one:
+that combine interruptible sleep with checking if the task is to be frozen and
+calling try_to_freeze(). The main loop of a freezable kernel thread may look
+like the following one:
 
 set_freezable();
 do {
@@ -53,7 +54,7 @@ following one:
 (from drivers/usb/core/hub.c::hub_thread()).
 
 If a freezable kernel thread fails to call try_to_freeze() after the freezer has
-set TIF_FREEZE for it, the freezing of tasks will fail and the entire
+initiated a freezing operation, the freezing of tasks will fail and the entire
 hibernation operation will be cancelled. For this reason, freezable kernel
 threads must call try_to_freeze() somewhere or use one of the
 wait_event_freezable() and wait_event_freezable_timeout() macros.
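The quoted example loop is truncated at "do {" in this hunk; the sketch below, with a placeholder thread name, work step and one-second wake-up interval, shows what a complete freezable kernel-thread loop looks like under the flag-less API this patch documents:

/*
 * Sketch of a freezable kernel thread, matching the loop described above.
 * The thread name, the one-second interval and the "real work" placeholder
 * are illustrative only.
 */
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/sched.h>
#include <linux/err.h>

static struct task_struct *example_task;

static int example_thread(void *data)
{
	set_freezable();	/* kernel threads are PF_NOFREEZE unless they opt in */

	do {
		/* Enters __refrigerator() if a freezing operation is in progress. */
		try_to_freeze();

		/* ... real work would go here ... */

		/* Interruptible sleep; wait_event_freezable() would combine
		 * the sleep and the freezer check in a single call. */
		schedule_timeout_interruptible(HZ);
	} while (!kthread_should_stop());

	return 0;
}

static int __init example_init(void)
{
	example_task = kthread_run(example_thread, NULL, "example_freezable");
	return IS_ERR(example_task) ? PTR_ERR(example_task) : 0;
}

static void __exit example_exit(void)
{
	kthread_stop(example_task);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");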
@@ -123,7 +123,7 @@ KEY SERVICE OVERVIEW
 
 The key service provides a number of features besides keys:
 
-(*) The key service defines two special key types:
+(*) The key service defines three special key types:
 
 (+) "keyring"
 
@@ -137,6 +137,18 @@ The key service provides a number of features besides keys:
 blobs of data. These can be created, updated and read by userspace,
 and aren't intended for use by kernel services.
 
+(+) "logon"
+
+Like a "user" key, a "logon" key has a payload that is an arbitrary
+blob of data. It is intended as a place to store secrets which are
+accessible to the kernel but not to userspace programs.
+
+The description can be arbitrary, but must be prefixed with a non-zero
+length string that describes the key "subclass". The subclass is
+separated from the rest of the description by a ':'. "logon" keys can
+be created and updated from userspace, but the payload is only
+readable from kernel space.
+
 (*) Each process subscribes to three keyrings: a thread-specific keyring, a
 process-specific keyring, and a session-specific keyring.
 
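A small user-space sketch of the behaviour described above for "logon" keys, using add_key(2) and keyctl_read_alloc() from libkeyutils; the "krb5:" subclass, the description and the payload are purely illustrative:

/*
 * Sketch: create a "logon" key from user space and show that its payload
 * cannot be read back, per the description above.
 * Build with: cc logon_key.c -lkeyutils
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <keyutils.h>

int main(void)
{
	const char payload[] = "s3kr1t";
	key_serial_t key;
	void *buf;
	long n;

	/* "logon" descriptions must carry a "<subclass>:" prefix. */
	key = add_key("logon", "krb5:user@EXAMPLE.COM",
		      payload, sizeof(payload), KEY_SPEC_SESSION_KEYRING);
	if (key < 0) {
		perror("add_key");
		return 1;
	}
	printf("created logon key %d\n", key);

	/* Reading the payload back is expected to fail: the data is only
	 * accessible to kernel services. */
	n = keyctl_read_alloc(key, &buf);
	if (n < 0)
		printf("keyctl_read_alloc failed as expected: %s\n",
		       strerror(errno));

	return 0;
}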
@@ -5887,11 +5887,11 @@ F: Documentation/scsi/st.txt
 F: drivers/scsi/st*
 
 SCTP PROTOCOL
-M: Vlad Yasevich <vladislav.yasevich@hp.com>
+M: Vlad Yasevich <vyasevich@gmail.com>
 M: Sridhar Samudrala <sri@us.ibm.com>
 L: linux-sctp@vger.kernel.org
 W: http://lksctp.sourceforge.net
-S: Supported
+S: Maintained
 F: Documentation/networking/sctp.txt
 F: include/linux/sctp.h
 F: include/net/sctp/
Makefile (2 lines changed)

@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 4
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Saber-toothed Squirrel
 
 # *DOCUMENTATION*
@@ -1186,6 +1186,15 @@ if !MMU
 source "arch/arm/Kconfig-nommu"
 endif
 
+config ARM_ERRATA_326103
+	bool "ARM errata: FSR write bit incorrect on a SWP to read-only memory"
+	depends on CPU_V6
+	help
+	  Executing a SWP instruction to read-only memory does not set bit 11
+	  of the FSR on the ARM 1136 prior to r1p0. This causes the kernel to
+	  treat the access as a read, preventing a COW from occurring and
+	  causing the faulting task to livelock.
+
 config ARM_ERRATA_411920
 	bool "ARM errata: Invalidation of the Instruction Cache operation can fail"
 	depends on CPU_V6 || CPU_V6K
@@ -10,7 +10,7 @@
 intc: interrupt-controller@02080000 {
 compatible = "qcom,msm-8660-qgic";
 interrupt-controller;
-#interrupt-cells = <1>;
+#interrupt-cells = <3>;
 reg = < 0x02080000 0x1000 >,
 < 0x02081000 0x1000 >;
 };
@@ -19,6 +19,6 @@
 compatible = "qcom,msm-hsuart", "qcom,msm-uart";
 reg = <0x19c40000 0x1000>,
 <0x19c00000 0x1000>;
-interrupts = <195>;
+interrupts = <0 195 0x0>;
 };
 };
@@ -173,7 +173,7 @@
 mmc@5000 {
 compatible = "arm,primecell";
 reg = < 0x5000 0x1000>;
-interrupts = <22>;
+interrupts = <22 34>;
 };
 kmi@6000 {
 compatible = "arm,pl050", "arm,primecell";
@@ -41,7 +41,7 @@
 mmc@b000 {
 compatible = "arm,primecell";
 reg = <0xb000 0x1000>;
-interrupts = <23>;
+interrupts = <23 34>;
 };
 };
 };
@ -14,6 +14,8 @@ CONFIG_MODULE_FORCE_UNLOAD=y
|
||||
# CONFIG_BLK_DEV_BSG is not set
|
||||
CONFIG_BLK_DEV_INTEGRITY=y
|
||||
CONFIG_ARCH_S3C24XX=y
|
||||
# CONFIG_CPU_S3C2410 is not set
|
||||
CONFIG_CPU_S3C2440=y
|
||||
CONFIG_S3C_ADC=y
|
||||
CONFIG_S3C24XX_PWM=y
|
||||
CONFIG_MACH_MINI2440=y
|
||||
|
@ -118,6 +118,13 @@ extern void iwmmxt_task_switch(struct thread_info *);
|
||||
extern void vfp_sync_hwstate(struct thread_info *);
|
||||
extern void vfp_flush_hwstate(struct thread_info *);
|
||||
|
||||
struct user_vfp;
|
||||
struct user_vfp_exc;
|
||||
|
||||
extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *,
|
||||
struct user_vfp_exc __user *);
|
||||
extern int vfp_restore_user_hwstate(struct user_vfp __user *,
|
||||
struct user_vfp_exc __user *);
|
||||
#endif
|
||||
|
||||
/*
|
||||
|
@ -7,6 +7,8 @@
|
||||
|
||||
.macro set_tls_v6k, tp, tmp1, tmp2
|
||||
mcr p15, 0, \tp, c13, c0, 3 @ set TLS register
|
||||
mov \tmp1, #0
|
||||
mcr p15, 0, \tmp1, c13, c0, 2 @ clear user r/w TLS register
|
||||
.endm
|
||||
|
||||
.macro set_tls_v6, tp, tmp1, tmp2
|
||||
@ -15,6 +17,8 @@
|
||||
mov \tmp2, #0xffff0fff
|
||||
tst \tmp1, #HWCAP_TLS @ hardware TLS available?
|
||||
mcrne p15, 0, \tp, c13, c0, 3 @ yes, set TLS register
|
||||
movne \tmp1, #0
|
||||
mcrne p15, 0, \tmp1, c13, c0, 2 @ clear user r/w TLS register
|
||||
streq \tp, [\tmp2, #-15] @ set TLS value at 0xffff0ff0
|
||||
.endm
|
||||
|
||||
|
@ -155,10 +155,10 @@ static bool migrate_one_irq(struct irq_desc *desc)
|
||||
}
|
||||
|
||||
c = irq_data_get_irq_chip(d);
|
||||
if (c->irq_set_affinity)
|
||||
c->irq_set_affinity(d, affinity, true);
|
||||
else
|
||||
if (!c->irq_set_affinity)
|
||||
pr_debug("IRQ%u: unable to set affinity\n", d->irq);
|
||||
else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
|
||||
cpumask_copy(d->affinity, affinity);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -180,44 +180,23 @@ static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
|
||||
|
||||
static int preserve_vfp_context(struct vfp_sigframe __user *frame)
|
||||
{
|
||||
struct thread_info *thread = current_thread_info();
|
||||
struct vfp_hard_struct *h = &thread->vfpstate.hard;
|
||||
const unsigned long magic = VFP_MAGIC;
|
||||
const unsigned long size = VFP_STORAGE_SIZE;
|
||||
int err = 0;
|
||||
|
||||
vfp_sync_hwstate(thread);
|
||||
__put_user_error(magic, &frame->magic, err);
|
||||
__put_user_error(size, &frame->size, err);
|
||||
|
||||
/*
|
||||
* Copy the floating point registers. There can be unused
|
||||
* registers see asm/hwcap.h for details.
|
||||
*/
|
||||
err |= __copy_to_user(&frame->ufp.fpregs, &h->fpregs,
|
||||
sizeof(h->fpregs));
|
||||
/*
|
||||
* Copy the status and control register.
|
||||
*/
|
||||
__put_user_error(h->fpscr, &frame->ufp.fpscr, err);
|
||||
if (err)
|
||||
return -EFAULT;
|
||||
|
||||
/*
|
||||
* Copy the exception registers.
|
||||
*/
|
||||
__put_user_error(h->fpexc, &frame->ufp_exc.fpexc, err);
|
||||
__put_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
|
||||
__put_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
|
||||
|
||||
return err ? -EFAULT : 0;
|
||||
return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
|
||||
}
|
||||
|
||||
static int restore_vfp_context(struct vfp_sigframe __user *frame)
|
||||
{
|
||||
struct thread_info *thread = current_thread_info();
|
||||
struct vfp_hard_struct *h = &thread->vfpstate.hard;
|
||||
unsigned long magic;
|
||||
unsigned long size;
|
||||
unsigned long fpexc;
|
||||
int err = 0;
|
||||
|
||||
__get_user_error(magic, &frame->magic, err);
|
||||
@ -228,33 +207,7 @@ static int restore_vfp_context(struct vfp_sigframe __user *frame)
|
||||
if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
|
||||
return -EINVAL;
|
||||
|
||||
vfp_flush_hwstate(thread);
|
||||
|
||||
/*
|
||||
* Copy the floating point registers. There can be unused
|
||||
* registers see asm/hwcap.h for details.
|
||||
*/
|
||||
err |= __copy_from_user(&h->fpregs, &frame->ufp.fpregs,
|
||||
sizeof(h->fpregs));
|
||||
/*
|
||||
* Copy the status and control register.
|
||||
*/
|
||||
__get_user_error(h->fpscr, &frame->ufp.fpscr, err);
|
||||
|
||||
/*
|
||||
* Sanitise and restore the exception registers.
|
||||
*/
|
||||
__get_user_error(fpexc, &frame->ufp_exc.fpexc, err);
|
||||
/* Ensure the VFP is enabled. */
|
||||
fpexc |= FPEXC_EN;
|
||||
/* Ensure FPINST2 is invalid and the exception flag is cleared. */
|
||||
fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
|
||||
h->fpexc = fpexc;
|
||||
|
||||
__get_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
|
||||
__get_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
|
||||
|
||||
return err ? -EFAULT : 0;
|
||||
return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -510,10 +510,6 @@ static void ipi_cpu_stop(unsigned int cpu)
|
||||
local_fiq_disable();
|
||||
local_irq_disable();
|
||||
|
||||
#ifdef CONFIG_HOTPLUG_CPU
|
||||
platform_cpu_kill(cpu);
|
||||
#endif
|
||||
|
||||
while (1)
|
||||
cpu_relax();
|
||||
}
|
||||
@ -576,17 +572,25 @@ void smp_send_reschedule(int cpu)
|
||||
smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_HOTPLUG_CPU
|
||||
static void smp_kill_cpus(cpumask_t *mask)
|
||||
{
|
||||
unsigned int cpu;
|
||||
for_each_cpu(cpu, mask)
|
||||
platform_cpu_kill(cpu);
|
||||
}
|
||||
#else
|
||||
static void smp_kill_cpus(cpumask_t *mask) { }
|
||||
#endif
|
||||
|
||||
void smp_send_stop(void)
|
||||
{
|
||||
unsigned long timeout;
|
||||
struct cpumask mask;
|
||||
|
||||
if (num_online_cpus() > 1) {
|
||||
struct cpumask mask;
|
||||
cpumask_copy(&mask, cpu_online_mask);
|
||||
cpumask_clear_cpu(smp_processor_id(), &mask);
|
||||
|
||||
smp_cross_call(&mask, IPI_CPU_STOP);
|
||||
}
|
||||
cpumask_copy(&mask, cpu_online_mask);
|
||||
cpumask_clear_cpu(smp_processor_id(), &mask);
|
||||
smp_cross_call(&mask, IPI_CPU_STOP);
|
||||
|
||||
/* Wait up to one second for other CPUs to stop */
|
||||
timeout = USEC_PER_SEC;
|
||||
@ -595,6 +599,8 @@ void smp_send_stop(void)
|
||||
|
||||
if (num_online_cpus() > 1)
|
||||
pr_warning("SMP: failed to stop secondary CPUs\n");
|
||||
|
||||
smp_kill_cpus(&mask);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -497,25 +497,25 @@ static struct clk exynos4_init_clocks_off[] = {
|
||||
.ctrlbit = (1 << 3),
|
||||
}, {
|
||||
.name = "hsmmc",
|
||||
.devname = "s3c-sdhci.0",
|
||||
.devname = "exynos4-sdhci.0",
|
||||
.parent = &exynos4_clk_aclk_133.clk,
|
||||
.enable = exynos4_clk_ip_fsys_ctrl,
|
||||
.ctrlbit = (1 << 5),
|
||||
}, {
|
||||
.name = "hsmmc",
|
||||
.devname = "s3c-sdhci.1",
|
||||
.devname = "exynos4-sdhci.1",
|
||||
.parent = &exynos4_clk_aclk_133.clk,
|
||||
.enable = exynos4_clk_ip_fsys_ctrl,
|
||||
.ctrlbit = (1 << 6),
|
||||
}, {
|
||||
.name = "hsmmc",
|
||||
.devname = "s3c-sdhci.2",
|
||||
.devname = "exynos4-sdhci.2",
|
||||
.parent = &exynos4_clk_aclk_133.clk,
|
||||
.enable = exynos4_clk_ip_fsys_ctrl,
|
||||
.ctrlbit = (1 << 7),
|
||||
}, {
|
||||
.name = "hsmmc",
|
||||
.devname = "s3c-sdhci.3",
|
||||
.devname = "exynos4-sdhci.3",
|
||||
.parent = &exynos4_clk_aclk_133.clk,
|
||||
.enable = exynos4_clk_ip_fsys_ctrl,
|
||||
.ctrlbit = (1 << 8),
|
||||
@ -1202,7 +1202,7 @@ static struct clksrc_clk exynos4_clk_sclk_uart3 = {
|
||||
static struct clksrc_clk exynos4_clk_sclk_mmc0 = {
|
||||
.clk = {
|
||||
.name = "sclk_mmc",
|
||||
.devname = "s3c-sdhci.0",
|
||||
.devname = "exynos4-sdhci.0",
|
||||
.parent = &exynos4_clk_dout_mmc0.clk,
|
||||
.enable = exynos4_clksrc_mask_fsys_ctrl,
|
||||
.ctrlbit = (1 << 0),
|
||||
@ -1213,7 +1213,7 @@ static struct clksrc_clk exynos4_clk_sclk_mmc0 = {
|
||||
static struct clksrc_clk exynos4_clk_sclk_mmc1 = {
|
||||
.clk = {
|
||||
.name = "sclk_mmc",
|
||||
.devname = "s3c-sdhci.1",
|
||||
.devname = "exynos4-sdhci.1",
|
||||
.parent = &exynos4_clk_dout_mmc1.clk,
|
||||
.enable = exynos4_clksrc_mask_fsys_ctrl,
|
||||
.ctrlbit = (1 << 4),
|
||||
@ -1224,7 +1224,7 @@ static struct clksrc_clk exynos4_clk_sclk_mmc1 = {
|
||||
static struct clksrc_clk exynos4_clk_sclk_mmc2 = {
|
||||
.clk = {
|
||||
.name = "sclk_mmc",
|
||||
.devname = "s3c-sdhci.2",
|
||||
.devname = "exynos4-sdhci.2",
|
||||
.parent = &exynos4_clk_dout_mmc2.clk,
|
||||
.enable = exynos4_clksrc_mask_fsys_ctrl,
|
||||
.ctrlbit = (1 << 8),
|
||||
@ -1235,7 +1235,7 @@ static struct clksrc_clk exynos4_clk_sclk_mmc2 = {
|
||||
static struct clksrc_clk exynos4_clk_sclk_mmc3 = {
|
||||
.clk = {
|
||||
.name = "sclk_mmc",
|
||||
.devname = "s3c-sdhci.3",
|
||||
.devname = "exynos4-sdhci.3",
|
||||
.parent = &exynos4_clk_dout_mmc3.clk,
|
||||
.enable = exynos4_clksrc_mask_fsys_ctrl,
|
||||
.ctrlbit = (1 << 12),
|
||||
@ -1340,10 +1340,10 @@ static struct clk_lookup exynos4_clk_lookup[] = {
|
||||
CLKDEV_INIT("exynos4210-uart.1", "clk_uart_baud0", &exynos4_clk_sclk_uart1.clk),
|
||||
CLKDEV_INIT("exynos4210-uart.2", "clk_uart_baud0", &exynos4_clk_sclk_uart2.clk),
|
||||
CLKDEV_INIT("exynos4210-uart.3", "clk_uart_baud0", &exynos4_clk_sclk_uart3.clk),
|
||||
CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.2", &exynos4_clk_sclk_mmc0.clk),
|
||||
CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &exynos4_clk_sclk_mmc1.clk),
|
||||
CLKDEV_INIT("s3c-sdhci.2", "mmc_busclk.2", &exynos4_clk_sclk_mmc2.clk),
|
||||
CLKDEV_INIT("s3c-sdhci.3", "mmc_busclk.2", &exynos4_clk_sclk_mmc3.clk),
|
||||
CLKDEV_INIT("exynos4-sdhci.0", "mmc_busclk.2", &exynos4_clk_sclk_mmc0.clk),
|
||||
CLKDEV_INIT("exynos4-sdhci.1", "mmc_busclk.2", &exynos4_clk_sclk_mmc1.clk),
|
||||
CLKDEV_INIT("exynos4-sdhci.2", "mmc_busclk.2", &exynos4_clk_sclk_mmc2.clk),
|
||||
CLKDEV_INIT("exynos4-sdhci.3", "mmc_busclk.2", &exynos4_clk_sclk_mmc3.clk),
|
||||
CLKDEV_INIT("exynos4-fb.0", "lcd", &exynos4_clk_fimd0),
|
||||
CLKDEV_INIT("dma-pl330.0", "apb_pclk", &exynos4_clk_pdma0),
|
||||
CLKDEV_INIT("dma-pl330.1", "apb_pclk", &exynos4_clk_pdma1),
|
||||
|
@ -455,25 +455,25 @@ static struct clk exynos5_init_clocks_off[] = {
|
||||
.ctrlbit = (1 << 20),
|
||||
}, {
|
||||
.name = "hsmmc",
|
||||
.devname = "s3c-sdhci.0",
|
||||
.devname = "exynos4-sdhci.0",
|
||||
.parent = &exynos5_clk_aclk_200.clk,
|
||||
.enable = exynos5_clk_ip_fsys_ctrl,
|
||||
.ctrlbit = (1 << 12),
|
||||
}, {
|
||||
.name = "hsmmc",
|
||||
.devname = "s3c-sdhci.1",
|
||||
.devname = "exynos4-sdhci.1",
|
||||
.parent = &exynos5_clk_aclk_200.clk,
|
||||
.enable = exynos5_clk_ip_fsys_ctrl,
|
||||
.ctrlbit = (1 << 13),
|
||||
}, {
|
||||
.name = "hsmmc",
|
||||
.devname = "s3c-sdhci.2",
|
||||
.devname = "exynos4-sdhci.2",
|
||||
.parent = &exynos5_clk_aclk_200.clk,
|
||||
.enable = exynos5_clk_ip_fsys_ctrl,
|
||||
.ctrlbit = (1 << 14),
|
||||
}, {
|
||||
.name = "hsmmc",
|
||||
.devname = "s3c-sdhci.3",
|
||||
.devname = "exynos4-sdhci.3",
|
||||
.parent = &exynos5_clk_aclk_200.clk,
|
||||
.enable = exynos5_clk_ip_fsys_ctrl,
|
||||
.ctrlbit = (1 << 15),
|
||||
@ -813,7 +813,7 @@ static struct clksrc_clk exynos5_clk_sclk_uart3 = {
|
||||
static struct clksrc_clk exynos5_clk_sclk_mmc0 = {
|
||||
.clk = {
|
||||
.name = "sclk_mmc",
|
||||
.devname = "s3c-sdhci.0",
|
||||
.devname = "exynos4-sdhci.0",
|
||||
.parent = &exynos5_clk_dout_mmc0.clk,
|
||||
.enable = exynos5_clksrc_mask_fsys_ctrl,
|
||||
.ctrlbit = (1 << 0),
|
||||
@ -824,7 +824,7 @@ static struct clksrc_clk exynos5_clk_sclk_mmc0 = {
|
||||
static struct clksrc_clk exynos5_clk_sclk_mmc1 = {
|
||||
.clk = {
|
||||
.name = "sclk_mmc",
|
||||
.devname = "s3c-sdhci.1",
|
||||
.devname = "exynos4-sdhci.1",
|
||||
.parent = &exynos5_clk_dout_mmc1.clk,
|
||||
.enable = exynos5_clksrc_mask_fsys_ctrl,
|
||||
.ctrlbit = (1 << 4),
|
||||
@ -835,7 +835,7 @@ static struct clksrc_clk exynos5_clk_sclk_mmc1 = {
|
||||
static struct clksrc_clk exynos5_clk_sclk_mmc2 = {
|
||||
.clk = {
|
||||
.name = "sclk_mmc",
|
||||
.devname = "s3c-sdhci.2",
|
||||
.devname = "exynos4-sdhci.2",
|
||||
.parent = &exynos5_clk_dout_mmc2.clk,
|
||||
.enable = exynos5_clksrc_mask_fsys_ctrl,
|
||||
.ctrlbit = (1 << 8),
|
||||
@ -846,7 +846,7 @@ static struct clksrc_clk exynos5_clk_sclk_mmc2 = {
|
||||
static struct clksrc_clk exynos5_clk_sclk_mmc3 = {
|
||||
.clk = {
|
||||
.name = "sclk_mmc",
|
||||
.devname = "s3c-sdhci.3",
|
||||
.devname = "exynos4-sdhci.3",
|
||||
.parent = &exynos5_clk_dout_mmc3.clk,
|
||||
.enable = exynos5_clksrc_mask_fsys_ctrl,
|
||||
.ctrlbit = (1 << 12),
|
||||
@ -990,10 +990,10 @@ static struct clk_lookup exynos5_clk_lookup[] = {
|
||||
CLKDEV_INIT("exynos4210-uart.1", "clk_uart_baud0", &exynos5_clk_sclk_uart1.clk),
|
||||
CLKDEV_INIT("exynos4210-uart.2", "clk_uart_baud0", &exynos5_clk_sclk_uart2.clk),
|
||||
CLKDEV_INIT("exynos4210-uart.3", "clk_uart_baud0", &exynos5_clk_sclk_uart3.clk),
|
||||
CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.2", &exynos5_clk_sclk_mmc0.clk),
|
||||
CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &exynos5_clk_sclk_mmc1.clk),
|
||||
CLKDEV_INIT("s3c-sdhci.2", "mmc_busclk.2", &exynos5_clk_sclk_mmc2.clk),
|
||||
CLKDEV_INIT("s3c-sdhci.3", "mmc_busclk.2", &exynos5_clk_sclk_mmc3.clk),
|
||||
CLKDEV_INIT("exynos4-sdhci.0", "mmc_busclk.2", &exynos5_clk_sclk_mmc0.clk),
|
||||
CLKDEV_INIT("exynos4-sdhci.1", "mmc_busclk.2", &exynos5_clk_sclk_mmc1.clk),
|
||||
CLKDEV_INIT("exynos4-sdhci.2", "mmc_busclk.2", &exynos5_clk_sclk_mmc2.clk),
|
||||
CLKDEV_INIT("exynos4-sdhci.3", "mmc_busclk.2", &exynos5_clk_sclk_mmc3.clk),
|
||||
CLKDEV_INIT("dma-pl330.0", "apb_pclk", &exynos5_clk_pdma0),
|
||||
CLKDEV_INIT("dma-pl330.1", "apb_pclk", &exynos5_clk_pdma1),
|
||||
CLKDEV_INIT("dma-pl330.2", "apb_pclk", &exynos5_clk_mdma1),
|
||||
|
@ -326,6 +326,11 @@ static void __init exynos4_map_io(void)
|
||||
s3c_fimc_setname(2, "exynos4-fimc");
|
||||
s3c_fimc_setname(3, "exynos4-fimc");
|
||||
|
||||
s3c_sdhci_setname(0, "exynos4-sdhci");
|
||||
s3c_sdhci_setname(1, "exynos4-sdhci");
|
||||
s3c_sdhci_setname(2, "exynos4-sdhci");
|
||||
s3c_sdhci_setname(3, "exynos4-sdhci");
|
||||
|
||||
/* The I2C bus controllers are directly compatible with s3c2440 */
|
||||
s3c_i2c0_setname("s3c2440-i2c");
|
||||
s3c_i2c1_setname("s3c2440-i2c");
|
||||
@ -344,6 +349,11 @@ static void __init exynos5_map_io(void)
|
||||
s3c_device_i2c0.resource[1].start = EXYNOS5_IRQ_IIC;
|
||||
s3c_device_i2c0.resource[1].end = EXYNOS5_IRQ_IIC;
|
||||
|
||||
s3c_sdhci_setname(0, "exynos4-sdhci");
|
||||
s3c_sdhci_setname(1, "exynos4-sdhci");
|
||||
s3c_sdhci_setname(2, "exynos4-sdhci");
|
||||
s3c_sdhci_setname(3, "exynos4-sdhci");
|
||||
|
||||
/* The I2C bus controllers are directly compatible with s3c2440 */
|
||||
s3c_i2c0_setname("s3c2440-i2c");
|
||||
s3c_i2c1_setname("s3c2440-i2c");
|
||||
@ -537,7 +547,9 @@ void __init exynos5_init_irq(void)
|
||||
{
|
||||
int irq;
|
||||
|
||||
gic_init(0, IRQ_PPI(0), S5P_VA_GIC_DIST, S5P_VA_GIC_CPU);
|
||||
#ifdef CONFIG_OF
|
||||
of_irq_init(exynos4_dt_irq_match);
|
||||
#endif
|
||||
|
||||
for (irq = 0; irq < EXYNOS5_MAX_COMBINER_NR; irq++) {
|
||||
combiner_init(irq, (void __iomem *)S5P_VA_COMBINER(irq),
|
||||
|
@ -16,6 +16,7 @@
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/ioport.h>
|
||||
#include <linux/mmc/dw_mmc.h>
|
||||
|
||||
#include <plat/devs.h>
|
||||
@ -33,16 +34,8 @@ static int exynos4_dwmci_init(u32 slot_id, irq_handler_t handler, void *data)
|
||||
}
|
||||
|
||||
static struct resource exynos4_dwmci_resource[] = {
|
||||
[0] = {
|
||||
.start = EXYNOS4_PA_DWMCI,
|
||||
.end = EXYNOS4_PA_DWMCI + SZ_4K - 1,
|
||||
.flags = IORESOURCE_MEM,
|
||||
},
|
||||
[1] = {
|
||||
.start = IRQ_DWMCI,
|
||||
.end = IRQ_DWMCI,
|
||||
.flags = IORESOURCE_IRQ,
|
||||
}
|
||||
[0] = DEFINE_RES_MEM(EXYNOS4_PA_DWMCI, SZ_4K),
|
||||
[1] = DEFINE_RES_IRQ(EXYNOS4_IRQ_DWMCI),
|
||||
};
|
||||
|
||||
static struct dw_mci_board exynos4_dwci_pdata = {
|
||||
|
@ -112,6 +112,7 @@ static struct s3c_sdhci_platdata nuri_hsmmc0_data __initdata = {
|
||||
.host_caps = (MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA |
|
||||
MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
|
||||
MMC_CAP_ERASE),
|
||||
.host_caps2 = MMC_CAP2_BROKEN_VOLTAGE,
|
||||
.cd_type = S3C_SDHCI_CD_PERMANENT,
|
||||
.clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL,
|
||||
};
|
||||
|
@ -747,6 +747,7 @@ static struct s3c_sdhci_platdata universal_hsmmc0_data __initdata = {
|
||||
.max_width = 8,
|
||||
.host_caps = (MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA |
|
||||
MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED),
|
||||
.host_caps2 = MMC_CAP2_BROKEN_VOLTAGE,
|
||||
.cd_type = S3C_SDHCI_CD_PERMANENT,
|
||||
.clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL,
|
||||
};
|
||||
|
@ -17,6 +17,7 @@
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/memblock.h>
|
||||
|
||||
@ -49,10 +50,22 @@ static void __init msm8x60_map_io(void)
|
||||
msm_map_msm8x60_io();
|
||||
}
|
||||
|
||||
#ifdef CONFIG_OF
|
||||
static struct of_device_id msm_dt_gic_match[] __initdata = {
|
||||
{ .compatible = "qcom,msm-8660-qgic", .data = gic_of_init },
|
||||
{}
|
||||
};
|
||||
#endif
|
||||
|
||||
static void __init msm8x60_init_irq(void)
|
||||
{
|
||||
gic_init(0, GIC_PPI_START, MSM_QGIC_DIST_BASE,
|
||||
(void *)MSM_QGIC_CPU_BASE);
|
||||
if (!of_have_populated_dt())
|
||||
gic_init(0, GIC_PPI_START, MSM_QGIC_DIST_BASE,
|
||||
(void *)MSM_QGIC_CPU_BASE);
|
||||
#ifdef CONFIG_OF
|
||||
else
|
||||
of_irq_init(msm_dt_gic_match);
|
||||
#endif
|
||||
|
||||
/* Edge trigger PPIs except AVS_SVICINT and AVS_SVICINTSWDONE */
|
||||
writel(0xFFFFD7FF, MSM_QGIC_DIST_BASE + GIC_DIST_CONFIG + 4);
|
||||
@ -73,16 +86,8 @@ static struct of_dev_auxdata msm_auxdata_lookup[] __initdata = {
|
||||
{}
|
||||
};
|
||||
|
||||
static struct of_device_id msm_dt_gic_match[] __initdata = {
|
||||
{ .compatible = "qcom,msm-8660-qgic", },
|
||||
{}
|
||||
};
|
||||
|
||||
static void __init msm8x60_dt_init(void)
|
||||
{
|
||||
irq_domain_generate_simple(msm_dt_gic_match, MSM8X60_QGIC_DIST_PHYS,
|
||||
GIC_SPI_START);
|
||||
|
||||
if (of_machine_is_compatible("qcom,msm8660-surf")) {
|
||||
printk(KERN_INFO "Init surf UART registers\n");
|
||||
msm8x60_init_uart12dm();
|
||||
|
@ -17,6 +17,7 @@
|
||||
*
|
||||
* bit 23 - Input/Output (PXA2xx specific)
|
||||
* bit 24 - Wakeup Enable(PXA2xx specific)
|
||||
* bit 25 - Keep Output (PXA2xx specific)
|
||||
*/
|
||||
|
||||
#define MFP_DIR_IN (0x0 << 23)
|
||||
@ -25,6 +26,12 @@
|
||||
#define MFP_DIR(x) (((x) >> 23) & 0x1)
|
||||
|
||||
#define MFP_LPM_CAN_WAKEUP (0x1 << 24)
|
||||
|
||||
/*
|
||||
* MFP_LPM_KEEP_OUTPUT must be specified for pins that need to
|
||||
* retain their last output level (low or high).
|
||||
* Note: MFP_LPM_KEEP_OUTPUT has no effect on pins configured for input.
|
||||
*/
|
||||
#define MFP_LPM_KEEP_OUTPUT (0x1 << 25)
|
||||
|
||||
#define WAKEUP_ON_EDGE_RISE (MFP_LPM_CAN_WAKEUP | MFP_LPM_EDGE_RISE)
|
||||
|
@ -33,6 +33,8 @@
|
||||
#define BANK_OFF(n) (((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2))
|
||||
#define GPLR(x) __REG2(0x40E00000, BANK_OFF((x) >> 5))
|
||||
#define GPDR(x) __REG2(0x40E00000, BANK_OFF((x) >> 5) + 0x0c)
|
||||
#define GPSR(x) __REG2(0x40E00000, BANK_OFF((x) >> 5) + 0x18)
|
||||
#define GPCR(x) __REG2(0x40E00000, BANK_OFF((x) >> 5) + 0x24)
|
||||
|
||||
#define PWER_WE35 (1 << 24)
|
||||
|
||||
@ -348,6 +350,7 @@ static inline void pxa27x_mfp_init(void) {}
|
||||
#ifdef CONFIG_PM
|
||||
static unsigned long saved_gafr[2][4];
|
||||
static unsigned long saved_gpdr[4];
|
||||
static unsigned long saved_gplr[4];
|
||||
static unsigned long saved_pgsr[4];
|
||||
|
||||
static int pxa2xx_mfp_suspend(void)
|
||||
@ -366,14 +369,26 @@ static int pxa2xx_mfp_suspend(void)
|
||||
}
|
||||
|
||||
for (i = 0; i <= gpio_to_bank(pxa_last_gpio); i++) {
|
||||
|
||||
saved_gafr[0][i] = GAFR_L(i);
|
||||
saved_gafr[1][i] = GAFR_U(i);
|
||||
saved_gpdr[i] = GPDR(i * 32);
|
||||
saved_gplr[i] = GPLR(i * 32);
|
||||
saved_pgsr[i] = PGSR(i);
|
||||
|
||||
GPDR(i * 32) = gpdr_lpm[i];
|
||||
GPSR(i * 32) = PGSR(i);
|
||||
GPCR(i * 32) = ~PGSR(i);
|
||||
}
|
||||
|
||||
/* set GPDR bits taking into account MFP_LPM_KEEP_OUTPUT */
|
||||
for (i = 0; i < pxa_last_gpio; i++) {
|
||||
if ((gpdr_lpm[gpio_to_bank(i)] & GPIO_bit(i)) ||
|
||||
((gpio_desc[i].config & MFP_LPM_KEEP_OUTPUT) &&
|
||||
(saved_gpdr[gpio_to_bank(i)] & GPIO_bit(i))))
|
||||
GPDR(i) |= GPIO_bit(i);
|
||||
else
|
||||
GPDR(i) &= ~GPIO_bit(i);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -384,6 +399,8 @@ static void pxa2xx_mfp_resume(void)
|
||||
for (i = 0; i <= gpio_to_bank(pxa_last_gpio); i++) {
|
||||
GAFR_L(i) = saved_gafr[0][i];
|
||||
GAFR_U(i) = saved_gafr[1][i];
|
||||
GPSR(i * 32) = saved_gplr[i];
|
||||
GPCR(i * 32) = ~saved_gplr[i];
|
||||
GPDR(i * 32) = saved_gpdr[i];
|
||||
PGSR(i) = saved_pgsr[i];
|
||||
}
|
||||
|
@ -421,8 +421,11 @@ void __init pxa27x_set_i2c_power_info(struct i2c_pxa_platform_data *info)
|
||||
pxa_register_device(&pxa27x_device_i2c_power, info);
|
||||
}
|
||||
|
||||
static struct pxa_gpio_platform_data pxa27x_gpio_info __initdata = {
|
||||
.gpio_set_wake = gpio_set_wake,
|
||||
};
|
||||
|
||||
static struct platform_device *devices[] __initdata = {
|
||||
&pxa_device_gpio,
|
||||
&pxa27x_device_udc,
|
||||
&pxa_device_pmu,
|
||||
&pxa_device_i2s,
|
||||
@ -458,6 +461,7 @@ static int __init pxa27x_init(void)
|
||||
register_syscore_ops(&pxa2xx_mfp_syscore_ops);
|
||||
register_syscore_ops(&pxa2xx_clock_syscore_ops);
|
||||
|
||||
pxa_register_device(&pxa_device_gpio, &pxa27x_gpio_info);
|
||||
ret = platform_add_devices(devices, ARRAY_SIZE(devices));
|
||||
}
|
||||
|
||||
|
@ -111,10 +111,6 @@ config S3C24XX_SETUP_TS
|
||||
help
|
||||
Compile in platform device definition for Samsung TouchScreen.
|
||||
|
||||
# cpu-specific sections
|
||||
|
||||
if CPU_S3C2410
|
||||
|
||||
config S3C2410_DMA
|
||||
bool
|
||||
depends on S3C24XX_DMA && (CPU_S3C2410 || CPU_S3C2442)
|
||||
@ -127,6 +123,10 @@ config S3C2410_PM
|
||||
help
|
||||
Power Management code common to S3C2410 and better
|
||||
|
||||
# cpu-specific sections
|
||||
|
||||
if CPU_S3C2410
|
||||
|
||||
config S3C24XX_SIMTEC_NOR
|
||||
bool
|
||||
help
|
||||
|
@ -25,6 +25,7 @@
|
||||
#include <linux/gpio_keys.h>
|
||||
#include <linux/input.h>
|
||||
#include <linux/gpio.h>
|
||||
#include <linux/mmc/host.h>
|
||||
#include <linux/interrupt.h>
|
||||
|
||||
#include <asm/hardware/vic.h>
|
||||
@ -765,6 +766,7 @@ static void __init goni_pmic_init(void)
|
||||
/* MoviNAND */
|
||||
static struct s3c_sdhci_platdata goni_hsmmc0_data __initdata = {
|
||||
.max_width = 4,
|
||||
.host_caps2 = MMC_CAP2_BROKEN_VOLTAGE,
|
||||
.cd_type = S3C_SDHCI_CD_PERMANENT,
|
||||
};
|
||||
|
||||
|
@ -306,7 +306,7 @@ void sa11x0_register_irda(struct irda_platform_data *irda)
|
||||
}
|
||||
|
||||
static struct resource sa1100_rtc_resources[] = {
|
||||
DEFINE_RES_MEM(0x90010000, 0x9001003f),
|
||||
DEFINE_RES_MEM(0x90010000, 0x40),
|
||||
DEFINE_RES_IRQ_NAMED(IRQ_RTC1Hz, "rtc 1Hz"),
|
||||
DEFINE_RES_IRQ_NAMED(IRQ_RTCAlrm, "rtc alarm"),
|
||||
};
|
||||
|
@ -1667,8 +1667,10 @@ void __init u300_init_irq(void)
|
||||
|
||||
for (i = 0; i < U300_VIC_IRQS_END; i++)
|
||||
set_bit(i, (unsigned long *) &mask[0]);
|
||||
vic_init((void __iomem *) U300_INTCON0_VBASE, 0, mask[0], mask[0]);
|
||||
vic_init((void __iomem *) U300_INTCON1_VBASE, 32, mask[1], mask[1]);
|
||||
vic_init((void __iomem *) U300_INTCON0_VBASE, IRQ_U300_INTCON0_START,
|
||||
mask[0], mask[0]);
|
||||
vic_init((void __iomem *) U300_INTCON1_VBASE, IRQ_U300_INTCON1_START,
|
||||
mask[1], mask[1]);
|
||||
}
|
||||
|
||||
|
||||
|
@ -146,9 +146,6 @@ static struct ab3100_platform_data ab3100_plf_data = {
|
||||
.min_uV = 1800000,
|
||||
.max_uV = 1800000,
|
||||
.valid_modes_mask = REGULATOR_MODE_NORMAL,
|
||||
.valid_ops_mask =
|
||||
REGULATOR_CHANGE_VOLTAGE |
|
||||
REGULATOR_CHANGE_STATUS,
|
||||
.always_on = 1,
|
||||
.boot_on = 1,
|
||||
},
|
||||
@ -160,9 +157,6 @@ static struct ab3100_platform_data ab3100_plf_data = {
|
||||
.min_uV = 2500000,
|
||||
.max_uV = 2500000,
|
||||
.valid_modes_mask = REGULATOR_MODE_NORMAL,
|
||||
.valid_ops_mask =
|
||||
REGULATOR_CHANGE_VOLTAGE |
|
||||
REGULATOR_CHANGE_STATUS,
|
||||
.always_on = 1,
|
||||
.boot_on = 1,
|
||||
},
|
||||
@ -230,8 +224,7 @@ static struct ab3100_platform_data ab3100_plf_data = {
|
||||
.max_uV = 1800000,
|
||||
.valid_modes_mask = REGULATOR_MODE_NORMAL,
|
||||
.valid_ops_mask =
|
||||
REGULATOR_CHANGE_VOLTAGE |
|
||||
REGULATOR_CHANGE_STATUS,
|
||||
REGULATOR_CHANGE_VOLTAGE,
|
||||
.always_on = 1,
|
||||
.boot_on = 1,
|
||||
},
|
||||
|
@ -12,101 +12,101 @@
|
||||
#ifndef __MACH_IRQS_H
|
||||
#define __MACH_IRQS_H
|
||||
|
||||
#define IRQ_U300_INTCON0_START 0
|
||||
#define IRQ_U300_INTCON1_START 32
|
||||
#define IRQ_U300_INTCON0_START 1
|
||||
#define IRQ_U300_INTCON1_START 33
|
||||
/* These are on INTCON0 - 30 lines */
|
||||
#define IRQ_U300_IRQ0_EXT 0
|
||||
#define IRQ_U300_IRQ1_EXT 1
|
||||
#define IRQ_U300_DMA 2
|
||||
#define IRQ_U300_VIDEO_ENC_0 3
|
||||
#define IRQ_U300_VIDEO_ENC_1 4
|
||||
#define IRQ_U300_AAIF_RX 5
|
||||
#define IRQ_U300_AAIF_TX 6
|
||||
#define IRQ_U300_AAIF_VGPIO 7
|
||||
#define IRQ_U300_AAIF_WAKEUP 8
|
||||
#define IRQ_U300_PCM_I2S0_FRAME 9
|
||||
#define IRQ_U300_PCM_I2S0_FIFO 10
|
||||
#define IRQ_U300_PCM_I2S1_FRAME 11
|
||||
#define IRQ_U300_PCM_I2S1_FIFO 12
|
||||
#define IRQ_U300_XGAM_GAMCON 13
|
||||
#define IRQ_U300_XGAM_CDI 14
|
||||
#define IRQ_U300_XGAM_CDICON 15
|
||||
#define IRQ_U300_IRQ0_EXT 1
|
||||
#define IRQ_U300_IRQ1_EXT 2
|
||||
#define IRQ_U300_DMA 3
|
||||
#define IRQ_U300_VIDEO_ENC_0 4
|
||||
#define IRQ_U300_VIDEO_ENC_1 5
|
||||
#define IRQ_U300_AAIF_RX 6
|
||||
#define IRQ_U300_AAIF_TX 7
|
||||
#define IRQ_U300_AAIF_VGPIO 8
|
||||
#define IRQ_U300_AAIF_WAKEUP 9
|
||||
#define IRQ_U300_PCM_I2S0_FRAME 10
|
||||
#define IRQ_U300_PCM_I2S0_FIFO 11
|
||||
#define IRQ_U300_PCM_I2S1_FRAME 12
|
||||
#define IRQ_U300_PCM_I2S1_FIFO 13
|
||||
#define IRQ_U300_XGAM_GAMCON 14
|
||||
#define IRQ_U300_XGAM_CDI 15
|
||||
#define IRQ_U300_XGAM_CDICON 16
|
||||
#if defined(CONFIG_MACH_U300_BS2X) || defined(CONFIG_MACH_U300_BS330)
|
||||
/* MMIACC not used on the DB3210 or DB3350 chips */
|
||||
#define IRQ_U300_XGAM_MMIACC 16
|
||||
#define IRQ_U300_XGAM_MMIACC 17
|
||||
#endif
|
||||
#define IRQ_U300_XGAM_PDI 17
|
||||
#define IRQ_U300_XGAM_PDICON 18
|
||||
#define IRQ_U300_XGAM_GAMEACC 19
|
||||
#define IRQ_U300_XGAM_MCIDCT 20
|
||||
#define IRQ_U300_APEX 21
|
||||
#define IRQ_U300_UART0 22
|
||||
#define IRQ_U300_SPI 23
|
||||
#define IRQ_U300_TIMER_APP_OS 24
|
||||
#define IRQ_U300_TIMER_APP_DD 25
|
||||
#define IRQ_U300_TIMER_APP_GP1 26
|
||||
#define IRQ_U300_TIMER_APP_GP2 27
|
||||
#define IRQ_U300_TIMER_OS 28
|
||||
#define IRQ_U300_TIMER_MS 29
|
||||
#define IRQ_U300_KEYPAD_KEYBF 30
|
||||
#define IRQ_U300_KEYPAD_KEYBR 31
|
||||
#define IRQ_U300_XGAM_PDI 18
|
||||
#define IRQ_U300_XGAM_PDICON 19
|
||||
#define IRQ_U300_XGAM_GAMEACC 20
|
||||
#define IRQ_U300_XGAM_MCIDCT 21
|
||||
#define IRQ_U300_APEX 22
|
||||
#define IRQ_U300_UART0 23
|
||||
#define IRQ_U300_SPI 24
|
||||
#define IRQ_U300_TIMER_APP_OS 25
|
||||
#define IRQ_U300_TIMER_APP_DD 26
|
||||
#define IRQ_U300_TIMER_APP_GP1 27
|
||||
#define IRQ_U300_TIMER_APP_GP2 28
|
||||
#define IRQ_U300_TIMER_OS 29
|
||||
#define IRQ_U300_TIMER_MS 30
|
||||
#define IRQ_U300_KEYPAD_KEYBF 31
|
||||
#define IRQ_U300_KEYPAD_KEYBR 32
|
||||
/* These are on INTCON1 - 32 lines */
|
||||
#define IRQ_U300_GPIO_PORT0 32
|
||||
#define IRQ_U300_GPIO_PORT1 33
|
||||
#define IRQ_U300_GPIO_PORT2 34
|
||||
#define IRQ_U300_GPIO_PORT0 33
|
||||
#define IRQ_U300_GPIO_PORT1 34
|
||||
#define IRQ_U300_GPIO_PORT2 35
|
||||
|
||||
#if defined(CONFIG_MACH_U300_BS2X) || defined(CONFIG_MACH_U300_BS330) || \
|
||||
defined(CONFIG_MACH_U300_BS335)
|
||||
/* These are for DB3150, DB3200 and DB3350 */
|
||||
#define IRQ_U300_WDOG 35
|
||||
#define IRQ_U300_EVHIST 36
|
||||
#define IRQ_U300_MSPRO 37
|
||||
#define IRQ_U300_MMCSD_MCIINTR0 38
|
||||
#define IRQ_U300_MMCSD_MCIINTR1 39
|
||||
#define IRQ_U300_I2C0 40
|
||||
#define IRQ_U300_I2C1 41
|
||||
#define IRQ_U300_RTC 42
|
||||
#define IRQ_U300_NFIF 43
|
||||
#define IRQ_U300_NFIF2 44
|
||||
#define IRQ_U300_WDOG 36
|
||||
#define IRQ_U300_EVHIST 37
|
||||
#define IRQ_U300_MSPRO 38
|
||||
#define IRQ_U300_MMCSD_MCIINTR0 39
|
||||
#define IRQ_U300_MMCSD_MCIINTR1 40
|
||||
#define IRQ_U300_I2C0 41
|
||||
#define IRQ_U300_I2C1 42
|
||||
#define IRQ_U300_RTC 43
|
||||
#define IRQ_U300_NFIF 44
|
||||
#define IRQ_U300_NFIF2 45
|
||||
#endif
|
||||
|
||||
/* DB3150 and DB3200 have only 45 IRQs */
|
||||
#if defined(CONFIG_MACH_U300_BS2X) || defined(CONFIG_MACH_U300_BS330)
|
||||
#define U300_VIC_IRQS_END 45
|
||||
#define U300_VIC_IRQS_END 46
|
||||
#endif
|
||||
|
||||
/* The DB3350-specific interrupt lines */
|
||||
#ifdef CONFIG_MACH_U300_BS335
|
||||
#define IRQ_U300_ISP_F0 45
|
||||
#define IRQ_U300_ISP_F1 46
|
||||
#define IRQ_U300_ISP_F2 47
|
||||
#define IRQ_U300_ISP_F3 48
|
||||
#define IRQ_U300_ISP_F4 49
|
||||
#define IRQ_U300_GPIO_PORT3 50
|
||||
#define IRQ_U300_SYSCON_PLL_LOCK 51
|
||||
#define IRQ_U300_UART1 52
|
||||
#define IRQ_U300_GPIO_PORT4 53
|
||||
#define IRQ_U300_GPIO_PORT5 54
|
||||
#define IRQ_U300_GPIO_PORT6 55
|
||||
#define U300_VIC_IRQS_END 56
|
||||
#define IRQ_U300_ISP_F0 46
|
||||
#define IRQ_U300_ISP_F1 47
|
||||
#define IRQ_U300_ISP_F2 48
|
||||
#define IRQ_U300_ISP_F3 49
|
||||
#define IRQ_U300_ISP_F4 50
|
||||
#define IRQ_U300_GPIO_PORT3 51
|
||||
#define IRQ_U300_SYSCON_PLL_LOCK 52
|
||||
#define IRQ_U300_UART1 53
|
||||
#define IRQ_U300_GPIO_PORT4 54
|
||||
#define IRQ_U300_GPIO_PORT5 55
|
||||
#define IRQ_U300_GPIO_PORT6 56
|
||||
#define U300_VIC_IRQS_END 57
|
||||
#endif
|
||||
|
||||
/* The DB3210-specific interrupt lines */
|
||||
#ifdef CONFIG_MACH_U300_BS365
|
||||
#define IRQ_U300_GPIO_PORT3 35
|
||||
#define IRQ_U300_GPIO_PORT4 36
|
||||
#define IRQ_U300_WDOG 37
|
||||
#define IRQ_U300_EVHIST 38
|
||||
#define IRQ_U300_MSPRO 39
|
||||
#define IRQ_U300_MMCSD_MCIINTR0 40
|
||||
#define IRQ_U300_MMCSD_MCIINTR1 41
|
||||
#define IRQ_U300_I2C0 42
|
||||
#define IRQ_U300_I2C1 43
|
||||
#define IRQ_U300_RTC 44
|
||||
#define IRQ_U300_NFIF 45
|
||||
#define IRQ_U300_NFIF2 46
|
||||
#define IRQ_U300_SYSCON_PLL_LOCK 47
|
||||
#define U300_VIC_IRQS_END 48
|
||||
#define IRQ_U300_GPIO_PORT3 36
|
||||
#define IRQ_U300_GPIO_PORT4 37
|
||||
#define IRQ_U300_WDOG 38
|
||||
#define IRQ_U300_EVHIST 39
|
||||
#define IRQ_U300_MSPRO 40
|
||||
#define IRQ_U300_MMCSD_MCIINTR0 41
|
||||
#define IRQ_U300_MMCSD_MCIINTR1 42
|
||||
#define IRQ_U300_I2C0 43
|
||||
#define IRQ_U300_I2C1 44
|
||||
#define IRQ_U300_RTC 45
|
||||
#define IRQ_U300_NFIF 46
|
||||
#define IRQ_U300_NFIF2 47
|
||||
#define IRQ_U300_SYSCON_PLL_LOCK 48
|
||||
#define U300_VIC_IRQS_END 49
|
||||
#endif
|
||||
|
||||
/* Maximum 8*7 GPIO lines */
|
||||
@ -117,6 +117,6 @@
|
||||
#define IRQ_U300_GPIO_END (U300_VIC_IRQS_END)
|
||||
#endif
|
||||
|
||||
#define NR_IRQS (IRQ_U300_GPIO_END)
|
||||
#define NR_IRQS (IRQ_U300_GPIO_END - IRQ_U300_INTCON0_START)
|
||||
|
||||
#endif
|
||||
|
@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct device *dev,
|
||||
return sprintf(buf, "0x%X\n", mbox_value);
|
||||
}
|
||||
|
||||
static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
|
||||
static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
|
||||
|
||||
static int mbox_show(struct seq_file *s, void *data)
|
||||
{
|
||||
|
@ -26,18 +26,23 @@ ENTRY(v6_early_abort)
|
||||
mrc p15, 0, r1, c5, c0, 0 @ get FSR
|
||||
mrc p15, 0, r0, c6, c0, 0 @ get FAR
|
||||
/*
|
||||
* Faulty SWP instruction on 1136 doesn't set bit 11 in DFSR (erratum 326103).
|
||||
* The test below covers all the write situations, including Java bytecodes
|
||||
* Faulty SWP instruction on 1136 doesn't set bit 11 in DFSR.
|
||||
*/
|
||||
bic r1, r1, #1 << 11 @ clear bit 11 of FSR
|
||||
tst r5, #PSR_J_BIT @ Java?
|
||||
#ifdef CONFIG_ARM_ERRATA_326103
|
||||
ldr ip, =0x4107b36
|
||||
mrc p15, 0, r3, c0, c0, 0 @ get processor id
|
||||
teq ip, r3, lsr #4 @ r0 ARM1136?
|
||||
bne do_DataAbort
|
||||
do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
|
||||
ldreq r3, [r4] @ read aborted ARM instruction
|
||||
tst r5, #PSR_J_BIT @ Java?
|
||||
tsteq r5, #PSR_T_BIT @ Thumb?
|
||||
bne do_DataAbort
|
||||
bic r1, r1, #1 << 11 @ clear bit 11 of FSR
|
||||
ldr r3, [r4] @ read aborted ARM instruction
|
||||
#ifdef CONFIG_CPU_ENDIAN_BE8
|
||||
reveq r3, r3
|
||||
rev r3, r3
|
||||
#endif
|
||||
do_ldrd_abort tmp=ip, insn=r3
|
||||
tst r3, #1 << 20 @ L = 0 -> write
|
||||
orreq r1, r1, #1 << 11 @ yes.
|
||||
#endif
|
||||
b do_DataAbort
|
||||
|
@ -32,6 +32,7 @@ static void __iomem *l2x0_base;
|
||||
static DEFINE_RAW_SPINLOCK(l2x0_lock);
|
||||
static u32 l2x0_way_mask; /* Bitmask of active ways */
|
||||
static u32 l2x0_size;
|
||||
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;
|
||||
|
||||
struct l2x0_regs l2x0_saved_regs;
|
||||
|
||||
@ -61,12 +62,7 @@ static inline void cache_sync(void)
|
||||
{
|
||||
void __iomem *base = l2x0_base;
|
||||
|
||||
#ifdef CONFIG_PL310_ERRATA_753970
|
||||
/* write to an unmmapped register */
|
||||
writel_relaxed(0, base + L2X0_DUMMY_REG);
|
||||
#else
|
||||
writel_relaxed(0, base + L2X0_CACHE_SYNC);
|
||||
#endif
|
||||
writel_relaxed(0, base + sync_reg_offset);
|
||||
cache_wait(base + L2X0_CACHE_SYNC, 1);
|
||||
}
|
||||
|
||||
@ -85,10 +81,13 @@ static inline void l2x0_inv_line(unsigned long addr)
|
||||
}
|
||||
|
||||
#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
|
||||
static inline void debug_writel(unsigned long val)
|
||||
{
|
||||
if (outer_cache.set_debug)
|
||||
outer_cache.set_debug(val);
|
||||
}
|
||||
|
||||
#define debug_writel(val) outer_cache.set_debug(val)
|
||||
|
||||
static void l2x0_set_debug(unsigned long val)
|
||||
static void pl310_set_debug(unsigned long val)
|
||||
{
|
||||
writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
|
||||
}
|
||||
@ -98,7 +97,7 @@ static inline void debug_writel(unsigned long val)
|
||||
{
|
||||
}
|
||||
|
||||
#define l2x0_set_debug NULL
|
||||
#define pl310_set_debug NULL
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_PL310_ERRATA_588369
|
||||
@ -331,6 +330,11 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
|
||||
else
|
||||
ways = 8;
|
||||
type = "L310";
|
||||
#ifdef CONFIG_PL310_ERRATA_753970
|
||||
/* Unmapped register. */
|
||||
sync_reg_offset = L2X0_DUMMY_REG;
|
||||
#endif
|
||||
outer_cache.set_debug = pl310_set_debug;
|
||||
break;
|
||||
case L2X0_CACHE_ID_PART_L210:
|
||||
ways = (aux >> 13) & 0xf;
|
||||
@ -379,7 +383,6 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
|
||||
outer_cache.flush_all = l2x0_flush_all;
|
||||
outer_cache.inv_all = l2x0_inv_all;
|
||||
outer_cache.disable = l2x0_disable;
|
||||
outer_cache.set_debug = l2x0_set_debug;
|
||||
|
||||
printk(KERN_INFO "%s cache controller enabled\n", type);
|
||||
printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
|
||||
|
@ -293,11 +293,11 @@ EXPORT_SYMBOL(pfn_valid);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_SPARSEMEM
|
||||
static void arm_memory_present(void)
|
||||
static void __init arm_memory_present(void)
|
||||
{
|
||||
}
|
||||
#else
|
||||
static void arm_memory_present(void)
|
||||
static void __init arm_memory_present(void)
|
||||
{
|
||||
struct memblock_region *reg;
|
||||
|
||||
|
@ -618,8 +618,8 @@ static void __init alloc_init_section(pud_t *pud, unsigned long addr,
|
||||
}
|
||||
}
|
||||
|
||||
static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
|
||||
unsigned long phys, const struct mem_type *type)
|
||||
static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
|
||||
unsigned long end, unsigned long phys, const struct mem_type *type)
|
||||
{
|
||||
pud_t *pud = pud_offset(pgd, addr);
|
||||
unsigned long next;
|
||||
|
@ -916,6 +916,13 @@ void omap_start_dma(int lch)
|
||||
l |= OMAP_DMA_CCR_BUFFERING_DISABLE;
|
||||
l |= OMAP_DMA_CCR_EN;
|
||||
|
||||
/*
|
||||
* As dma_write() uses IO accessors which are weakly ordered, there
|
||||
* is no guarantee that data in coherent DMA memory will be visible
|
||||
* to the DMA device. Add a memory barrier here to ensure that any
|
||||
* such data is visible prior to enabling DMA.
|
||||
*/
|
||||
mb();
|
||||
p->dma_write(l, CCR, lch);
|
||||
|
||||
dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
|
||||
@ -965,6 +972,13 @@ void omap_stop_dma(int lch)
|
||||
p->dma_write(l, CCR, lch);
|
||||
}
|
||||
|
||||
/*
|
||||
* Ensure that data transferred by DMA is visible to any access
|
||||
* after DMA has been disabled. This is important for coherent
|
||||
* DMA regions.
|
||||
*/
|
||||
mb();
|
||||
|
||||
if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
|
||||
int next_lch, cur_lch = lch;
|
||||
char dma_chan_link_map[dma_lch_count];
|
||||
|
@ -18,6 +18,8 @@
|
||||
#ifndef __PLAT_S3C_SDHCI_H
|
||||
#define __PLAT_S3C_SDHCI_H __FILE__
|
||||
|
||||
#include <plat/devs.h>
|
||||
|
||||
struct platform_device;
|
||||
struct mmc_host;
|
||||
struct mmc_card;
|
||||
@ -356,4 +358,30 @@ static inline void exynos4_default_sdhci3(void) { }
|
||||
|
||||
#endif /* CONFIG_EXYNOS4_SETUP_SDHCI */
|
||||
|
||||
static inline void s3c_sdhci_setname(int id, char *name)
|
||||
{
|
||||
switch (id) {
|
||||
#ifdef CONFIG_S3C_DEV_HSMMC
|
||||
case 0:
|
||||
s3c_device_hsmmc0.name = name;
|
||||
break;
|
||||
#endif
|
||||
#ifdef CONFIG_S3C_DEV_HSMMC1
|
||||
case 1:
|
||||
s3c_device_hsmmc1.name = name;
|
||||
break;
|
||||
#endif
|
||||
#ifdef CONFIG_S3C_DEV_HSMMC2
|
||||
case 2:
|
||||
s3c_device_hsmmc2.name = name;
|
||||
break;
|
||||
#endif
|
||||
#ifdef CONFIG_S3C_DEV_HSMMC3
|
||||
case 3:
|
||||
s3c_device_hsmmc3.name = name;
|
||||
break;
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
#endif /* __PLAT_S3C_SDHCI_H */
|
||||
|
@ -17,6 +17,8 @@
|
||||
#include <linux/sched.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/user.h>
|
||||
|
||||
#include <asm/cp15.h>
|
||||
#include <asm/cputype.h>
|
||||
@ -528,6 +530,103 @@ void vfp_flush_hwstate(struct thread_info *thread)
|
||||
put_cpu();
|
||||
}
|
||||
|
||||
/*
|
||||
* Save the current VFP state into the provided structures and prepare
|
||||
* for entry into a new function (signal handler).
|
||||
*/
|
||||
int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
|
||||
struct user_vfp_exc __user *ufp_exc)
|
||||
{
|
||||
struct thread_info *thread = current_thread_info();
|
||||
struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
|
||||
int err = 0;
|
||||
|
||||
/* Ensure that the saved hwstate is up-to-date. */
|
||||
vfp_sync_hwstate(thread);
|
||||
|
||||
/*
|
||||
* Copy the floating point registers. There can be unused
|
||||
* registers see asm/hwcap.h for details.
|
||||
*/
|
||||
err |= __copy_to_user(&ufp->fpregs, &hwstate->fpregs,
|
||||
sizeof(hwstate->fpregs));
|
||||
/*
|
||||
* Copy the status and control register.
|
||||
*/
|
||||
__put_user_error(hwstate->fpscr, &ufp->fpscr, err);
|
||||
|
||||
/*
|
||||
* Copy the exception registers.
|
||||
*/
|
||||
__put_user_error(hwstate->fpexc, &ufp_exc->fpexc, err);
|
||||
__put_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
|
||||
__put_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
|
||||
|
||||
if (err)
|
||||
return -EFAULT;
|
||||
|
||||
/* Ensure that VFP is disabled. */
|
||||
vfp_flush_hwstate(thread);
|
||||
|
||||
/*
|
||||
* As per the PCS, clear the length and stride bits for function
|
||||
* entry.
|
||||
*/
|
||||
hwstate->fpscr &= ~(FPSCR_LENGTH_MASK | FPSCR_STRIDE_MASK);
|
||||
|
||||
/*
|
||||
* Disable VFP in the hwstate so that we can detect if it gets
|
||||
* used.
|
||||
*/
|
||||
hwstate->fpexc &= ~FPEXC_EN;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Sanitise and restore the current VFP state from the provided structures. */
|
||||
int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
|
||||
struct user_vfp_exc __user *ufp_exc)
|
||||
{
|
||||
struct thread_info *thread = current_thread_info();
|
||||
struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
|
||||
unsigned long fpexc;
|
||||
int err = 0;
|
||||
|
||||
/*
|
||||
* If VFP has been used, then disable it to avoid corrupting
|
||||
* the new thread state.
|
||||
*/
|
||||
if (hwstate->fpexc & FPEXC_EN)
|
||||
vfp_flush_hwstate(thread);
|
||||
|
||||
/*
|
||||
* Copy the floating point registers. There can be unused
|
||||
* registers see asm/hwcap.h for details.
|
||||
*/
|
||||
err |= __copy_from_user(&hwstate->fpregs, &ufp->fpregs,
|
||||
sizeof(hwstate->fpregs));
|
||||
/*
|
||||
* Copy the status and control register.
|
||||
*/
|
||||
__get_user_error(hwstate->fpscr, &ufp->fpscr, err);
|
||||
|
||||
/*
|
||||
* Sanitise and restore the exception registers.
|
||||
*/
|
||||
__get_user_error(fpexc, &ufp_exc->fpexc, err);
|
||||
|
||||
/* Ensure the VFP is enabled. */
|
||||
fpexc |= FPEXC_EN;
|
||||
|
||||
/* Ensure FPINST2 is invalid and the exception flag is cleared. */
|
||||
fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
|
||||
hwstate->fpexc = fpexc;
|
||||
|
||||
__get_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
|
||||
__get_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
|
||||
|
||||
return err ? -EFAULT : 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* VFP hardware can lose all context when a CPU goes offline.
|
||||
* As we will be running in SMP mode with CPU hotplug, we will save the
|
||||
|
@ -38,7 +38,7 @@ static struct platform_device rtc_device = {
|
||||
.name = "rtc-bfin",
|
||||
.id = -1,
|
||||
};
|
||||
#endif
|
||||
#endif /* CONFIG_RTC_DRV_BFIN */
|
||||
|
||||
#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
|
||||
#ifdef CONFIG_SERIAL_BFIN_UART0
|
||||
@ -100,7 +100,7 @@ static struct platform_device bfin_uart0_device = {
|
||||
.platform_data = &bfin_uart0_peripherals, /* Passed to driver */
|
||||
},
|
||||
};
|
||||
#endif
#endif /* CONFIG_SERIAL_BFIN_UART0 */
#ifdef CONFIG_SERIAL_BFIN_UART1
static struct resource bfin_uart1_resources[] = {
{
@@ -148,7 +148,7 @@ static struct platform_device bfin_uart1_device = {
.platform_data = &bfin_uart1_peripherals, /* Passed to driver */
},
};
#endif
#endif /* CONFIG_SERIAL_BFIN_UART1 */
#ifdef CONFIG_SERIAL_BFIN_UART2
static struct resource bfin_uart2_resources[] = {
{
@@ -196,8 +196,8 @@ static struct platform_device bfin_uart2_device = {
.platform_data = &bfin_uart2_peripherals, /* Passed to driver */
},
};
#endif
#endif
#endif /* CONFIG_SERIAL_BFIN_UART2 */
#endif /* CONFIG_SERIAL_BFIN */

#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
#ifdef CONFIG_BFIN_SIR0
@@ -224,7 +224,7 @@ static struct platform_device bfin_sir0_device = {
.num_resources = ARRAY_SIZE(bfin_sir0_resources),
.resource = bfin_sir0_resources,
};
#endif
#endif /* CONFIG_BFIN_SIR0 */
#ifdef CONFIG_BFIN_SIR1
static struct resource bfin_sir1_resources[] = {
{
@@ -249,7 +249,7 @@ static struct platform_device bfin_sir1_device = {
.num_resources = ARRAY_SIZE(bfin_sir1_resources),
.resource = bfin_sir1_resources,
};
#endif
#endif /* CONFIG_BFIN_SIR1 */
#ifdef CONFIG_BFIN_SIR2
static struct resource bfin_sir2_resources[] = {
{
@@ -274,8 +274,8 @@ static struct platform_device bfin_sir2_device = {
.num_resources = ARRAY_SIZE(bfin_sir2_resources),
.resource = bfin_sir2_resources,
};
#endif
#endif
#endif /* CONFIG_BFIN_SIR2 */
#endif /* CONFIG_BFIN_SIR */

#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
@@ -311,7 +311,7 @@ static struct platform_device bfin_sport0_uart_device = {
.platform_data = &bfin_sport0_peripherals, /* Passed to driver */
},
};
#endif
#endif /* CONFIG_SERIAL_BFIN_SPORT0_UART */
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
static struct resource bfin_sport1_uart_resources[] = {
{
@@ -345,7 +345,7 @@ static struct platform_device bfin_sport1_uart_device = {
.platform_data = &bfin_sport1_peripherals, /* Passed to driver */
},
};
#endif
#endif /* CONFIG_SERIAL_BFIN_SPORT1_UART */
#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART
static struct resource bfin_sport2_uart_resources[] = {
{
@@ -379,7 +379,7 @@ static struct platform_device bfin_sport2_uart_device = {
.platform_data = &bfin_sport2_peripherals, /* Passed to driver */
},
};
#endif
#endif /* CONFIG_SERIAL_BFIN_SPORT2_UART */
#ifdef CONFIG_SERIAL_BFIN_SPORT3_UART
static struct resource bfin_sport3_uart_resources[] = {
{
@@ -413,8 +413,8 @@ static struct platform_device bfin_sport3_uart_device = {
.platform_data = &bfin_sport3_peripherals, /* Passed to driver */
},
};
#endif
#endif
#endif /* CONFIG_SERIAL_BFIN_SPORT3_UART */
#endif /* CONFIG_SERIAL_BFIN_SPORT */

#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE)
static unsigned short bfin_can_peripherals[] = {
@@ -452,7 +452,7 @@ static struct platform_device bfin_can_device = {
.platform_data = &bfin_can_peripherals, /* Passed to driver */
},
};
#endif
#endif /* CONFIG_CAN_BFIN */

/*
* USB-LAN EzExtender board
@@ -488,7 +488,7 @@ static struct platform_device smc91x_device = {
.platform_data = &smc91x_info,
},
};
#endif
#endif /* CONFIG_SMC91X */

#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
/* all SPI peripherals info goes here */
@@ -518,7 +518,8 @@ static struct flash_platform_data bfin_spi_flash_data = {
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
};
#endif
#endif /* CONFIG_MTD_M25P80 */
#endif /* CONFIG_SPI_BFIN5XX */

#if defined(CONFIG_TOUCHSCREEN_AD7879) || defined(CONFIG_TOUCHSCREEN_AD7879_MODULE)
#include <linux/spi/ad7879.h>
@@ -535,7 +536,7 @@ static const struct ad7879_platform_data bfin_ad7879_ts_info = {
.gpio_export = 1, /* Export GPIO to gpiolib */
.gpio_base = -1, /* Dynamic allocation */
};
#endif
#endif /* CONFIG_TOUCHSCREEN_AD7879 */

#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
#include <asm/bfin-lq035q1.h>
@@ -564,7 +565,7 @@ static struct platform_device bfin_lq035q1_device = {
.platform_data = &bfin_lq035q1_data,
},
};
#endif
#endif /* CONFIG_FB_BFIN_LQ035Q1 */

static struct spi_board_info bf538_spi_board_info[] __initdata = {
#if defined(CONFIG_MTD_M25P80) \
@@ -579,7 +580,7 @@ static struct spi_board_info bf538_spi_board_info[] __initdata = {
.controller_data = &spi_flash_chip_info,
.mode = SPI_MODE_3,
},
#endif
#endif /* CONFIG_MTD_M25P80 */
#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
{
.modalias = "ad7879",
@@ -590,7 +591,7 @@ static struct spi_board_info bf538_spi_board_info[] __initdata = {
.chip_select = 1,
.mode = SPI_CPHA | SPI_CPOL,
},
#endif
#endif /* CONFIG_TOUCHSCREEN_AD7879_SPI */
#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
{
.modalias = "bfin-lq035q1-spi",
@@ -599,7 +600,7 @@ static struct spi_board_info bf538_spi_board_info[] __initdata = {
.chip_select = 2,
.mode = SPI_CPHA | SPI_CPOL,
},
#endif
#endif /* CONFIG_FB_BFIN_LQ035Q1 */
#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
{
.modalias = "spidev",
@@ -607,7 +608,7 @@ static struct spi_board_info bf538_spi_board_info[] __initdata = {
.bus_num = 0,
.chip_select = 1,
},
#endif
#endif /* CONFIG_SPI_SPIDEV */
};

/* SPI (0) */
@@ -716,8 +717,6 @@ static struct platform_device bf538_spi_master2 = {
},
};

#endif /* spi master and devices */

#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
static struct resource bfin_twi0_resource[] = {
[0] = {
@@ -759,8 +758,8 @@ static struct platform_device i2c_bfin_twi1_device = {
.num_resources = ARRAY_SIZE(bfin_twi1_resource),
.resource = bfin_twi1_resource,
};
#endif
#endif
#endif /* CONFIG_BF542 */
#endif /* CONFIG_I2C_BLACKFIN_TWI */

#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
#include <linux/gpio_keys.h>

@@ -22,6 +22,7 @@
#include <linux/bootmem.h>
#include <linux/genalloc.h>
#include <asm/dma-mapping.h>
#include <linux/module.h>

struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

@@ -1,7 +1,7 @@
/*
* Process creation support for Hexagon
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -88,7 +88,7 @@ void (*idle_sleep)(void) = default_idle;
void cpu_idle(void)
{
while (1) {
tick_nohz_stop_sched_tick(1);
tick_nohz_idle_enter();
local_irq_disable();
while (!need_resched()) {
idle_sleep();
@@ -97,7 +97,7 @@ void cpu_idle(void)
local_irq_disable();
}
local_irq_enable();
tick_nohz_restart_sched_tick();
tick_nohz_idle_exit();
schedule();
}
}

@@ -28,6 +28,7 @@
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/user.h>
#include <linux/elf.h>

#include <asm/user.h>

@@ -1,7 +1,7 @@
/*
* SMP support for Hexagon
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -28,6 +28,7 @@
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>

#include <asm/time.h> /* timer_interrupt */
#include <asm/hexagon_vm.h>
@@ -177,7 +178,12 @@ void __cpuinit start_secondary(void)

printk(KERN_INFO "%s cpu %d\n", __func__, current_thread_info()->cpu);

notify_cpu_starting(cpu);

ipi_call_lock();
set_cpu_online(cpu, true);
ipi_call_unlock();

local_irq_enable();

cpu_idle();

@@ -28,6 +28,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/module.h>

#include <asm/timer-regs.h>
#include <asm/hexagon_vm.h>

@@ -21,6 +21,7 @@
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/binfmts.h>

#include <asm/vdso.h>

@@ -58,8 +58,8 @@ static void __init ar913x_wmac_setup(void)

static int ar933x_wmac_reset(void)
{
ath79_device_reset_clear(AR933X_RESET_WMAC);
ath79_device_reset_set(AR933X_RESET_WMAC);
ath79_device_reset_clear(AR933X_RESET_WMAC);

return 0;
}

@@ -45,7 +45,7 @@
#define JZ4740_IRQ_LCD JZ4740_IRQ(30)

/* 2nd-level interrupts */
#define JZ4740_IRQ_DMA(x) (JZ4740_IRQ(32) + (X))
#define JZ4740_IRQ_DMA(x) (JZ4740_IRQ(32) + (x))

#define JZ4740_IRQ_INTC_GPIO(x) (JZ4740_IRQ_GPIO0 - (x))
#define JZ4740_IRQ_GPIO(x) (JZ4740_IRQ(48) + (x))

@@ -37,12 +37,6 @@ extern void tlbmiss_handler_setup_pgd(unsigned long pgd);
write_c0_xcontext((unsigned long) smp_processor_id() << 51); \
} while (0)


static inline unsigned long get_current_pgd(void)
{
return PHYS_TO_XKSEG_CACHED((read_c0_context() >> 11) & ~0xfffUL);
}

#else /* CONFIG_MIPS_PGD_C0_CONTEXT: using pgd_current*/

/*

@@ -257,11 +257,8 @@ asmlinkage int sys_sigsuspend(nabi_no_regargs struct pt_regs regs)
return -EFAULT;
sigdelsetmask(&newset, ~_BLOCKABLE);

spin_lock_irq(&current->sighand->siglock);
current->saved_sigmask = current->blocked;
current->blocked = newset;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
set_current_blocked(&newset);

current->state = TASK_INTERRUPTIBLE;
schedule();
@@ -286,11 +283,8 @@ asmlinkage int sys_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
return -EFAULT;
sigdelsetmask(&newset, ~_BLOCKABLE);

spin_lock_irq(&current->sighand->siglock);
current->saved_sigmask = current->blocked;
current->blocked = newset;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
set_current_blocked(&newset);

current->state = TASK_INTERRUPTIBLE;
schedule();
@@ -362,10 +356,7 @@ asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
goto badframe;

sigdelsetmask(&blocked, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
current->blocked = blocked;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
set_current_blocked(&blocked);

sig = restore_sigcontext(&regs, &frame->sf_sc);
if (sig < 0)
@@ -401,10 +392,7 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
goto badframe;

sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
current->blocked = set;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
set_current_blocked(&set);

sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
if (sig < 0)
@@ -580,12 +568,7 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
if (ret)
return ret;

spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
if (!(ka->sa.sa_flags & SA_NODEFER))
sigaddset(&current->blocked, sig);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
block_sigmask(ka, sig);

return ret;
}

@@ -290,11 +290,8 @@ asmlinkage int sys32_sigsuspend(nabi_no_regargs struct pt_regs regs)
return -EFAULT;
sigdelsetmask(&newset, ~_BLOCKABLE);

spin_lock_irq(&current->sighand->siglock);
current->saved_sigmask = current->blocked;
current->blocked = newset;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
set_current_blocked(&newset);

current->state = TASK_INTERRUPTIBLE;
schedule();
@@ -318,11 +315,8 @@ asmlinkage int sys32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
return -EFAULT;
sigdelsetmask(&newset, ~_BLOCKABLE);

spin_lock_irq(&current->sighand->siglock);
current->saved_sigmask = current->blocked;
current->blocked = newset;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
set_current_blocked(&newset);

current->state = TASK_INTERRUPTIBLE;
schedule();
@@ -488,10 +482,7 @@ asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
goto badframe;

sigdelsetmask(&blocked, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
current->blocked = blocked;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
set_current_blocked(&blocked);

sig = restore_sigcontext32(&regs, &frame->sf_sc);
if (sig < 0)
@@ -529,10 +520,7 @@ asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
goto badframe;

sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
current->blocked = set;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
set_current_blocked(&set);

sig = restore_sigcontext32(&regs, &frame->rs_uc.uc_mcontext);
if (sig < 0)

@@ -93,11 +93,8 @@ asmlinkage int sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
sigset_from_compat(&newset, &uset);
sigdelsetmask(&newset, ~_BLOCKABLE);

spin_lock_irq(&current->sighand->siglock);
current->saved_sigmask = current->blocked;
current->blocked = newset;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
set_current_blocked(&newset);

current->state = TASK_INTERRUPTIBLE;
schedule();
@@ -121,10 +118,7 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
goto badframe;

sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
current->blocked = set;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
set_current_blocked(&set);

sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
if (sig < 0)

@@ -18,10 +18,6 @@
#include <linux/atomic.h>


/* Define a way to iterate across irqs. */
#define for_each_irq(i) \
for ((i) = 0; (i) < NR_IRQS; ++(i))

extern atomic_t ppc_n_lost_interrupts;

/* This number is used when no interrupt has been assigned */

@@ -330,14 +330,10 @@ void migrate_irqs(void)

alloc_cpumask_var(&mask, GFP_KERNEL);

for_each_irq(irq) {
for_each_irq_desc(irq, desc) {
struct irq_data *data;
struct irq_chip *chip;

desc = irq_to_desc(irq);
if (!desc)
continue;

data = irq_desc_get_irq_data(desc);
if (irqd_is_per_cpu(data))
continue;

@@ -23,14 +23,11 @@

void machine_kexec_mask_interrupts(void) {
unsigned int i;
struct irq_desc *desc;

for_each_irq(i) {
struct irq_desc *desc = irq_to_desc(i);
for_each_irq_desc(i, desc) {
struct irq_chip *chip;

if (!desc)
continue;

chip = irq_desc_get_chip(desc);
if (!chip)
continue;

@@ -48,7 +48,13 @@
/*
* Assembly helpers from arch/powerpc/net/bpf_jit.S:
*/
extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[];
#define DECLARE_LOAD_FUNC(func) \
extern u8 func[], func##_negative_offset[], func##_positive_offset[]

DECLARE_LOAD_FUNC(sk_load_word);
DECLARE_LOAD_FUNC(sk_load_half);
DECLARE_LOAD_FUNC(sk_load_byte);
DECLARE_LOAD_FUNC(sk_load_byte_msh);

#define FUNCTION_DESCR_SIZE 24

@@ -31,14 +31,13 @@
* then branch directly to slow_path_XXX if required. (In fact, could
* load a spare GPR with the address of slow_path_generic and pass size
* as an argument, making the call site a mtlr, li and bllr.)
*
* Technically, the "is addr < 0" check is unnecessary & slowing down
* the ABS path, as it's statically checked on generation.
*/
.globl sk_load_word
sk_load_word:
cmpdi r_addr, 0
blt bpf_error
blt bpf_slow_path_word_neg
.globl sk_load_word_positive_offset
sk_load_word_positive_offset:
/* Are we accessing past headlen? */
subi r_scratch1, r_HL, 4
cmpd r_scratch1, r_addr
@@ -51,7 +50,9 @@ sk_load_word:
.globl sk_load_half
sk_load_half:
cmpdi r_addr, 0
blt bpf_error
blt bpf_slow_path_half_neg
.globl sk_load_half_positive_offset
sk_load_half_positive_offset:
subi r_scratch1, r_HL, 2
cmpd r_scratch1, r_addr
blt bpf_slow_path_half
@@ -61,7 +62,9 @@ sk_load_half:
.globl sk_load_byte
sk_load_byte:
cmpdi r_addr, 0
blt bpf_error
blt bpf_slow_path_byte_neg
.globl sk_load_byte_positive_offset
sk_load_byte_positive_offset:
cmpd r_HL, r_addr
ble bpf_slow_path_byte
lbzx r_A, r_D, r_addr
@@ -69,22 +72,20 @@ sk_load_byte:

/*
* BPF_S_LDX_B_MSH: ldxb 4*([offset]&0xf)
* r_addr is the offset value, already known positive
* r_addr is the offset value
*/
.globl sk_load_byte_msh
sk_load_byte_msh:
cmpdi r_addr, 0
blt bpf_slow_path_byte_msh_neg
.globl sk_load_byte_msh_positive_offset
sk_load_byte_msh_positive_offset:
cmpd r_HL, r_addr
ble bpf_slow_path_byte_msh
lbzx r_X, r_D, r_addr
rlwinm r_X, r_X, 2, 32-4-2, 31-2
blr

bpf_error:
/* Entered with cr0 = lt */
li r3, 0
/* Generated code will 'blt epilogue', returning 0. */
blr

/* Call out to skb_copy_bits:
* We'll need to back up our volatile regs first; we have
* local variable space at r1+(BPF_PPC_STACK_BASIC).
@@ -136,3 +137,84 @@ bpf_slow_path_byte_msh:
lbz r_X, BPF_PPC_STACK_BASIC+(2*8)(r1)
rlwinm r_X, r_X, 2, 32-4-2, 31-2
blr

/* Call out to bpf_internal_load_pointer_neg_helper:
* We'll need to back up our volatile regs first; we have
* local variable space at r1+(BPF_PPC_STACK_BASIC).
* Allocate a new stack frame here to remain ABI-compliant in
* stashing LR.
*/
#define sk_negative_common(SIZE) \
mflr r0; \
std r0, 16(r1); \
/* R3 goes in parameter space of caller's frame */ \
std r_skb, (BPF_PPC_STACKFRAME+48)(r1); \
std r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \
std r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \
stdu r1, -BPF_PPC_SLOWPATH_FRAME(r1); \
/* R3 = r_skb, as passed */ \
mr r4, r_addr; \
li r5, SIZE; \
bl bpf_internal_load_pointer_neg_helper; \
/* R3 != 0 on success */ \
addi r1, r1, BPF_PPC_SLOWPATH_FRAME; \
ld r0, 16(r1); \
ld r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \
ld r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \
mtlr r0; \
cmpldi r3, 0; \
beq bpf_error_slow; /* cr0 = EQ */ \
mr r_addr, r3; \
ld r_skb, (BPF_PPC_STACKFRAME+48)(r1); \
/* Great success! */

bpf_slow_path_word_neg:
lis r_scratch1,-32 /* SKF_LL_OFF */
cmpd r_addr, r_scratch1 /* addr < SKF_* */
blt bpf_error /* cr0 = LT */
.globl sk_load_word_negative_offset
sk_load_word_negative_offset:
sk_negative_common(4)
lwz r_A, 0(r_addr)
blr

bpf_slow_path_half_neg:
lis r_scratch1,-32 /* SKF_LL_OFF */
cmpd r_addr, r_scratch1 /* addr < SKF_* */
blt bpf_error /* cr0 = LT */
.globl sk_load_half_negative_offset
sk_load_half_negative_offset:
sk_negative_common(2)
lhz r_A, 0(r_addr)
blr

bpf_slow_path_byte_neg:
lis r_scratch1,-32 /* SKF_LL_OFF */
cmpd r_addr, r_scratch1 /* addr < SKF_* */
blt bpf_error /* cr0 = LT */
.globl sk_load_byte_negative_offset
sk_load_byte_negative_offset:
sk_negative_common(1)
lbz r_A, 0(r_addr)
blr

bpf_slow_path_byte_msh_neg:
lis r_scratch1,-32 /* SKF_LL_OFF */
cmpd r_addr, r_scratch1 /* addr < SKF_* */
blt bpf_error /* cr0 = LT */
.globl sk_load_byte_msh_negative_offset
sk_load_byte_msh_negative_offset:
sk_negative_common(1)
lbz r_X, 0(r_addr)
rlwinm r_X, r_X, 2, 32-4-2, 31-2
blr

bpf_error_slow:
/* fabricate a cr0 = lt */
li r_scratch1, -1
cmpdi r_scratch1, 0
bpf_error:
/* Entered with cr0 = lt */
li r3, 0
/* Generated code will 'blt epilogue', returning 0. */
blr

@@ -127,6 +127,9 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
PPC_BLR();
}

#define CHOOSE_LOAD_FUNC(K, func) \
((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)

/* Assemble the body code between the prologue & epilogue. */
static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
struct codegen_context *ctx,
@@ -391,21 +394,16 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,

/*** Absolute loads from packet header/data ***/
case BPF_S_LD_W_ABS:
func = sk_load_word;
func = CHOOSE_LOAD_FUNC(K, sk_load_word);
goto common_load;
case BPF_S_LD_H_ABS:
func = sk_load_half;
func = CHOOSE_LOAD_FUNC(K, sk_load_half);
goto common_load;
case BPF_S_LD_B_ABS:
func = sk_load_byte;
func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
common_load:
/*
* Load from [K]. Reference with the (negative)
* SKF_NET_OFF/SKF_LL_OFF offsets is unsupported.
*/
/* Load from [K]. */
ctx->seen |= SEEN_DATAREF;
if ((int)K < 0)
return -ENOTSUPP;
PPC_LI64(r_scratch1, func);
PPC_MTLR(r_scratch1);
PPC_LI32(r_addr, K);
@@ -429,7 +427,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
common_load_ind:
/*
* Load from [X + K]. Negative offsets are tested for
* in the helper functions, and result in a 'ret 0'.
* in the helper functions.
*/
ctx->seen |= SEEN_DATAREF | SEEN_XREG;
PPC_LI64(r_scratch1, func);
@@ -443,13 +441,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
break;

case BPF_S_LDX_B_MSH:
/*
* x86 version drops packet (RET 0) when K<0, whereas
* interpreter does allow K<0 (__load_pointer, special
* ancillary data). common_load returns ENOTSUPP if K<0,
* so we fall back to interpreter & filter works.
*/
func = sk_load_byte_msh;
func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
goto common_load;
break;

@@ -114,7 +114,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
pr_devel("axon_msi: woff %x roff %x msi %x\n",
write_offset, msic->read_offset, msi);

if (msi < NR_IRQS && irq_get_chip_data(msi) == msic) {
if (msi < nr_irqs && irq_get_chip_data(msi) == msic) {
generic_handle_irq(msi);
msic->fifo_virt[idx] = cpu_to_le32(0xffffffff);
} else {
@@ -276,9 +276,6 @@ static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
if (rc)
return rc;

/* We rely on being able to stash a virq in a u16 */
BUILD_BUG_ON(NR_IRQS > 65536);

list_for_each_entry(entry, &dev->msi_list, list) {
virq = irq_create_direct_mapping(msic->irq_domain);
if (virq == NO_IRQ) {
@@ -392,7 +389,8 @@ static int axon_msi_probe(struct platform_device *device)
}
memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);

msic->irq_domain = irq_domain_add_nomap(dn, 0, &msic_host_ops, msic);
/* We rely on being able to stash a virq in a u16, so limit irqs to < 65536 */
msic->irq_domain = irq_domain_add_nomap(dn, 65536, &msic_host_ops, msic);
if (!msic->irq_domain) {
printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %s\n",
dn->full_name);

@@ -248,6 +248,6 @@ void beatic_deinit_IRQ(void)
{
int i;

for (i = 1; i < NR_IRQS; i++)
for (i = 1; i < nr_irqs; i++)
beat_destruct_irq_plug(i);
}

@@ -57,9 +57,9 @@ static int max_real_irqs;

static DEFINE_RAW_SPINLOCK(pmac_pic_lock);

#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
static unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
/* The max irq number this driver deals with is 128; see max_irqs */
static DECLARE_BITMAP(ppc_lost_interrupts, 128);
static DECLARE_BITMAP(ppc_cached_irq_mask, 128);
static int pmac_irq_cascade = -1;
static struct irq_domain *pmac_pic_host;

@@ -30,9 +30,9 @@ config PPC_SPLPAR
two or more partitions.

config EEH
bool "PCI Extended Error Handling (EEH)" if EXPERT
bool
depends on PPC_PSERIES && PCI
default y if !EXPERT
default y

config PSERIES_MSI
bool

@@ -51,8 +51,7 @@
static intctl_cpm2_t __iomem *cpm2_intctl;

static struct irq_domain *cpm2_pic_host;
#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
static unsigned long ppc_cached_irq_mask[2]; /* 2 32-bit registers */

static const u_char irq_to_siureg[] = {
1, 1, 1, 1, 1, 1, 1, 1,

@@ -18,69 +18,45 @@
extern int cpm_get_irq(struct pt_regs *regs);

static struct irq_domain *mpc8xx_pic_host;
#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
static unsigned long mpc8xx_cached_irq_mask;
static sysconf8xx_t __iomem *siu_reg;

int cpm_get_irq(struct pt_regs *regs);
static inline unsigned long mpc8xx_irqd_to_bit(struct irq_data *d)
{
return 0x80000000 >> irqd_to_hwirq(d);
}

static void mpc8xx_unmask_irq(struct irq_data *d)
{
int bit, word;
unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);

bit = irq_nr & 0x1f;
word = irq_nr >> 5;

ppc_cached_irq_mask[word] |= (1 << (31-bit));
out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]);
mpc8xx_cached_irq_mask |= mpc8xx_irqd_to_bit(d);
out_be32(&siu_reg->sc_simask, mpc8xx_cached_irq_mask);
}

static void mpc8xx_mask_irq(struct irq_data *d)
{
int bit, word;
unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);

bit = irq_nr & 0x1f;
word = irq_nr >> 5;

ppc_cached_irq_mask[word] &= ~(1 << (31-bit));
out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]);
mpc8xx_cached_irq_mask &= ~mpc8xx_irqd_to_bit(d);
out_be32(&siu_reg->sc_simask, mpc8xx_cached_irq_mask);
}

static void mpc8xx_ack(struct irq_data *d)
{
int bit;
unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);

bit = irq_nr & 0x1f;
out_be32(&siu_reg->sc_sipend, 1 << (31-bit));
out_be32(&siu_reg->sc_sipend, mpc8xx_irqd_to_bit(d));
}

static void mpc8xx_end_irq(struct irq_data *d)
{
int bit, word;
unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);

bit = irq_nr & 0x1f;
word = irq_nr >> 5;

ppc_cached_irq_mask[word] |= (1 << (31-bit));
out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]);
mpc8xx_cached_irq_mask |= mpc8xx_irqd_to_bit(d);
out_be32(&siu_reg->sc_simask, mpc8xx_cached_irq_mask);
}

static int mpc8xx_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
if (flow_type & IRQ_TYPE_EDGE_FALLING) {
irq_hw_number_t hw = (unsigned int)irqd_to_hwirq(d);
/* only external IRQ senses are programmable */
if ((flow_type & IRQ_TYPE_EDGE_FALLING) && !(irqd_to_hwirq(d) & 1)) {
unsigned int siel = in_be32(&siu_reg->sc_siel);

/* only external IRQ senses are programmable */
if ((hw & 1) == 0) {
siel |= (0x80000000 >> hw);
out_be32(&siu_reg->sc_siel, siel);
__irq_set_handler_locked(d->irq, handle_edge_irq);
}
siel |= mpc8xx_irqd_to_bit(d);
out_be32(&siu_reg->sc_siel, siel);
__irq_set_handler_locked(d->irq, handle_edge_irq);
}
return 0;
}
@@ -132,6 +108,9 @@ static int mpc8xx_pic_host_xlate(struct irq_domain *h, struct device_node *ct,
IRQ_TYPE_EDGE_FALLING,
};

if (intspec[0] > 0x1f)
return 0;

*out_hwirq = intspec[0];
if (intsize > 1 && intspec[1] < 4)
*out_flags = map_pic_senses[intspec[1]];

@@ -22,6 +22,7 @@
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <asm/debug.h>
#include <asm/prom.h>
#include <asm/scom.h>

@@ -188,6 +188,7 @@ void xics_migrate_irqs_away(void)
{
int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id();
unsigned int irq, virq;
struct irq_desc *desc;

/* If we used to be the default server, move to the new "boot_cpuid" */
if (hw_cpu == xics_default_server)
@@ -202,8 +203,7 @@ void xics_migrate_irqs_away(void)
/* Allow IPIs again... */
icp_ops->set_priority(DEFAULT_PRIORITY);

for_each_irq(virq) {
struct irq_desc *desc;
for_each_irq_desc(virq, desc) {
struct irq_chip *chip;
long server;
unsigned long flags;
@@ -212,9 +212,8 @@ void xics_migrate_irqs_away(void)
/* We can't set affinity on ISA interrupts */
if (virq < NUM_ISA_INTERRUPTS)
continue;
desc = irq_to_desc(virq);
/* We only need to migrate enabled IRQS */
if (!desc || !desc->action)
if (!desc->action)
continue;
if (desc->irq_data.domain != xics_host)
continue;

@@ -11,7 +11,7 @@
#include <linux/types.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i) ( (atomic_t) { (i) } )
#define ATOMIC_INIT(i) { (i) }

#define atomic_read(v) (*(volatile int *)&(v)->counter)
#define atomic_set(v,i) ((v)->counter = (i))

@@ -86,7 +86,7 @@ static noinline int vmalloc_fault(unsigned long address)
pte_t *pte_k;

/* Make sure we are in vmalloc/module/P3 area: */
if (!(address >= VMALLOC_START && address < P3_ADDR_MAX))
if (!(address >= P3SEG && address < P3_ADDR_MAX))
return -1;

/*

@@ -47,8 +47,8 @@ struct pci_controller {
*/
#define PCI_DMA_BUS_IS_PHYS 1

int __devinit tile_pci_init(void);
int __devinit pcibios_init(void);
int __init tile_pci_init(void);
int __init pcibios_init(void);

static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {}

@@ -141,7 +141,7 @@ static int __devinit tile_init_irqs(int controller_id,
*
* Returns the number of controllers discovered.
*/
int __devinit tile_pci_init(void)
int __init tile_pci_init(void)
{
int i;

@@ -287,7 +287,7 @@ static void __devinit fixup_read_and_payload_sizes(void)
* The controllers have been set up by the time we get here, by a call to
* tile_pci_init.
*/
int __devinit pcibios_init(void)
int __init pcibios_init(void)
{
int i;

@@ -33,6 +33,9 @@
__HEAD
ENTRY(startup_32)
#ifdef CONFIG_EFI_STUB
jmp preferred_addr

.balign 0x10
/*
* We don't need the return address, so set up the stack so
* efi_main() can find its arugments.
@@ -41,12 +44,17 @@ ENTRY(startup_32)

call efi_main
cmpl $0, %eax
je preferred_addr
movl %eax, %esi
call 1f
jne 2f
1:
/* EFI init failed, so hang. */
hlt
jmp 1b
2:
call 3f
3:
popl %eax
subl $1b, %eax
subl $3b, %eax
subl BP_pref_address(%esi), %eax
add BP_code32_start(%esi), %eax
leal preferred_addr(%eax), %eax

@@ -200,18 +200,28 @@ ENTRY(startup_64)
* entire text+data+bss and hopefully all of memory.
*/
#ifdef CONFIG_EFI_STUB
pushq %rsi
/*
* The entry point for the PE/COFF executable is 0x210, so only
* legacy boot loaders will execute this jmp.
*/
jmp preferred_addr

.org 0x210
mov %rcx, %rdi
mov %rdx, %rsi
call efi_main
popq %rsi
cmpq $0,%rax
je preferred_addr
movq %rax,%rsi
call 1f
cmpq $0,%rax
jne 2f
1:
/* EFI init failed, so hang. */
hlt
jmp 1b
2:
call 3f
3:
popq %rax
subq $1b, %rax
subq $3b, %rax
subq BP_pref_address(%rsi), %rax
add BP_code32_start(%esi), %eax
leaq preferred_addr(%rax), %rax

@@ -205,8 +205,13 @@ int main(int argc, char ** argv)
put_unaligned_le32(file_sz, &buf[pe_header + 0x50]);

#ifdef CONFIG_X86_32
/* Address of entry point */
put_unaligned_le32(i, &buf[pe_header + 0x28]);
/*
* Address of entry point.
*
* The EFI stub entry point is +16 bytes from the start of
* the .text section.
*/
put_unaligned_le32(i + 16, &buf[pe_header + 0x28]);

/* .text size */
put_unaligned_le32(file_sz, &buf[pe_header + 0xb0]);
@@ -217,9 +222,11 @@ int main(int argc, char ** argv)
/*
* Address of entry point. startup_32 is at the beginning and
* the 64-bit entry point (startup_64) is always 512 bytes
* after.
* after. The EFI stub entry point is 16 bytes after that, as
* the first instruction allows legacy loaders to jump over
* the EFI stub initialisation
*/
put_unaligned_le32(i + 512, &buf[pe_header + 0x28]);
put_unaligned_le32(i + 528, &buf[pe_header + 0x28]);

/* .text size */
put_unaligned_le32(file_sz, &buf[pe_header + 0xc0]);

@@ -7,9 +7,9 @@
#else
# ifdef __i386__
# include "posix_types_32.h"
# elif defined(__LP64__)
# include "posix_types_64.h"
# else
# elif defined(__ILP32__)
# include "posix_types_x32.h"
# else
# include "posix_types_64.h"
# endif
#endif

@@ -257,7 +257,7 @@ struct sigcontext {
__u64 oldmask;
__u64 cr2;
struct _fpstate __user *fpstate; /* zero when no FPU context */
#ifndef __LP64__
#ifdef __ILP32__
__u32 __fpstate_pad;
#endif
__u64 reserved1[8];

@@ -2,7 +2,13 @@
#define _ASM_X86_SIGINFO_H

#ifdef __x86_64__
# define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
# ifdef __ILP32__ /* x32 */
typedef long long __kernel_si_clock_t __attribute__((aligned(4)));
# define __ARCH_SI_CLOCK_T __kernel_si_clock_t
# define __ARCH_SI_ATTRIBUTES __attribute__((aligned(8)))
# else /* x86-64 */
# define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
# endif
#endif

#include <asm-generic/siginfo.h>

@@ -63,10 +63,10 @@
#else
# ifdef __i386__
# include <asm/unistd_32.h>
# elif defined(__LP64__)
# include <asm/unistd_64.h>
# else
# elif defined(__ILP32__)
# include <asm/unistd_x32.h>
# else
# include <asm/unistd_64.h>
# endif
#endif

@@ -195,6 +195,5 @@ extern struct x86_msi_ops x86_msi;

extern void x86_init_noop(void);
extern void x86_init_uint_noop(unsigned int unused);
extern void x86_default_fixup_cpu_id(struct cpuinfo_x86 *c, int node);

#endif

@@ -24,6 +24,10 @@ unsigned long acpi_realmode_flags;
static char temp_stack[4096];
#endif

asmlinkage void acpi_enter_s3(void)
{
acpi_enter_sleep_state(3, wake_sleep_flags);
}
/**
* acpi_suspend_lowlevel - save kernel state
*

@@ -3,12 +3,16 @@
*/

#include <asm/trampoline.h>
#include <linux/linkage.h>

extern unsigned long saved_video_mode;
extern long saved_magic;

extern int wakeup_pmode_return;

extern u8 wake_sleep_flags;
extern asmlinkage void acpi_enter_s3(void);

extern unsigned long acpi_copy_wakeup_routine(unsigned long);
extern void wakeup_long64(void);

@@ -74,9 +74,7 @@ restore_registers:
ENTRY(do_suspend_lowlevel)
call save_processor_state
call save_registers
pushl $3
call acpi_enter_sleep_state
addl $4, %esp
call acpi_enter_s3

# In case of S3 failure, we'll emerge here. Jump
# to ret_point to recover

@@ -71,9 +71,7 @@ ENTRY(do_suspend_lowlevel)
movq %rsi, saved_rsi

addq $8, %rsp
movl $3, %edi
xorl %eax, %eax
call acpi_enter_sleep_state
call acpi_enter_s3
/* in case something went wrong, restore the machine status and go on */
jmp resume_point

@@ -1637,9 +1637,11 @@ static int __init apic_verify(void)
mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;

/* The BIOS may have set up the APIC at some other address */
rdmsr(MSR_IA32_APICBASE, l, h);
if (l & MSR_IA32_APICBASE_ENABLE)
mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
if (boot_cpu_data.x86 >= 6) {
rdmsr(MSR_IA32_APICBASE, l, h);
if (l & MSR_IA32_APICBASE_ENABLE)
mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
}

pr_info("Found and enabled local APIC!\n");
return 0;
@@ -1657,13 +1659,15 @@ int __init apic_force_enable(unsigned long addr)
* MSR. This can only be done in software for Intel P6 or later
* and AMD K7 (Model > 1) or later.
*/
rdmsr(MSR_IA32_APICBASE, l, h);
if (!(l & MSR_IA32_APICBASE_ENABLE)) {
pr_info("Local APIC disabled by BIOS -- reenabling.\n");
l &= ~MSR_IA32_APICBASE_BASE;
l |= MSR_IA32_APICBASE_ENABLE | addr;
wrmsr(MSR_IA32_APICBASE, l, h);
enabled_via_apicbase = 1;
if (boot_cpu_data.x86 >= 6) {
rdmsr(MSR_IA32_APICBASE, l, h);
if (!(l & MSR_IA32_APICBASE_ENABLE)) {
pr_info("Local APIC disabled by BIOS -- reenabling.\n");
l &= ~MSR_IA32_APICBASE_BASE;
l |= MSR_IA32_APICBASE_ENABLE | addr;
wrmsr(MSR_IA32_APICBASE, l, h);
enabled_via_apicbase = 1;
}
}
return apic_verify();
}
@@ -2209,10 +2213,12 @@ static void lapic_resume(void)
* FIXME! This will be wrong if we ever support suspend on
* SMP! We'll need to do this as part of the CPU restore!
*/
rdmsr(MSR_IA32_APICBASE, l, h);
l &= ~MSR_IA32_APICBASE_BASE;
l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
wrmsr(MSR_IA32_APICBASE, l, h);
if (boot_cpu_data.x86 >= 6) {
rdmsr(MSR_IA32_APICBASE, l, h);
l &= ~MSR_IA32_APICBASE_BASE;
l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
wrmsr(MSR_IA32_APICBASE, l, h);
}
}

maxlvt = lapic_get_maxlvt();

@@ -207,8 +207,11 @@ static void __init map_csrs(void)

static void fixup_cpu_id(struct cpuinfo_x86 *c, int node)
{
c->phys_proc_id = node;
per_cpu(cpu_llc_id, smp_processor_id()) = node;

if (c->phys_proc_id != node) {
c->phys_proc_id = node;
per_cpu(cpu_llc_id, smp_processor_id()) = node;
}
}

static int __init numachip_system_init(void)

@@ -24,6 +24,12 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
if (x2apic_phys)
return x2apic_enabled();
else if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) &&
(acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL) &&
x2apic_enabled()) {
printk(KERN_DEBUG "System requires x2apic physical mode\n");
return 1;
}
else
return 0;
}

@@ -26,7 +26,8 @@
* contact AMD for precise details and a CPU swap.
*
* See http://www.multimania.com/poulot/k6bug.html
* http://www.amd.com/K6/k6docs/revgd.html
* and section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
* (Publication # 21266 Issue Date: August 1998)
*
* The following test is erm.. interesting. AMD neglected to up
* the chip setting when fixing the bug but they also tweaked some
@@ -94,7 +95,6 @@ static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
"system stability may be impaired when more than 32 MB are used.\n");
else
printk(KERN_CONT "probably OK (after B9730xxxx).\n");
printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
}

/* K6 with old style WHCR */
@@ -353,10 +353,11 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
node = per_cpu(cpu_llc_id, cpu);

/*
* If core numbers are inconsistent, it's likely a multi-fabric platform,
* so invoke platform-specific handler
* On multi-fabric platform (e.g. Numascale NumaChip) a
* platform-specific handler needs to be called to fixup some
* IDs of the CPU.
*/
if (c->phys_proc_id != node)
if (x86_cpuinit.fixup_cpu_id)
x86_cpuinit.fixup_cpu_id(c, node);

if (!node_online(node)) {

@@ -1162,15 +1162,6 @@ static void dbg_restore_debug_regs(void)
#define dbg_restore_debug_regs()
#endif /* ! CONFIG_KGDB */

/*
* Prints an error where the NUMA and configured core-number mismatch and the
* platform didn't override this to fix it up
*/
void __cpuinit x86_default_fixup_cpu_id(struct cpuinfo_x86 *c, int node)
{
pr_err("NUMA core number %d differs from configured core number %d\n", node, c->phys_proc_id);
}

/*
* cpu_init() initializes state that is per-CPU. Some data is already
* initialized (naturally) in the bootstrap process, such as the GDT

@@ -433,14 +433,14 @@ int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
/* check if @slot is already used or the index is already disabled */
ret = amd_get_l3_disable_slot(nb, slot);
if (ret >= 0)
return -EINVAL;
return -EEXIST;

if (index > nb->l3_cache.indices)
return -EINVAL;

/* check whether the other slot has disabled the same index already */
if (index == amd_get_l3_disable_slot(nb, !slot))
return -EINVAL;
return -EEXIST;

amd_l3_disable_index(nb, cpu, slot, index);

@@ -468,8 +468,8 @@ static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val);
if (err) {
if (err == -EEXIST)
printk(KERN_WARNING "L3 disable slot %d in use!\n",
slot);
pr_warning("L3 slot %d in use/index already disabled!\n",
slot);
return err;
}
return count;

@@ -235,6 +235,7 @@ int init_fpu(struct task_struct *tsk)
if (tsk_used_math(tsk)) {
if (HAVE_HWFP && tsk == current)
unlazy_fpu(tsk);
tsk->thread.fpu.last_cpu = ~0;
return 0;
}

@@ -82,11 +82,6 @@ static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
{
struct cpuinfo_x86 *c = &cpu_data(cpu);

if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
pr_warning("CPU%d: family %d not supported\n", cpu, c->x86);
return -1;
}

csig->rev = c->microcode;
pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev);

@@ -380,6 +375,13 @@ static struct microcode_ops microcode_amd_ops = {

struct microcode_ops * __init init_amd_microcode(void)
{
struct cpuinfo_x86 *c = &cpu_data(0);

if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
pr_warning("AMD CPU family 0x%x not supported\n", c->x86);
return NULL;
}

patch = (void *)get_zeroed_page(GFP_KERNEL);
if (!patch)
return NULL;

@@ -419,10 +419,8 @@ static int mc_device_add(struct device *dev, struct subsys_interface *sif)
if (err)
return err;

if (microcode_init_cpu(cpu) == UCODE_ERROR) {
sysfs_remove_group(&dev->kobj, &mc_attr_group);
if (microcode_init_cpu(cpu) == UCODE_ERROR)
return -EINVAL;
}

return err;
}
@@ -528,11 +526,11 @@ static int __init microcode_init(void)
microcode_ops = init_intel_microcode();
else if (c->x86_vendor == X86_VENDOR_AMD)
microcode_ops = init_amd_microcode();

if (!microcode_ops) {
else
pr_err("no support for this CPU vendor\n");

if (!microcode_ops)
return -ENODEV;
}

microcode_pdev = platform_device_register_simple("microcode", -1,
NULL, 0);

@@ -93,7 +93,6 @@ struct x86_init_ops x86_init __initdata = {

struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
.early_percpu_clock_init = x86_init_noop,
.setup_percpu_clockev = setup_secondary_APIC_clock,
.fixup_cpu_id = x86_default_fixup_cpu_id,
};

static void default_nmi_init(void) { };

@@ -805,7 +805,7 @@ void intel_scu_devices_create(void)
} else
i2c_register_board_info(i2c_bus[i], i2c_devs[i], 1);
}
intel_scu_notifier_post(SCU_AVAILABLE, 0L);
intel_scu_notifier_post(SCU_AVAILABLE, NULL);
}
EXPORT_SYMBOL_GPL(intel_scu_devices_create);

@@ -814,7 +814,7 @@ void intel_scu_devices_destroy(void)
{
int i;

intel_scu_notifier_post(SCU_DOWN, 0L);
intel_scu_notifier_post(SCU_DOWN, NULL);

for (i = 0; i < ipc_next_dev; i++)
platform_device_del(ipc_devs[i]);

@@ -261,7 +261,8 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx,

static bool __init xen_check_mwait(void)
{
#ifdef CONFIG_ACPI
#if defined(CONFIG_ACPI) && !defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR) && \
!defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR_MODULE)
struct xen_platform_op op = {
.cmd = XENPF_set_processor_pminfo,
.u.set_pminfo.id = -1,
@@ -349,7 +350,6 @@ static void __init xen_init_cpuid_mask(void)
/* Xen will set CR4.OSXSAVE if supported and not disabled by force */
if ((cx & xsave_mask) != xsave_mask)
cpuid_leaf1_ecx_mask &= ~xsave_mask; /* disable XSAVE & OSXSAVE */

if (xen_check_mwait())
cpuid_leaf1_ecx_set_mask = (1 << (X86_FEATURE_MWAIT % 32));
}

@@ -178,6 +178,7 @@ static void __init xen_fill_possible_map(void)
static void __init xen_filter_cpu_maps(void)
{
int i, rc;
unsigned int subtract = 0;

if (!xen_initial_domain())
return;
@@ -192,8 +193,22 @@ static void __init xen_filter_cpu_maps(void)
} else {
set_cpu_possible(i, false);
set_cpu_present(i, false);
subtract++;
}
}
#ifdef CONFIG_HOTPLUG_CPU
/* This is akin to using 'nr_cpus' on the Linux command line.
* Which is OK as when we use 'dom0_max_vcpus=X' we can only
* have up to X, while nr_cpu_ids is greater than X. This
* normally is not a problem, except when CPU hotplugging
* is involved and then there might be more than X CPUs
* in the guest - which will not work as there is no
* hypercall to expand the max number of VCPUs an already
* running guest has. So cap it up to X. */
if (subtract)
nr_cpu_ids = nr_cpu_ids - subtract;
#endif

}

static void __init xen_smp_prepare_boot_cpu(void)

@@ -96,7 +96,7 @@ ENTRY(xen_restore_fl_direct)

/* check for unmasked and pending */
cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
jz 1f
jnz 1f
2: call check_events
1:
ENDPATCH(xen_restore_fl_direct)

@@ -11,9 +11,6 @@
#ifndef _XTENSA_HARDIRQ_H
#define _XTENSA_HARDIRQ_H

void ack_bad_irq(unsigned int irq);
#define ack_bad_irq ack_bad_irq

#include <asm-generic/hardirq.h>

#endif /* _XTENSA_HARDIRQ_H */

@@ -14,6 +14,7 @@
#ifdef __KERNEL__
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/bug.h>
#include <linux/kernel.h>

#include <linux/types.h>
Some files were not shown because too many files have changed in this diff.