Merge branch 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6
* 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6:
  [S390] topology: fix topology on z10 machines
  [S390] crypto: avoid MSA3 and MSA4 instructions in ESA mode
  [S390] avoid STCKF if running in ESA mode
  [S390] zfcpdump: Do not initialize zfcpdump in kdump mode
  [S390] ap: Setup processing for messages in request queue.
  [S390] Kconfig: Select CONFIG_KEXEC for CONFIG_CRASH_DUMP
  [S390] incorrect note program header
  [S390] pfault: ignore leftover completion interrupts
  [S390] fix pgste update logic
  [S390] wire up process_vm syscalls
commit 6aaf05f472
@@ -572,6 +572,7 @@ config KEXEC
 config CRASH_DUMP
 	bool "kernel crash dumps"
 	depends on 64BIT
+	select KEXEC
 	help
 	  Generate crash dump after being started by kexec.
 	  Crash dump kernels are loaded in the main kernel with kexec-tools
@@ -368,9 +368,12 @@ static inline int crypt_s390_func_available(int func,
 
 	if (facility_mask & CRYPT_S390_MSA && !test_facility(17))
 		return 0;
-	if (facility_mask & CRYPT_S390_MSA3 && !test_facility(76))
+	if (facility_mask & CRYPT_S390_MSA3 &&
+	    (!test_facility(2) || !test_facility(76)))
 		return 0;
-	if (facility_mask & CRYPT_S390_MSA4 && !test_facility(77))
+	if (facility_mask & CRYPT_S390_MSA4 &&
+	    (!test_facility(2) || !test_facility(77)))
 		return 0;
 
 	switch (func & CRYPT_S390_OP_MASK) {
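Per the "[S390] crypto: avoid MSA3 and MSA4 instructions in ESA mode" patch in this merge, the MSA3/MSA4 facility bits (76/77) are only trusted when facility bit 2 is also reported. A minimal user-space sketch of this kind of dependent-capability test over an MSB-first bitmap; facility_set() and the array below are illustrative stand-ins, not the kernel's test_facility() implementation:

/* Hedged sketch: a "high" capability bit (e.g. MSA3) is honoured only when
 * its prerequisite bit is also set. */
#include <stdbool.h>
#include <stdio.h>

static bool facility_set(const unsigned char *map, unsigned int nr)
{
	return map[nr / 8] & (0x80 >> (nr % 8));	/* MSB-first bitmap */
}

static bool msa3_usable(const unsigned char *map)
{
	/* Mirrors the patched check: bit 2 and bit 76 must both be present. */
	return facility_set(map, 2) && facility_set(map, 76);
}

int main(void)
{
	unsigned char map[16] = { 0 };

	map[76 / 8] |= 0x80 >> (76 % 8);		/* MSA3 bit alone */
	printf("MSA3 usable: %d\n", msa3_usable(map));	/* prints 0 */
	map[2 / 8] |= 0x80 >> (2 % 8);			/* add the prerequisite bit */
	printf("MSA3 usable: %d\n", msa3_usable(map));	/* prints 1 */
	return 0;
}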
@@ -593,6 +593,8 @@ static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
 	unsigned long address, bits;
 	unsigned char skey;
 
+	if (!pte_present(*ptep))
+		return pgste;
 	address = pte_val(*ptep) & PAGE_MASK;
 	skey = page_get_storage_key(address);
 	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
@@ -625,6 +627,8 @@ static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
 #ifdef CONFIG_PGSTE
 	int young;
 
+	if (!pte_present(*ptep))
+		return pgste;
 	young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
 	/* Transfer page referenced bit to pte software bit (host view) */
 	if (young || (pgste_val(pgste) & RCP_HR_BIT))
@@ -638,13 +642,15 @@ static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
 }
 
-static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste)
+static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
 {
 #ifdef CONFIG_PGSTE
 	unsigned long address;
 	unsigned long okey, nkey;
 
-	address = pte_val(*ptep) & PAGE_MASK;
+	if (!pte_present(entry))
+		return;
+	address = pte_val(entry) & PAGE_MASK;
 	okey = nkey = page_get_storage_key(address);
 	nkey &= ~(_PAGE_ACC_BITS | _PAGE_FP_BIT);
 	/* Set page access key and fetch protection bit from pgste */
@@ -712,7 +718,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 
 	if (mm_has_pgste(mm)) {
 		pgste = pgste_get_lock(ptep);
-		pgste_set_pte(ptep, pgste);
+		pgste_set_pte(ptep, pgste, entry);
 		*ptep = entry;
 		pgste_set_unlock(ptep, pgste);
 	} else
@@ -82,6 +82,7 @@ extern unsigned int user_mode;
 #define MACHINE_FLAG_LPAR	(1UL << 12)
 #define MACHINE_FLAG_SPP	(1UL << 13)
 #define MACHINE_FLAG_TOPOLOGY	(1UL << 14)
+#define MACHINE_FLAG_STCKF	(1UL << 15)
 
 #define MACHINE_IS_VM		(S390_lowcore.machine_flags & MACHINE_FLAG_VM)
 #define MACHINE_IS_KVM		(S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
@@ -100,6 +101,7 @@ extern unsigned int user_mode;
 #define MACHINE_HAS_PFMF	(0)
 #define MACHINE_HAS_SPP		(0)
 #define MACHINE_HAS_TOPOLOGY	(0)
+#define MACHINE_HAS_STCKF	(0)
 #else /* __s390x__ */
 #define MACHINE_HAS_IEEE	(1)
 #define MACHINE_HAS_CSP		(1)
@@ -111,6 +113,7 @@ extern unsigned int user_mode;
 #define MACHINE_HAS_PFMF	(S390_lowcore.machine_flags & MACHINE_FLAG_PFMF)
 #define MACHINE_HAS_SPP		(S390_lowcore.machine_flags & MACHINE_FLAG_SPP)
 #define MACHINE_HAS_TOPOLOGY	(S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
+#define MACHINE_HAS_STCKF	(S390_lowcore.machine_flags & MACHINE_FLAG_STCKF)
 #endif /* __s390x__ */
 
 #define ZFCPDUMP_HSA_SIZE	(32UL<<20)
@@ -90,7 +90,7 @@ static inline unsigned long long get_clock_fast(void)
 {
 	unsigned long long clk;
 
-	if (test_facility(25))
+	if (MACHINE_HAS_STCKF)
 		asm volatile(".insn s,0xb27c0000,%0" : "=Q" (clk) : : "cc");
 	else
 		clk = get_clock();
@@ -277,7 +277,9 @@
 #define __NR_clock_adjtime	337
 #define __NR_syncfs		338
 #define __NR_setns		339
-#define NR_syscalls 340
+#define __NR_process_vm_readv	340
+#define __NR_process_vm_writev	341
+#define NR_syscalls 342
 
 /*
  * There are some system calls that are not present on 64 bit, some
@@ -1627,3 +1627,23 @@ ENTRY(sys_setns_wrapper)
 	lgfr	%r2,%r2			# int
 	lgfr	%r3,%r3			# int
 	jg	sys_setns
+
+ENTRY(compat_sys_process_vm_readv_wrapper)
+	lgfr	%r2,%r2			# compat_pid_t
+	llgtr	%r3,%r3			# struct compat_iovec __user *
+	llgfr	%r4,%r4			# unsigned long
+	llgtr	%r5,%r5			# struct compat_iovec __user *
+	llgfr	%r6,%r6			# unsigned long
+	llgf	%r0,164(%r15)		# unsigned long
+	stg	%r0,160(%r15)
+	jg	sys_process_vm_readv
+
+ENTRY(compat_sys_process_vm_writev_wrapper)
+	lgfr	%r2,%r2			# compat_pid_t
+	llgtr	%r3,%r3			# struct compat_iovec __user *
+	llgfr	%r4,%r4			# unsigned long
+	llgtr	%r5,%r5			# struct compat_iovec __user *
+	llgfr	%r6,%r6			# unsigned long
+	llgf	%r0,164(%r15)		# unsigned long
+	stg	%r0,160(%r15)
+	jg	sys_process_vm_writev
@@ -390,6 +390,8 @@ static __init void detect_machine_facilities(void)
 		S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS;
 	if (test_facility(40))
 		S390_lowcore.machine_flags |= MACHINE_FLAG_SPP;
+	if (test_facility(25))
+		S390_lowcore.machine_flags |= MACHINE_FLAG_STCKF;
 #endif
 }
 
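This pairs with the get_clock_fast() hunk above: test_facility(25) now runs once during machine-facility detection and sets MACHINE_FLAG_STCKF, so the hot timing path only tests a cached flag instead of re-probing the facility on every call. A small, architecture-neutral sketch of that probe-once/cache-in-a-flag pattern; all names and values here are hypothetical, not kernel code:

#include <stdbool.h>
#include <stdio.h>

static unsigned long machine_flags;		/* cached capability bits */
#define FLAG_STCKF	(1UL << 15)

static bool probe_stckf(void)			/* stand-in for the hardware probe */
{
	return true;
}

static void detect_facilities(void)		/* run once at startup */
{
	if (probe_stckf())
		machine_flags |= FLAG_STCKF;
}

static unsigned long long get_clock_fast(void)
{
	if (machine_flags & FLAG_STCKF)		/* hot path: flag test only */
		return 42;			/* real code would issue STCKF here */
	return 0;				/* slower fallback path */
}

int main(void)
{
	detect_facilities();
	printf("clock: %llu\n", get_clock_fast());
	return 0;
}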
@@ -211,6 +211,8 @@ static void __init setup_zfcpdump(unsigned int console_devno)
 
 	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
 		return;
+	if (OLDMEM_BASE)
+		return;
 	if (console_devno != -1)
 		sprintf(str, " cio_ignore=all,!0.0.%04x,!0.0.%04x",
 			ipl_info.data.fcp.dev_id.devno, console_devno);
@@ -482,7 +484,7 @@ static void __init setup_memory_end(void)
 
 
 #ifdef CONFIG_ZFCPDUMP
-	if (ipl_info.type == IPL_TYPE_FCP_DUMP) {
+	if (ipl_info.type == IPL_TYPE_FCP_DUMP && !OLDMEM_BASE) {
 		memory_end = ZFCPDUMP_HSA_SIZE;
 		memory_end_set = 1;
 	}
@@ -348,3 +348,5 @@ SYSCALL(sys_open_by_handle_at,sys_open_by_handle_at,compat_sys_open_by_handle_at
 SYSCALL(sys_clock_adjtime,sys_clock_adjtime,compat_sys_clock_adjtime_wrapper)
 SYSCALL(sys_syncfs,sys_syncfs,sys_syncfs_wrapper)
 SYSCALL(sys_setns,sys_setns,sys_setns_wrapper)
+SYSCALL(sys_process_vm_readv,sys_process_vm_readv,compat_sys_process_vm_readv_wrapper) /* 340 */
+SYSCALL(sys_process_vm_writev,sys_process_vm_writev,compat_sys_process_vm_writev_wrapper)
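With the __NR_process_vm_readv/__NR_process_vm_writev numbers, the compat wrappers, and these syscall-table entries in place, user space on s390 can reach the new process_vm syscalls. A minimal usage sketch via the glibc wrapper (assuming a glibc that ships process_vm_readv, 2.15 or later); it simply reads a buffer back out of the calling process itself:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char src[] = "read across address spaces";
	char dst[sizeof(src)] = { 0 };
	struct iovec local  = { .iov_base = dst, .iov_len = sizeof(dst) };
	struct iovec remote = { .iov_base = src, .iov_len = sizeof(src) };

	/* Using our own pid keeps the example self-contained; normally the
	 * target would be another process (subject to ptrace permissions). */
	ssize_t n = process_vm_readv(getpid(), &local, 1, &remote, 1, 0);
	if (n < 0) {
		perror("process_vm_readv");
		return 1;
	}
	printf("copied %zd bytes: %s\n", n, dst);
	return 0;
}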
@@ -68,8 +68,10 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
 	return mask;
 }
 
-static void add_cpus_to_mask(struct topology_cpu *tl_cpu,
-			     struct mask_info *book, struct mask_info *core)
+static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
+					  struct mask_info *book,
+					  struct mask_info *core,
+					  int z10)
 {
 	unsigned int cpu;
 
@@ -88,10 +90,16 @@ static void add_cpus_to_mask(struct topology_cpu *tl_cpu,
 			cpu_book_id[lcpu] = book->id;
 #endif
 			cpumask_set_cpu(lcpu, &core->mask);
-			cpu_core_id[lcpu] = core->id;
+			if (z10) {
+				cpu_core_id[lcpu] = rcpu;
+				core = core->next;
+			} else {
+				cpu_core_id[lcpu] = core->id;
+			}
 			smp_cpu_polarization[lcpu] = tl_cpu->pp;
 		}
 	}
+	return core;
 }
 
 static void clear_masks(void)
@@ -123,18 +131,41 @@ static void tl_to_cores(struct sysinfo_15_1_x *info)
 {
 #ifdef CONFIG_SCHED_BOOK
 	struct mask_info *book = &book_info;
+	struct cpuid cpu_id;
 #else
 	struct mask_info *book = NULL;
 #endif
 	struct mask_info *core = &core_info;
 	union topology_entry *tle, *end;
+	int z10 = 0;
 
+#ifdef CONFIG_SCHED_BOOK
+	get_cpu_id(&cpu_id);
+	z10 = cpu_id.machine == 0x2097 || cpu_id.machine == 0x2098;
+#endif
 	spin_lock_irq(&topology_lock);
 	clear_masks();
 	tle = info->tle;
 	end = (union topology_entry *)((unsigned long)info + info->length);
 	while (tle < end) {
+#ifdef CONFIG_SCHED_BOOK
+		if (z10) {
+			switch (tle->nl) {
+			case 1:
+				book = book->next;
+				book->id = tle->container.id;
+				break;
+			case 0:
+				core = add_cpus_to_mask(&tle->cpu, book, core, z10);
+				break;
+			default:
+				clear_masks();
+				goto out;
+			}
+			tle = next_tle(tle);
+			continue;
+		}
+#endif
 		switch (tle->nl) {
 #ifdef CONFIG_SCHED_BOOK
 		case 2:
@@ -147,7 +178,7 @@ static void tl_to_cores(struct sysinfo_15_1_x *info)
 			core->id = tle->container.id;
 			break;
 		case 0:
-			add_cpus_to_mask(&tle->cpu, book, core);
+			add_cpus_to_mask(&tle->cpu, book, core, z10);
 			break;
 		default:
 			clear_masks();
@@ -328,8 +359,8 @@ void __init s390_init_cpu_topology(void)
 	for (i = 0; i < TOPOLOGY_NR_MAG; i++)
 		printk(" %d", info->mag[i]);
 	printk(" / %d\n", info->mnest);
-	alloc_masks(info, &core_info, 2);
+	alloc_masks(info, &core_info, 1);
 #ifdef CONFIG_SCHED_BOOK
-	alloc_masks(info, &book_info, 3);
+	alloc_masks(info, &book_info, 2);
 #endif
 }
@@ -43,6 +43,8 @@ SECTIONS
 
 	NOTES :text :note
 
+	.dummy : { *(.dummy) } :data
+
 	RODATA
 
 #ifdef CONFIG_SHARED_KERNEL
@@ -587,8 +587,13 @@ static void pfault_interrupt(unsigned int ext_int_code,
 		} else {
 			/* Completion interrupt was faster than initial
 			 * interrupt. Set pfault_wait to -1 so the initial
-			 * interrupt doesn't put the task to sleep. */
-			tsk->thread.pfault_wait = -1;
+			 * interrupt doesn't put the task to sleep.
+			 * If the task is not running, ignore the completion
+			 * interrupt since it must be a leftover of a PFAULT
+			 * CANCEL operation which didn't remove all pending
+			 * completion interrupts. */
+			if (tsk->state == TASK_RUNNING)
+				tsk->thread.pfault_wait = -1;
 		}
 		put_task_struct(tsk);
 	} else {
@@ -641,6 +641,8 @@ static int __init zcore_init(void)
 
 	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
 		return -ENODATA;
+	if (OLDMEM_BASE)
+		return -ENODATA;
 
 	zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long));
 	debug_register_view(zcore_dbf, &debug_sprintf_view);
@@ -1271,18 +1271,16 @@ ap_config_timeout(unsigned long ptr)
 }
 
 /**
- * ap_schedule_poll_timer(): Schedule poll timer.
+ * __ap_schedule_poll_timer(): Schedule poll timer.
  *
  * Set up the timer to run the poll tasklet
  */
-static inline void ap_schedule_poll_timer(void)
+static inline void __ap_schedule_poll_timer(void)
 {
 	ktime_t hr_time;
 
 	spin_lock_bh(&ap_poll_timer_lock);
-	if (ap_using_interrupts() || ap_suspend_flag)
-		goto out;
-	if (hrtimer_is_queued(&ap_poll_timer))
+	if (hrtimer_is_queued(&ap_poll_timer) || ap_suspend_flag)
 		goto out;
 	if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) {
 		hr_time = ktime_set(0, poll_timeout);
@@ -1293,6 +1291,18 @@ out:
 	spin_unlock_bh(&ap_poll_timer_lock);
 }
 
+/**
+ * ap_schedule_poll_timer(): Schedule poll timer.
+ *
+ * Set up the timer to run the poll tasklet
+ */
+static inline void ap_schedule_poll_timer(void)
+{
+	if (ap_using_interrupts())
+		return;
+	__ap_schedule_poll_timer();
+}
+
 /**
  * ap_poll_read(): Receive pending reply messages from an AP device.
  * @ap_dev: pointer to the AP device
@@ -1374,8 +1384,9 @@ static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
 		*flags |= 1;
 		*flags |= 2;
 		break;
-	case AP_RESPONSE_Q_FULL:
 	case AP_RESPONSE_RESET_IN_PROGRESS:
+		__ap_schedule_poll_timer();
+	case AP_RESPONSE_Q_FULL:
 		*flags |= 2;
 		break;
 	case AP_RESPONSE_MESSAGE_TOO_BIG:
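The last three hunks split the timer arming into __ap_schedule_poll_timer() plus a thin ap_schedule_poll_timer() wrapper, so the AP_RESPONSE_RESET_IN_PROGRESS case can force the poll timer even when interrupts would normally make it unnecessary, and then fall through into the AP_RESPONSE_Q_FULL handling. A generic sketch of that wrapper/worker split, with illustrative names rather than the driver's API:

#include <stdbool.h>
#include <stdio.h>

static bool using_interrupts;	/* normally no timer is needed when set */
static bool timer_queued;

static void __schedule_poll_timer(void)	/* worker: always tries to arm */
{
	if (timer_queued)
		return;
	timer_queued = true;
	puts("poll timer armed");
}

static void schedule_poll_timer(void)		/* wrapper: normal-path policy */
{
	if (using_interrupts)
		return;
	__schedule_poll_timer();
}

int main(void)
{
	using_interrupts = true;
	schedule_poll_timer();		/* skipped: interrupts handle polling */
	__schedule_poll_timer();	/* forced, e.g. while a reset is in progress */
	return 0;
}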
|
Loading…
x
Reference in New Issue
Block a user