Merge branch 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6

* 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6:
  [S390] Update default configuration.
  [S390] disassembler: fix idte instruction format.
  [S390] tape: fix race with stack local wait_queue_head_t.
  [S390] 3270: fix race with stack local wait_queue_head_t.
  [S390] dasd: use a generic wait_queue for sleep_on
  [S390] sclp_vt220: fix scheduling while atomic bug.
  [S390] showmem: Only walk spanned pages.
  [S390] appldata: prevent cpu hotplug when walking cpu_online_map.
  [S390] Fix section mismatch warnings.
  [S390] s390 types: make dma_addr_t 64 bit capable
  [S390] tape: Fix race condition in tape block device driver
  [S390] fix sparsemem related compile error with allnoconfig on s390
This commit is contained in:
Linus Torvalds 2008-05-30 07:44:19 -07:00
commit 9db8ee3d96
15 changed files with 101 additions and 87 deletions

View File

@@ -308,6 +308,9 @@ config ARCH_SPARSEMEM_ENABLE
config ARCH_SPARSEMEM_DEFAULT config ARCH_SPARSEMEM_DEFAULT
def_bool y def_bool y
config ARCH_SELECT_MEMORY_MODEL
def_bool y
source "mm/Kconfig" source "mm/Kconfig"
comment "I/O subsystem configuration" comment "I/O subsystem configuration"

View File

@@ -130,6 +130,7 @@ static void appldata_work_fn(struct work_struct *work)
P_DEBUG(" -= Work Queue =-\n"); P_DEBUG(" -= Work Queue =-\n");
i = 0; i = 0;
get_online_cpus();
spin_lock(&appldata_ops_lock); spin_lock(&appldata_ops_lock);
list_for_each(lh, &appldata_ops_list) { list_for_each(lh, &appldata_ops_list) {
ops = list_entry(lh, struct appldata_ops, list); ops = list_entry(lh, struct appldata_ops, list);
@@ -140,6 +141,7 @@ static void appldata_work_fn(struct work_struct *work)
} }
} }
spin_unlock(&appldata_ops_lock); spin_unlock(&appldata_ops_lock);
put_online_cpus();
} }
/* /*
@@ -266,12 +268,14 @@ appldata_timer_handler(ctl_table *ctl, int write, struct file *filp,
len = *lenp; len = *lenp;
if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len)) if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
return -EFAULT; return -EFAULT;
get_online_cpus();
spin_lock(&appldata_timer_lock); spin_lock(&appldata_timer_lock);
if (buf[0] == '1') if (buf[0] == '1')
__appldata_vtimer_setup(APPLDATA_ADD_TIMER); __appldata_vtimer_setup(APPLDATA_ADD_TIMER);
else if (buf[0] == '0') else if (buf[0] == '0')
__appldata_vtimer_setup(APPLDATA_DEL_TIMER); __appldata_vtimer_setup(APPLDATA_DEL_TIMER);
spin_unlock(&appldata_timer_lock); spin_unlock(&appldata_timer_lock);
put_online_cpus();
out: out:
*lenp = len; *lenp = len;
*ppos += len; *ppos += len;
@@ -314,10 +318,12 @@ appldata_interval_handler(ctl_table *ctl, int write, struct file *filp,
return -EINVAL; return -EINVAL;
} }
get_online_cpus();
spin_lock(&appldata_timer_lock); spin_lock(&appldata_timer_lock);
appldata_interval = interval; appldata_interval = interval;
__appldata_vtimer_setup(APPLDATA_MOD_TIMER); __appldata_vtimer_setup(APPLDATA_MOD_TIMER);
spin_unlock(&appldata_timer_lock); spin_unlock(&appldata_timer_lock);
put_online_cpus();
P_INFO("Monitoring CPU interval set to %u milliseconds.\n", P_INFO("Monitoring CPU interval set to %u milliseconds.\n",
interval); interval);
@@ -556,8 +562,10 @@ static int __init appldata_init(void)
return -ENOMEM; return -ENOMEM;
} }
get_online_cpus();
for_each_online_cpu(i) for_each_online_cpu(i)
appldata_online_cpu(i); appldata_online_cpu(i);
put_online_cpus();
/* Register cpu hotplug notifier */ /* Register cpu hotplug notifier */
register_hotcpu_notifier(&appldata_nb); register_hotcpu_notifier(&appldata_nb);

View File

@@ -1,7 +1,7 @@
# #
# Automatically generated make config: don't edit # Automatically generated make config: don't edit
# Linux kernel version: 2.6.25 # Linux kernel version: 2.6.26-rc4
# Wed Apr 30 11:07:45 2008 # Fri May 30 09:49:33 2008
# #
CONFIG_SCHED_MC=y CONFIG_SCHED_MC=y
CONFIG_MMU=y CONFIG_MMU=y
@@ -103,6 +103,7 @@ CONFIG_RT_MUTEXES=y
# CONFIG_TINY_SHMEM is not set # CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=0 CONFIG_BASE_SMALL=0
CONFIG_MODULES=y CONFIG_MODULES=y
# CONFIG_MODULE_FORCE_LOAD is not set
CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_UNLOAD=y
# CONFIG_MODULE_FORCE_UNLOAD is not set # CONFIG_MODULE_FORCE_UNLOAD is not set
CONFIG_MODVERSIONS=y CONFIG_MODVERSIONS=y
@@ -173,6 +174,7 @@ CONFIG_PREEMPT=y
# CONFIG_PREEMPT_RCU is not set # CONFIG_PREEMPT_RCU is not set
CONFIG_ARCH_SPARSEMEM_ENABLE=y CONFIG_ARCH_SPARSEMEM_ENABLE=y
CONFIG_ARCH_SPARSEMEM_DEFAULT=y CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
CONFIG_SELECT_MEMORY_MODEL=y CONFIG_SELECT_MEMORY_MODEL=y
# CONFIG_FLATMEM_MANUAL is not set # CONFIG_FLATMEM_MANUAL is not set
# CONFIG_DISCONTIGMEM_MANUAL is not set # CONFIG_DISCONTIGMEM_MANUAL is not set
@@ -210,6 +212,7 @@ CONFIG_FORCE_MAX_ZONEORDER=9
CONFIG_PFAULT=y CONFIG_PFAULT=y
# CONFIG_SHARED_KERNEL is not set # CONFIG_SHARED_KERNEL is not set
# CONFIG_CMM is not set # CONFIG_CMM is not set
# CONFIG_PAGE_STATES is not set
CONFIG_VIRT_TIMER=y CONFIG_VIRT_TIMER=y
CONFIG_VIRT_CPU_ACCOUNTING=y CONFIG_VIRT_CPU_ACCOUNTING=y
# CONFIG_APPLDATA_BASE is not set # CONFIG_APPLDATA_BASE is not set
@@ -620,6 +623,7 @@ CONFIG_S390_VMUR=m
# #
# CONFIG_MEMSTICK is not set # CONFIG_MEMSTICK is not set
# CONFIG_NEW_LEDS is not set # CONFIG_NEW_LEDS is not set
CONFIG_ACCESSIBILITY=y
# #
# File systems # File systems
@@ -754,11 +758,12 @@ CONFIG_FRAME_WARN=2048
CONFIG_MAGIC_SYSRQ=y CONFIG_MAGIC_SYSRQ=y
# CONFIG_UNUSED_SYMBOLS is not set # CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y CONFIG_DEBUG_FS=y
CONFIG_HEADERS_CHECK=y # CONFIG_HEADERS_CHECK is not set
CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_KERNEL=y
# CONFIG_SCHED_DEBUG is not set # CONFIG_SCHED_DEBUG is not set
# CONFIG_SCHEDSTATS is not set # CONFIG_SCHEDSTATS is not set
# CONFIG_TIMER_STATS is not set # CONFIG_TIMER_STATS is not set
# CONFIG_DEBUG_OBJECTS is not set
# CONFIG_DEBUG_SLAB is not set # CONFIG_DEBUG_SLAB is not set
CONFIG_DEBUG_PREEMPT=y CONFIG_DEBUG_PREEMPT=y
# CONFIG_DEBUG_RT_MUTEXES is not set # CONFIG_DEBUG_RT_MUTEXES is not set

View File

@@ -208,7 +208,7 @@ static const unsigned char formats[][7] = {
[INSTR_RRF_F0FF] = { 0xff, F_16,F_24,F_28,0,0,0 }, /* e.g. madbr */ [INSTR_RRF_F0FF] = { 0xff, F_16,F_24,F_28,0,0,0 }, /* e.g. madbr */
[INSTR_RRF_FUFF] = { 0xff, F_24,F_16,F_28,U4_20,0,0 },/* e.g. didbr */ [INSTR_RRF_FUFF] = { 0xff, F_24,F_16,F_28,U4_20,0,0 },/* e.g. didbr */
[INSTR_RRF_RURR] = { 0xff, R_24,R_28,R_16,U4_20,0,0 },/* e.g. .insn */ [INSTR_RRF_RURR] = { 0xff, R_24,R_28,R_16,U4_20,0,0 },/* e.g. .insn */
[INSTR_RRF_R0RR] = { 0xff, R_24,R_28,R_16,0,0,0 }, /* e.g. idte */ [INSTR_RRF_R0RR] = { 0xff, R_24,R_16,R_28,0,0,0 }, /* e.g. idte */
[INSTR_RRF_U0FF] = { 0xff, F_24,U4_16,F_28,0,0,0 }, /* e.g. fixr */ [INSTR_RRF_U0FF] = { 0xff, F_24,U4_16,F_28,0,0,0 }, /* e.g. fixr */
[INSTR_RRF_U0RF] = { 0xff, R_24,U4_16,F_28,0,0,0 }, /* e.g. cfebr */ [INSTR_RRF_U0RF] = { 0xff, R_24,U4_16,F_28,0,0,0 }, /* e.g. cfebr */
[INSTR_RRF_M0RR] = { 0xff, R_24,R_28,M_16,0,0,0 }, /* e.g. sske */ [INSTR_RRF_M0RR] = { 0xff, R_24,R_28,M_16,0,0,0 }, /* e.g. sske */

View File

@@ -1089,7 +1089,7 @@ out:
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
int smp_rescan_cpus(void) int __ref smp_rescan_cpus(void)
{ {
cpumask_t newcpus; cpumask_t newcpus;
int cpu; int cpu;

View File

@@ -44,37 +44,34 @@ char empty_zero_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
void show_mem(void) void show_mem(void)
{ {
int i, total = 0, reserved = 0; unsigned long i, total = 0, reserved = 0;
int shared = 0, cached = 0; unsigned long shared = 0, cached = 0;
unsigned long flags;
struct page *page; struct page *page;
pg_data_t *pgdat;
printk("Mem-info:\n"); printk("Mem-info:\n");
show_free_areas(); show_free_areas();
i = max_mapnr; for_each_online_pgdat(pgdat) {
while (i-- > 0) { pgdat_resize_lock(pgdat, &flags);
if (!pfn_valid(i)) for (i = 0; i < pgdat->node_spanned_pages; i++) {
continue; if (!pfn_valid(pgdat->node_start_pfn + i))
page = pfn_to_page(i); continue;
total++; page = pfn_to_page(pgdat->node_start_pfn + i);
if (PageReserved(page)) total++;
reserved++; if (PageReserved(page))
else if (PageSwapCache(page)) reserved++;
cached++; else if (PageSwapCache(page))
else if (page_count(page)) cached++;
shared += page_count(page) - 1; else if (page_count(page))
shared += page_count(page) - 1;
}
pgdat_resize_unlock(pgdat, &flags);
} }
printk("%d pages of RAM\n", total); printk("%ld pages of RAM\n", total);
printk("%d reserved pages\n", reserved); printk("%ld reserved pages\n", reserved);
printk("%d pages shared\n", shared); printk("%ld pages shared\n", shared);
printk("%d pages swap cached\n", cached); printk("%ld pages swap cached\n", cached);
printk("%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
printk("%lu pages writeback\n", global_page_state(NR_WRITEBACK));
printk("%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
printk("%lu pages slab\n",
global_page_state(NR_SLAB_RECLAIMABLE) +
global_page_state(NR_SLAB_UNRECLAIMABLE));
printk("%lu pages pagetables\n", global_page_state(NR_PAGETABLE));
} }
/* /*

View File

@@ -27,12 +27,19 @@ struct memory_segment {
static LIST_HEAD(mem_segs); static LIST_HEAD(mem_segs);
static pud_t *vmem_pud_alloc(void) static void __ref *vmem_alloc_pages(unsigned int order)
{
if (slab_is_available())
return (void *)__get_free_pages(GFP_KERNEL, order);
return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}
static inline pud_t *vmem_pud_alloc(void)
{ {
pud_t *pud = NULL; pud_t *pud = NULL;
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
pud = vmemmap_alloc_block(PAGE_SIZE * 4, 0); pud = vmem_alloc_pages(2);
if (!pud) if (!pud)
return NULL; return NULL;
clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4); clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
@@ -40,12 +47,12 @@ static pud_t *vmem_pud_alloc(void)
return pud; return pud;
} }
static pmd_t *vmem_pmd_alloc(void) static inline pmd_t *vmem_pmd_alloc(void)
{ {
pmd_t *pmd = NULL; pmd_t *pmd = NULL;
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
pmd = vmemmap_alloc_block(PAGE_SIZE * 4, 0); pmd = vmem_alloc_pages(2);
if (!pmd) if (!pmd)
return NULL; return NULL;
clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4); clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
@@ -207,13 +214,14 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
if (pte_none(*pt_dir)) { if (pte_none(*pt_dir)) {
unsigned long new_page; unsigned long new_page;
new_page =__pa(vmemmap_alloc_block(PAGE_SIZE, 0)); new_page =__pa(vmem_alloc_pages(0));
if (!new_page) if (!new_page)
goto out; goto out;
pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL); pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
*pt_dir = pte; *pt_dir = pte;
} }
} }
memset(start, 0, nr * sizeof(struct page));
ret = 0; ret = 0;
out: out:
flush_tlb_kernel_range(start_addr, end_addr); flush_tlb_kernel_range(start_addr, end_addr);

View File

@@ -63,6 +63,7 @@ static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
*/ */
static wait_queue_head_t dasd_init_waitq; static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq; static wait_queue_head_t dasd_flush_wq;
static wait_queue_head_t generic_waitq;
/* /*
* Allocate memory for a new device structure. * Allocate memory for a new device structure.
@@ -1151,11 +1152,15 @@ static void __dasd_device_process_final_queue(struct dasd_device *device,
struct list_head *l, *n; struct list_head *l, *n;
struct dasd_ccw_req *cqr; struct dasd_ccw_req *cqr;
struct dasd_block *block; struct dasd_block *block;
void (*callback)(struct dasd_ccw_req *, void *data);
void *callback_data;
list_for_each_safe(l, n, final_queue) { list_for_each_safe(l, n, final_queue) {
cqr = list_entry(l, struct dasd_ccw_req, devlist); cqr = list_entry(l, struct dasd_ccw_req, devlist);
list_del_init(&cqr->devlist); list_del_init(&cqr->devlist);
block = cqr->block; block = cqr->block;
callback = cqr->callback;
callback_data = cqr->callback_data;
if (block) if (block)
spin_lock_bh(&block->queue_lock); spin_lock_bh(&block->queue_lock);
switch (cqr->status) { switch (cqr->status) {
@@ -1176,7 +1181,7 @@ static void __dasd_device_process_final_queue(struct dasd_device *device,
BUG(); BUG();
} }
if (cqr->callback != NULL) if (cqr->callback != NULL)
(cqr->callback)(cqr, cqr->callback_data); (callback)(cqr, callback_data);
if (block) if (block)
spin_unlock_bh(&block->queue_lock); spin_unlock_bh(&block->queue_lock);
} }
@@ -1406,17 +1411,15 @@ static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
*/ */
int dasd_sleep_on(struct dasd_ccw_req *cqr) int dasd_sleep_on(struct dasd_ccw_req *cqr)
{ {
wait_queue_head_t wait_q;
struct dasd_device *device; struct dasd_device *device;
int rc; int rc;
device = cqr->startdev; device = cqr->startdev;
init_waitqueue_head (&wait_q);
cqr->callback = dasd_wakeup_cb; cqr->callback = dasd_wakeup_cb;
cqr->callback_data = (void *) &wait_q; cqr->callback_data = (void *) &generic_waitq;
dasd_add_request_tail(cqr); dasd_add_request_tail(cqr);
wait_event(wait_q, _wait_for_wakeup(cqr)); wait_event(generic_waitq, _wait_for_wakeup(cqr));
/* Request status is either done or failed. */ /* Request status is either done or failed. */
rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO; rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
@@ -1429,20 +1432,18 @@ int dasd_sleep_on(struct dasd_ccw_req *cqr)
*/ */
int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr) int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
{ {
wait_queue_head_t wait_q;
struct dasd_device *device; struct dasd_device *device;
int rc; int rc;
device = cqr->startdev; device = cqr->startdev;
init_waitqueue_head (&wait_q);
cqr->callback = dasd_wakeup_cb; cqr->callback = dasd_wakeup_cb;
cqr->callback_data = (void *) &wait_q; cqr->callback_data = (void *) &generic_waitq;
dasd_add_request_tail(cqr); dasd_add_request_tail(cqr);
rc = wait_event_interruptible(wait_q, _wait_for_wakeup(cqr)); rc = wait_event_interruptible(generic_waitq, _wait_for_wakeup(cqr));
if (rc == -ERESTARTSYS) { if (rc == -ERESTARTSYS) {
dasd_cancel_req(cqr); dasd_cancel_req(cqr);
/* wait (non-interruptible) for final status */ /* wait (non-interruptible) for final status */
wait_event(wait_q, _wait_for_wakeup(cqr)); wait_event(generic_waitq, _wait_for_wakeup(cqr));
} }
rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO; rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
return rc; return rc;
@@ -1466,7 +1467,6 @@ static inline int _dasd_term_running_cqr(struct dasd_device *device)
int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
{ {
wait_queue_head_t wait_q;
struct dasd_device *device; struct dasd_device *device;
int rc; int rc;
@@ -1478,9 +1478,8 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
return rc; return rc;
} }
init_waitqueue_head (&wait_q);
cqr->callback = dasd_wakeup_cb; cqr->callback = dasd_wakeup_cb;
cqr->callback_data = (void *) &wait_q; cqr->callback_data = (void *) &generic_waitq;
cqr->status = DASD_CQR_QUEUED; cqr->status = DASD_CQR_QUEUED;
list_add(&cqr->devlist, &device->ccw_queue); list_add(&cqr->devlist, &device->ccw_queue);
@@ -1489,7 +1488,7 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
spin_unlock_irq(get_ccwdev_lock(device->cdev)); spin_unlock_irq(get_ccwdev_lock(device->cdev));
wait_event(wait_q, _wait_for_wakeup(cqr)); wait_event(generic_waitq, _wait_for_wakeup(cqr));
/* Request status is either done or failed. */ /* Request status is either done or failed. */
rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO; rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
@@ -2430,6 +2429,7 @@ static int __init dasd_init(void)
init_waitqueue_head(&dasd_init_waitq); init_waitqueue_head(&dasd_init_waitq);
init_waitqueue_head(&dasd_flush_wq); init_waitqueue_head(&dasd_flush_wq);
init_waitqueue_head(&generic_waitq);
/* register 'common' DASD debug area, used for all DBF_XXX calls */ /* register 'common' DASD debug area, used for all DBF_XXX calls */
dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long)); dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));

View File

@@ -549,7 +549,6 @@ raw3270_start_init(struct raw3270 *rp, struct raw3270_view *view,
struct raw3270_request *rq) struct raw3270_request *rq)
{ {
unsigned long flags; unsigned long flags;
wait_queue_head_t wq;
int rc; int rc;
#ifdef CONFIG_TN3270_CONSOLE #ifdef CONFIG_TN3270_CONSOLE
@@ -566,20 +565,20 @@ raw3270_start_init(struct raw3270 *rp, struct raw3270_view *view,
return rq->rc; return rq->rc;
} }
#endif #endif
init_waitqueue_head(&wq);
rq->callback = raw3270_wake_init; rq->callback = raw3270_wake_init;
rq->callback_data = &wq; rq->callback_data = &raw3270_wait_queue;
spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags); spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags);
rc = __raw3270_start(rp, view, rq); rc = __raw3270_start(rp, view, rq);
spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags); spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags);
if (rc) if (rc)
return rc; return rc;
/* Now wait for the completion. */ /* Now wait for the completion. */
rc = wait_event_interruptible(wq, raw3270_request_final(rq)); rc = wait_event_interruptible(raw3270_wait_queue,
raw3270_request_final(rq));
if (rc == -ERESTARTSYS) { /* Interrupted by a signal. */ if (rc == -ERESTARTSYS) { /* Interrupted by a signal. */
raw3270_halt_io(view->dev, rq); raw3270_halt_io(view->dev, rq);
/* No wait for the halt to complete. */ /* No wait for the halt to complete. */
wait_event(wq, raw3270_request_final(rq)); wait_event(raw3270_wait_queue, raw3270_request_final(rq));
return -ERESTARTSYS; return -ERESTARTSYS;
} }
return rq->rc; return rq->rc;

View File

@@ -40,7 +40,7 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
put_online_cpus(); put_online_cpus();
} }
static void sclp_cpu_change_notify(struct work_struct *work) static void __ref sclp_cpu_change_notify(struct work_struct *work)
{ {
smp_rescan_cpus(); smp_rescan_cpus();
} }

View File

@@ -71,9 +71,6 @@ static struct list_head sclp_vt220_outqueue;
/* Number of requests in outqueue */ /* Number of requests in outqueue */
static int sclp_vt220_outqueue_count; static int sclp_vt220_outqueue_count;
/* Wait queue used to delay write requests while we've run out of buffers */
static wait_queue_head_t sclp_vt220_waitq;
/* Timer used for delaying write requests to merge subsequent messages into /* Timer used for delaying write requests to merge subsequent messages into
* a single buffer */ * a single buffer */
static struct timer_list sclp_vt220_timer; static struct timer_list sclp_vt220_timer;
@@ -133,7 +130,6 @@ sclp_vt220_process_queue(struct sclp_vt220_request *request)
} while (request && __sclp_vt220_emit(request)); } while (request && __sclp_vt220_emit(request));
if (request == NULL && sclp_vt220_flush_later) if (request == NULL && sclp_vt220_flush_later)
sclp_vt220_emit_current(); sclp_vt220_emit_current();
wake_up(&sclp_vt220_waitq);
/* Check if the tty needs a wake up call */ /* Check if the tty needs a wake up call */
if (sclp_vt220_tty != NULL) { if (sclp_vt220_tty != NULL) {
tty_wakeup(sclp_vt220_tty); tty_wakeup(sclp_vt220_tty);
@@ -383,7 +379,7 @@ sclp_vt220_timeout(unsigned long data)
*/ */
static int static int
__sclp_vt220_write(const unsigned char *buf, int count, int do_schedule, __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
int convertlf, int may_schedule) int convertlf, int may_fail)
{ {
unsigned long flags; unsigned long flags;
void *page; void *page;
@@ -395,15 +391,14 @@ __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
overall_written = 0; overall_written = 0;
spin_lock_irqsave(&sclp_vt220_lock, flags); spin_lock_irqsave(&sclp_vt220_lock, flags);
do { do {
/* Create a sclp output buffer if none exists yet */ /* Create an sclp output buffer if none exists yet */
if (sclp_vt220_current_request == NULL) { if (sclp_vt220_current_request == NULL) {
while (list_empty(&sclp_vt220_empty)) { while (list_empty(&sclp_vt220_empty)) {
spin_unlock_irqrestore(&sclp_vt220_lock, flags); spin_unlock_irqrestore(&sclp_vt220_lock, flags);
if (in_interrupt() || !may_schedule) if (may_fail)
sclp_sync_wait(); goto out;
else else
wait_event(sclp_vt220_waitq, sclp_sync_wait();
!list_empty(&sclp_vt220_empty));
spin_lock_irqsave(&sclp_vt220_lock, flags); spin_lock_irqsave(&sclp_vt220_lock, flags);
} }
page = (void *) sclp_vt220_empty.next; page = (void *) sclp_vt220_empty.next;
@@ -437,6 +432,7 @@ __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
add_timer(&sclp_vt220_timer); add_timer(&sclp_vt220_timer);
} }
spin_unlock_irqrestore(&sclp_vt220_lock, flags); spin_unlock_irqrestore(&sclp_vt220_lock, flags);
out:
return overall_written; return overall_written;
} }
@@ -520,19 +516,11 @@ sclp_vt220_close(struct tty_struct *tty, struct file *filp)
* character to the tty device. If the kernel uses this routine, * character to the tty device. If the kernel uses this routine,
* it must call the flush_chars() routine (if defined) when it is * it must call the flush_chars() routine (if defined) when it is
* done stuffing characters into the driver. * done stuffing characters into the driver.
*
* NOTE: include/linux/tty_driver.h specifies that a character should be
* ignored if there is no room in the queue. This driver implements a different
* semantic in that it will block when there is no more room left.
*
* FIXME: putchar can currently be called from BH and other non blocking
* handlers so this semantic isn't a good idea.
*/ */
static int static int
sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch) sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch)
{ {
__sclp_vt220_write(&ch, 1, 0, 0, 1); return __sclp_vt220_write(&ch, 1, 0, 0, 1);
return 1;
} }
/* /*
@@ -653,7 +641,6 @@ static int __init __sclp_vt220_init(void)
spin_lock_init(&sclp_vt220_lock); spin_lock_init(&sclp_vt220_lock);
INIT_LIST_HEAD(&sclp_vt220_empty); INIT_LIST_HEAD(&sclp_vt220_empty);
INIT_LIST_HEAD(&sclp_vt220_outqueue); INIT_LIST_HEAD(&sclp_vt220_outqueue);
init_waitqueue_head(&sclp_vt220_waitq);
init_timer(&sclp_vt220_timer); init_timer(&sclp_vt220_timer);
sclp_vt220_current_request = NULL; sclp_vt220_current_request = NULL;
sclp_vt220_buffered_chars = 0; sclp_vt220_buffered_chars = 0;

View File

@@ -231,6 +231,9 @@ struct tape_device {
/* Request queue. */ /* Request queue. */
struct list_head req_queue; struct list_head req_queue;
/* Request wait queue. */
wait_queue_head_t wait_queue;
/* Each tape device has (currently) two minor numbers. */ /* Each tape device has (currently) two minor numbers. */
int first_minor; int first_minor;

View File

@@ -179,11 +179,11 @@ tapeblock_requeue(struct work_struct *work) {
tapeblock_end_request(req, -EIO); tapeblock_end_request(req, -EIO);
continue; continue;
} }
blkdev_dequeue_request(req);
nr_queued++;
spin_unlock_irq(&device->blk_data.request_queue_lock); spin_unlock_irq(&device->blk_data.request_queue_lock);
rc = tapeblock_start_request(device, req); rc = tapeblock_start_request(device, req);
spin_lock_irq(&device->blk_data.request_queue_lock); spin_lock_irq(&device->blk_data.request_queue_lock);
blkdev_dequeue_request(req);
nr_queued++;
} }
spin_unlock_irq(&device->blk_data.request_queue_lock); spin_unlock_irq(&device->blk_data.request_queue_lock);
atomic_set(&device->blk_data.requeue_scheduled, 0); atomic_set(&device->blk_data.requeue_scheduled, 0);

View File

@@ -449,6 +449,7 @@ tape_alloc_device(void)
INIT_LIST_HEAD(&device->req_queue); INIT_LIST_HEAD(&device->req_queue);
INIT_LIST_HEAD(&device->node); INIT_LIST_HEAD(&device->node);
init_waitqueue_head(&device->state_change_wq); init_waitqueue_head(&device->state_change_wq);
init_waitqueue_head(&device->wait_queue);
device->tape_state = TS_INIT; device->tape_state = TS_INIT;
device->medium_state = MS_UNKNOWN; device->medium_state = MS_UNKNOWN;
*device->modeset_byte = 0; *device->modeset_byte = 0;
@@ -954,21 +955,19 @@ __tape_wake_up(struct tape_request *request, void *data)
int int
tape_do_io(struct tape_device *device, struct tape_request *request) tape_do_io(struct tape_device *device, struct tape_request *request)
{ {
wait_queue_head_t wq;
int rc; int rc;
init_waitqueue_head(&wq);
spin_lock_irq(get_ccwdev_lock(device->cdev)); spin_lock_irq(get_ccwdev_lock(device->cdev));
/* Setup callback */ /* Setup callback */
request->callback = __tape_wake_up; request->callback = __tape_wake_up;
request->callback_data = &wq; request->callback_data = &device->wait_queue;
/* Add request to request queue and try to start it. */ /* Add request to request queue and try to start it. */
rc = __tape_start_request(device, request); rc = __tape_start_request(device, request);
spin_unlock_irq(get_ccwdev_lock(device->cdev)); spin_unlock_irq(get_ccwdev_lock(device->cdev));
if (rc) if (rc)
return rc; return rc;
/* Request added to the queue. Wait for its completion. */ /* Request added to the queue. Wait for its completion. */
wait_event(wq, (request->callback == NULL)); wait_event(device->wait_queue, (request->callback == NULL));
/* Get rc from request */ /* Get rc from request */
return request->rc; return request->rc;
} }
@@ -989,20 +988,19 @@ int
tape_do_io_interruptible(struct tape_device *device, tape_do_io_interruptible(struct tape_device *device,
struct tape_request *request) struct tape_request *request)
{ {
wait_queue_head_t wq;
int rc; int rc;
init_waitqueue_head(&wq);
spin_lock_irq(get_ccwdev_lock(device->cdev)); spin_lock_irq(get_ccwdev_lock(device->cdev));
/* Setup callback */ /* Setup callback */
request->callback = __tape_wake_up_interruptible; request->callback = __tape_wake_up_interruptible;
request->callback_data = &wq; request->callback_data = &device->wait_queue;
rc = __tape_start_request(device, request); rc = __tape_start_request(device, request);
spin_unlock_irq(get_ccwdev_lock(device->cdev)); spin_unlock_irq(get_ccwdev_lock(device->cdev));
if (rc) if (rc)
return rc; return rc;
/* Request added to the queue. Wait for its completion. */ /* Request added to the queue. Wait for its completion. */
rc = wait_event_interruptible(wq, (request->callback == NULL)); rc = wait_event_interruptible(device->wait_queue,
(request->callback == NULL));
if (rc != -ERESTARTSYS) if (rc != -ERESTARTSYS)
/* Request finished normally. */ /* Request finished normally. */
return request->rc; return request->rc;
@@ -1015,7 +1013,7 @@ tape_do_io_interruptible(struct tape_device *device,
/* Wait for the interrupt that acknowledges the halt. */ /* Wait for the interrupt that acknowledges the halt. */
do { do {
rc = wait_event_interruptible( rc = wait_event_interruptible(
wq, device->wait_queue,
(request->callback == NULL) (request->callback == NULL)
); );
} while (rc == -ERESTARTSYS); } while (rc == -ERESTARTSYS);

View File

@@ -40,7 +40,13 @@ typedef __signed__ long saddr_t;
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
typedef u64 dma64_addr_t;
#ifdef __s390x__
/* DMA addresses come in 32-bit and 64-bit flavours. */
typedef u64 dma_addr_t;
#else
typedef u32 dma_addr_t; typedef u32 dma_addr_t;
#endif
#ifndef __s390x__ #ifndef __s390x__
typedef union { typedef union {