/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <asm/pgtable.h>
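
/*
 * Allocate and initialise a one-page bio for swap I/O: look up the swap
 * device for the entry encoded in @index, translate the swap offset into
 * a device sector, and point the single bio_vec at @page.  Returns NULL
 * if the bio allocation fails.
 */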
static struct bio *get_swap_bio(gfp_t gfp_flags, pgoff_t index,
				struct page *page, bio_end_io_t end_io)
{
	struct bio *bio;

	bio = bio_alloc(gfp_flags, 1);
	if (bio) {
		struct swap_info_struct *sis;
		swp_entry_t entry = { .val = index, };

		sis = get_swap_info_struct(swp_type(entry));
		bio->bi_sector = map_swap_page(sis, swp_offset(entry)) *
					(PAGE_SIZE >> 9);
		bio->bi_bdev = sis->bdev;
		bio->bi_io_vec[0].bv_page = page;
		bio->bi_io_vec[0].bv_len = PAGE_SIZE;
		bio->bi_io_vec[0].bv_offset = 0;
		bio->bi_vcnt = 1;
		bio->bi_idx = 0;
		bio->bi_size = PAGE_SIZE;
		bio->bi_end_io = end_io;
	}
	return bio;
}
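
/*
 * Completion handler for bios submitted by swap_writepage().
 */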
static int end_swap_bio_write(struct bio *bio, unsigned int bytes_done, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct page *page = bio->bi_io_vec[0].bv_page;
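
	/*
	 * The end_io handler can be called for partial completions; a
	 * non-zero bi_size means more of the bio is still outstanding,
	 * so return 1 and wait for the final call.
	 */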
	if (bio->bi_size)
		return 1;

	if (!uptodate) {
		SetPageError(page);
		/*
		 * We failed to write the page out to swap-space.
		 * Re-dirty the page in order to avoid it being reclaimed.
		 * Also print a dire warning that things will go BAD (tm)
		 * very quickly.
		 *
		 * Also clear PG_reclaim to avoid rotate_reclaimable_page()
		 */
		set_page_dirty(page);
		printk(KERN_ALERT "Write-error on swap-device (%u:%u:%Lu)\n",
				imajor(bio->bi_bdev->bd_inode),
				iminor(bio->bi_bdev->bd_inode),
				(unsigned long long)bio->bi_sector);
		ClearPageReclaim(page);
	}
	end_page_writeback(page);
	bio_put(bio);
	return 0;
}
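
/*
 * Completion handler for bios submitted by swap_readpage() (and, under
 * CONFIG_SOFTWARE_SUSPEND, by rw_swap_page_sync()).
 */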
int end_swap_bio_read(struct bio *bio, unsigned int bytes_done, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct page *page = bio->bi_io_vec[0].bv_page;

	if (bio->bi_size)
		return 1;

	if (!uptodate) {
		SetPageError(page);
		ClearPageUptodate(page);
		printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
				imajor(bio->bi_bdev->bd_inode),
				iminor(bio->bi_bdev->bd_inode),
				(unsigned long long)bio->bi_sector);
	} else {
		SetPageUptodate(page);
	}
	unlock_page(page);
	bio_put(bio);
	return 0;
}
/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
	struct bio *bio;
	int ret = 0, rw = WRITE;

	if (remove_exclusive_swap_page(page)) {
		unlock_page(page);
		goto out;
	}
	bio = get_swap_bio(GFP_NOIO, page_private(page), page,
				end_swap_bio_write);
	if (bio == NULL) {
		set_page_dirty(page);
		unlock_page(page);
		ret = -ENOMEM;
		goto out;
	}
	if (wbc->sync_mode == WB_SYNC_ALL)
		rw |= (1 << BIO_RW_SYNC);
	count_vm_event(PSWPOUT);
	set_page_writeback(page);
	unlock_page(page);
	submit_bio(rw, bio);
out:
	return ret;
}
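
/*
 * Read a swap page back in.  The page must be locked on entry; it is
 * unlocked by end_swap_bio_read() when the I/O completes (or here, on
 * allocation failure).
 */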
int swap_readpage(struct file *file, struct page *page)
{
	struct bio *bio;
	int ret = 0;

	BUG_ON(!PageLocked(page));
	ClearPageUptodate(page);
	bio = get_swap_bio(GFP_KERNEL, page_private(page), page,
				end_swap_bio_read);
	if (bio == NULL) {
		unlock_page(page);
		ret = -ENOMEM;
		goto out;
	}
	count_vm_event(PSWPIN);
	submit_bio(READ, bio);
out:
	return ret;
}
#ifdef CONFIG_SOFTWARE_SUSPEND
/*
 * A scruffy utility function to read or write an arbitrary swap page
 * and wait on the I/O.  The caller must have a ref on the page.
 *
 * We use end_swap_bio_read() even for writes, because it happens to do what
 * we want.
 */
int rw_swap_page_sync(int rw, swp_entry_t entry, struct page *page,
			struct bio **bio_chain)
{
	struct bio *bio;
	int ret = 0;
	int bio_rw;

	lock_page(page);

	bio = get_swap_bio(GFP_KERNEL, entry.val, page, end_swap_bio_read);
	if (bio == NULL) {
		unlock_page(page);
		ret = -ENOMEM;
		goto out;
	}
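
	/*
	 * With a NULL bio_chain the I/O is synchronous: mark the bio
	 * BIO_RW_SYNC, submit it and wait for the page to be unlocked by
	 * end_swap_bio_read().  With a bio_chain the bio is submitted
	 * asynchronously and linked onto the chain through bi_private so
	 * the caller can collect the whole batch later.
	 */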
	bio_rw = rw;
	if (!bio_chain)
		bio_rw |= (1 << BIO_RW_SYNC);
	if (bio_chain)
		bio_get(bio);
	submit_bio(bio_rw, bio);
	if (bio_chain == NULL) {
		wait_on_page_locked(page);

		if (!PageUptodate(page) || PageError(page))
			ret = -EIO;
	}
	if (bio_chain) {
		bio->bi_private = *bio_chain;
		*bio_chain = bio;
	}
out:
	return ret;
}
#endif