/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <asm/pgtable.h>
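
/*
 * Allocate and set up a bio for a single-page transfer to or from swap.
 * @index is the swp_entry_t value of the page (both callers pass
 * page_private(page)); it selects the swap area and, via map_swap_page(),
 * the starting sector on that area's block device.  Returns NULL if
 * bio_alloc() fails under @gfp_flags.
 */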
static struct bio *get_swap_bio(gfp_t gfp_flags, pgoff_t index,
                                struct page *page, bio_end_io_t end_io)
{
        struct bio *bio;

        bio = bio_alloc(gfp_flags, 1);
        if (bio) {
                struct swap_info_struct *sis;
                swp_entry_t entry = { .val = index, };

                sis = get_swap_info_struct(swp_type(entry));
                bio->bi_sector = map_swap_page(sis, swp_offset(entry)) *
                                        (PAGE_SIZE >> 9);
                bio->bi_bdev = sis->bdev;
                bio->bi_io_vec[0].bv_page = page;
                bio->bi_io_vec[0].bv_len = PAGE_SIZE;
                bio->bi_io_vec[0].bv_offset = 0;
                bio->bi_vcnt = 1;
                bio->bi_idx = 0;
                bio->bi_size = PAGE_SIZE;
                bio->bi_end_io = end_io;
        }
        return bio;
}
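
/*
 * Completion handler for swap-out bios: end writeback on the page and
 * release the bio.  The error path below keeps the page dirty so the
 * data is not lost when the write to the swap device fails.
 */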
static void end_swap_bio_write(struct bio *bio, int err)
{
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct page *page = bio->bi_io_vec[0].bv_page;

        if (!uptodate) {
                SetPageError(page);
                /*
                 * We failed to write the page out to swap-space.
                 * Re-dirty the page in order to avoid it being reclaimed.
                 * Also print a dire warning that things will go BAD (tm)
                 * very quickly.
                 *
                 * Also clear PG_reclaim to avoid rotate_reclaimable_page()
                 */
                set_page_dirty(page);
                printk(KERN_ALERT "Write-error on swap-device (%u:%u:%Lu)\n",
                                imajor(bio->bi_bdev->bd_inode),
                                iminor(bio->bi_bdev->bd_inode),
                                (unsigned long long)bio->bi_sector);
                ClearPageReclaim(page);
        }
        end_page_writeback(page);
        bio_put(bio);
}
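
/*
 * Completion handler for swap-in bios: mark the page up to date on
 * success, or flag an error on failure, then unlock the page so that
 * anyone waiting for the swap-in to finish can proceed.
 */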
void end_swap_bio_read(struct bio *bio, int err)
{
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct page *page = bio->bi_io_vec[0].bv_page;

        if (!uptodate) {
                SetPageError(page);
                ClearPageUptodate(page);
                printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
                                imajor(bio->bi_bdev->bd_inode),
                                iminor(bio->bi_bdev->bd_inode),
                                (unsigned long long)bio->bi_sector);
        } else {
                SetPageUptodate(page);
        }
        unlock_page(page);
        bio_put(bio);
}

/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
        struct bio *bio;
        int ret = 0, rw = WRITE;

        if (try_to_free_swap(page)) {
                unlock_page(page);
                goto out;
        }
        bio = get_swap_bio(GFP_NOIO, page_private(page), page,
                                end_swap_bio_write);
        if (bio == NULL) {
                set_page_dirty(page);
                unlock_page(page);
                ret = -ENOMEM;
                goto out;
        }
        if (wbc->sync_mode == WB_SYNC_ALL)
                rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
        count_vm_event(PSWPOUT);
        set_page_writeback(page);
        unlock_page(page);
        submit_bio(rw, bio);
out:
        return ret;
}
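
/*
 * Start reading a page back in from the swap device.  The page must be
 * locked and must not yet be up to date (both asserted below); completion
 * is reported asynchronously through end_swap_bio_read().
 */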
int swap_readpage(struct page *page)
{
        struct bio *bio;
        int ret = 0;

        VM_BUG_ON(!PageLocked(page));
        VM_BUG_ON(PageUptodate(page));
        bio = get_swap_bio(GFP_KERNEL, page_private(page), page,
                                end_swap_bio_read);
        if (bio == NULL) {
                unlock_page(page);
                ret = -ENOMEM;
                goto out;
        }
        count_vm_event(PSWPIN);
        submit_bio(READ, bio);
out:
        return ret;
}