/*
 * linux/kernel/power/swsusp.c
 *
 * This file provides code to write suspend image to swap and read it back.
 *
 * Copyright (C) 1998-2001 Gabor Kuti <seasons@fornax.hu>
 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@suse.cz>
 *
 * This file is released under the GPLv2.
 *
 * I'd like to thank the following people for their work:
 *
 * Pavel Machek <pavel@ucw.cz>:
 * Modifications, defectiveness pointing, being with me at the very beginning,
 * suspend to swap space, stop all tasks. Port to 2.4.18-ac and 2.5.17.
 *
 * Steve Doddi <dirk@loth.demon.co.uk>:
 * Support the possibility of hardware state restoring.
 *
 * Raph <grey.havens@earthling.net>:
 * Support for preserving states of network devices and virtual console
 * (including X and svgatextmode)
 *
 * Kurt Garloff <garloff@suse.de>:
 * Straightened the critical function in order to prevent compilers from
 * playing tricks with local variables.
 *
 * Andreas Mohr <a.mohr@mailto.de>
 *
 * Alex Badea <vampire@go.ro>:
 * Fixed runaway init
 *
 * Rafael J. Wysocki <rjw@sisk.pl>
 * Reworked the freeing of memory and the handling of swap
 *
 * More state savers are welcome. Especially for the scsi layer...
 *
 * For TODOs,FIXMEs also look in Documentation/power/swsusp.txt
 */
# include <linux/mm.h>
# include <linux/suspend.h>
# include <linux/spinlock.h>
# include <linux/kernel.h>
# include <linux/major.h>
# include <linux/swap.h>
# include <linux/pm.h>
# include <linux/swapops.h>
# include <linux/bootmem.h>
# include <linux/syscalls.h>
# include <linux/highmem.h>
2006-12-06 20:34:32 -08:00
# include <linux/time.h>
2007-05-06 14:50:47 -07:00
# include <linux/rbtree.h>
2009-03-31 15:23:37 -07:00
# include <linux/io.h>
2005-04-16 15:20:36 -07:00
# include "power.h"
2006-03-23 02:59:59 -08:00
int in_suspend __nosavedata = 0 ;
2005-04-16 15:20:36 -07:00
/**
2006-03-23 02:59:59 -08:00
* The following functions are used for tracing the allocated
* swap pages , so that they can be freed in case of an error .
2005-04-16 15:20:36 -07:00
*/
2006-01-06 00:13:05 -08:00
2007-05-06 14:50:47 -07:00
struct swsusp_extent {
struct rb_node node ;
unsigned long start ;
unsigned long end ;
} ;
2005-04-16 15:20:36 -07:00
2007-05-06 14:50:47 -07:00
static struct rb_root swsusp_extents = RB_ROOT ;
2006-01-06 00:13:05 -08:00
2007-05-06 14:50:47 -07:00
static int swsusp_extents_insert ( unsigned long swap_offset )
2006-01-06 00:13:05 -08:00
{
2007-05-06 14:50:47 -07:00
struct rb_node * * new = & ( swsusp_extents . rb_node ) ;
struct rb_node * parent = NULL ;
struct swsusp_extent * ext ;
/* Figure out where to put the new node */
while ( * new ) {
ext = container_of ( * new , struct swsusp_extent , node ) ;
parent = * new ;
if ( swap_offset < ext - > start ) {
/* Try to merge */
if ( swap_offset = = ext - > start - 1 ) {
ext - > start - - ;
return 0 ;
}
new = & ( ( * new ) - > rb_left ) ;
} else if ( swap_offset > ext - > end ) {
/* Try to merge */
if ( swap_offset = = ext - > end + 1 ) {
ext - > end + + ;
return 0 ;
}
new = & ( ( * new ) - > rb_right ) ;
} else {
/* It already is in the tree */
return - EINVAL ;
2006-01-06 00:13:05 -08:00
}
2005-04-16 15:20:36 -07:00
}
2007-05-06 14:50:47 -07:00
/* Add the new node and rebalance the tree. */
ext = kzalloc ( sizeof ( struct swsusp_extent ) , GFP_KERNEL ) ;
if ( ! ext )
return - ENOMEM ;
ext - > start = swap_offset ;
ext - > end = swap_offset ;
rb_link_node ( & ext - > node , parent , new ) ;
rb_insert_color ( & ext - > node , & swsusp_extents ) ;
2006-03-23 02:59:59 -08:00
return 0 ;
2006-01-06 00:13:05 -08:00
}
2005-04-16 15:20:36 -07:00
2007-05-06 14:50:47 -07:00
/**
* alloc_swapdev_block - allocate a swap page and register that it has
* been allocated , so that it can be freed in case of an error .
*/
sector_t alloc_swapdev_block ( int swap )
2006-01-06 00:13:05 -08:00
{
2006-03-23 02:59:59 -08:00
unsigned long offset ;
offset = swp_offset ( get_swap_page_of_type ( swap ) ) ;
if ( offset ) {
2007-05-06 14:50:47 -07:00
if ( swsusp_extents_insert ( offset ) )
2006-03-23 02:59:59 -08:00
swap_free ( swp_entry ( swap , offset ) ) ;
2006-12-06 20:34:10 -08:00
else
return swapdev_block ( swap , offset ) ;
2006-01-06 00:13:05 -08:00
}
2006-12-06 20:34:10 -08:00
return 0 ;
2006-01-06 00:13:05 -08:00
}
2005-04-16 15:20:36 -07:00
2007-05-06 14:50:47 -07:00
/**
* free_all_swap_pages - free swap pages allocated for saving image data .
* It also frees the extents used to register which swap entres had been
* allocated .
*/
void free_all_swap_pages ( int swap )
2006-01-06 00:13:05 -08:00
{
2007-05-06 14:50:47 -07:00
struct rb_node * node ;
while ( ( node = swsusp_extents . rb_node ) ) {
struct swsusp_extent * ext ;
unsigned long offset ;
ext = container_of ( node , struct swsusp_extent , node ) ;
rb_erase ( node , & swsusp_extents ) ;
for ( offset = ext - > start ; offset < = ext - > end ; offset + + )
swap_free ( swp_entry ( swap , offset ) ) ;
kfree ( ext ) ;
2005-04-16 15:20:36 -07:00
}
2006-01-06 00:13:05 -08:00
}
2007-05-06 14:50:47 -07:00
int swsusp_swap_in_use ( void )
{
return ( swsusp_extents . rb_node ! = NULL ) ;
}
2006-12-06 20:34:32 -08:00
/**
* swsusp_show_speed - print the time elapsed between two events represented by
* @ start and @ stop
*
* @ nr_pages - number of pages processed between @ start and @ stop
* @ msg - introductory message to print
*/
void swsusp_show_speed ( struct timeval * start , struct timeval * stop ,
unsigned nr_pages , char * msg )
{
s64 elapsed_centisecs64 ;
int centisecs ;
int k ;
int kps ;
elapsed_centisecs64 = timeval_to_ns ( stop ) - timeval_to_ns ( start ) ;
do_div ( elapsed_centisecs64 , NSEC_PER_SEC / 100 ) ;
centisecs = elapsed_centisecs64 ;
if ( centisecs = = 0 )
centisecs = 1 ; /* avoid div-by-zero */
k = nr_pages * ( PAGE_SIZE / 1024 ) ;
kps = ( k * 100 ) / centisecs ;
2007-12-08 02:09:43 +01:00
printk ( KERN_INFO " PM: %s %d kbytes in %d.%02d seconds (%d.%02d MB/s) \n " ,
msg , k ,
2006-12-06 20:34:32 -08:00
centisecs / 100 , centisecs % 100 ,
kps / 1000 , ( kps % 1000 ) / 10 ) ;
}
2008-10-26 20:52:15 +01:00
/*
* Platforms , like ACPI , may want us to save some memory used by them during
* hibernation and to restore the contents of this memory during the subsequent
* resume . The code below implements a mechanism allowing us to do that .
*/
struct nvs_page {
unsigned long phys_start ;
unsigned int size ;
void * kaddr ;
void * data ;
struct list_head node ;
} ;
static LIST_HEAD ( nvs_list ) ;
/**
* hibernate_nvs_register - register platform NVS memory region to save
* @ start - physical address of the region
* @ size - size of the region
*
* The NVS region need not be page - aligned ( both ends ) and we arrange
* things so that the data from page - aligned addresses in this region will
* be copied into separate RAM pages .
*/
int hibernate_nvs_register ( unsigned long start , unsigned long size )
{
struct nvs_page * entry , * next ;
while ( size > 0 ) {
unsigned int nr_bytes ;
entry = kzalloc ( sizeof ( struct nvs_page ) , GFP_KERNEL ) ;
if ( ! entry )
goto Error ;
list_add_tail ( & entry - > node , & nvs_list ) ;
entry - > phys_start = start ;
nr_bytes = PAGE_SIZE - ( start & ~ PAGE_MASK ) ;
entry - > size = ( size < nr_bytes ) ? size : nr_bytes ;
start + = entry - > size ;
size - = entry - > size ;
}
return 0 ;
Error :
list_for_each_entry_safe ( entry , next , & nvs_list , node ) {
list_del ( & entry - > node ) ;
kfree ( entry ) ;
}
return - ENOMEM ;
}
/**
* hibernate_nvs_free - free data pages allocated for saving NVS regions
*/
void hibernate_nvs_free ( void )
{
struct nvs_page * entry ;
list_for_each_entry ( entry , & nvs_list , node )
if ( entry - > data ) {
free_page ( ( unsigned long ) entry - > data ) ;
entry - > data = NULL ;
if ( entry - > kaddr ) {
iounmap ( entry - > kaddr ) ;
entry - > kaddr = NULL ;
}
}
}
/**
* hibernate_nvs_alloc - allocate memory necessary for saving NVS regions
*/
int hibernate_nvs_alloc ( void )
{
struct nvs_page * entry ;
list_for_each_entry ( entry , & nvs_list , node ) {
entry - > data = ( void * ) __get_free_page ( GFP_KERNEL ) ;
if ( ! entry - > data ) {
hibernate_nvs_free ( ) ;
return - ENOMEM ;
}
}
return 0 ;
}
/**
* hibernate_nvs_save - save NVS memory regions
*/
void hibernate_nvs_save ( void )
{
struct nvs_page * entry ;
printk ( KERN_INFO " PM: Saving platform NVS memory \n " ) ;
list_for_each_entry ( entry , & nvs_list , node )
if ( entry - > data ) {
entry - > kaddr = ioremap ( entry - > phys_start , entry - > size ) ;
memcpy ( entry - > data , entry - > kaddr , entry - > size ) ;
}
}
/**
* hibernate_nvs_restore - restore NVS memory regions
*
* This function is going to be called with interrupts disabled , so it
* cannot iounmap the virtual addresses used to access the NVS region .
*/
void hibernate_nvs_restore ( void )
{
struct nvs_page * entry ;
printk ( KERN_INFO " PM: Restoring platform NVS memory \n " ) ;
list_for_each_entry ( entry , & nvs_list , node )
if ( entry - > data )
memcpy ( entry - > kaddr , entry - > data , entry - > size ) ;
}